column        type           min  max
commit        stringlengths  40   40
subject       stringlengths  1    3.25k
old_file      stringlengths  4    311
new_file      stringlengths  4    311
old_contents  stringlengths  0    26.3k
lang          stringclasses  3 values
proba         float64        0    1
diff          stringlengths  0    7.82k
fd87c6225f9f6be19b651d700644829430cbda77
fix typo
tests/test_detector_spacy.py
tests/test_detector_spacy.py
import sys
import unittest

from scrubadub import Scrubber
from scrubadub.filth import NameFilth, OrganizationFilth, LocationFilth, Filth

from base import BaseTestCase


class NamedEntityTestCase(unittest.TestCase, BaseTestCase):
    """
    Tests whether the detector is performing correctly from a function point of view.
    For accuracy tests use .benchmark_accuracy instead
    """
    def setUp(self):
        unsupported_python_version = (sys.version_info.major, sys.version_info.minor) < (3, 6)
        unsupported_spacy_version = False
        try:
            from scrubadub.detectors.spacy import SpacyEntityDetector
            SpacyEntityDetector.check_spacy_version()
        except ImportError:
            unsupported_spacy_version = True

        if unsupported_python_version:
            self.skipTest(
                "Named entity detector not supported for python<3.6"
            )
        elif unsupported_spacy_version:
            self.skipTest(
                "Need spacy version >= 3"
            )
        else:
            from scrubadub.detectors.spacy import SpacyEntityDetector
            self.detector = SpacyEntityDetector(model='en_core_web_sm')

    def _assert_filth_type_and_pos(self, doc_list, beg_end_list, filth_class):
        doc_names = [str(x) for x in range(len(doc_list))]
        filth_list = list(self.detector.iter_filth_documents(document_list=doc_names, document_names=doc_list))
        for filth, beg_end in zip(filth_list, beg_end_list):
            self.assertIsInstance(filth, filth_class)
            self.assertEqual((filth.beg, filth.end), beg_end)

    def test_names(self):
        doc_list = ["John is a cat",
                    "When was Maria born?",
                    "john is a cat",
                    "when was maria born"]
        beg_end_list = [(0, 4), (9, 14), (0, 4), (9, 14)]
        self._assert_filth_type_and_pos(doc_list, beg_end_list, NameFilth)

    def test_organisations(self):
        doc_list = ["She started working for Apple this year",
                    "But used to work for Google"]
        beg_end_list = [(24, 29), (21, 27)]
        self._assert_filth_type_and_pos(doc_list, beg_end_list, OrganizationFilth)

    def test_location(self):
        self.detector.named_entities = {"GPE"}
        doc_list = ["London is a city in England"]
        beg_end_list = [(0, 6), (20, 27)]
        self._assert_filth_type_and_pos(doc_list, beg_end_list, LocationFilth)

    def test_unknown_entity(self):
        self.detector.named_entities = {"PERCENT"}
        doc_list = ["20% of the city is in ruins."]
        beg_end_list = [(0, 3)]
        self._assert_filth_type_and_pos(doc_list, beg_end_list, Filth)

    def test_wrong_model(self):
        """Test that it raises an error if user inputs invalid spacy model"""
        from scrubadub.detectors.spacy import SpacyEntityDetector
        with self.assertRaises(SystemExit):
            SpacyEntityDetector(model='not_a_valid_spacy_model')

    def test_iter_filth(self):
        doc = "John is a cat"
        output_iter_docs = list(self.detector.iter_filth_documents(document_list=[doc], documnet_names=["0"]))
        output_iter = list(self.detector.iter_filth(text=doc, document_name="0"))
        self.assertListEqual(output_iter, output_iter_docs)

    def test_compatibility_with_scrubber(self):
        doc_list = ["John is a cat", "When was Maria born?"]
        result = ["{{NAME}} is a cat", "When was {{NAME}} born?"]
        s = Scrubber(detector_list=[self.detector])
        clean_docs = s.clean_documents(documents=doc_list)
        self.assertIsInstance(clean_docs, list)
        self.assertListEqual(result, clean_docs)
Python
0.999991
@@ -3267,10 +3267,10 @@ ocum -n e +n t_na
9a1eb2dbe37c13c82477ed5787eeb985994cac8f
add Python2 shebang to helper.py
scripts/helper.py
scripts/helper.py
# -*- coding: utf-8 -*-
"""
    scripts.init_webhook
    ~~~~~~~~~~~~~~~~~~~~

    A simple script to manage the webhook.

    :copyright: (c) 2016 by Lujeni.
    :license: BSD, see LICENSE for more details.
"""
import argparse
import sys

from trello import TrelloClient
from slugify import slugify

from matterllo.utils import config
from matterllo.utils import logger

SETTINGS = config()
LOGGING = logger()


def main():
    try:
        parser = argparse.ArgumentParser(description="Webhook helpers")
        parser.add_argument('--cleanup', dest='cleanup', action='store_true', help='delete webhook from your SETTINGS.')
        parser.add_argument('--update', dest='update', action='store_true', help='upsert webhook from your SETTINGS.')
        parser.add_argument('--init', dest='init', action='store_true', help='delete and create webhook from your SETTINGS.')
        args = parser.parse_args()
        if not args.cleanup and not args.update and not args.init:
            print parser.print_help()
            sys.exit(0)

        client = TrelloClient(api_key=SETTINGS['trello_api_key'], token=SETTINGS['trello_api_token'])
        trello_boards = client.list_boards()
        boards_name = [slugify(b['name']) for b in SETTINGS.get('boards', {}).values()]

        # cleanup part
        if args.cleanup or args.init:
            result = [h.delete() for h in client.list_hooks()]
            LOGGING.info('delete {} webhook'.format(len(result)))

        # update / init part
        if args.update or args.init:
            for board in trello_boards:
                board_name = slugify(board.name)
                if board_name not in boards_name:
                    continue

                LOGGING.info('try to create webhook board :: {}'.format(board_name))
                url = SETTINGS['callback_url'] + '/trelloCallbacks/'
                result = client.create_hook(url, board.id)
                LOGGING.info('create webhook board :: {} :: {}'.format(board_name, result))
    except Exception as e:
        LOGGING.error('unable init webhook :: {}'.format(e))
        sys.exit(1)


if __name__ == '__main__':
    main()
Python
0.000002
@@ -1,8 +1,31 @@ +#!/usr/bin/env python2%0A # -*- co
35ee18926743b6ab0356ef278da9cb14a3263246
Print field in output
jjvm.py
jjvm.py
#!/usr/bin/python

import argparse
import os
import struct
import sys

CP_STRUCT_SIZES = { 7:3, 10:5 }

###############
### CLASSES ###
###############

class MyParser(argparse.ArgumentParser):
    def error(self, message):
        sys.stderr.write('error: %s\n' % message)
        self.print_help()
        sys.exit(2)

###################
### SUBROUTINES ###
###################

def lenCpStruct(tag):
    if tag in CP_STRUCT_SIZES:
        return CP_STRUCT_SIZES[tag]
    else:
        return -1

############
### MAIN ###
############

parser = MyParser('Run bytecode in jjvm')
parser.add_argument('path', help='path to class')
args = parser.parse_args()

with open(args.path, "rb") as c:
    c.seek(8)
    cpCount = struct.unpack(">H", c.read(2))[0] - 1

    print "Constant pool count: %d" % cpCount;

    while cpCount >= 0:
        cpTag = ord(c.read(1))
        print "Got tag: %d" % cpTag
        cpStructSize = lenCpStruct(cpTag)
        if cpStructSize < 0:
            print "ERROR: cpStructSize %d for tag %d" % (cpStructSize, cpTag)
            sys.exit(1)

        print "Size: %d" % cpStructSize
        cpCount -= 1
        c.seek(cpStructSize - 1, os.SEEK_CUR)
Python
0.000279
@@ -732,16 +732,30 @@ )%5B0%5D - 1 +%0A cpIndex = 1 %0A%0A prin @@ -808,18 +808,24 @@ e cp +Index %3C= cp Count - %3E= 0 :%0A @@ -865,28 +865,40 @@ nt %22 -Got tag: %25d%22 %25 +Field %25d: %25d%22 %25 (cpIndex, cpTag +) %0A @@ -1048,24 +1048,26 @@ exit(1)%0A%0A + # print %22Size @@ -1100,15 +1100,15 @@ cp -Count - +Index + = 1%0A
78e4e0294673e71b552e2b793a2188e3f419206e
Update emon_AZURE_HA.py
azure/ha/emon_AZURE_HA.py
azure/ha/emon_AZURE_HA.py
#!/usr/bin/python

# F5 Networks - External Monitor: Azure HA
# https://github.com/ArtiomL/f5networks
# Artiom Lichtenstein
# v0.4, 04/08/2016

import json
import os
import requests
from signal import SIGKILL
from subprocess import call
import sys

# Log level to /var/log/ltm
intLogLevel = 2
strLogID = '[-v0.4.160804-] emon_AZURE_HA.py - '

# Azure RM Auth
strSubs = ''
strTenantID = ''
strAppID = ''
strPass = ''
strTokenEP = 'https://login.microsoftonline.com/%s/oauth2/token' % strTenantID
strMgmtURI = 'https://management.azure.com/'
strBearer = ''

# Logger command
strLogger = 'logger -p local0.error '


class clsExCodes:
    intArgs = 8
    intArmAuth = 4


def funLog(intMesLevel, strMessage):
    if intLogLevel >= intMesLevel:
        lstCmd = strLogger.split(' ')
        lstCmd.append(strLogID + strMessage)
        call(lstCmd)


def funARMAuth():
    objPayload = { 'grant_type': 'client_credentials', 'client_id': strAppID, 'client_secret': strPass, 'resource': strMgmtURI }
    try:
        objAuthResp = requests.post(url=strTokenEP, data=objPayload)
        dicAJSON = json.loads(objAuthResp.content)
        if 'access_token' in dicAJSON.keys():
            return dicAJSON['access_token']
    except requests.exceptions.RequestException as e:
        funLog(2, str(e))
    return 'BearERROR'


def funCurState():
    funLog(1, 'Current local state: ')


def funFailover():
    funLog(1, 'Azure failover...')


def main():
    if len(sys.argv) < 3:
        funLog(1, 'Not enough arguments!')
        sys.exit(clsExCodes.intArgs)

    # Remove IPv6/IPv4 compatibility prefix (LTM passes addresses in IPv6 format)
    strIP = sys.argv[1].strip(':f')
    strPort = sys.argv[2]
    # PID file
    strPFile = '_'.join(['/var/run/', os.path.basename(sys.argv[0]), strIP, strPort + '.pid'])
    # PID
    strPID = str(os.getpid())
    funLog(2, strPFile + ' ' + strPID)
    # Kill the last instance of this monitor if hung
    if os.path.isfile(strPFile):
        try:
            os.kill(int(file(strPFile, 'r').read()), SIGKILL)
            funLog(1, 'Killed the last hung instance of this monitor.')
        except OSError:
            pass

    # Record current PID
    file(strPFile, 'w').write(str(os.getpid()))

    # Health Monitor
    try:
        objResp = requests.head(''.join(['https://', strIP, ':', strPort]), verify = False)
        if objResp.status_code == 200:
            os.unlink(strPFile)
            # Any standard output stops the script from running. Clean up any temporary files before the standard output operation
            funLog(2, 'Peer: ' + strIP + ' is up.' )
            print 'UP'
            sys.exit()
    except requests.exceptions.RequestException as e:
        funLog(2, str(e))

    # Peer down, ARM action needed
    global strBearer
    strBearer = funARMAuth()
    funLog(2, 'ARM Bearer: ' + strBearer)
    if strBearer == 'BearERROR':
        funLog(1, 'ARM Auth Error!')
        sys.exit(clsExCodes.intArmAuth)

    funCurState()
    funFailover()
    os.unlink(strPFile)
    sys.exit(1)


if __name__ == '__main__':
    main()
Python
0.000001
@@ -358,24 +358,41 @@ %0AstrSubs = ' +%3CSubscription ID%3E '%0AstrTenantI @@ -392,24 +392,34 @@ TenantID = ' +%3CTenantID%3E '%0AstrAppID = @@ -416,24 +416,32 @@ strAppID = ' +%3CApp ID%3E '%0AstrPass = @@ -441,16 +441,26 @@ Pass = ' +%3CPassword%3E '%0AstrTok
6ef4d27f2c6fd1f6a5b48ff3e07e7613a3c9d49e
fix cloning query
django_sphinx_db/backend/models.py
django_sphinx_db/backend/models.py
import re

from django.conf import settings
from django.db import models, connections
from django.db.models.sql import Query, AND
from django.db.models.query import QuerySet

from django_sphinx_db.backend.sphinx.compiler import SphinxWhereNode, SphinxExtraWhere


def sphinx_escape(value):
    if type(value) not in (str, unicode):
        return value
    value = re.sub(r"([=<>()|!@~&/^$\-\"\\])", r'\\\1', value)
    value = re.sub(r'(SENTENCE|PARAGRAPH)', r'\\\1', value, flags=re.I)
    return value


class SphinxQuery(Query):
    _clonable = ('options', 'match', 'group_limit', 'group_order_by', 'with_meta')

    def __init__(self, *args, **kwargs):
        kwargs.setdefault('where', SphinxWhereNode)
        super(SphinxQuery, self).__init__(*args, **kwargs)

    def clone(self, klass=None, memo=None, **kwargs):
        query = super(SphinxQuery, self).clone(klass=None, memo=None, **kwargs)
        for attr_name in self._clonable:
            value = getattr(self, attr_name, None)
            if value:
                setattr(query, attr_name, value)
        return query


class SphinxQuerySet(QuerySet):
    def __init__(self, model, **kwargs):
        kwargs.setdefault('query', SphinxQuery(model))
        super(SphinxQuerySet, self).__init__(model, **kwargs)

    def using(self, alias):
        # Ignore the alias. This will allow the Django router to decide
        # what db receives the query. Otherwise, when dealing with related
        # models, Django tries to force all queries to the same database.
        # This is the right thing to do in cases of master/slave or sharding
        # but with Sphinx, we want all related queries to flow to Sphinx,
        # never another configured database.
        return self._clone()

    def with_meta(self):
        """ Allows to execute SHOW META immediately after main query."""
        clone = self._clone()
        setattr(clone.query, 'with_meta', True)
        return clone

    def filter(self, *args, **kwargs):
        """ String attributes can't be compared with = term, so they are
        replaced with MATCH('@field_name "value"')."""
        match_args = []
        for field_name, value in kwargs.items():
            try:
                if field_name.endswith('__exact'):
                    field_name = field_name[:-7]
                field = self.model._meta.get_field(field_name)
                if isinstance(field, models.CharField):
                    match_args.append(
                        '@%s "%s"' % (field.db_column, sphinx_escape(value)))
                    del kwargs[field_name]
            except models.FieldDoesNotExist:
                continue
        if match_args:
            match_expression = ' '.join(match_args)
            return self.match(match_expression).filter(*args, **kwargs)
        return super(SphinxQuerySet, self).filter(*args, **kwargs)

    def match(self, expression):
        """ Enables full-text searching in sphinx (MATCH expression)."""
        qs = self._clone()
        try:
            qs.query.match.add(expression)
        except AttributeError:
            qs.query.match = {expression}
        return qs

    def notequal(self, **kw):
        """ Support for <> term, NOT(@id=value) doesn't work."""
        qs = self._clone()
        where = []
        for field_name, value in kw.items():
            field = self.model._meta.get_field(field_name)
            if type(field) is SphinxField:
                col = '@%s' % field.attname
            else:
                col = field.db_column or field.attname
            value = field.get_prep_value(sphinx_escape(value))
            where.append('%s <> %s' % (col, value))
        qs.query.where.add(SphinxExtraWhere(where, []), AND)
        return qs

    def options(self, **kw):
        """ Setup OPTION clause for query."""
        qs = self._clone()
        try:
            qs.query.options.update(kw)
        except AttributeError:
            qs.query.options = kw
        return qs

    def group_by(self, *args, **kw):
        """ Adds GROUP BY clause to query.

        *args: field names or aliases to group by
        keyword group_limit: (GROUP <N> BY) int, limits number of group member to N
        keyword group_order_by: (WITHIN GROUP ORDER BY) string list, sets sort order
            within group in example: group_order_by=('-my_weight', 'title')
        """
        group_limit = kw.get('group_limit', 0)
        group_order_by = kw.get('group_order_by', ())
        qs = self._clone()
        qs.query.group_by = qs.query.group_by or []
        for field_name in args:
            if field_name not in qs.query.extra_select:
                field = self.model._meta.get_field_by_name(field_name)[0]
                qs.query.group_by.append(field.column)
            else:
                qs.query.group_by.append(field_name)
        qs.query.group_limit = group_limit
        qs.query.group_order_by = group_order_by
        return qs

    def _clone(self, klass=None, setup=False, **kwargs):
        """ Add support of cloning self.query.options."""
        result = super(SphinxQuerySet, self)._clone(klass, setup, **kwargs)
        return result

    def iterator(self):
        for row in super(SphinxQuerySet, self).iterator():
            yield row
        if getattr(self.query, 'with_meta', False):
            c = connections[settings.SPHINX_DATABASE_NAME].cursor()
            try:
                c.execute("SHOW META")
                self.meta = dict([c.fetchone()])
            except UnicodeDecodeError:
                self.meta = {}
            finally:
                c.close()


class SphinxManager(models.Manager):
    use_for_related_fields = True

    def get_query_set(self):
        # Determine which fields are sphinx fields (full-text data) and
        # defer loading them. Sphinx won't return them.
        # TODO: we probably need a way to keep these from being loaded
        # later if the attr is accessed.
        sphinx_fields = [field.name for field in self.model._meta.fields
                         if isinstance(field, SphinxField)]
        return SphinxQuerySet(self.model).defer(*sphinx_fields)

    def options(self, **kw):
        return self.get_query_set().options(**kw)

    def match(self, expression):
        return self.get_query_set().match(expression)

    def notequal(self, **kw):
        return self.get_query_set().notequal(**kw)

    def group_by(self, *args, **kw):
        return self.get_query_set().group_by(*args, **kw)


class SphinxField(models.TextField):
    pass


class SphinxModel(models.Model):
    class Meta:
        abstract = True

    objects = SphinxManager()
Python
0.000006
@@ -881,39 +881,40 @@ clone(klass= -None +klass , memo= -None +memo , **kwargs)%0A
c1e71c1dce83b5c5de28c69a8eea4634df599d52
Update worker state transitions
plugins/worker/server/__init__.py
plugins/worker/server/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

###############################################################################
#  Copyright Kitware Inc.
#
#  Licensed under the Apache License, Version 2.0 ( the "License" );
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
###############################################################################

import celery

from celery.result import AsyncResult

from girder import events, logger
from girder.constants import AccessType
from girder.models.model_base import ValidationException
from girder.plugins.jobs.constants import JobStatus
from girder.utility import setting_utilities
from girder.utility.model_importer import ModelImporter

from .constants import PluginSettings
from .utils import getWorkerApiUrl, jobInfoSpec

_celeryapp = None


class CustomJobStatus(object):
    """
    The custom job status flags for the worker.
    """
    FETCHING_INPUT = 820
    CONVERTING_INPUT = 821
    CONVERTING_OUTPUT = 822
    PUSHING_OUTPUT = 823
    CANCELING = 824

    valid_transitions = {
        FETCHING_INPUT: [JobStatus.RUNNING],
        CONVERTING_INPUT: [JobStatus.RUNNING, FETCHING_INPUT],
        CONVERTING_OUTPUT: [JobStatus.RUNNING],
        PUSHING_OUTPUT: [JobStatus.RUNNING, CONVERTING_OUTPUT],
        CANCELING: [JobStatus.INACTIVE, JobStatus.QUEUED, JobStatus.RUNNING],
        JobStatus.ERROR: [FETCHING_INPUT, CONVERTING_INPUT, CONVERTING_OUTPUT,
                          PUSHING_OUTPUT, CANCELING, JobStatus.QUEUED,
                          JobStatus.RUNNING],
        # The last two are allow for revoke called from outside Girder
        JobStatus.CANCELED: [CANCELING, JobStatus.QUEUED, JobStatus.RUNNING]
    }

    @classmethod
    def isValid(cls, status):
        return status in (
            cls.FETCHING_INPUT,
            cls.CONVERTING_INPUT,
            cls.CONVERTING_OUTPUT,
            cls.PUSHING_OUTPUT,
            cls.CANCELING
        )

    @classmethod
    def validTransitions(cls, status):
        return cls.valid_transitions.get(status)


def getCeleryApp():
    """
    Lazy loader for the celery app. Reloads anytime the settings are updated.
    """
    global _celeryapp

    if _celeryapp is None:
        settings = ModelImporter.model('setting')
        backend = settings.get(PluginSettings.BACKEND) or 'amqp://guest@localhost/'
        broker = settings.get(PluginSettings.BROKER) or 'amqp://guest@localhost/'
        _celeryapp = celery.Celery('girder_worker', backend=backend, broker=broker)
    return _celeryapp


def schedule(event):
    """
    This is bound to the "jobs.schedule" event, and will be triggered any time
    a job is scheduled. This handler will process any job that has the
    handler field set to "worker_handler".
    """
    job = event.info
    if job['handler'] == 'worker_handler':
        # Stop event propagation since we have taken care of scheduling.
        event.stopPropagation()

        task = job.get('celeryTaskName', 'girder_worker.run')

        # Send the task to celery
        asyncResult = getCeleryApp().send_task(
            task, job['args'], job['kwargs'], queue=job.get('celeryQueue'), headers={
                'jobInfoSpec': jobInfoSpec(job, job.get('token', None)),
                'apiUrl': getWorkerApiUrl()
            })

        # Set the job status to queued and record the task ID from celery.
        ModelImporter.model('job', 'jobs').updateJob(job, status=JobStatus.QUEUED, otherFields={
            'celeryTaskId': asyncResult.task_id
        })


def cancel(event):
    """
    This is bound to the "jobs.cancel" event, and will be triggered any time
    a job is canceled. This handler will process any job that has the
    handler field set to "worker_handler".
    """
    job = event.info
    if job['handler'] == 'worker_handler':
        # Stop event propagation and prevent default, we are using a custom state
        event.stopPropagation().preventDefault()

        celeryTaskId = job.get('celeryTaskId')

        if celeryTaskId is None:
            msg = ("Unable to cancel Celery task. Job '%s' doesn't have a Celery task id."
                   % job['_id'])
            logger.warn(msg)
            return

        if job['status'] not in [JobStatus.COMPLETE, JobStatus.ERROR]:
            # Set the job status to canceling
            ModelImporter.model('job', 'jobs').updateJob(job, status=CustomJobStatus.CANCELING)

            # Send the revoke request.
            asyncResult = AsyncResult(celeryTaskId)
            asyncResult.revoke()


@setting_utilities.validator({
    PluginSettings.BROKER,
    PluginSettings.BACKEND
})
def validateSettings(doc):
    """
    Handle plugin-specific system settings. Right now we don't do any
    validation for the broker or backend URL settings, but we do reinitialize
    the celery app object with the new values.
    """
    global _celeryapp
    _celeryapp = None


@setting_utilities.validator({
    PluginSettings.API_URL
})
def validateApiUrl(doc):
    val = doc['value']
    if val and not val.startswith('http://') and not val.startswith('https://'):
        raise ValidationException('API URL must start with http:// or https://.', 'value')


def validateJobStatus(event):
    """Allow our custom job status values."""
    if CustomJobStatus.isValid(event.info):
        event.preventDefault().addResponse(True)


def validTransitions(event):
    """Allow our custom job transitions."""
    if event.info['job']['handler'] == 'worker_handler':
        states = CustomJobStatus.validTransitions(event.info['status'])
        event.preventDefault().addResponse(states)


def load(info):
    events.bind('jobs.schedule', 'worker', schedule)
    events.bind('jobs.status.validate', 'worker', validateJobStatus)
    events.bind('jobs.status.validTransitions', 'worker', validTransitions)
    events.bind('jobs.cancel', 'worker', cancel)

    ModelImporter.model('job', 'jobs').exposeFields(
        AccessType.SITE_ADMIN, {'celeryTaskId', 'celeryQueue'})
Python
0.000001
@@ -1474,16 +1474,111 @@ ons = %7B%0A + JobStatus.QUEUED: %5BJobStatus.INACTIVE%5D,%0A JobStatus.RUNNING: %5BJobStatus.QUEUED%5D,%0A @@ -2095,16 +2095,18 @@ re allow +ed for rev @@ -2212,16 +2212,80 @@ RUNNING%5D +,%0A JobStatus.SUCCESS: %5BJobStatus.RUNNING, PUSHING_OUTPUT%5D %0A %7D%0A%0A
43f502122ae617bae1c63d44692d590fbb5d53ab
fix weights
scripts/tester.py
scripts/tester.py
#!/usr/bin/env python

import itertools
import platform
import subprocess
import sys
import math


def normalize(x):
    denom = sum(x)
    return [e/denom for e in x]


def scale(x, a):
    return [e * a for e in x]


def argcmp(x, comp, predicate):
    idx = None
    val = None
    for i in xrange(len(x)):
        if not predicate(x[i]):
            continue
        if idx is None or comp(x[i], val):
            idx = i
            val = x[i]
    if idx is None:
        # couldn't find it
        raise Exception("no argmin satisfiying predicate")
    return idx


def argmin(x, predicate):
    return argcmp(x, lambda a, b: a < b, predicate)


def argmax(x, predicate):
    return argcmp(x, lambda a, b: a > b, predicate)


def allocate(nworkers, weights):
    approx = map(int, map(math.ceil, scale(weights, nworkers)))
    diff = sum(approx) - nworkers
    if diff > 0:
        while diff > 0:
            i = argmin(approx, predicate=lambda x: x > 0)
            approx[i] -= 1
            diff -= 1
    elif diff < 0:
        i = argmax(approx, lambda x: True)
        approx[i] += -diff
    acc = 0
    ret = []
    for x in approx:
        ret.append(range(acc, acc + x))
        acc += x
    return ret


def run(cmd):
    print >>sys.stderr, '[INFO] running command %s' % str(cmd)
    p = subprocess.Popen(cmd, stdin=open('/dev/null', 'r'), stdout=subprocess.PIPE)
    r = p.stdout.read()
    p.wait()
    return r


if __name__ == '__main__':
    NCORES = [1, 2, 4, 8, 16, 24, 32]
    WSET = [18]
    node = platform.node()
    if node == 'modis2':
        LOGGERS = [
            ('data.log', 1.),
            ('/data/scidb/001/2/stephentu/data.log', 1.),
            ('/data/scidb/001/3/stephentu/data.log', 1.),
        ]
    elif node == 'istc3':
        LOGGERS = [
            ('data.log', 2./3.),
            ('/f0/stephentu/data.log', 1.),
        ]
    else:
        print "unknown node", node
        assert False, "Unknown node!"

    weights = normalize([x[1] for x in LOGGERS])
    logfile_cmds = list(itertools.chain.from_iterable([['--logfile', f] for f, _ in LOGGERS]))

    for ncores, ws in itertools.product(NCORES, WSET):
        allocations = allocate(ncores, weights)
        alloc_cmds = list(
            itertools.chain.from_iterable([['--assignment', ','.join(map(str, alloc))] for alloc in allocations]))
        cmd = ['./persist_test'] + \
            logfile_cmds + \
            alloc_cmds + \
            ['--num-threads', str(ncores),
             '--strategy', 'epoch',
             '--writeset', str(ws),
             '--valuesize', '32']
        output = run(cmd)
        print output
Python
0.000006
@@ -1644,17 +1644,17 @@ a.log', -2 +1 ./3.),%0A @@ -1679,33 +1679,36 @@ entu/data.log', -1 +2./3 .),%0A %5D%0A else
2c6ccdacc2c4e54cf0a12618d60c963d9c67ef62
Fix for DjangoCMS 3.5: get_cms_setting
djangocms_page_sitemap/settings.py
djangocms_page_sitemap/settings.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals

from cms.sitemaps import CMSSitemap
from cms.utils import get_cms_setting
from django.conf import settings
from django.utils.translation import ugettext_lazy as _

PAGE_SITEMAP_CHANGEFREQ_DEFAULT_LIST = {
    'always': _('always'),
    'hourly': _('hourly'),
    'daily': _('daily'),
    'weekly': _('weekly'),
    'monthly': _('monthly'),
    'yearly': _('yearly'),
    'never': _('never'),
}

PAGE_SITEMAP_CHANGEFREQ_LIST = getattr(
    settings,
    'PAGE_SITEMAP_CHANGEFREQ_LIST',
    PAGE_SITEMAP_CHANGEFREQ_DEFAULT_LIST
)

PAGE_SITEMAP_DEFAULT_CHANGEFREQ = getattr(
    settings,
    'PAGE_SITEMAP_DEFAULT_CHANGEFREQ',
    CMSSitemap.changefreq
)

PAGE_SITEMAP_CACHE_DURATION = get_cms_setting('CACHE_DURATIONS')['menus']
Python
0.00069
@@ -141,16 +141,21 @@ ms.utils +.conf import
5ca7ffd1df650568b39ae18bfbeacacff573678f
Arguments need to be separated when implementing gyp-actions.
test/test.gyp
test/test.gyp
# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS.  All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.

# TODO(andrew): consider moving test_support to src/base/test.
{
  'includes': [
    '../build/common.gypi',
  ],
  'targets': [
    {
      'target_name': 'channel_transport',
      'type': 'static_library',
      'dependencies': [
        '<(DEPTH)/testing/gtest.gyp:gtest',
        '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
      ],
      'sources': [
        'channel_transport/channel_transport.cc',
        'channel_transport/include/channel_transport.h',
        'channel_transport/traffic_control_win.cc',
        'channel_transport/traffic_control_win.h',
        'channel_transport/udp_socket_manager_posix.cc',
        'channel_transport/udp_socket_manager_posix.h',
        'channel_transport/udp_socket_manager_wrapper.cc',
        'channel_transport/udp_socket_manager_wrapper.h',
        'channel_transport/udp_socket_posix.cc',
        'channel_transport/udp_socket_posix.h',
        'channel_transport/udp_socket_wrapper.cc',
        'channel_transport/udp_socket_wrapper.h',
        'channel_transport/udp_socket2_manager_win.cc',
        'channel_transport/udp_socket2_manager_win.h',
        'channel_transport/udp_socket2_win.cc',
        'channel_transport/udp_socket2_win.h',
        'channel_transport/udp_transport.h',
        'channel_transport/udp_transport_impl.cc',
        'channel_transport/udp_transport_impl.h',
      ],
    },
    {
      'target_name': 'test_support',
      'type': 'static_library',
      'include_dirs': [
        # TODO(kjellander): Remove this by making all includes use full paths.
        '.',
      ],
      'dependencies': [
        '<(DEPTH)/testing/gtest.gyp:gtest',
        '<(DEPTH)/testing/gmock.gyp:gmock',
        '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
      ],
      'all_dependent_settings': {
        'include_dirs': [
          # TODO(kjellander): Remove this by making all includes use full paths.
          '.',
        ],
      },
      'sources': [
        'test_suite.cc',
        'test_suite.h',
        'testsupport/android/root_path_android.cc',
        'testsupport/android/root_path_android_chromium.cc',
        'testsupport/fileutils.cc',
        'testsupport/fileutils.h',
        'testsupport/frame_reader.cc',
        'testsupport/frame_reader.h',
        'testsupport/frame_writer.cc',
        'testsupport/frame_writer.h',
        'testsupport/gtest_prod_util.h',
        'testsupport/gtest_disable.h',
        'testsupport/mock/mock_frame_reader.h',
        'testsupport/mock/mock_frame_writer.h',
        'testsupport/packet_reader.cc',
        'testsupport/packet_reader.h',
        'testsupport/perf_test.cc',
        'testsupport/perf_test.h',
        'testsupport/trace_to_stderr.cc',
        'testsupport/trace_to_stderr.h',
      ],
      'conditions': [
        # TODO(henrike): remove build_with_chromium==1 when the bots are using
        # Chromium's buildbots.
        ['build_with_chromium==1 and OS=="android" and gtest_target_type=="shared_library"', {
          'dependencies': [
            '<(DEPTH)/base/base.gyp:base',
          ],
          'sources!': [
            'testsupport/android/root_path_android.cc',
          ],
          # WebRTC tests use resource files for testing. These files are not
          # hosted in WebRTC. The script ensures that the needed resources
          # are downloaded. In stand alone WebRTC the script is called by
          # the DEPS file. In Chromium, i.e. here, the files are pulled down
          # only if tests requiring the resources are being built.
          'actions': [
            {
              'action_name': 'get_resources',
              'inputs': ['<(webrtc_root)/tools/update_resources.py'],
              'outputs': ['../../resources'],
              'action': ['python',
                         '<(webrtc_root)/tools/update_resources.py',
                         '-p ../../../'],
            }],
        }, {
          'sources!': [
            'testsupport/android/root_path_android_chromium.cc',
          ],
        }],
      ],
    },
    {
      # Depend on this target when you want to have test_support but also the
      # main method needed for gtest to execute!
      'target_name': 'test_support_main',
      'type': 'static_library',
      'dependencies': [
        'test_support',
      ],
      'sources': [
        'run_all_unittests.cc',
      ],
    },
    {
      # Depend on this target when you want to have test_support and a special
      # main for mac which will run your test on a worker thread and consume
      # events on the main thread. Useful if you want to access a webcam.
      # This main will provide all the scaffolding and objective-c black magic
      # for you. All you need to do is to implement a function in the
      # run_threaded_main_mac.h file (ImplementThisToRunYourTest).
      'target_name': 'test_support_main_threaded_mac',
      'type': 'static_library',
      'dependencies': [
        'test_support',
      ],
      'sources': [
        'testsupport/mac/run_threaded_main_mac.h',
        'testsupport/mac/run_threaded_main_mac.mm',
      ],
    },
    {
      'target_name': 'test_support_unittests',
      'type': 'executable',
      'dependencies': [
        'channel_transport',
        'test_support_main',
        '<(DEPTH)/testing/gmock.gyp:gmock',
        '<(DEPTH)/testing/gtest.gyp:gtest',
      ],
      'sources': [
        'channel_transport/udp_transport_unittest.cc',
        'channel_transport/udp_socket_manager_unittest.cc',
        'channel_transport/udp_socket_wrapper_unittest.cc',
        'testsupport/unittest_utils.h',
        'testsupport/fileutils_unittest.cc',
        'testsupport/frame_reader_unittest.cc',
        'testsupport/frame_writer_unittest.cc',
        'testsupport/packet_reader_unittest.cc',
        'testsupport/perf_test_unittest.cc',
      ],
      # Disable warnings to enable Win64 build, issue 1323.
      'msvs_disabled_warnings': [
        4267,  # size_t to int truncation.
      ],
    },
    {
      'target_name': 'buildbot_tests_scripts',
      'type': 'none',
      'copies': [
        {
          'destination': '<(PRODUCT_DIR)',
          'files': [
            'buildbot_tests.py',
            '<(DEPTH)/tools/e2e_quality/audio/run_audio_test.py',
          ],
        },
        {
          'destination': '<(PRODUCT_DIR)/perf',
          'files': [
            '<(DEPTH)/tools/perf/__init__.py',
            '<(DEPTH)/tools/perf/perf_utils.py',
          ],
        },
      ],
    },  # target buildbot_tests_scripts
  ],
}
Python
0.999987
@@ -4150,16 +4150,19 @@ %5B'../../ +../ resource @@ -4298,17 +4298,45 @@ '-p - +',%0A ' ../../..
f603e8b394ea2b3ed9329b6948119970eb6aaa46
add test for transition
lbworkflow/tests/test_transition.py
lbworkflow/tests/test_transition.py
from django.contrib.auth import get_user_model

from lbworkflow.core.transition import TransitionExecutor
from lbworkflow.views.helper import user_wf_info_as_dict

from .test_base import BaseTests

User = get_user_model()


class TransitionExecutorTests(BaseTests):

    def test_submit(self):
        leave = self.leave
        instance = self.leave.pinstance
        leave.submit_process()

        # A1 will auto agree
        self.assertEqual(leave.pinstance.cur_activity.name, 'A2')
        self.assertEqual(leave.pinstance.get_operators_display(), 'tom')

        # A3 not auto agree
        workitem = instance.get_todo_workitem()
        transition = instance.get_agree_transition()
        TransitionExecutor(self.users['tom'], instance, workitem, transition).execute()
        self.assertEqual(leave.pinstance.cur_activity.name, 'A3')


class ViewTests(BaseTests):

    def setUp(self):
        super(ViewTests, self).setUp()
        self.leave.submit_process()

    def test_execute_transition(self):
        self.client.login(username='tom', password='password')

        leave = self.leave
        ctx = user_wf_info_as_dict(leave, self.users['tom'])
        transitions = ctx['transitions']
        transition = transitions[0]
        url = transition.get_app_url(ctx['workitem'])

        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
Python
0
@@ -39,16 +39,61 @@ er_model +%0Afrom django.core.urlresolvers import reverse %0A%0Afrom l @@ -234,16 +234,48 @@ aseTests +%0Afrom .leave.models import Leave %0A%0AUser = @@ -1047,111 +1047,8 @@ ()%0A%0A - def test_execute_transition(self):%0A self.client.login(username='tom', password='password')%0A%0A @@ -1131,16 +1131,17 @@ 'tom'%5D)%0A +%0A @@ -1217,16 +1217,32 @@ +self.transition_ url = tr @@ -1276,24 +1276,168 @@ orkitem'%5D)%0A%0A + self.workitem = ctx%5B'workitem'%5D%0A%0A self.client.login(username='tom', password='password')%0A%0A def test_execute_transition(self):%0A resp @@ -1455,16 +1455,32 @@ ent.get( +self.transition_ url)%0A @@ -1525,8 +1525,573 @@ , 200)%0A%0A + def test_execute_transition(self):%0A resp = self.client.post(self.transition_url)%0A self.assertRedirects(resp, '/wf/todo/')%0A leave = Leave.objects.get(pk=self.leave.pk)%0A self.assertEqual('A3', leave.pinstance.cur_activity.name)%0A%0A def test_simple_agree(self):%0A url = reverse('wf_agree')%0A resp = self.client.post('%25s?wi_id=%25s' %25 (url, self.workitem.pk))%0A self.assertRedirects(resp, '/wf/todo/')%0A leave = Leave.objects.get(pk=self.leave.pk)%0A self.assertEqual('A3', leave.pinstance.cur_activity.name)%0A
51d8d354f1a75b83becad880eec7cbac86d52e74
Convert test to pytest syntax
tests/test_generate_files.py
tests/test_generate_files.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
test_generate_files
-------------------

Test formerly known from a unittest residing in test_generate.py named
TestGenerateFiles.test_generate_files_nontemplated_exception
TestGenerateFiles.test_generate_files
"""

from __future__ import unicode_literals
import os
import io

import pytest

from cookiecutter import generate
from cookiecutter import exceptions
from cookiecutter import utils


@pytest.fixture(scope="function")
def clean_system_remove_additional_folders(request, clean_system):
    """
    Use the global clean_system fixture and run additional teardown code to
    remove some special folders.

    For a better understanding - order of fixture calls:
    clean_system setup code
    clean_system_remove_additional_folders setup code
    clean_system_remove_additional_folders teardown code
    clean_system teardown code
    """
    def remove_additional_folders():
        if os.path.exists('inputpizzä'):
            utils.rmtree('inputpizzä')
        if os.path.exists('inputgreen'):
            utils.rmtree('inputgreen')
        if os.path.exists('inputbinary_files'):
            utils.rmtree('inputbinary_files')
        if os.path.exists('tests/custom_output_dir'):
            utils.rmtree('tests/custom_output_dir')
        if os.path.exists('inputpermissions'):
            utils.rmtree('inputpermissions')
    request.addfinalizer(remove_additional_folders)


@pytest.mark.usefixtures("clean_system_remove_additional_folders")
def test_generate_files_nontemplated_exception():
    with pytest.raises(exceptions.NonTemplatedInputDirException):
        generate.generate_files(
            context={'cookiecutter': {'food': 'pizza'}},
            repo_dir='tests/test-generate-files-nontemplated'
        )


def test_generate_files(self):
    generate.generate_files(
        context={
            'cookiecutter': {'food': 'pizzä'}
        },
        repo_dir='tests/test-generate-files'
    )

    self.assertTrue(os.path.isfile('inputpizzä/simple.txt'))
    simple_text = io.open('inputpizzä/simple.txt', 'rt', encoding='utf-8').read()
    self.assertEqual(simple_text, u'I eat pizzä')
Python
0.999999
@@ -1672,16 +1672,33 @@ ontext=%7B +%0A 'cookiec @@ -1722,16 +1722,29 @@ 'pizza'%7D +%0A %7D,%0A @@ -1812,16 +1812,83 @@ )%0A%0A%0A +@pytest.mark.usefixtures(%22clean_system_remove_additional_folders%22)%0A def test @@ -1903,20 +1903,16 @@ e_files( -self ):%0A g @@ -2065,28 +2065,62 @@ )%0A +%0A s -elf. +imple_file = 'inputpizz%C3%A4/simple.txt'%0A assert -True( + os.p @@ -2134,33 +2134,21 @@ ile( -'inputpizz%C3%A4/simple.txt')) +simple_file)%0A %0A @@ -2170,39 +2170,27 @@ io.open( -'inputpizz%C3%A4/ simple -.txt' +_file , 'rt', @@ -2222,25 +2222,15 @@ -self. assert -Equal( + simp @@ -2236,17 +2236,19 @@ ple_text -, + == u'I eat @@ -2250,14 +2250,13 @@ I eat pizz%C3%A4' -) %0A
c4963df740e82d476500d2d998b288d0213806ee
Allow searching in the authorization code admin.
src/commoner/promocodes/admin.py
src/commoner/promocodes/admin.py
from django.contrib import admin
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.forms.widgets import HiddenInput

from commoner.promocodes.models import PromoCode


class PromoCodeAdminForm(forms.ModelForm):

    code = forms.CharField(initial='', widget=HiddenInput())
    send_email = forms.BooleanField(label=_(u'Send invitation letter?'), required=False)

    def __init__(self, *args, **kwargs):
        # if not done here, unique_code_string is only loaded when admin is bootstrapped
        if 'instance' not in kwargs:
            kwargs['initial'] = {'code': PromoCode.objects.unique_code_string()}
        super(PromoCodeAdminForm, self).__init__(*args, **kwargs)

    def save(self, force_insert=False, force_update=False, commit=True):
        code = super(PromoCodeAdminForm, self).save(commit)
        if self.cleaned_data['send_email']:
            PromoCode.objects.send_invite_letter(code)
        return code

    class Meta:
        model = PromoCode


class PromoCodeAdmin(admin.ModelAdmin):

    form = PromoCodeAdminForm
    list_display = ('recipient', 'code', 'created', 'used')
    fields = ('code', 'recipient', 'expires', 'transaction_id', 'contribution_id', 'send_email',)
    ordering = ('-created',)

    # get the pretty admin boolean icons, still no filter abilities
    def used(self, object):
        return object.used
    used.short_description = _(u'Redeemed code')
    used.boolean = True


admin.site.register(PromoCode, PromoCodeAdmin)
Python
0
@@ -1299,24 +1299,96 @@ -created',)%0A + search_fields = ('recipient', 'transaction_id', 'contribution_id',)%0A %0A # g
a415ac43b49fd4485b4f4be5d1286efed8c432cf
Fix broken airship with no signing key
airship/__init__.py
airship/__init__.py
import os
import json
import time

from flask import Flask, render_template, request

from groundstation import logger

log = logger.getLogger(__name__)

# XXX We won't always be using the github adaptor!!
from groundstation.protocols.github.read_adaptor import GithubReadAdaptor
from groundstation.gref import Gref, Tip

import pygit2

from groundstation.utils import oid2hex
from groundstation.objects.root_object import RootObject
from groundstation.objects.update_object import UpdateObject


def jsonate(obj, escaped):
    jsonbody = json.dumps(obj)
    if escaped:
        jsonbody = jsonbody.replace("</", "<\\/")
    return jsonbody


def channels_json(station, escaped=False):
    channels = [{"name": channel} for channel in station.channels()]
    return jsonate(channels, escaped)


def grefs_json(station, channel, escaped=False):
    grefs = [gref.as_dict() for gref in station.grefs(channel)]
    return jsonate(grefs, escaped)


def make_airship(station):
    app = Flask(__name__)

    def set_signing_key(self, keyname):
        self.private_crypto_adaptor = \
            station.get_private_crypto_adaptor(keyname)

    app.set_signing_key = lambda key: set_signing_key(app, key)

    def _update_gref(gref, tips, parents):
        if app.private_crypto_adaptor:
            tips = map(lambda tip: Tip(tip.tip, app.private_crypto_adaptor.sign(tip.tip)), tips)
        station.update_gref(gref, tips, parents)

    @app.route("/")
    def index():
        return render_template("index.html",
                               channels_json=channels_json(station, True),
                               current_time=time.time())

    @app.route("/channels")
    def list_channels():
        return channels_json(station)

    @app.route("/grefs/<channel>")
    def list_grefs(channel):
        return grefs_json(station, channel)

    @app.route("/gref/<channel>/<path:identifier>")
    def fetch_gref(channel, identifier):
        crypto_adaptor = station.get_crypto_adaptor()
        adaptor = GithubReadAdaptor(station, channel)
        gref = Gref(station.store, channel, identifier)
        log.info("Trying to fetch channel: %s identifier: %s" % (channel, identifier))
        marshalled_thread = adaptor.get_issue(gref, crypto_adaptor=crypto_adaptor)
        root_obj = marshalled_thread["roots"].pop()
        root = root_obj.as_json()
        root["hash"] = oid2hex(pygit2.hash(root_obj.as_object()))
        response = []
        while marshalled_thread["thread"]:
            node = marshalled_thread["thread"].pop()
            data = json.loads(node.data)
            data["parents"] = list(node.parents)
            data["hash"] = oid2hex(pygit2.hash(node.as_object()))
            response.append(data)
        return jsonate({
            "content": response,
            "root": root,
            "tips": marshalled_thread["tips"],
            "signatures": marshalled_thread["signatures"]
        }, False)

    @app.route("/gref/<channel>/<path:identifier>", methods=['POST'])
    def update_gref(channel, identifier):
        # adaptor = github_protocol.GithubWriteAdaptor(station, channel)
        gref = Gref(station.store, channel, identifier)
        # Ugly type coercion
        user = request.form["user"]
        body = request.form["body"]
        parents = map(str, json.loads(request.form["parents"]))
        payload = {
            "type": "comment",
            "id": None,
            "body": body,
            "user": user
        }
        update_object = UpdateObject(parents, json.dumps(payload))
        oid = station.write(update_object.as_object())
        _update_gref(gref, [Tip(oid, "")], parents)
        return jsonate({"response": "ok"}, False)

    @app.route("/grefs/<channel>", methods=['PUT'])
    def create_gref(channel):
        def _write_object(obj):
            return station.write(obj.as_object())
        name = request.form["name"]
        protocol = request.form["protocol"]
        user = request.form["user"]
        body = request.form["body"]
        title = request.form["title"]
        gref = Gref(station.store, channel, name)
        root = RootObject(name, channel, protocol)
        root_oid = _write_object(root)
        _title = UpdateObject([root_oid],
                              json.dumps({
                                  "type": "title",
                                  "id": None,
                                  "body": title,
                                  "user": user
                              }))
        title_oid = _write_object(_title)
        _body = UpdateObject([title_oid],
                             json.dumps({
                                 "type": "body",
                                 "id": None,
                                 "body": body
                             }))
        body_oid = _write_object(_body)
        _update_gref(gref, [Tip(body_oid, "")], [])
        return ""

    return app
Python
0.000004
@@ -988,16 +988,48 @@ _name__) +%0A app.has_signing_key = False %0A%0A de @@ -1285,38 +1285,31 @@ if app. -private_crypto_adaptor +has_signing_key :%0A
1037524e5dbeb4482beb41b2d951690c77ce2316
Fix invalid syntax
lowfat/management/commands/load2018applications.py
lowfat/management/commands/load2018applications.py
import pandas as pd

from django.contrib.auth.models import User, BaseUserManager
from django.core.management.base import BaseCommand

from lowfat.models import Claimant


class Command(BaseCommand):
    help = "Import CSV with 2018 applications."

    def add_arguments(self, parser):
        parser.add_argument('csv', nargs='?', default='2018.csv')

    # pylint: disable=too-many-branches,too-many-locals
    def handle(self, *args, **options):
        fail_list = []
        success_list = []

        user_manager = BaseUserManager()

        data = pd.read_csv(options['csv'])
        for index, line in data.iterrows():  # pylint: disable=no-member,unused-variable
            try:
                received_offer = line['Fellow'] == 'Yes'

                jacs = line["Research Classification"][1:3]

                applicants_dict = {
                    "application_year": 2017,
                    "fellow": False,
                    "received_offer": received_offer,
                    "forenames": line["First name"],
                    "surname": line["Surname"],
                    "affiliation": line["Home institution"],
                    "department": line["Department"] if pd.notnull(line["Department"]) else "",
                    "group": line["Group within Department (if any)"] if pd.notnull(line["Group within Department (if any)"]) else "",
                    "career_stage_when_apply": line["Career stage"][6],
                    "job_title_when_apply": line["Job Title"],
                    "research_area": line["Area of work"],
                    "research_area_code": jacs,
                    "email": line["Email Address"],
                    "phone": line["Telephone number"],
                    "gender": line["Gender"][0] if pd.notnull(line["Gender"]) else 'R',
                    "home_country": "GB",
                    "home_city": "Unknow",
                    "funding": line["Which primary funding body/charity/organisation would you normally turn to if seeking financial support for your research/work?"],
                    "funding_notes": line["Any additional funders?"] if pd.notnull(line["Any additional funders?"]) else "",
                    "claimantship_grant": 3000 if received_offer else 0,
                    "institutional_website": line["Institutional web page"] if pd.notnull(line["Institutional web page"]) else "",
                    "website": line["Personal web page"] if pd.notnull(line["Personal web page"]) else "",
                    "orcid": line["ORCID"] if pd.notnull(line["ORCID"]) else "",
                    "google_scholar": line["Google Scholar"] if pd.notnull(line["Google Scholar"]) else "",
                    "github": line["GitHub"] if pd.notnull(line["GitHub"]) else "",
                    "gitlab": line["GitLab"] if pd.notnull(line["GitLab"]) else "",
                    "twitter": line["Twitter handle"] if pd.notnull(line["Twitter handle"]) else "",
                    "is_into_training": line["Have training in plans - added by AN"] == "Yes",
                    "carpentries_instructor": line["Carpentry instructor - added by AN"] == "Yes",
                    "research_software_engineer": if line["RSE - added by AN"] == "Yes",
                    "screencast_url": line["Application Screencast URL"] if pd.notnull(line["Application Screencast URL"]) else "",
                    "example_of_writing_url": line["Example of writing"] if pd.notnull(line["Example of writing"]) else "",
                }

                applicant = Claimant(**applicants_dict)
                applicant.save()
                success_list.append(index)

                if received_offer:
                    new_user = User.objects.create_user(
                        username=applicant.slug,
                        email=applicant.email,
                        password=user_manager.make_random_password(),
                        first_name=line["First name"],
                        last_name=line["Surname"]
                    )
                    applicant.user = new_user
                    applicant.save()
            except BaseException as exception:
                print("Error: {}\n{}\n{}".format(exception, line, 80 * "-"))
                fail_list.append(index)

        print(80 * "-")
        print("Success: {}".format(success_list))
        print("Fail: {}".format(fail_list))
Python
0.999586
@@ -3192,19 +3192,16 @@ gineer%22: - if line%5B%22R
3e504469c823fb3f655505444df78dc17f44ba07
Add support for port ranges on ASA
aclhound/targets/asa.py
aclhound/targets/asa.py
#!/usr/bin/env python2.7

# Copyright (C) 2014 Job Snijders <job@instituut.net>
#
# This file is part of ACLHound
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
#    this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from ipaddr import IPNetwork
from grako.contexts import Closure

#FIXME figure out extended versus standard access-lists
#FIXME deal with deny any any any which ASA compresses


def render(self, **kwargs):
    policy = self.data
    afi = kwargs['afi']
    config_blob = []

    def afi_match(host):
        if host == "any":
            return True
        elif IPNetwork(host).version == afi:
            return True
        else:
            return False

    for rule in policy:
        rule = rule[0]
        s_hosts = rule['source']['l3']['ip']
        d_hosts = rule['destination']['l3']['ip']
        logging = rule['keywords']['log']

        # deal with ICMP
        if "icmp" in rule['protocol']:
            policy = rule['protocol']['icmp']
            # FIXME this should happen in render or aclsemantics
            if not isinstance(policy, Closure):
                policy = [policy]
            # cycle through all ICMP related elements in the AST
            for entry in policy:
                for s_host in s_hosts:
                    if not afi_match(s_host):
                        continue
                    for d_host in d_hosts:
                        if not afi_match(d_host):
                            continue
                        if rule['action'] == "allow":
                            action = "permit"
                        else:
                            action = "deny"

                        extended = "extended " if afi == 4 else ""
                        yes_v6 = "ipv6 " if afi == 6 else ""
                        line = "%saccess-list %s %s%s icmp " \
                            % (yes_v6, self.name + "-v%s" % afi, extended, action)

                        if s_host == u'any':
                            line += "any "
                        elif IPNetwork(s_host).prefixlen in [32, 128]:
                            line += "host %s " % s_host.split('/')[0]
                        # IPv4 must be with netmask, IPv6 in CIDR notation
                        elif afi == 4:
                            line += "%s " % IPNetwork(s_host).with_netmask.replace('/', ' ')
                        else:
                            line += s_host + " "

                        if d_host == u'any':
                            line += "any "
                        elif IPNetwork(d_host).prefixlen in [32, 128]:
                            line += "host %s " % d_host.split('/')[0]
                        # IPv4 must be with netmask, IPv6 in CIDR notation
                        elif afi == 4:
                            line += "%s " % IPNetwork(d_host).with_netmask.replace('/', ' ')
                        else:
                            line += d_host + " "

                        if not entry == "any":
                            line += str(entry['icmp_type'])

                        if logging:
                            line += " log"

                        if line not in config_blob:
                            config_blob.append(line)

            # jump out of the loop because we have nothing to do with
            # L4 when doing ICMP
            continue

        # layer 3 and 4
        s_ports = rule['source']['l4']['ports']
        d_ports = rule['destination']['l4']['ports']

        for s_port in s_ports:
            for d_port in d_ports:
                for s_host in s_hosts:
                    if not afi_match(s_host):
                        continue
                    for d_host in d_hosts:
                        if not afi_match(d_host):
                            continue

                        extended = "extended " if afi == 4 else ""
                        yes_v6 = "ipv6 " if afi == 6 else ""
                        line = "%saccess-list %s %s" \
                            % (yes_v6, self.name + "-v%s" % afi, extended)

                        if rule['action'] == "allow":
                            action = "permit "
                        else:
                            action = "deny "
                        line += action

                        if rule['protocol'] == "any":
                            line += "ip "
                        else:
                            line += rule['protocol'] + " "

                        if s_host == u'any':
                            line += "any "
                        elif IPNetwork(s_host).prefixlen in [32, 128]:
                            line += "host %s " % s_host.split('/')[0]
                        # IPv4 must be with netmask, IPv6 in CIDR notation
                        elif afi == 4:
                            line += "%s " % IPNetwork(s_host).with_netmask.replace('/', ' ')
                        else:
                            line += s_host + " "

                        if not s_port == "any":
                            line += "eq %s " % str(s_port)

                        if d_host == u'any':
                            line += "any "
                        elif IPNetwork(d_host).prefixlen in [32, 128]:
                            line += "host %s " % d_host.split('/')[0]
                        # IPv4 must be with netmask, IPv6 in CIDR notation
                        elif afi == 4:
                            line += "%s " % IPNetwork(d_host).with_netmask.replace('/', ' ')
                        else:
                            line += d_host + " "

                        if d_port != u"any":
                            line += "eq %s" % str(d_port)

                        if logging:
                            line += " log"

                        if line not in config_blob:
                            config_blob.append(line)

    # add final deny any any at the end of each policy
    extended = "extended " if afi == 4 else ""
    yes_v6 = "ipv6 " if afi == 6 else ""
    line = "%saccess-list %s %sdeny ip any any" \
        % (yes_v6, self.name + "-v%s" % afi, extended)
    config_blob.append(line)

    return config_blob
Python
0
@@ -6199,24 +6199,152 @@ if +type(s_port) == tuple:%0A line += %22range %25s %25s %22 %25 (s_port%5B0%5D, s_port%5B1%5D)%0A elif not s_port = @@ -6959,19 +6959,149 @@ if +type( d_port - != u +) == tuple:%0A line += %22range %25s %25s%22 %25 (d_port%5B0%5D, d_port%5B1%5D)%0A elif not d_port == %22any
07764dba867b7da57e4134237aeaf65429b8a0ef
Fix problem with nan for social ID
lowfat/management/commands/load2018applications.py
lowfat/management/commands/load2018applications.py
import pandas as pd

from django.contrib.auth.models import User, BaseUserManager
from django.core.management.base import BaseCommand

from lowfat.models import Claimant


class Command(BaseCommand):
    help = "Import CSV with 2018 applications."

    def add_arguments(self, parser):
        parser.add_argument('csv', nargs='?', default='2018.csv')

    def handle(self, *args, **options):
        fail_list = []
        success_list = []

        user_manager = BaseUserManager()

        data = pd.read_csv(options['csv'])
        for index, line in data.iterrows():  # pylint: disable=no-member,unused-variable
            try:
                received_offer = True if line['Fellow'] == 'Yes' else False

                jacs = line["Research Classification"][1:3]

                applicants_dict = {
                    "application_year": 2017,
                    "selected": False,
                    "received_offer": received_offer,
                    "forenames": line["First name"],
                    "surname": line["Surname"],
                    "affiliation": line["Home institution"],
                    "department": line["Department"],
                    "group": line["Group within Department (if any)"],
                    "career_stage_when_apply": line["Career stage"][6],
                    "job_title_when_apply": line["Job Title"],
                    "research_area": line["Area of work"],
                    "research_area_code": jacs,
                    "email": line["Email Address"],
                    "phone": line["Telephone number"],
                    "gender": line["Gender"][0] if pd.notnull(line["Gender"]) else 'R',
                    "home_country": "GB",
                    "home_city": "Unknow",
                    "funding": line["Which primary funding body/charity/organisation would you normally turn to if seeking financial support for your research/work?"],
                    "funding_notes": line["Any additional funders?"] if pd.notnull(line["Any additional funders?"]) else "",
                    "claimantship_grant": 3000 if received_offer else 0,
                    "institutional_website": line["Institutional web page"] if pd.notnull(line["Institutional web page"]) else "",
                    "website": line["Personal web page"] if pd.notnull(line["Personal web page"]) else "",
                    "orcid": line["ORCID"],
                    "google_scholar": line["Google Scholar"],
                    "github": line["GitHub"],
                    "gitlab": line["GitLab"],
                    "twitter": line["Twitter handle"],
                    "is_into_training": True if line["Have training in plans - added by AN"] == "Yes" else False,
                    "carpentries_instructor": True if line["Carpentry instructor - added by AN"] == "Yes" else False,
                    "research_software_engineer": True if line["RSE - added by AN"] == "Yes" else False,
                    "screencast_url": line["Application Screencast URL"] if pd.notnull(line["Application Screencast URL"]) else "",
                    "example_of_writing_url": line["Example of writing"] if pd.notnull(line["Example of writing"]) else "",
                }

                applicant = Claimant(**applicants_dict)
                applicant.save()
                success_list.append(index)

                if received_offer:
                    new_user = User.objects.create_user(
                        username=applicant.slug,
                        email=applicant.email,
                        password=user_manager.make_random_password(),
                        first_name=line["First name"],
                        last_name=line["Surname"]
                    )
                    applicant.user = new_user
                    applicant.save()
            except BaseException as exception:
                print("Error: {}\n{}\n{}".format(exception, line, 80 * "-"))
                fail_list.append(index)

        print(80 * "-")
        print("Success: {}".format(success_list))
        print("Fail: {}".format(fail_list))
Python
0.00012
@@ -2392,16 +2392,53 @@ %22ORCID%22%5D + if pd.notnull(line%5B%22ORCID%22%5D) else %22%22 ,%0A @@ -2491,16 +2491,62 @@ cholar%22%5D + if pd.notnull(line%5B%22Google Scholar%22%5D) else %22%22 ,%0A @@ -2583,16 +2583,54 @@ GitHub%22%5D + if pd.notnull(line%5B%22GitHub%22%5D) else %22%22 ,%0A @@ -2667,16 +2667,54 @@ GitLab%22%5D + if pd.notnull(line%5B%22GitLab%22%5D) else %22%22 ,%0A @@ -2756,24 +2756,70 @@ ter handle%22%5D + if pd.notnull(line%5B%22Twitter handle%22%5D) else %22%22 ,%0A
442f6c9eae5c64c3438f89c2968b0343c1f4ed6e
Revise script docstring
alg_find_peak_1D.py
alg_find_peak_1D.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

"""Find a peak in 1D array.

Support a is an array of length n.
If a is an array of length 1, a[0] is a peak.
In general k, a[k] is a peak iff a[k] >= a[k - 1] and a[k] >= a[k + 1].
If a[0] >= a[1], then a[0] is a peak.
If a[n - 1] >= a[n - 2], then a[n - 1] is a peak.
"""


def find_peak_naive(arr):
    """Find peak by naive iteration.

    Time complexity: O(n).
    """
    for i in range(len(arr)):
        if i == 0:
            if arr[i] >= arr[i + 1]:
                return arr[i]
        elif i == (len(arr) - 1):
            if arr[i] >= arr[i - 1]:
                return arr[i]
        else:
            if arr[i] >= arr[i - 1] and arr[i] >= arr[i + 1]:
                return arr[i]


def find_peak(arr):
    """Find peak by divide-end-conquer algorithm.

    Time complexity: O(logn).
    """
    if len(arr) == 1:
        return arr[0]
    else:
        mid = len(arr) // 2
        if arr[mid] <= arr[mid - 1]:
            return find_peak(arr[:mid-1])
        elif arr[mid] <= arr[mid + 1]:
            return find_peak(arr[mid+1:])
        else:
            return arr[mid]


def main():
    import time

    # Array of length 5 with peak 4.
    arr = [0, 1, 4, 3, 2]

    time_start = time.time()
    peak = find_peak_naive(arr)
    time_run = time.time() - time_start
    print('Peak: {}'.format(peak))
    print('Time for find_peak_naive(): {}'.format(time_run))

    time_start = time.time()
    peak = find_peak(arr)
    time_run = time.time() - time_start
    print('Peak: {}'.format(peak))
    print('Time for find_peak_naive(): {}'.format(time_run))

    # Array of long length.
    arr = np.random.permutation(10000000)

    time_start = time.time()
    peak = find_peak_naive(arr)
    time_run = time.time() - time_start
    print('Peak: {}'.format(peak))
    print('Time for find_peak_naive(): {}'.format(time_run))

    time_start = time.time()
    peak = find_peak(arr)
    time_run = time.time() - time_start
    print('Peak: {}'.format(peak))
    print('Time for find_peak_naive(): {}'.format(time_run))


if __name__ == '__main__':
    main()
Python
0.000002
@@ -247,10 +247,8 @@ eral - k , a%5B
1f58a07ba228cd1475ff057cca16920e6a698a01
Add additional test vectors (from exact trig values)
tests/test_vector2_rotate.py
tests/test_vector2_rotate.py
from ppb_vector import Vector2
from utils import angle_isclose, vectors

import pytest  # type: ignore
import math
from hypothesis import assume, given, note, example
import hypothesis.strategies as st


data_exact = [
    (Vector2(1, 1), -90, Vector2(1, -1)),
    (Vector2(1, 1), 0, Vector2(1, 1)),
    (Vector2(1, 1), 90, Vector2(-1, 1)),
    (Vector2(1, 1), 180, Vector2(-1, -1)),
]

data_close = [
    (Vector2(3, -20), 53, Vector2(17.77816, -9.64039)),
    (Vector2(math.pi, -1 * math.e), 30, Vector2(4.07984, -0.7833)),
    (Vector2(math.pi, math.e), 67, Vector2(-1.27467, 3.95397)),
]


@pytest.mark.parametrize('input, angle, expected', data_exact)
def test_exact_rotations(input, angle, expected):
    assert input.rotate(angle) == expected
    assert input.angle(expected) == angle


@pytest.mark.parametrize('input, angle, expected', data_close)
def test_close_rotations(input, angle, expected):
    assert input.rotate(angle).isclose(expected)
    assert angle_isclose(input.angle(expected), angle)


def test_for_exception():
    with pytest.raises(TypeError):
        Vector2('gibberish', 1).rotate(180)


@given(angle=st.floats(min_value=-360, max_value=360))
def test_trig_stability(angle):
    r_cos, r_sin = Vector2._trig(angle)

    # Don't use exponents here. Multiplication is generally more stable.
    assert math.isclose(r_cos * r_cos + r_sin * r_sin, 1, rel_tol=1e-18)


@given(
    initial=vectors(),
    angle=st.floats(min_value=-360, max_value=360),
)
def test_rotation_angle(initial, angle):
    assume(initial.length > 1e-5)
    rotated = initial.rotate(angle)
    note(f"Rotated: {rotated}")

    measured_angle = initial.angle(rotated)
    d = measured_angle - angle % 360
    note(f"Angle: {measured_angle} = {angle} + {d if d<180 else d-360}")
    assert angle_isclose(angle, measured_angle)


@given(
    increment=st.floats(min_value=1e-3, max_value=360),
    loops=st.integers(min_value=0)
)
def test_rotation_stability(increment, loops):
    initial = Vector2(1, 0)

    fellswoop = initial.rotate(increment * loops)
    note(f"One Fell Swoop: {fellswoop}")

    stepwise = initial
    for _ in range(loops):
        stepwise = stepwise.rotate(increment)
    note(f"Step-wise: {stepwise}")

    assert fellswoop.isclose(stepwise)
    assert math.isclose(fellswoop.length, initial.length)


@given(
    initial=vectors(),
    angles=st.lists(st.floats(min_value=-360, max_value=360)),
)
def test_rotation_stability2(initial, angles):
    total_angle = sum(angles)
    fellswoop = initial.rotate(total_angle)
    note(f"One Fell Swoop: {fellswoop}")

    stepwise = initial
    for angle in angles:
        stepwise = stepwise.rotate(angle)
    note(f"Step-wise: {stepwise}")

    assert fellswoop.isclose(stepwise)
    assert math.isclose(fellswoop.length, initial.length)


@given(
    a=vectors(max_magnitude=1e150),
    b=vectors(),
    l=st.floats(min_value=-1e150, max_value=1e150),
    angle=st.floats(min_value=-360, max_value=360),
)
# In this example:
# * a * l == -b
# * Rotation must not be an multiple of 90deg
# * Must be sufficiently large
@example(
    a=Vector2(1e10, 1e10),
    b=Vector2(1e19, 1e19),
    l=-1e9,
    angle=45,
)
def test_rotation_linearity(a, b, l, angle):
    inner = (l * a + b).rotate(angle)
    outer = l * a.rotate(angle) + b.rotate(angle)
    note(f"l * a + b: {l * a + b}")
    note(f"l * a.rotate(): {l * a.rotate(angle)}")
    note(f"b.rotate(): {b.rotate(angle)}")
    note(f"Inner: {inner}")
    note(f"Outer: {outer}")
    assert inner.isclose(outer, rel_to=[a, l * a, b])
Python
0
@@ -584,16 +584,129 @@ 95397)), +%0A%0A (Vector2(1, 0), 30, Vector2(math.sqrt(3)/2, 0.5)),%0A (Vector2(1, 0), 60, Vector2(0.5, math.sqrt(3)/2)), %0A%5D%0A%0A@pyt
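Each diff cell in this dump is a character-level patch (diff-match-patch style) with URL-encoded bodies: %0A is a newline, %5B/%5D are square brackets, %7B/%7D braces, %22 a double quote, %25 a percent sign. In the hand-decoded reconstructions that follow, indentation, blank lines, and elided context (shown as ...) are approximate; the rest is copied from the hunks. Decoded, this hunk appends two test vectors built from exact trig values (rotating (1, 0) by 30 degrees gives (cos 30°, sin 30°) = (√3/2, 1/2)) to the data_close list; the patched list reads (marker comment mine):

data_close = [
    (Vector2(3, -20), 53, Vector2(17.77816, -9.64039)),
    (Vector2(math.pi, -1 * math.e), 30, Vector2(4.07984, -0.7833)),
    (Vector2(math.pi, math.e), 67, Vector2(-1.27467, 3.95397)),

    # added by this commit: exact-value rotations of (1, 0) by 30 and 60 degrees
    (Vector2(1, 0), 30, Vector2(math.sqrt(3)/2, 0.5)),
    (Vector2(1, 0), 60, Vector2(0.5, math.sqrt(3)/2)),
]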
545f04982267a34daaacc3afb94cd50db3821550
Update ghost.py
home/Humanoid/ghost.py
home/Humanoid/ghost.py
################################################### # This is a basic script to carry on a conversation # with ghost ################################################### # create service ghost = Runtime.start("ghost", "WebGui") ear = Runtime.start("ear", "WebkitSpeechRecognition") ghostchat = Runtime.start("ghostchat", "ProgramAB") htmlfilter = Runtime.start("htmlfilter", "HtmlFilter") mouth = Runtime.start("mouth", "NaturalReaderSpeech") # start a chatbot session ghostchat.startSession("ProgramAB/bots", "ghostchat") voices = mouth.getVoices() # I've also tried removing this because I got an iteration error for this line # for voice in voices: # NaturalReaderSpeech.setVoice("Ryan") # - I'll need to check on these - might # need to just "attach" some services together ear.addTextListener(ghostchat) ghostchat.addTextListener(htmlfilter) htmlfilter.addTextListener(mouth)
Python
0.000001
@@ -438,16 +438,245 @@ eech%22)%0A%0A +# creating the connections and routes%0A# - I'll need to check on these - might%0A# need to just %22attach%22 some services together%0Aear.addTextListener(ghostchat)%0Aghostchat.addTextListener(htmlfilter)%0Ahtmlfilter.addTextListener(mouth)%0A%0A # start @@ -931,195 +931,4 @@ n%22)%0A -%0A# - I'll need to check on these - might%0A# need to just %22attach%22 some services together%0Aear.addTextListener(ghostchat)%0Aghostchat.addTextListener(htmlfilter)%0Ahtmlfilter.addTextListener(mouth)%0A
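Decoded, the two hunks reorder the script rather than add logic: the listener routes move up to run right after the services are created (under a reworded comment), and the duplicate routing block at the end of the file is deleted. The patched script reads roughly:

# create service
ghost = Runtime.start("ghost", "WebGui")
ear = Runtime.start("ear", "WebkitSpeechRecognition")
ghostchat = Runtime.start("ghostchat", "ProgramAB")
htmlfilter = Runtime.start("htmlfilter", "HtmlFilter")
mouth = Runtime.start("mouth", "NaturalReaderSpeech")

# creating the connections and routes
# - I'll need to check on these - might
# need to just "attach" some services together
ear.addTextListener(ghostchat)
ghostchat.addTextListener(htmlfilter)
htmlfilter.addTextListener(mouth)

# start a chatbot session
ghostchat.startSession("ProgramAB/bots", "ghostchat")
voices = mouth.getVoices()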
64938b5bb185f7f38716c166a2aa59a0713bc989
fix for sqlite test db
tests/runtests.py
tests/runtests.py
""" Test support harness for doing setup.py test. See http://ericholscher.com/blog/2009/jun/29/enable-setuppy-test-your-django-apps/. """ import sys import os os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.test_settings' # Bootstrap Django's settings. from django.conf import settings settings.DATABASES = { 'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory;'} } settings.TEST_RUNNER = "django_nose.NoseTestSuiteRunner" settings.NOSE_PLUGINS = ['tests.noseplugins.TestDiscoveryPlugin'] def runtests(): """Test runner for setup.py test.""" # Run you some tests. import django.test.utils runner_class = django.test.utils.get_runner(settings) test_runner = runner_class(verbosity=1, interactive=True) failures = test_runner.run_tests(['hyperadmin']) # Okay, so this is a nasty hack. If this isn't here, `setup.py test` craps out # when generating a coverage report via Nose. I have no idea why, or what's # supposed to be going on here, but this seems to fix the problem, and I # *really* want coverage, so, unless someone can tell me *why* I shouldn't # do this, I'm going to just whistle innocently and keep on doing this. sys.exitfunc = lambda: 0 sys.exit(failures)
Python
0
@@ -379,9 +379,9 @@ mory -; +: '%7D%0A%7D
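Decoded, this is a one-character fix: the in-memory SQLite name ':memory;' had a semicolon where a colon belongs. Patched setting:

settings.DATABASES = {
    'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:'}
}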
05f28064187c56d70d8f50c920676b81b7eb9f32
make test run faster
bdot/tests/test_carray.py
bdot/tests/test_carray.py
import nose import bdot import bcolz import numpy as np from numpy.testing import assert_array_equal def test_dot_int64(): matrix = np.random.random_integers(0, 12000, size=(300000, 100)) bcarray = bdot.carray(matrix, chunklen=2**13, cparams=bcolz.cparams(clevel=2)) v = bcarray[0] result = bcarray.dot(v) expected = matrix.dot(v) assert_array_equal(expected, result)
Python
0.000003
@@ -177,17 +177,16 @@ e=(30000 -0 , 100))%0A
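Decoded, the hunk drops one zero from the row count, 300000 down to 30000, which is the whole speedup. Patched line:

    matrix = np.random.random_integers(0, 12000, size=(30000, 100))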
5e2f393238d976e576b390b668c7ce2f13a1e0c1
Update to use Py3 print() (#1142)
example/scripts/add-line.py
example/scripts/add-line.py
#!/usr/bin/python # -*- coding: UTF-8 -*- import sys import getopt import re def findLine(pattern, fp): line = fp.readline() line_number = 1 while line: #print("Line {}: {}".format(line_number, line.strip())) if pattern in line: return line_number line = fp.readline() line_number += 1 return -1 def insertBefore(filename, pattern, text): with open(filename, 'r+') as fp: line_number = findLine(pattern, fp) if(line_number > 0): print 'Insert', text,'to line', line_number fp.seek(0) lines = fp.readlines() fp.seek(0) lines.insert(line_number - 1, text + '\n') fp.writelines(lines) return print 'pattern',text,'not found!' def replaceText(filename, pattern, text): with open(filename, 'r') as fp: lines = fp.read() fp.close() lines = (re.sub(pattern, text, lines)) print 'Replace', pattern ,'to', text fp = open(filename, 'w') fp.write(lines) fp.close() def main(argv): inputfile = '' string = '' text = '' replace = False try: opts, args = getopt.getopt(argv, "hi:s:t:r") except getopt.GetoptError: print 'add-line.py -i <inputfile> -s <string> -t <text>' sys.exit(2) for opt, arg in opts: if opt == '-h': print 'add-line.py -i <inputfile> -s <string> -t <text>' sys.exit() elif opt in ("-i"): inputfile = arg elif opt in ("-s"): string = arg elif opt in ("-t"): text = arg elif opt in ("-r"): replace = True if(replace): replaceText(inputfile, string, text) else: insertBefore(inputfile, string, text) if __name__ == "__main__": main(sys.argv[1:])
Python
0.9995
@@ -36,16 +36,54 @@ -8 -*-%0A%0A +from __future__ import print_function%0A import s @@ -561,17 +561,17 @@ print - +( 'Insert' @@ -591,32 +591,33 @@ ne', line_number +) %0A fp. @@ -802,17 +802,17 @@ print - +( 'pattern @@ -830,16 +830,17 @@ found!' +) %0A%0Adef re @@ -1016,17 +1016,17 @@ print - +( 'Replace @@ -1047,16 +1047,17 @@ o', text +) %0A @@ -1309,33 +1309,33 @@ r:%0A print - +( 'add-line.py -i @@ -1360,32 +1360,33 @@ ring%3E -t %3Ctext%3E' +) %0A sys.exi @@ -1457,17 +1457,17 @@ print - +( 'add-lin @@ -1508,16 +1508,17 @@ %3Ctext%3E' +) %0A
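Decoded, the commit adds a __future__ import just under the coding declaration and wraps every bare print statement in parentheses, leaving the original spacing inside the calls untouched. Representative patched lines:

#!/usr/bin/python
# -*- coding: UTF-8 -*-

from __future__ import print_function
import sys
...
            print('Insert', text,'to line', line_number)
...
    print('pattern',text,'not found!')
...
    print('Replace', pattern ,'to', text)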
06cb55639d2bc504d0ec1b9fb073c40e00751328
Disable output example_pic.png if exists
doc/sample_code/demo_plot_state.py
doc/sample_code/demo_plot_state.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from pyogi.board import Board from pyogi.plot import plot_board if __name__ == '__main__': board = Board() board.set_initial_state() board.players = ['先手', '後手'] board.move('+7776FU') board.move('-3334FU') board.move('+2868HI') board.move('-2288UM') board.move('+7988GI') # Plot by materials plot_board(board, savepath='example_pic.png', mode='pic') # Plot using matplotlib board.plot_state_mpl(figsize=(8, 9))
Python
0.999949
@@ -40,16 +40,26 @@ -8 -*-%0A%0A +import os%0A from pyo @@ -384,16 +384,107 @@ terials%0A + savepath = 'example_pic.png'%0A if os.path.exists(savepath):%0A savepath = None%0A%0A plot @@ -506,33 +506,24 @@ avepath= -'example_pic.png' +savepath , mode='
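Decoded, the script now imports os, precomputes the save path, and passes savepath=None (i.e. skips writing) when the picture already exists. Patched region:

# -*- coding: utf-8 -*-

import os
from pyogi.board import Board
from pyogi.plot import plot_board
...
    # Plot by materials
    savepath = 'example_pic.png'
    if os.path.exists(savepath):
        savepath = None

    plot_board(board, savepath=savepath, mode='pic')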
75f8a41c00e06f52102bf5f87a093d4ffef34f97
simplify the saving/loading of the lists
lib/maintain_lists_of_entries.py
lib/maintain_lists_of_entries.py
from .load_data_from_file import load_data_from_file from .save_data import save_data from .paths import mappings_path import json import os def maintain_lists_of_entries(all_courses): data_sets = { 'departments': set(), 'instructors': set(), 'times': set(), 'locations': set(), 'gereqs': set(), 'types': set(), } for key in data_sets: filename = os.path.join(mappings_path, 'valid_%s.json' % key) data = load_data_from_file(filename) data_sets[key] = set(json.loads(data)[key]) for course in all_courses: data_sets['departments'].update(course.get('depts', [])) data_sets['instructors'].update(course.get('instructors', [])) data_sets['times'].update(course.get('times', [])) data_sets['locations'].update(course.get('places', [])) data_sets['gereqs'].update(course.get('gereqs', [])) data_sets['types'].add(course.get('type', '')) for key in data_sets: data_sets[key] = sorted(data_sets[key]) for key, data in data_sets.items(): filename = os.path.join(mappings_path, 'valid_%s.json' % key) json_data = json.dumps({key: data}, indent='\t', separators=(',', ': ')) save_data(json_data, filename)
Python
0.000237
@@ -552,21 +552,16 @@ ds(data) -%5Bkey%5D )%0A%0A f @@ -1182,51 +1182,13 @@ mps( -%7Bkey: data%7D,%0A +data, ind @@ -1248,16 +1248,23 @@ son_data + + '%5Cn' , filena
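Decoded, the simplification assumes each valid_*.json file now stores a bare list instead of a {key: list} wrapper, on load and on save alike, and a trailing newline is appended when writing. Patched lines:

        data_sets[key] = set(json.loads(data))
...
        json_data = json.dumps(data, indent='\t', separators=(',', ': '))
        save_data(json_data + '\n', filename)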
f3c6a888b4462e2fab43faba6dbe2af4bafff1bb
Update add-snmpproxy-collector.py
scripts/monitoring/proxy_snmp/add-snmpproxy-collector.py
scripts/monitoring/proxy_snmp/add-snmpproxy-collector.py
from cloudify import ctx from cloudify import exceptions import diamond_agent.tasks as diamond import os paths = diamond.get_paths(ctx.plugin.workdir) name = 'SNMPProxyCollector' collector_dir = os.path.join(paths['collectors'], name) if not os.path.exists(collector_dir): os.mkdir(collector_dir) collector_file = os.path.join(collector_dir, '{0}.py'.format(name)) ctx.download_resource('scripts/monitoring/proxy_snmp/snmpproxy.py', collector_file) config = ctx.target.instance.runtime_properties.get('snmp_collector_config', {}) config.update({'enabled': True, 'hostname': '{0}.{1}.{2}'.format(diamond.get_host_id(ctx.target), ctx.target.node.name, ctx.target.instance.id) }) config_full_path = os.path.join(paths['collectors_config'], '{0}.conf'.format(name)) diamond.write_config(config_full_path, config) try: diamond.stop_diamond(paths['config']) except: pass try: diamond.start_diamond(paths['config']) except: exceptions.RecoverableError("Failed to start diamond", 30) pass
Python
0
@@ -103,53 +103,98 @@ os%0A -%0Apaths = diamond.get_paths(ctx.plugin.workdir +workdir = ctx.plugin.workdir%0Apaths = diamond.get_paths(workdir.replace(%22script%22,%22diamond%22) )%0Ana
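Decoded, the hunk derives Diamond's working paths from the plugin workdir with 'script' swapped for 'diamond' in the path string. Patched lines:

workdir = ctx.plugin.workdir
paths = diamond.get_paths(workdir.replace("script","diamond"))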
c9027e8aebe853d1c85fcac24b09caeb8ea5f403
Bump version to 0.3.0
bands_inspect/__init__.py
bands_inspect/__init__.py
# -*- coding: utf-8 -*- # (c) 2017-2019, ETH Zurich, Institut fuer Theoretische Physik # Author: Dominik Gresch <greschd@gmx.ch> """ A tool for modifying, comparing and plotting electronic bandstructures. """ from . import kpoints from . import eigenvals from . import compare from . import lattice from . import plot __version__ = '0.2.3'
Python
0.000001
@@ -335,9 +335,9 @@ '0. -2.3 +3.0 '%0A
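Decoded, this is the smallest change in the set: the version constant flips from 0.2.3 to 0.3.0, exactly as the subject says.

__version__ = '0.3.0'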
92752810380e38658de4a18eb0d06ff62395c38b
Fix path for gs upload credentials.
scripts/slave/chromium/archive_gpu_pixel_test_results.py
scripts/slave/chromium/archive_gpu_pixel_test_results.py
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """A tool to archive Chrome gpu pixel test results generated by buildbots. Pushes generated images, their associated reference images, and the diffs between the two to Google Storage. For a list of command-line options, call this script with '--help'. """ import optparse import os import subprocess import sys from common import chromium_utils GOOGLE_STORAGE_BUCKET = 'chromium-browser-gpu-tests' def CopyToGoogleStorage(src, dst): """Copies a file to the given Google Storage destination url. Args: src: path to file to be copied dst: Google Storage destination url (i.e., gs://...) Returns: whether the copy was successful """ if not os.path.exists(src): print 'No such file', src return False gsutil = os.environ.get('GSUTIL', 'gsutil') # gsutil will look in the $HOME directory for the config file. Set the # environment variable if needed. old_env = os.environ.copy() if 'HOME' not in os.environ: os.environ['HOME'] = os.path.expanduser('~') retcode = subprocess.call([gsutil, 'cp', '-a', 'public-read', src, dst]) os.environ = old_env if retcode == 0: return True return False def Archive(run_id, gen_dir, gpu_ref_dir): """Archive the gpu pixel test results to Google Storage. These results consist of all the mismatching images associated with the failing tests. The corresponding reference image and diff is uploaded with each mismatching image also. Args: run_id: the unique identifier of this run gen_dir: the path to the generated image directory gpu_ref_dir: the path to the gpu-rendered reference image directory Returns: whether all the failing images were correctly uploaded """ if not os.path.exists(gen_dir): print 'No failing test images to archive' return True view_url = 'http://%s.commondatastorage.googleapis.com/' \ 'view_test_results.html?%s' % (GOOGLE_STORAGE_BUCKET, run_id) print 'See %s for this run\'s test results' % view_url run_url = 'gs://%s/runs/%s/' % (GOOGLE_STORAGE_BUCKET, run_id) print 'Pushing results to %s...' % run_url success = True filenames = os.listdir(gen_dir) for filename in filenames: base_filename = filename[5:] full_path = os.path.join(gen_dir, filename) if filename.startswith('DIFF_'): if not CopyToGoogleStorage(full_path, run_url + 'diff/' + base_filename): success = False elif filename.startswith('FAIL_'): if not CopyToGoogleStorage(full_path, run_url + 'gen/' + base_filename): success = False # Copy the appropriate reference image. ref = os.path.join(gpu_ref_dir, base_filename) if not os.path.exists(ref): print 'Cannot find reference image ', base_filename success = False continue if not CopyToGoogleStorage(ref, run_url + 'ref/' + base_filename): success = False return success def main(): option_parser = optparse.OptionParser() option_parser.add_option('', '--run-id', default=None, help='unique id for this run') option_parser.add_option('', '--generated-dir', default=None, help='path to the generated images directory') option_parser.add_option('', '--gpu-reference-dir', default=None, help=('path to the directory holding the reference' 'images generated by the gpu')) # --sw-reference-dir is ignored. We keep it here so we don't have to # modify master side script. 
option_parser.add_option('', '--sw-reference-dir', default=None, help=('path to the directory holding the reference' 'images generated by the software renderer')) options = option_parser.parse_args()[0] if (options.run_id is None or options.generated_dir is None or options.gpu_reference_dir is None): print 'All command options are required. Use --help.' return 1 if Archive(options.run_id, options.generated_dir, options.gpu_reference_dir): retcode = 0 else: retcode = 2 chromium_utils.RemoveDirectory(options.generated_dir) return retcode if '__main__' == __name__: sys.exit(main())
Python
0.000005
@@ -1031,160 +1031,233 @@ ile. +%0A # Set -the%0A # environment variable if needed.%0A old_env = os.environ.copy()%0A if 'HOME' not in os.environ:%0A os.environ%5B'HOME'%5D = os.path.expanduser('~ +$HOME to /b/build/site_config/ temporarily where .boto stays.%0A old_env = os.environ.copy()%0A # This script is under /b/build/scripts/slave/chromium/%0A os.environ%5B'HOME'%5D = os.path.join('..', '..', '..', 'site_config ')%0A
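Decoded, the conditional expanduser('~') fallback is replaced by always pointing $HOME at the checkout's site_config directory (where the .boto credentials file lives), built as a relative path from the script's location. Patched block (the file uses two-space indents):

  # gsutil will look in the $HOME directory for the config file.
  # Set $HOME to /b/build/site_config/ temporarily where .boto stays.
  old_env = os.environ.copy()
  # This script is under /b/build/scripts/slave/chromium/
  os.environ['HOME'] = os.path.join('..', '..', '..', 'site_config')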
889a2608d1d4038a8c7ee1c445530fd1750c00e0
Optimize styling according to pylint
preprocessing/collect_unigrams.py
preprocessing/collect_unigrams.py
# -*- coding: utf-8 -*- """ File to collect all unigrams and all name-unigrams (label PER) from a corpus file. The corpus file must have one document/article per line. The words must be labeled in the form word/LABEL. Example file content: Yestarday John/PER Doe/PER said something amazing. Washington/LOC D.C./LOC is the capital of the U.S. The foobird is a special species of birds. It's commonly found on mars. ... Execute via: python -m preprocessing/collect_unigrams """ from __future__ import absolute_import, division, print_function, unicode_literals import os from model.unigrams import Unigrams # All capitalized constants come from this file from config import * def main(): """Main function. Gathers all unigrams and name-unigrams, see documantation at the top.""" # collect all unigrams (all labels, including "O") print("Collecting unigrams...") ug_all = Unigrams() ug_all.fill_from_articles(ARTICLES_FILEPATH, verbose=True) ug_all.write_to_file(UNIGRAMS_FILEPATH) ug_all = None # collect only unigrams of label PER print("Collecting person names (label=PER)...") ug_names = Unigrams() ug_names.fill_from_articles_labels(ARTICLES_FILEPATH, ["PER"], verbose=True) ug_names.write_to_file(UNIGRAMS_PERSON_FILEPATH) print("Finished.") # --------------- if __name__ == "__main__": main()
Python
0
@@ -459,21 +459,17 @@ ...%0A - %0A + Exec @@ -617,18 +617,8 @@ als%0A -import os%0A from @@ -702,20 +702,22 @@ ile%0A -from +import config impo @@ -716,16 +716,14 @@ fig -import * +as cfg %0A%0Ade @@ -827,21 +827,17 @@ top.%22%22%22%0A - %0A + # co @@ -973,16 +973,20 @@ rticles( +cfg. ARTICLES @@ -1027,32 +1027,36 @@ l.write_to_file( +cfg. UNIGRAMS_FILEPAT @@ -1076,21 +1076,17 @@ = None%0A - %0A + # co @@ -1235,16 +1235,20 @@ _labels( +cfg. ARTICLES @@ -1308,16 +1308,20 @@ to_file( +cfg. UNIGRAMS @@ -1338,20 +1338,16 @@ LEPATH)%0A - %0A pri
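Decoded, the pylint pass removes the unused import os, replaces the star import with a named module import, prefixes every imported constant with cfg., and strips trailing whitespace. Key patched lines:

import config as cfg
...
    ug_all.fill_from_articles(cfg.ARTICLES_FILEPATH, verbose=True)
    ug_all.write_to_file(cfg.UNIGRAMS_FILEPATH)
...
    ug_names.fill_from_articles_labels(cfg.ARTICLES_FILEPATH, ["PER"], verbose=True)
    ug_names.write_to_file(cfg.UNIGRAMS_PERSON_FILEPATH)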
9b5fd8dba4885cd0cc2de10f7ff6c8066aee0277
Fix possible issues with pulseaudiowidget
barython/widgets/audio.py
barython/widgets/audio.py
#!/usr/bin/env python3 import logging from .base import SubprocessWidget from barython.hooks.audio import PulseAudioHook logger = logging.getLogger("barython") class PulseAudioWidget(SubprocessWidget): def handler(self, event, *args, **kwargs): """ Filter events sent by the notifications """ # Only notify if there is something changes in pulseaudio event_change_msg = "Event 'change' on destination" if event_change_msg in event: logger.debug("PA: line \"{}\" catched.".format(event)) return self.update() def organize_result(self, volume, output_mute=None, input_mute=None, *args, **kwargs): """ Override this method to change the infos to print """ return "{}".format(volume) def handle_result(self, output=None, *args, **kwargs): # As pulseaudio-ctl add events in pactl subscribe, flush output try: if output != "" and output is not None: output = self.organize_result(*output.split()) super().handle_result(output=output) except Exception as e: logger.error("Error in PulseAudioWidget: {}", e) def __init__(self, cmd=["pulseaudio-ctl", "full-status"], *args, **kwargs): super().__init__(cmd, infinite=False, *args, **kwargs) # Update the widget when PA volume changes self.hooks.subscribe(self.handler, PulseAudioHook)
Python
0.000002
@@ -290,20 +290,16 @@ sent by -the notifica @@ -1336,16 +1336,37 @@ _init__( +*args, **kwargs, cmd= cmd, inf @@ -1376,33 +1376,16 @@ te=False -, *args, **kwargs )%0A%0A
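Decoded, the fix reorders the superclass call so that positional *args can no longer land in the cmd or infinite slots (keyword arguments after **kwargs unpacking are valid since Python 3.5). Patched constructor:

    def __init__(self, cmd=["pulseaudio-ctl", "full-status"], *args, **kwargs):
        super().__init__(*args, **kwargs, cmd=cmd, infinite=False)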
add3baff745a276da424b3e73bcb7619ba5ca061
Fix holding config variable in database through proxy objects
analytical/utils.py
analytical/utils.py
""" Utility function for django-analytical. """ from django.conf import settings from django.core.exceptions import ImproperlyConfigured HTML_COMMENT = "<!-- %(service)s disabled on internal IP " \ "address\n%(html)s\n-->" def get_required_setting(setting, value_re, invalid_msg): """ Return a constant from ``django.conf.settings``. The `setting` argument is the constant name, the `value_re` argument is a regular expression used to validate the setting value and the `invalid_msg` argument is used as exception message if the value is not valid. """ try: value = getattr(settings, setting) except AttributeError: raise AnalyticalException("%s setting: not found" % setting) if value is None: raise AnalyticalException("%s setting is set to None" % setting) value = str(value) if not value_re.search(value): raise AnalyticalException("%s setting: %s: '%s'" % (setting, invalid_msg, value)) return value def get_user_from_context(context): """ Get the user instance from the template context, if possible. If the context does not contain a `request` or `user` attribute, `None` is returned. """ try: return context['user'] except KeyError: pass try: request = context['request'] return request.user except (KeyError, AttributeError): pass return None def get_identity(context, prefix=None, identity_func=None, user=None): """ Get the identity of a logged in user from a template context. The `prefix` argument is used to provide different identities to different analytics services. The `identity_func` argument is a function that returns the identity of the user; by default the identity is the username. """ if prefix is not None: try: return context['%s_identity' % prefix] except KeyError: pass try: return context['analytical_identity'] except KeyError: pass if getattr(settings, 'ANALYTICAL_AUTO_IDENTIFY', True): try: if user is None: user = get_user_from_context(context) if user.is_authenticated(): if identity_func is not None: return identity_func(user) else: return user.get_username() except (KeyError, AttributeError): pass return None def get_domain(context, prefix): """ Return the domain used for the tracking code. Each service may be configured with its own domain (called `<name>_domain`), or a django-analytical-wide domain may be set (using `analytical_domain`. If no explicit domain is found in either the context or the settings, try to get the domain from the contrib sites framework. """ domain = context.get('%s_domain' % prefix) if domain is None: domain = context.get('analytical_domain') if domain is None: domain = getattr(settings, '%s_DOMAIN' % prefix.upper(), None) if domain is None: domain = getattr(settings, 'ANALYTICAL_DOMAIN', None) if domain is None: if 'django.contrib.sites' in settings.INSTALLED_APPS: from django.contrib.sites.models import Site try: domain = Site.objects.get_current().domain except (ImproperlyConfigured, Site.DoesNotExist): pass return domain def is_internal_ip(context, prefix=None): """ Return whether the visitor is coming from an internal IP address, based on information from the template context. The prefix is used to allow different analytics services to have different notions of internal addresses. 
""" try: request = context['request'] remote_ip = request.META.get('HTTP_X_FORWARDED_FOR', '') if not remote_ip: remote_ip = request.META.get('REMOTE_ADDR', '') if not remote_ip: return False internal_ips = None if prefix is not None: internal_ips = getattr(settings, '%s_INTERNAL_IPS' % prefix, None) if internal_ips is None: internal_ips = getattr(settings, 'ANALYTICAL_INTERNAL_IPS', None) if internal_ips is None: internal_ips = getattr(settings, 'INTERNAL_IPS', None) return remote_ip in (internal_ips or []) except (KeyError, AttributeError): return False def disable_html(html, service): """ Disable HTML code by commenting it out. The `service` argument is used to display a friendly message. """ return HTML_COMMENT % {'html': html, 'service': service} class AnalyticalException(Exception): """ Raised when an exception occurs in any django-analytical code that should be silenced in templates. """ silent_variable_failure = True
Python
0.000003
@@ -750,20 +750,16 @@ if +not valu -e is Non e:%0A @@ -810,19 +810,15 @@ is -set to None +not set %22 %25
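Decoded, the literal None check becomes a plain falsiness check, which also trips on empty values (presumably the proxy-object case the subject alludes to), with the message reworded to match. Patched lines:

    if not value:
        raise AnalyticalException("%s setting is not set" % setting)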
7f113399e4277ecbbfdde41d683c22082f7e19bd
Add DOI parsing to identifiers
scrapi/harvesters/smithsonian.py
scrapi/harvesters/smithsonian.py
''' Harvester for the Smithsonian Digital Repository for the SHARE project Example API call: http://repository.si.edu/oai/request?verb=ListRecords&metadataPrefix=oai_dc ''' from __future__ import unicode_literals from scrapi.base import OAIHarvester class SiHarvester(OAIHarvester): short_name = 'smithsonian' long_name = 'Smithsonian Digital Repository' url = 'http://repository.si.edu/oai/request' base_url = 'http://repository.si.edu/oai/request' property_list = ['date', 'identifier', 'type', 'format', 'setSpec'] timezone_granularity = True
Python
0.00003
@@ -208,16 +208,59 @@ terals%0A%0A +import re%0A%0Afrom scrapi.base import helpers%0A from scr @@ -461,158 +461,769 @@ -base_url = 'http://repository.si.edu/oai/request'%0A property_list = %5B'date', 'identifier', 'type', 'format', 'setSpec'%5D%0A timezone_granularity = Tr +@property%0A def schema(self):%0A return helpers.updated_schema(self._schema, %7B%0A %22uris%22: %7B%0A %22objectUris%22: %5B('//dc:identifier/node()', get_doi_from_identifier)%5D%0A %7D%0A %7D)%0A%0A base_url = 'http://repository.si.edu/oai/request'%0A property_list = %5B'date', 'identifier', 'type', 'format', 'setSpec'%5D%0A timezone_granularity = True%0A%0A%0Adef get_doi_from_identifier(identifiers):%0A doi_re = re.compile(r'10%5C.%5CS*%5C/%5CS*')%0A identifiers = %5Bidentifiers%5D if not isinstance(identifiers, list) else identifiers%0A for identifier in identifiers:%0A try:%0A found_doi = doi_re.search(identifier).group()%0A return 'http://dx.doi.org/%7B%7D'.format(found_doi)%0A except AttributeError:%0A contin ue%0A
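Decoded, the commit adds a module-level get_doi_from_identifier helper plus a schema property that runs every dc:identifier through it. Reconstructed additions (unchanged class attributes elided):

import re

from scrapi.base import helpers
from scrapi.base import OAIHarvester


class SiHarvester(OAIHarvester):
    ...
    @property
    def schema(self):
        return helpers.updated_schema(self._schema, {
            "uris": {
                "objectUris": [('//dc:identifier/node()', get_doi_from_identifier)]
            }
        })
    ...

def get_doi_from_identifier(identifiers):
    doi_re = re.compile(r'10\.\S*\/\S*')
    identifiers = [identifiers] if not isinstance(identifiers, list) else identifiers
    for identifier in identifiers:
        try:
            found_doi = doi_re.search(identifier).group()
            return 'http://dx.doi.org/{}'.format(found_doi)
        except AttributeError:
            continue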
b65a2ee41d16efd1a056727e59c229eb8258070f
set default DB_host as localhost
tests/settings.py
tests/settings.py
import os INSTALLED_APPS = ( 'model_utils', 'tests', ) DATABASES = { "default": { "ENGINE": "django.db.backends.postgresql_psycopg2", "NAME": os.environ.get("DB_NAME", "modelutils"), "USER": os.environ.get("DB_USER", 'postgres'), "PASSWORD": os.environ.get("DB_PASSWORD", ""), "HOST": os.environ.get("DB_HOST", ""), "PORT": os.environ.get("DB_PORT", 5432) }, } SECRET_KEY = 'dummy' CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', } } DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
Python
0.999466
@@ -354,24 +354,33 @@ %22DB_HOST%22, %22 +localhost %22),%0A
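Decoded, the empty-string fallback for the database host becomes localhost:

        "HOST": os.environ.get("DB_HOST", "localhost"),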
4a6e33eec89f88604cec47c9d0aff24b94b6d87b
Add setting to disable SECCOMP via environment variable
tests/settings.py
tests/settings.py
""" Django settings for running tests for Resolwe package. """ from __future__ import absolute_import, division, print_function, unicode_literals import os import re from distutils.util import strtobool # pylint: disable=import-error,no-name-in-module PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__)) SECRET_KEY = 'secret' # TODO: Remove this setting completely and only set it in the tests that require it. RESOLWE_HOST_URL = 'https://dummy.host.local' DEBUG = True MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.staticfiles', 'channels', 'rest_framework', 'guardian', 'mathfilters', 'versionfield', 'resolwe', 'resolwe.permissions', 'resolwe.flow', 'resolwe.elastic', 'resolwe.toolkit', 'resolwe.test_helpers', 'resolwe_bio', 'resolwe_bio.kb', ) ROOT_URLCONF = 'tests.urls' TEST_RUNNER = 'resolwe.test_helpers.test_runner.ResolweRunner' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', ], }, }, ] AUTHENTICATION_BACKENDS = ( 'django.contrib.auth.backends.ModelBackend', 'guardian.backends.ObjectPermissionBackend', ) ANONYMOUS_USER_ID = -1 # Check if PostgreSQL settings are set via environment variables pgname = os.environ.get('RESOLWE_POSTGRESQL_NAME', 'resolwe-bio') pguser = os.environ.get('RESOLWE_POSTGRESQL_USER', 'resolwe') pghost = os.environ.get('RESOLWE_POSTGRESQL_HOST', 'localhost') pgport = int(os.environ.get('RESOLWE_POSTGRESQL_PORT', 55433)) DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': pgname, 'USER': pguser, 'HOST': pghost, 'PORT': pgport, } } STATIC_URL = '/static/' REDIS_CONNECTION = { 'host': 'localhost', 'port': int(os.environ.get('RESOLWE_REDIS_PORT', 56380)), 'db': int(os.environ.get('RESOLWE_REDIS_DATABASE', 0)), } FLOW_EXECUTOR = { 'NAME': 'resolwe.flow.executors.docker', # XXX: Change to a stable resolwe image when it will include all the required tools 'CONTAINER_IMAGE': 'resolwe/bio-linux8-resolwe-preview', 'CONTAINER_NAME_PREFIX': 'resolwebio', 'REDIS_CONNECTION': REDIS_CONNECTION, 'DATA_DIR': os.path.join(PROJECT_ROOT, 'test_data'), 'UPLOAD_DIR': os.path.join(PROJECT_ROOT, 'test_upload'), 'RUNTIME_DIR': os.path.join(PROJECT_ROOT, 'test_runtime'), } # Set custom executor command if set via environment variable if 'RESOLWE_DOCKER_COMMAND' in os.environ: FLOW_DOCKER_COMMAND = os.environ['RESOLWE_DOCKER_COMMAND'] FLOW_API = { 'PERMISSIONS': 'resolwe.permissions.permissions', } FLOW_EXPRESSION_ENGINES = [ { 'ENGINE': 'resolwe.flow.expression_engines.jinja', 'CUSTOM_FILTERS': [ 'resolwe_bio.expression_filters.sample', ] }, ] FLOW_EXECUTION_ENGINES = [ 'resolwe.flow.execution_engines.bash', 'resolwe.flow.execution_engines.workflow', ] # Check if any Manager settings are set via environment variables manager_prefix = os.environ.get('RESOLWE_MANAGER_REDIS_PREFIX', 'resolwe-bio.manager') # Ensure Manager channel prefix is a valid Django Channels name. 
manager_prefix = re.sub('[^0-9a-zA-Z.-]', '-', manager_prefix) FLOW_MANAGER = { 'NAME': 'resolwe.flow.managers.workload_connectors.local', 'REDIS_PREFIX': manager_prefix, 'REDIS_CONNECTION': REDIS_CONNECTION, } FLOW_DOCKER_VOLUME_EXTRA_OPTIONS = { 'data': 'Z', 'data_all': 'z', 'upload': 'z', 'secrets': 'Z', 'users': 'Z', 'tools': 'z', } FLOW_PROCESS_MAX_CORES = 1 # Don't pull Docker images if set via the environment variable. FLOW_DOCKER_DONT_PULL = strtobool(os.environ.get('RESOLWE_DOCKER_DONT_PULL', '0')) # Ensure all container images follow a specific format. FLOW_CONTAINER_VALIDATE_IMAGE = r'.+:(?!latest)' REST_FRAMEWORK = { 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework.authentication.SessionAuthentication', ), 'DEFAULT_FILTER_BACKENDS': ( 'resolwe.permissions.filters.ResolwePermissionsFilter', 'rest_framework_filters.backends.DjangoFilterBackend', ), } FLOW_PROCESSES_FINDERS = ( 'resolwe.flow.finders.FileSystemProcessesFinder', 'resolwe.flow.finders.AppDirectoriesFinder', ) FLOW_PROCESSES_DIRS = (os.path.join(PROJECT_ROOT, '../resolwe_bio/tests/'),) # Do not skip tests that fail on Docker executor if this is set via environment # variable if os.environ.get('RESOLWEBIO_TESTS_SKIP_DOCKER_FAILURES', '').lower() in ["no", "false"]: TESTS_SKIP_DOCKER_FAILURES = False # Elastic Search. ELASTICSEARCH_HOST = os.environ.get('RESOLWE_ES_HOST', 'localhost') ELASTICSEARCH_PORT = int(os.environ.get('RESOLWE_ES_PORT', '59201')) # Testing. TEST_RUNNER = 'resolwe.test_helpers.test_runner.ResolweRunner' TEST_PROCESS_REQUIRE_TAGS = True # Don't profile unless set via the environment variable. TEST_PROCESS_PROFILE = strtobool(os.environ.get('RESOLWE_TEST_PROCESS_PROFILE', '0')) # Channels. CHANNEL_LAYERS = { 'default': { 'BACKEND': 'asgi_redis.RedisChannelLayer', 'ROUTING': 'tests.routing.channel_routing', 'CONFIG': { 'hosts': [(REDIS_CONNECTION['host'], REDIS_CONNECTION['port'])], 'expiry': 3600, }, }, }
Python
0
@@ -4272,16 +4272,163 @@ '0'))%0A%0A +# Disable SECCOMP if set via environment variable.%0AFLOW_DOCKER_DISABLE_SECCOMP = strtobool(os.environ.get('RESOLWE_DOCKER_DISABLE_SECCOMP', '0'))%0A%0A # Ensure
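Decoded, one new flag is inserted right after FLOW_DOCKER_DONT_PULL, reusing the same strtobool-from-environment pattern already used in this settings file:

# Disable SECCOMP if set via environment variable.
FLOW_DOCKER_DISABLE_SECCOMP = strtobool(os.environ.get('RESOLWE_DOCKER_DISABLE_SECCOMP', '0'))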
dcf7af23fa237cd761f1a589e2e268875d296841
Test settings updated
tests/settings.py
tests/settings.py
# -*- coding: utf-8 -*- # Standard library imports import os # Third party imports from django.conf import global_settings as default_settings from django.conf import settings # Local application / specific library imports TEST_ROOT = os.path.abspath(os.path.dirname(__file__)) TEST_SETTINGS = { 'DEBUG': False, 'TEMPLATE_DEBUG': False, 'DATABASES': { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:' } }, 'TEMPLATE_CONTEXT_PROCESSORS': default_settings.TEMPLATE_CONTEXT_PROCESSORS, 'INSTALLED_APPS': ( 'django.contrib.auth', 'django.contrib.admin', 'django.contrib.contenttypes', 'django.contrib.messages', 'django.contrib.sessions', 'django.contrib.sites', 'precise_bbcode', 'tests', ), 'ROOT_URLCONF': 'tests._testsite.urls', 'MIDDLEWARE_CLASSES': default_settings.MIDDLEWARE_CLASSES, 'ADMINS': ('admin@example.com',), 'MEDIA_ROOT': os.path.join(TEST_ROOT, '_testdata/media/'), 'SITE_ID': 1, } def configure(): if not settings.configured: settings.configure(**TEST_SETTINGS)
Python
0
@@ -1070,16 +1070,245 @@ ID': 1,%0A +%0A%0A # Setting this explicitly prevents Django 1.7+ from showing a%0A # warning regarding a changed default test runner. The test%0A # suite is run with nose, so it does not matter.%0A 'SILENCED_SYSTEM_CHECKS': %5B'1_6.W001'%5D,%0A %7D%0A%0A%0Adef
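Decoded, the update appends one commented entry to the end of TEST_SETTINGS:

    'SITE_ID': 1,

    # Setting this explicitly prevents Django 1.7+ from showing a
    # warning regarding a changed default test runner. The test
    # suite is run with nose, so it does not matter.
    'SILENCED_SYSTEM_CHECKS': ['1_6.W001'],
}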
0b4cbd10ec6deb85603ad21a3ff1a7c3141da315
Change matrix size for convolutionSeparable2 test
examples/convolution/run.py
examples/convolution/run.py
#!/usr/bin/env python import subprocess from xml.dom.minidom import parseString stddevThreshold = 0.1 # tolerate up to 10% variation nTrials = 5 nThreads = 2 def run(cmd): proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = proc.communicate() ret = proc.poll() if ret: print '%s: returned failure %d' % (cmd[0], ret) print '-----------------------------' print '%s' % err print '%s' % out error = subprocess.CalledProcessError(ret, cmd) error.output = out raise error return out def compile(radius): cmd = [ "../../src/pbc", "--preproc=/usr/bin/cpp -DKERNEL_RADIUS=%d" % radius, "convolutionSeparable2.pbcc" ] run(cmd) def test(size, mode): # TODO: copy config to dest location? cmd = [ "./convolutionSeparable2", "--time", "--trials=%d" % nTrials, "--isolation", "--threads=%d" % nThreads, "-n", "%d" % size, "--config=./convolutionSeparable2.%s.cfg" % mode ] res = run(cmd) x = parseString(res) timing = x.getElementsByTagName('timing')[0] stddev = float(timing.getAttribute('stddev')) t = float(timing.getAttribute('median')) # For now, just warn if it seems unreasonable if stddev > t * stddevThreshold: print 'WARNING: stddev for %s with n=%d, was high: %f' % (mode, size, stddev) return t,stddev def test_radius(radius, sizes=[2048]): # TODO: try 4096 print 'Testing radius=%d' % radius compile(radius) res = [] for size in sizes: # TODO: add localmem option for sep in ['2d', 'sep']: for local in ['local', 'nolocal']: mode = '%s.%s' % (sep,local) t,stddev = test(size, mode) res.append( (radius, size, mode, t, stddev) ) #print '%d^2, %s takes %f (stddev: %f)' % (size, mode, t, stddev) return res # Run all tests res = [] for radius in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]: t = test_radius(radius) res.extend( t ) for r,s,m,t,dev in res: print 'R=%d, %dx%d, %s takes:\t%f (stddev: %f)' % (r, s, s, m, t, dev)
Python
0
@@ -138,17 +138,17 @@ rials = -5 +7 %0AnThread @@ -1497,28 +1497,16 @@ 2048 -%5D): # TODO: try 4096 +,3500%5D): %0A
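Decoded, two knobs change despite the subject naming only one: nTrials goes from 5 to 7, and the default size list gains a second entry in place of the old TODO comment:

nTrials = 7
...
def test_radius(radius, sizes=[2048,3500]):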
bd1e6eba5fa8f47606319dab6ae378383c31a366
fix sdb unit tests
tests/unit/sdb/test_vault.py
tests/unit/sdb/test_vault.py
# -*- coding: utf-8 -*- ''' Test case for the vault SDB module ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals # Import Salt Testing libs from tests.support.unit import TestCase from tests.support.mixins import LoaderModuleMockMixin from tests.support.mock import ( MagicMock, call, patch) # Import Salt libs import salt.sdb.vault as vault class TestVaultSDB(LoaderModuleMockMixin, TestCase): ''' Test case for the vault SDB module ''' def setup_loader_modules(self): return { vault: { '__opts__': { 'vault': { 'url': "http://127.0.0.1", "auth": { 'token': 'test', 'method': 'token' } } } } } def test_set(self): ''' Test salt.sdb.vault.set function ''' mock_vault = MagicMock() mock_vault.return_value.status_code = 200 with patch.dict(vault.__utils__, {'vault.make_request': mock_vault}): vault.set_('sdb://myvault/path/to/foo/bar', 'super awesome') assert mock_vault.call_args_list == [call('POST', 'v1/sdb://myvault/path/to/foo', None, json={'bar': 'super awesome'})] def test_set_question_mark(self): ''' Test salt.sdb.vault.set_ while using the old deprecated solution with a question mark. ''' mock_vault = MagicMock() mock_vault.return_value.status_code = 200 with patch.dict(vault.__utils__, {'vault.make_request': mock_vault}): vault.set_('sdb://myvault/path/to/foo?bar', 'super awesome') assert mock_vault.call_args_list == [call('POST', 'v1/sdb://myvault/path/to/foo', None, json={'bar': 'super awesome'})] def test_get(self): ''' Test salt.sdb.vault.get function ''' mock_vault = MagicMock() mock_vault.return_value.status_code = 200 mock_vault.content.return_value = [{'data': {'bar', 'test'}}] with patch.dict(vault.__utils__, {'vault.make_request': mock_vault}): vault.get('sdb://myvault/path/to/foo/bar') assert mock_vault.call_args_list == [call('GET', 'v1/sdb://myvault/path/to/foo', None)] def test_get_question_mark(self): ''' Test salt.sdb.vault.get while using the old deprecated solution with a question mark. ''' mock_vault = MagicMock() mock_vault.return_value.status_code = 200 mock_vault.content.return_value = [{'data': {'bar', 'test'}}] with patch.dict(vault.__utils__, {'vault.make_request': mock_vault}): vault.get('sdb://myvault/path/to/foo?bar') assert mock_vault.call_args_list == [call('GET', 'v1/sdb://myvault/path/to/foo', None)]
Python
0.000001
@@ -991,32 +991,165 @@ ion%0A '''%0A + version = %7B'v2': False, 'data': None, 'metadata': None, 'type': None%7D%0A mock_version = MagicMock(return_value=version)%0A mock_vau @@ -1283,32 +1283,107 @@ t': mock_vault%7D) +, %5C%0A patch.dict(vault.__utils__, %7B'vault.is_v2': mock_version%7D) :%0A va @@ -1828,32 +1828,165 @@ rk.%0A '''%0A + version = %7B'v2': False, 'data': None, 'metadata': None, 'type': None%7D%0A mock_version = MagicMock(return_value=version)%0A mock_vau @@ -2120,32 +2120,107 @@ t': mock_vault%7D) +, %5C%0A patch.dict(vault.__utils__, %7B'vault.is_v2': mock_version%7D) :%0A va @@ -2589,32 +2589,165 @@ ion%0A '''%0A + version = %7B'v2': False, 'data': None, 'metadata': None, 'type': None%7D%0A mock_version = MagicMock(return_value=version)%0A mock_vau @@ -2951,32 +2951,107 @@ t': mock_vault%7D) +, %5C%0A patch.dict(vault.__utils__, %7B'vault.is_v2': mock_version%7D) :%0A va @@ -3445,32 +3445,165 @@ rk.%0A '''%0A + version = %7B'v2': False, 'data': None, 'metadata': None, 'type': None%7D%0A mock_version = MagicMock(return_value=version)%0A mock_vau @@ -3815,16 +3815,91 @@ _vault%7D) +, %5C%0A patch.dict(vault.__utils__, %7B'vault.is_v2': mock_version%7D) :%0A
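Decoded, every test gains a stubbed vault.is_v2 that reports a v1 KV store, patched in alongside the existing make_request mock. One patched test as a representative (the other three follow the same pattern):

    def test_set(self):
        '''
        Test salt.sdb.vault.set function
        '''
        version = {'v2': False, 'data': None, 'metadata': None, 'type': None}
        mock_version = MagicMock(return_value=version)
        mock_vault = MagicMock()
        mock_vault.return_value.status_code = 200
        with patch.dict(vault.__utils__, {'vault.make_request': mock_vault}), \
                patch.dict(vault.__utils__, {'vault.is_v2': mock_version}):
            vault.set_('sdb://myvault/path/to/foo/bar', 'super awesome')
        assert mock_vault.call_args_list == [call('POST', 'v1/sdb://myvault/path/to/foo', None, json={'bar': 'super awesome'})]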
119f63d72f8faae892e0f4c75c9d3ae406a3c0b3
Update settings for dev
core/settings.py
core/settings.py
""" Django settings for core project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ import os import sys # Needed for login import django.contrib.auth from django.utils.translation import ugettext_lazy as _ BASE_DIR = os.path.dirname(os.path.dirname(__file__)) DEBUG = False # Import SECRET_KEY and check it try: from core.settings_secret import * except ImportError: print("[ERROR] core/settings_secret.py not found. Please create it according to the template settings_secret.py.template") sys.exit() if SECRET_KEY == "CHANGE_ME": print("[ERROR] Please change your secret key, stored in core/settings_secret.py") print("More information: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-SECRET_KEY") sys.exit() elif len(SECRET_KEY) < 50: print("[WARNING] Your SECRET_KEY is too short. Please consider changing it.") # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: don't run with debug turned on in production! ALLOWED_HOSTS = ['.guhema.com', '.guhema.de', '.sägenmarkt.com', '.sägenmarkt.de', '127.0.0.1'] SITE_ID = 1 # Security Settings # SECURE_CONTENT_TYPE_NOSNIFF = True # SECURE_BROWSER_XSS_FILTER = True # SESSION_COOKIE_SECURE = True # CSRF_COOKIE_SECURE = True # CSRF_COOKIE_HTTPONLY = True # X_FRAME_OPTIONS = "DENY" INSTALLED_APPS = ( 'modeltranslation', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.sites', 'django.contrib.flatpages', # 3rd party 'autoslug', 'easy_thumbnails', 'pagedown', 'markdown_deux', 'django_forms_bootstrap', # Own apps # 'login', 'news', 'downloads', 'products', 'contact', 'fairs', ) # Application definition MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.locale.LocaleMiddleware', 'core.middleware.ForceLangMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'core.urls' WSGI_APPLICATION = 'core.wsgi.application' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'de' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True LANGUAGES = [ ('de', _('German')), ('en', _('English')), ('ru', _('Russian')), ] LOCALE_PATHS = ( os.path.join(BASE_DIR, 'locale'), ) # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ STATIC_ROOT = os.path.join(BASE_DIR, 'static') STATIC_URL = '/static/' STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ) STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'templates/core'), ) MEDIA_ROOT = os.path.join(BASE_DIR, 'media') 
MEDIA_URL = '/media/' # Configure Templates TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', 'django.template.context_processors.media', 'django.template.context_processors.static', 'django.template.context_processors.request', 'django.core.context_processors.i18n', ], 'debug': DEBUG, }, }, ] # Configure Easy Thumbnails THUMBNAIL_ALIASES = { '': { 'news_front': {'size': (360, 165), 'crop': "smart", 'quality': 100}, 'news_detail': {'size': (452, 254), 'crop': "scale", 'quality': 100}, 'fair_detail': {'size': (600, 600), 'crop': "scale", 'quality': 100}, 'products_300': {'size': (300, 200), 'crop': "scale", 'quality': 100}, 'products_clamping': {'size': (250, 80), 'crop': "scale", 'quality': 100}, 'products_teaserimage': {'size': (262, 174), 'crop': "scale", 'quality': 100}, 'blade_image': {'size': (458, 80), 'crop': "scale", 'quality': 100}, 'blade_image_250h': {'size': (600, 250), 'crop': "scale", 'quality': 100}, 'width_1140': {'size': (1140, 2500), 'crop': "scale", 'quality': 100}, }, } # Markdown Deux Settings MARKDOWN_DEUX_STYLES = { "default": { "extras": { "code-friendly": None, }, # Allow raw HTML (WARNING: don't use this for user-generated # Markdown for your site!). "safe_mode": False, }, "nohtml": { "extras": { "code-friendly": None, }, # Allow raw HTML (WARNING: don't use this for user-generated # Markdown for your site!). "safe_mode": "escape", }, } # Modeltranslation settings MODELTRANSLATION_DEFAULT_LANGUAGE = 'de' MODELTRANSLATION_FALLBACK_LANGUAGES = ('de',)
Python
0
@@ -530,32 +530,41 @@ rror:%0A print( +%0A %22%5BERROR%5D core/se @@ -1323,16 +1323,29 @@ 7.0.0.1' +, 'localhost' %5D%0ASITE_I @@ -2831,17 +2831,16 @@ ation'%0A%0A -%0A # Databa @@ -3362,17 +3362,16 @@ e'),%0A)%0A%0A -%0A # Static
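Decoded, the dev tweaks are mostly cosmetic plus one functional change: the long error print is re-wrapped onto two lines, 'localhost' joins ALLOWED_HOSTS, and two double blank lines collapse to one. The two visible changes:

    print(
        "[ERROR] core/settings_secret.py not found. Please create it according to the template settings_secret.py.template")
...
ALLOWED_HOSTS = ['.guhema.com', '.guhema.de', '.sägenmarkt.com', '.sägenmarkt.de', '127.0.0.1', 'localhost']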
5fa36e781729fbfe5e3343f921e52eebf0062e75
Switch rackspace env variables to prettyconf
tests/settings.py
tests/settings.py
import hashlib import os from tempfile import mkdtemp from time import time from prettyconf.configuration import Configuration config = Configuration() # Append epoch to prevent test runs from clobbering each other. CONTAINER_PREFIX = 'cloud-storage-test-' + str(int(time())) SECRET = hashlib.sha1(os.urandom(128)).hexdigest() SALT = hashlib.sha1(os.urandom(128)).hexdigest() TEXT_FILENAME = 'flask.txt' TEXT_STREAM_FILENAME = 'flask-stream.txt' TEXT_FORM_FILENAME = 'flask-form.txt' TEXT_MD5_CHECKSUM = '2a5a634f5c8d931350e83e41c9b3b0bb' BINARY_FILENAME = 'avatar.png' BINARY_FORM_FILENAME = 'avatar-form.png' BINARY_STREAM_FILENAME = 'avatar-stream.png' BINARY_MD5_CHECKSUM = '2f907a59924ad96b7478074ed96b05f0' BINARY_OPTIONS = { 'meta_data': { 'owner-id': 'da17c32d-21c2-4bfe-b083-e2e78187d868', 'owner-email': 'user.one@startup.com' }, 'content_type': 'image/png', 'content_disposition': 'attachment; filename=avatar-attachment.png', } AMAZON_KEY = config('AMAZON_KEY', default=None) AMAZON_SECRET = config('AMAZON_SECRET', default=None) AMAZON_REGION = config('AMAZON_REGION', default='us-east-1') GOOGLE_CREDENTIALS = config('GOOGLE_CREDENTIALS', default=None) RACKSPACE_KEY = os.environ['RACKSPACE_KEY'] RACKSPACE_SECRET = os.environ['RACKSPACE_SECRET'] RACKSPACE_REGION = os.environ['RACKSPACE_REGION'] # RACKSPACE_KEY = config('RACKSPACE_KEY', default=None) # RACKSPACE_SECRET = config('RACKSPACE_SECRET', default=None) # RACKSPACE_REGION = config('RACKSPACE_REGION', default='IAD') LOCAL_KEY = config('LOCAL_KEY', default=mkdtemp(prefix='cloud-storage-test-')) if not os.path.exists(LOCAL_KEY): os.makedirs(LOCAL_KEY) LOCAL_SECRET = config('LOCAL_SECRET', default='local-storage-secret')
Python
0
@@ -1205,154 +1205,8 @@ e)%0A%0A -RACKSPACE_KEY = os.environ%5B'RACKSPACE_KEY'%5D%0ARACKSPACE_SECRET = os.environ%5B'RACKSPACE_SECRET'%5D%0ARACKSPACE_REGION = os.environ%5B'RACKSPACE_REGION'%5D%0A# RACK @@ -1247,34 +1247,32 @@ , default=None)%0A -# RACKSPACE_SECRET @@ -1315,18 +1315,16 @@ t=None)%0A -# RACKSPAC
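Decoded, the three os.environ['RACKSPACE_*'] lines are deleted and the commented-out prettyconf versions below them lose their '# ' prefixes, leaving:

RACKSPACE_KEY = config('RACKSPACE_KEY', default=None)
RACKSPACE_SECRET = config('RACKSPACE_SECRET', default=None)
RACKSPACE_REGION = config('RACKSPACE_REGION', default='IAD')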
6e3ebff613254c7e13d89cd3599e030947a5072f
fix coverage report
tests/unittest/test_calls.py
tests/unittest/test_calls.py
from unittest import TestCase, mock from unittest.mock import patch import RequestsLibrary lib = RequestsLibrary.RequestsLibrary() HTTP_LOCAL_SERVER = 'http://localhost:5000' sess_headers = {'content-type': False} post_headers = {'Content-Type': 'application/json'} class TestCalls(TestCase): @patch('RequestsLibrary.RequestsLibrary._common_request') def test_post_request_with_empty_data(self, common_request): lib.create_session('http_server', HTTP_LOCAL_SERVER, sess_headers) lib.post_request('http_server', '/anything', data="", headers=post_headers) common_request.assert_called_with('post', mock.ANY, '/anything', allow_redirects=True, data='', files=None, headers={'Content-Type': 'application/json'}, json=None, params=None, timeout=None)
Python
0.000001
@@ -1,20 +1,41 @@ +import os%0Aimport sys%0A from unittest import @@ -82,16 +82,213 @@ t patch%0A +# I hate it but I can't get the coverage report to work without it, must be before RequestsLibrary import%0Asys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../src/')))%0A import R @@ -303,16 +303,17 @@ ibrary%0A%0A +%0A lib = Re @@ -508,16 +508,95 @@ tCase):%0A + def test_import_defaults(self):%0A RequestsLibrary.RequestsLibrary()%0A%0A @pat @@ -979,16 +979,16 @@ ata='',%0A + @@ -1089,17 +1089,16 @@ on=None, - %0A
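Decoded, the fix pushes the package's src/ directory onto sys.path before RequestsLibrary is imported, so the coverage tool measures that copy, and adds a minimal import smoke test. Patched top of the file:

import os
import sys
from unittest import TestCase, mock
from unittest.mock import patch
# I hate it but I can't get the coverage report to work without it, must be before RequestsLibrary import
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../src/')))

import RequestsLibrary
...
class TestCalls(TestCase):
    def test_import_defaults(self):
        RequestsLibrary.RequestsLibrary()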
0179b6ce31856c18c9faaa58a55d8882e4a260ce
Configure Pool in if __name__ block
scripts/new_multiproc_manager.py
scripts/new_multiproc_manager.py
""" CPOL Level 1b main production line. @title: CPOL_PROD_1b @author: Valentin Louf <valentin.louf@monash.edu> @institution: Bureau of Meteorology @date: 1/03/2019 @version: 1 .. autosummary:: :toctree: generated/ timeout_handler chunks production_line_manager production_line_multiproc main """ # Python Standard Library import os import gc import glob import argparse import traceback import pandas as pd import dask.bag as db import cpol_processing def chunks(l, n): """ Yield successive n-sized chunks from l. From http://stackoverflow.com/a/312464 """ for i in range(0, len(l), n): yield l[i:i + n] def production_line_manager(infile): """ The production line manager calls the production line and manages it ;-). Buffer function that is used to catch any problem with the processing line without screwing the whole multiprocessing stuff. Parameters: =========== infile: str Name of the input radar file. outpath: str Path for saving output data. """ try: cpol_processing.process_and_save(infile, OUTPATH, sound_dir=SOUND_DIR) except Exception: traceback.print_exc() # logging.error(f"Failed to process {infile}", exc_info=True) gc.collect() return None def main(): date_list = pd.date_range(START_DATE, END_DATE) for day in date_list: input_dir = os.path.join(INPATH, str(day.year), day.strftime("%Y%m%d"), "*.*") flist = sorted(glob.glob(input_dir)) if len(flist) == 0: print('No file found for {}.'.format(day.strftime("%Y-%b-%d"))) continue print(f'{len(flist)} files found for ' + day.strftime("%Y-%b-%d")) for flist_chunk in chunks(flist, 16): bag = db.from_sequence(flist_chunk).map(production_line_manager) bag.compute() # with Pool(16) as pool: # pool.map(production_line_manager, flist) return None if __name__ == '__main__': """ Global variables definition. """ # Main global variables (Path directories). INPATH = "/g/data/hj10/cpol_level_1a/ppi/" OUTPATH = "/g/data/hj10/cpol_level_1b/" SOUND_DIR = "/g/data2/rr5/CPOL_radar/DARWIN_radiosonde" LOG_FILE_PATH = "/short/kl02/vhl548/" # Parse arguments parser_description = "Processing of radar data from level 1a to level 1b." parser = argparse.ArgumentParser(description=parser_description) parser.add_argument( '-s', '--start-date', dest='start_date', default=None, type=str, help='Starting date.', required=True) parser.add_argument( '-e', '--end-date', dest='end_date', default=None, type=str, help='Ending date.', required=True) args = parser.parse_args() START_DATE = args.start_date END_DATE = args.end_date # Creating the general log file. # logname = "cpol_level1b_from_{}_to_{}.log".format(START_DATE, END_DATE) # log_file_name = os.path.join(LOG_FILE_PATH, logname) # logging.basicConfig( # level=logging.WARNING, # format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', # filename=log_file_name, # filemode='w+') # logger = logging.getLogger(__name__) main()
Python
0.99994
@@ -412,49 +412,61 @@ ck%0A%0A -import pandas as pd%0Aimport dask.bag +from multiprocessing import Pool%0A%0Aimport pandas as +p d -b %0Aimp @@ -677,80 +677,36 @@ def -production_line_manager(infile):%0A %22%22%22%0A The production line manager +main(infile):%0A %22%22%22%0A It cal @@ -746,17 +746,9 @@ s it - ;-).%0A +. Buf @@ -772,16 +772,20 @@ is used +%0A to catc @@ -822,20 +822,16 @@ ing line -%0A without @@ -849,16 +849,20 @@ he whole +%0A multipr @@ -1241,704 +1241,8 @@ e)%0A%0A - gc.collect()%0A return None%0A%0A%0Adef main():%0A date_list = pd.date_range(START_DATE, END_DATE)%0A for day in date_list:%0A input_dir = os.path.join(INPATH, str(day.year), day.strftime(%22%25Y%25m%25d%22), %22*.*%22)%0A flist = sorted(glob.glob(input_dir))%0A if len(flist) == 0:%0A print('No file found for %7B%7D.'.format(day.strftime(%22%25Y-%25b-%25d%22)))%0A continue%0A print(f'%7Blen(flist)%7D files found for ' + day.strftime(%22%25Y-%25b-%25d%22))%0A%0A for flist_chunk in chunks(flist, 16):%0A bag = db.from_sequence(flist_chunk).map(production_line_manager)%0A bag.compute()%0A # with Pool(16) as pool:%0A # pool.map(production_line_manager, flist)%0A%0A @@ -2208,96 +2208,32 @@ -# Creating the general log file.%0A # logname = %22cpol_level1b_from_%7B%7D_to_%7B%7D.log%22.format +for day in pd.date_range (STA @@ -2254,28 +2254,27 @@ ATE) +: %0A -# log_file_name + input_dir = o @@ -2289,275 +2289,421 @@ oin( -LOG_FILE_PATH, logname)%0A # logging.basicConfig( +INPATH, str(day.year), day.strftime(%22%25Y%25m%25d%22), %22*.*%22)%0A flist = sorted(glob.glob(input_dir)) %0A -# +if le -vel=logging.WARNING, +n(flist) == 0: %0A -# + -format='%25(asctime)s - %25(name)s - %25(levelname)s - %25(message)s', +print('No file found for %7B%7D.'.format(day.strftime(%22%25Y-%25b-%25d%22)))%0A continue %0A -# - filename=log_file_name, +print(f'%7Blen(flist)%7D files found for ' + day.strftime(%22%25Y-%25b-%25d%22))%0A %0A -# - filemode='w+')%0A # logger = logging.getLogger(__name__)%0A%0A main( +for flist_chunk in chunks(flist, 32):%0A with Pool() as pool:%0A pool.map(main, flist )%0A
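Decoded, production_line_manager shrinks into main(infile), the dask.bag machinery gives way to multiprocessing.Pool, and the date loop moves into the if __name__ block. Note that the committed hunk maps over flist rather than flist_chunk; that is reproduced as-is below. Patched tail:

from multiprocessing import Pool

import pandas as pd
...
if __name__ == '__main__':
    ...
    for day in pd.date_range(START_DATE, END_DATE):
        input_dir = os.path.join(INPATH, str(day.year), day.strftime("%Y%m%d"), "*.*")
        flist = sorted(glob.glob(input_dir))
        if len(flist) == 0:
            print('No file found for {}.'.format(day.strftime("%Y-%b-%d")))
            continue
        print(f'{len(flist)} files found for ' + day.strftime("%Y-%b-%d"))

        for flist_chunk in chunks(flist, 32):
            with Pool() as pool:
                pool.map(main, flist)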
3dd0ac13a5c2a3e0dc949d60e807b438c36636a9
Fix for post_process.
core/tessagon.py
core/tessagon.py
from tessagon.core.grid_tile_generator import GridTileGenerator from tessagon.core.rotate_tile_generator import RotateTileGenerator class Tessagon: def __init__(self, **kwargs): if 'function' in kwargs: self.f = kwargs['function'] else: raise ValueError('Must specify a function') self.tile_class = self.init_tile_class() if 'tile_generator' in kwargs: self.tile_generator = kwargs['tile_generator'](self, **kwargs) elif 'rot_factor' in kwargs: self.tile_generator = RotateTileGenerator(self, **kwargs) else: self.tile_generator = GridTileGenerator(self, **kwargs) # Optional post processing function self.post_process = None if 'post_process' in kwargs: self.post_process = kwargs['post_process'] if 'adaptor_class' in kwargs: adaptor_class = kwargs['adaptor_class'] self.mesh_adaptor = adaptor_class(**kwargs) else: raise ValueError('Must provide a mesh adaptor class') self.tiles = None self.face_types = {} self.vert_types = {} def create_mesh(self): self._initialize_tiles() self.mesh_adaptor.create_empty_mesh() self._calculate_verts() self._calculate_faces() self.mesh_adaptor.finish_mesh() if self.post_process: self.post_process() return self.mesh_adaptor.get_mesh() def inspect(self): print("\n=== %s ===\n" % (self.__class__.__name__)) for i in range(len(self.tiles)): self.tiles[i].inspect(tile_number=i) ### Below are protected def _initialize_tiles(self): self.tiles = self.tile_generator.create_tiles() def _calculate_verts(self): for tile in self.tiles: tile.calculate_verts() def _calculate_faces(self): for tile in self.tiles: tile.calculate_faces()
Python
0
@@ -1262,16 +1262,125 @@ rocess:%0A + # Run user defined post-processing code%0A # Need to pass self here (this could be designed better)%0A se @@ -1395,16 +1395,20 @@ process( +self )%0A%0A r
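Decoded, create_mesh now hands the Tessagon instance to the user callback instead of calling it bare:

        if self.post_process:
            # Run user defined post-processing code
            # Need to pass self here (this could be designed better)
            self.post_process(self)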
d71353d8d1e0778f121c3ec07067d617ab3ce932
Add run() method to Backend and make start() a wrapper for it. Also set backend.running in backend.start and backend.stop. Whatever code runs in a loop in backend.run() needs to check self.running periodically to make sure it should still be running.
lib/rapidsms/backends/backend.py
lib/rapidsms/backends/backend.py
#!/usr/bin/env python # vim: ai ts=4 sts=4 et sw=4 class Backend(object): def __init__ (self, router): self.router = router def log(self, level, message): self.router.log(level, message) def start(self): raise NotImplementedError def stop(self): raise NotImplementedError def send(self): raise NotImplementedError def receive(self): raise NotImplementedError
Python
0
@@ -130,21 +130,50 @@ router%0A + self.running = False%0A %0A - def @@ -253,32 +253,167 @@ ef start(self):%0A + self.running = True%0A try:%0A self.run()%0A finally:%0A self.running = False%0A%0A def run (self):%0A raise No @@ -467,33 +467,28 @@ -raise NotImplementedError +self.running = False %0A
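Decoded, the patched class matches the long subject line exactly: start() becomes a wrapper that raises the running flag, delegates to run(), and clears the flag on exit, while stop() just clears it (loop bodies in run() are expected to poll self.running):

class Backend(object):
    def __init__ (self, router):
        self.router = router
        self.running = False

    def log(self, level, message):
        self.router.log(level, message)

    def start(self):
        self.running = True
        try:
            self.run()
        finally:
            self.running = False

    def run (self):
        raise NotImplementedError

    def stop(self):
        self.running = False

    def send(self):
        raise NotImplementedError

    def receive(self):
        raise NotImplementedError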
e7e4779d4b8ddbf2e42dea617e0f61e1e550f505
bump current enterprise version to 2022.2
unit_tests/test_base_version.py
unit_tests/test_base_version.py
# This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # See LICENSE for more details. # # Copyright (c) 2021 ScyllaDB import unittest from utils.get_supported_scylla_base_versions import UpgradeBaseVersion # pylint: disable=no-name-in-module, import-error def general_test(scylla_repo='', linux_distro=''): scylla_version = None version_detector = UpgradeBaseVersion(scylla_repo, linux_distro, scylla_version) version_detector.set_start_support_version() _, version_list = version_detector.get_version_list() return version_list class TestBaseVersion(unittest.TestCase): url_base = 'http://downloads.scylladb.com/' def test_master(self): scylla_repo = self.url_base + 'unstable/scylla/master/rpm/centos/2021-08-29T00:58:58Z/scylla.repo' linux_distro = 'centos' version_list = general_test(scylla_repo, linux_distro) self.assertEqual(version_list, ['5.1']) def test_4_5_with_centos8(self): scylla_repo = self.url_base + 'unstable/scylla/branch-4.5/rpm/centos/2021-08-29T00:58:58Z/scylla.repo' linux_distro = 'centos-8' version_list = general_test(scylla_repo, linux_distro) self.assertEqual(version_list, ['4.4', '4.5']) def test_4_1_with_centos8(self): scylla_repo = self.url_base + 'unstable/scylla/branch-4.1/rpm/centos/2021-08-29T00:58:58Z/scylla.repo' linux_distro = 'centos-8' version_list = general_test(scylla_repo, linux_distro) self.assertEqual(version_list, ['4.1']) def test_4_5(self): scylla_repo = self.url_base + 'unstable/scylla/4.5/rpm/centos/2021-08-29T00:58:58Z/scylla.repo' linux_distro = 'centos' version_list = general_test(scylla_repo, linux_distro) self.assertEqual(version_list, ['4.4', '4.5']) def test_enterprise(self): scylla_repo = self.url_base + 'unstable/scylla-enterprise/enterprise/rpm/centos/2021-08-29T00:58:58Z/scylla.repo' linux_distro = 'centos' version_list = general_test(scylla_repo, linux_distro) self.assertEqual(version_list, ['2022.1']) def test_2021_1(self): scylla_repo = self.url_base + 'unstable/scylla-enterprise/branch-2021.1/rpm/centos/2021-08-29T00:58:58Z/scylla.repo' linux_distro = 'centos' version_list = general_test(scylla_repo, linux_distro) self.assertEqual(version_list, ['4.3', '2020.1', '2021.1']) def test_2021_1_with_centos8(self): scylla_repo = self.url_base + 'unstable/scylla-enterprise/branch-2021.1/rpm/centos/2021-08-29T00:58:58Z/scylla.repo' linux_distro = 'centos-8' version_list = general_test(scylla_repo, linux_distro) self.assertEqual(version_list, ['4.3', '2021.1']) def test_2022_1_with_centos8(self): scylla_repo = self.url_base + \ 'unstable/scylla-enterprise/enterprise-2022.1/deb/unified/2022-06-03T00:22:55Z/scylladb-2022.1/scylla.list' linux_distro = 'centos-8' version_list = general_test(scylla_repo, linux_distro) self.assertEqual(['5.0', '2021.1', '2022.1'], version_list) if __name__ == "__main__": unittest.main()
Python
0.000001
@@ -2479,17 +2479,17 @@ %5B'2022. -1 +2 '%5D)%0A%0A
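The change is a one-character bump of a pinned expected list ('2022.1' to '2022.2'). When version strings like these have to be compared rather than pinned, a numeric key avoids the classic string-ordering pitfall; this helper is illustrative only and not part of the repository:

def version_key(version):
    # '2022.2' > '2022.1' holds as strings too, but '4.10' vs '4.9' needs numbers
    return tuple(int(part) for part in version.split('.'))

assert version_key('2022.2') > version_key('2022.1')
assert version_key('4.10') > version_key('4.9')
assert '4.10' < '4.9'  # the raw-string comparison pitfall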
4e5cc0a58dfaceac79f09f128ad5c4355975dcf0
Update ABC interface
tests/test_a3c.py
tests/test_a3c.py
import os
import unittest
import tempfile
import multiprocessing as mp

import chainer
from chainer import optimizers
from chainer import links as L
from chainer import functions as F

import policy
import v_function
import a3c
import async
from envs.simple_abc import ABC
import run_a3c


class A3CFF(chainer.ChainList, a3c.A3CModel):

    def __init__(self, n_actions):
        self.pi = policy.FCSoftmaxPolicy(
            1, n_actions, n_hidden_channels=10, n_hidden_layers=2)
        self.v = v_function.FCVFunction(
            1, n_hidden_channels=10, n_hidden_layers=2)
        super().__init__(self.pi, self.v)

    def pi_and_v(self, state, keep_same_state=False):
        return self.pi(state), self.v(state)


class A3CLSTM(chainer.ChainList, a3c.A3CModel):

    def __init__(self, n_actions):
        self.lstm = L.LSTM(1, 10)
        self.pi = policy.FCSoftmaxPolicy(
            10, n_actions, n_hidden_channels=10, n_hidden_layers=2)
        self.v = v_function.FCVFunction(
            10, n_hidden_channels=10, n_hidden_layers=2)
        super().__init__(self.lstm, self.pi, self.v)

    def pi_and_v(self, state, keep_same_state=False):
        if keep_same_state:
            prev_h, prev_c = self.lstm.h, self.lstm.c
            h = F.relu(self.lstm(state))
            self.lstm.h, self.lstm.c = prev_h, prev_c
        else:
            h = F.relu(self.lstm(state))
        return self.pi(h), self.v(h)

    def reset_state(self):
        print('reset')
        self.lstm.reset_state()

    def unchain_backward(self):
        self.lstm.h.unchain_backward()
        self.lstm.c.unchain_backward()


class TestA3C(unittest.TestCase):

    def setUp(self):
        pass

    def test_abc_ff(self):
        self._test_abc(1, False)
        self._test_abc(2, False)
        self._test_abc(5, False)

    def test_abc_lstm(self):
        self._test_abc(1, True)
        self._test_abc(2, True)
        self._test_abc(5, True)

    def _test_abc(self, t_max, use_lstm):
        nproc = 8
        n_actions = 3

        def make_env(process_idx, test):
            return ABC()

        def model_opt():
            if use_lstm:
                model = A3CLSTM(n_actions)
            else:
                model = A3CFF(n_actions)
            opt = optimizers.RMSprop(1e-3, eps=1e-2)
            opt.setup(model)
            return model, opt

        phi = lambda x: x

        model, opt = run_a3c.run_a3c(
            nproc, make_env, model_opt, phi, t_max, steps=40000)

        # Test
        env = ABC()
        total_r = env.reward

        def pi_func(state):
            return model.pi_and_v(state)[0]

        model.reset_state()

        while not env.is_terminal:
            pout = pi_func(chainer.Variable(
                env.state.reshape((1,) + env.state.shape)))
            # Use the most probale actions for stability of test results
            action = pout.most_probable_actions.data[0]
            print('state:', env.state, 'action:', action)
            print('probs', pout.probs.data)
            env.receive_action(action)
            total_r += env.reward
        self.assertAlmostEqual(total_r, 1)

    def _test_save_load(self, use_lstm):
        n_actions = 3
        if use_lstm:
            model = A3CFF(n_actions)
        else:
            model = A3CLSTM(n_actions)
        opt = optimizers.RMSprop(1e-3, eps=1e-2)
        opt.setup(model)
        agent = a3c.A3C(model, opt, 1, 0.9, beta=1e-2)
        outdir = tempfile.mkdtemp()
        filename = os.path.join(outdir, 'test_a3c.h5')
        agent.save_model(filename)
        self.assertTrue(os.path.exists(filename))
        self.assertTrue(os.path.exists(filename + '.opt'))
        agent.load_model(filename)

    def test_save_load_ff(self):
        self._test_save_load(False)

    def test_save_load_lstm(self):
        self._test_save_load(True)
Python
0
@@ -38,37 +38,8 @@ file -%0Aimport multiprocessing as mp %0A%0Aim @@ -196,21 +196,8 @@ a3c%0A -import async%0A from @@ -373,25 +373,25 @@ -1 +5 , n_actions, @@ -485,17 +485,17 @@ -1 +5 , n_hidd @@ -784,17 +784,17 @@ L.LSTM( -1 +5 , 10)%0A @@ -2495,26 +2495,85 @@ tal_r = -env.reward +0%0A obs = env.reset()%0A done = False%0A reward = 0.0 %0A%0A @@ -2690,23 +2690,12 @@ not -env.is_terminal +done :%0A @@ -3044,26 +3044,39 @@ -env.receive_action +obs, reward, done, _ = env.step (act @@ -3103,20 +3103,16 @@ al_r += -env. reward%0A
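The updated test drives the environment through the gym-style reset/step interface instead of `is_terminal`/`receive_action`. A self-contained sketch of that loop, with a toy environment standing in for the ABC env:

import random

class ToyEnv(object):
    # hypothetical stand-in exposing reset() -> obs and
    # step(action) -> (obs, reward, done, info)
    def __init__(self):
        self.t = 0

    def reset(self):
        self.t = 0
        return 0

    def step(self, action):
        self.t += 1
        done = self.t >= 3
        return self.t, (1.0 if done else 0.0), done, {}

env = ToyEnv()
obs = env.reset()
done = False
total_r = 0.0
while not done:
    action = random.choice([0, 1, 2])
    obs, reward, done, info = env.step(action)
    total_r += reward
assert total_r == 1.0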
1edd7017c0e8bbf343df65d4d32e3467e1211a19
Enhance re-raised TemplateDoesNotExist exception
respite/views/views.py
respite/views/views.py
from django.shortcuts import render
from django.http import HttpResponse
from django.template import TemplateDoesNotExist
from django.conf import settings

from respite.settings import DEFAULT_FORMAT
from respite.utils import parse_http_accept_header
from respite.serializers import serializers
from respite import formats


class Views(object):
    """
    Base class for views.

    :attribute template_path: A string describing a path to prefix templates with, or ``''`` by default.
    :attribute supported_formats: A list of strings describing formats supported by these views,
        or ``['html']`` by default.
    """

    template_path = ''
    supported_formats = ['html']

    def options(self, request, map, *args, **kwargs):
        """List communication options."""
        options = {}
        for method, function in map.items():
            options[method] = function.__doc__

        return self._render(
            request = request,
            template = None,
            context = {
                'options': options
            },
            status = 200,
            headers = {
                'Allow': ', '.join(options.keys())
            }
        )

    def _get_format(self, request):
        """
        Determine and return a 'formats.Format' instance describing the most desired response format
        that is supported by these views.

        :param request: A django.http.HttpRequest instance.

        Formats specified by extension (e.g. '/articles/index.html') take precedence over formats
        given in the HTTP Accept header, even if it's a format that isn't known by Respite.

        If the request doesn't specify a format by extension (e.g. '/articles/' or '/articles/new')
        and none of the formats in the HTTP Accept header are supported, Respite will fall back
        on the format given in DEFAULT_FORMAT.
        """
        # Derive a list of 'formats.Format' instances from the list of formats these views support.
        supported_formats = [formats.find(format) for format in self.supported_formats]

        # Determine format by extension...
        if '.' in request.path:
            extension = request.path.split('.')[-1]

            try:
                format = formats.find_by_extension(extension)
            except formats.UnknownFormat:
                return None

            return format if format in supported_formats else None

        # Determine format by HTTP Accept header...
        if 'HTTP_ACCEPT' in request.META:

            # Parse the HTTP Accept header, returning a list of accepted content types sorted by quality
            for accepted_content_type in parse_http_accept_header(request.META['HTTP_ACCEPT']):

                # Default to the format given in DEFAULT_FORMAT for the '*/*' content type.
                if accepted_content_type == '*/*' and DEFAULT_FORMAT:
                    default_format = formats.find(DEFAULT_FORMAT)
                    if default_format in supported_formats:
                        return default_format

                try:
                    format = formats.find_by_content_type(accepted_content_type)
                except formats.UnknownFormat:
                    continue

                if format in supported_formats:
                    return format
                else:
                    continue

        # If none of the formats given in the HTTP 'accept' header are supported by these views,
        # or no HTTP 'accept' header was given at all, default to the format given in
        # DEFAULT_FORMAT if that's supported.
        if DEFAULT_FORMAT:
            default_format = formats.find(DEFAULT_FORMAT)
            if default_format in supported_formats:
                return default_format

    def _render(self, request, template=None, status=200, context={}, headers={}):
        """
        Render a HTTP response.

        :param request: A django.http.HttpRequest instance.
        :param template: A string describing the path to a template.
        :param status: An integer describing the HTTP status code to respond with.
        :param context: A dictionary describing variables to populate the template with.
        :param headers: A dictionary describing HTTP headers.

        Please note that ``template`` must not specify an extension, as one will be appended
        according to the request format. For example, a value of ``blog/posts/index`` would
        populate ``blog/posts/index.html`` for requests that query the resource's HTML
        representation.

        If no template that matches the request format exists at the given location, or if
        ``template`` is ``None``, Respite will attempt to serialize the template context
        automatically. You can change the way your models are serialized by defining
        ``serialize`` methods that return a dictionary::

            class NuclearMissile(models.Model):
                serial_number = models.IntegerField()
                is_armed = models.BooleanField()
                launch_code = models.IntegerField()

                def serialize(self):
                    return {
                        'serial_number': self.serial_number,
                        'is_armed': self.is_armed
                    }

        If the request format is not supported by the view (as determined by the
        ``supported_formats`` property or a specific view's ``override_supported_formats``
        decorator), this function will yield HTTP 406 Not Acceptable.
        """
        format = self._get_format(request)

        # Render 406 Not Acceptable if the requested format isn't supported.
        if not format:
            return HttpResponse(status=406)

        # Render template...
        try:
            response = render(
                request = request,
                template_name = '%s%s.%s' % (self.template_path, template, format.extension),
                dictionary = context,
                status = status,
                content_type = '%s; charset=%s' % (format.content_type, settings.DEFAULT_CHARSET)
            )
        # ... or if no template exists, look for an appropriate serializer.
        except TemplateDoesNotExist:
            if format in serializers:
                response = HttpResponse(
                    content = serializers[format](context).serialize(),
                    content_type = '%s; charset=%s' % (format.content_type, settings.DEFAULT_CHARSET),
                    status = status
                )
            elif template:
                raise
            else:
                response = HttpResponse()

        for header, value in headers.items():
            response[header] = value

        return response
Python
0
@@ -6618,16 +6618,345 @@ raise + TemplateDoesNotExist(%0A %22%25(template)s.%25(extension)s does not exist and no serializer for %25(format)s could be found.%22 %25 %7B%0A 'template': template,%0A 'extension': format.extension,%0A 'format': format.acronym%0A %7D%0A ) %0A
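The diff re-raises with a message naming the template, extension, and format instead of propagating the bare exception. A dependency-free sketch of the same pattern; the local TemplateDoesNotExist stands in for django.template.TemplateDoesNotExist so this runs without Django installed:

class TemplateDoesNotExist(Exception):
    # stand-in for the Django exception of the same name
    pass

def render_or_explain(template, extension, acronym):
    try:
        raise TemplateDoesNotExist(template)  # simulate a missing template
    except TemplateDoesNotExist:
        raise TemplateDoesNotExist(
            "%(template)s.%(extension)s does not exist and no serializer "
            "for %(format)s could be found." % {
                'template': template,
                'extension': extension,
                'format': acronym,
            })

try:
    render_or_explain('blog/posts/index', 'html', 'html')
except TemplateDoesNotExist as e:
    assert 'does not exist' in str(e)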
30c701ca797ee289fa70866c1b5dfa0dce623d76
fix typos : porivided -> provided
text_classification/train.py
text_classification/train.py
import os
import sys
import gzip

import paddle.v2 as paddle

import reader
from utils import logger, parse_train_cmd, build_dict, load_dict
from network_conf import fc_net, convolution_net


def train(topology,
          train_data_dir=None,
          test_data_dir=None,
          word_dict_path=None,
          label_dict_path=None,
          model_save_dir="models",
          batch_size=32,
          num_passes=10):
    """
    train dnn model

    :params train_data_path: path of training data, if this parameter
        is not specified, paddle.dataset.imdb will be used to run this example
    :type train_data_path: str
    :params test_data_path: path of testing data, if this parameter
        is not specified, paddle.dataset.imdb will be used to run this example
    :type test_data_path: str
    :params word_dict_path: path of training data, if this parameter
        is not specified, paddle.dataset.imdb will be used to run this example
    :type word_dict_path: str
    :params num_pass: train pass number
    :type num_pass: int
    """
    if not os.path.exists(model_save_dir):
        os.mkdir(model_save_dir)

    use_default_data = (train_data_dir is None)

    if use_default_data:
        logger.info(("No training data are porivided, "
                     "use paddle.dataset.imdb to train the model."))
        logger.info("please wait to build the word dictionary ...")

        word_dict = paddle.dataset.imdb.word_dict()
        train_reader = paddle.batch(
            paddle.reader.shuffle(
                lambda: paddle.dataset.imdb.train(word_dict)(), buf_size=1000),
            batch_size=100)
        test_reader = paddle.batch(
            lambda: paddle.dataset.imdb.test(word_dict)(), batch_size=100)

        class_num = 2
    else:
        if word_dict_path is None or not os.path.exists(word_dict_path):
            logger.info(("word dictionary is not given, the dictionary "
                         "is automatically built from the training data."))

            # build the word dictionary to map the original string-typed
            # words into integer-typed index
            build_dict(
                data_dir=train_data_dir,
                save_path=word_dict_path,
                use_col=1,
                cutoff_fre=5,
                insert_extra_words=["<UNK>"])

        if not os.path.exists(label_dict_path):
            logger.info(("label dictionary is not given, the dictionary "
                         "is automatically built from the training data."))
            # build the label dictionary to map the original string-typed
            # label into integer-typed index
            build_dict(
                data_dir=train_data_dir, save_path=label_dict_path, use_col=0)

        word_dict = load_dict(word_dict_path)
        lbl_dict = load_dict(label_dict_path)
        class_num = len(lbl_dict)
        logger.info("class number is : %d." % (len(lbl_dict)))

        train_reader = paddle.batch(
            paddle.reader.shuffle(
                reader.train_reader(train_data_dir, word_dict, lbl_dict),
                buf_size=1000),
            batch_size=batch_size)

        if test_data_dir is not None:
            # here, because training and testing data share a same format,
            # we still use the reader.train_reader to read the testing data.
            test_reader = paddle.batch(
                paddle.reader.shuffle(
                    reader.train_reader(test_data_dir, word_dict, lbl_dict),
                    buf_size=1000),
                batch_size=batch_size)
        else:
            test_reader = None

    dict_dim = len(word_dict)
    logger.info("length of word dictionary is : %d." % (dict_dim))

    paddle.init(use_gpu=False, trainer_count=1)

    # network config
    cost, prob, label = topology(dict_dim, class_num)

    # create parameters
    parameters = paddle.parameters.create(cost)

    # create optimizer
    adam_optimizer = paddle.optimizer.Adam(
        learning_rate=1e-3,
        regularization=paddle.optimizer.L2Regularization(rate=1e-3),
        model_average=paddle.optimizer.ModelAverage(average_window=0.5))

    # create trainer
    trainer = paddle.trainer.SGD(
        cost=cost,
        extra_layers=paddle.evaluator.auc(input=prob, label=label),
        parameters=parameters,
        update_equation=adam_optimizer)

    # begin training network
    feeding = {"word": 0, "label": 1}

    def _event_handler(event):
        """
        Define end batch and end pass event handler
        """
        if isinstance(event, paddle.event.EndIteration):
            if event.batch_id % 100 == 0:
                logger.info("Pass %d, Batch %d, Cost %f, %s\n" % (
                    event.pass_id, event.batch_id, event.cost, event.metrics))

        if isinstance(event, paddle.event.EndPass):
            if test_reader is not None:
                result = trainer.test(reader=test_reader, feeding=feeding)
                logger.info("Test at Pass %d, %s \n" % (event.pass_id,
                                                        result.metrics))
            with gzip.open(
                    os.path.join(model_save_dir,
                                 "dnn_params_pass_%05d.tar.gz" % event.pass_id),
                    "w") as f:
                trainer.save_parameter_to_tar(f)

    trainer.train(
        reader=train_reader,
        event_handler=_event_handler,
        feeding=feeding,
        num_passes=num_passes)

    logger.info("Training has finished.")


def main(args):
    if args.nn_type == "dnn":
        topology = fc_net
    elif args.nn_type == "cnn":
        topology = convolution_net

    train(
        topology=topology,
        train_data_dir=args.train_data_dir,
        test_data_dir=args.test_data_dir,
        word_dict_path=args.word_dict,
        label_dict_path=args.label_dict,
        batch_size=args.batch_size,
        num_passes=args.num_passes,
        model_save_dir=args.model_save_dir)


if __name__ == "__main__":
    args = parse_train_cmd()
    if args.train_data_dir is not None:
        assert args.word_dict and args.label_dict, (
            "the parameter train_data_dir, word_dict_path, and label_dict_path "
            "should be set at the same time.")
    main(args)
Python
0.999998
@@ -1251,16 +1251,16 @@ re p +r o -ri vided +d , %22%0A
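After the fix the log call reads as below; a two-line excerpt-style sketch, with the logger set up locally so it runs standalone:

import logging

logger = logging.getLogger(__name__)
logger.info(("No training data are provided, "
             "use paddle.dataset.imdb to train the model."))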
3eacd6c42126f08c9f941f47bab59430d1180c59
use default bulk actions
reversedict/indexer.py
reversedict/indexer.py
import contextlib
import collections

import nlp
import elastic

DEFAULT_SEEDS = ['philosophy','science','art','health','emotion']


def index_terms(seeds=None, max_count=5000):
    '''
    Index words by their definitions and synonyms.
    Starts with a list of seed word, e.g. top 100 used terms.
    Index the words, queue words occured in definitions for indexing later.
    When dequeueing, pop the next most used word.
    '''
    with connect_search() as (index_term, indexed):
        with init_queue(indexed, seeds) as (push_queue, pop_queue):
            term = pop_queue()
            while term:
                print 'indexing', term
                linked_terms = index_term(term)
                push_queue(linked_terms)
                if max_count and max_count <= len(indexed):
                    break
                term = pop_queue()
    print 'indexed', len(indexed), 'terms'
    return True


@contextlib.contextmanager
def connect_search():
    elastic.client.indices.create(index=elastic.SEARCH_INDEX, ignore=400)
    actions = {}

    def index_term(term):
        '''
        Look up definitions and synonyms of term,
        then returns their tokens for indexing further
        '''
        definitions, synonyms = nlp.get_definitions_synonyms(term)
        if not definitions:
            return []
        doc = {'term':term, 'definitions':definitions, 'synonyms':synonyms}
        actions[term] = {'_op_type':'index',
                         '_id':hash(term),
                         '_index':elastic.SEARCH_INDEX,
                         '_type':'term',
                         'doc':doc }
        actions_count = len(actions)
        if actions_count > 1000 and actions_count % 1000 == 0:
            commit_index_actions()
        return nlp.tokenize(*definitions + synonyms)

    def commit_index_actions():
        actionables = filter(None, actions.values())
        results = elastic.helpers.parallel_bulk(elastic.client, actionables)
        for is_success, response in results:
            if not is_success:
                print response
        print 'committed', len(actionables), 'terms'; print
        for term in actions:
            actions[term] = None
        return True

    try:
        yield index_term, actions.viewkeys()
    finally:
        if actions:
            commit_index_actions()
        elastic.client.indices.refresh(index=elastic.SEARCH_INDEX)


@contextlib.contextmanager
def init_queue(indexed, seeds=None):
    seeds = seeds or DEFAULT_SEEDS
    queue = collections.Counter()
    is_not_indexed = lambda t: t not in indexed

    def yield_terms():
        while seeds:
            yield seeds.pop(0)
        while queue:
            term,_ = queue.most_common(1)[0]
            del queue[term]
            yield term

    def pop():
        for term in yield_terms():
            if is_not_indexed(term):
                return term

    def push(tokens):
        queue.update(filter(is_not_indexed, tokens))

    yield push, pop
Python
0
@@ -1994,17 +1994,8 @@ ers. -parallel_ bulk
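The diff swaps `helpers.parallel_bulk` for the plain `helpers.bulk` runner. A hedged sketch of the default call with elasticsearch-py; it assumes a reachable local node and an index named 'terms', and the action dict mirrors the shape used in the module above (all illustrative):

from elasticsearch import Elasticsearch, helpers

client = Elasticsearch()
actions = [
    {'_op_type': 'index', '_index': 'terms', '_id': 1,
     'doc': {'term': 'philosophy'}},
]
# bulk() drives the whole action list and returns (successes, errors),
# unlike parallel_bulk(), which yields per-action results lazily.
success_count, errors = helpers.bulk(client, actions)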
f81bc19a9627225113ff1a3fa2aa0e6446402acb
test that shorten is text/plain (answer no :S)
tests/test_api.py
tests/test_api.py
import unittest
from flask import url_for

import summerurlapp
import appconfig
import types


class SummerApiTestCase(unittest.TestCase):
    """Test that the API works as intended"""

    testurl_http1 = "http://random.org"
    testurl_bad = "random.org"

    def setUp(self):
        summerurlapp.app.config.from_object(appconfig.TestConfig)
        self.app = summerurlapp.app.test_client()
        summerurlapp.init_db()

    def tearDown(self):
        summerurlapp.init_db()  # use init_db() to clear the test db after testcase

    def post_shorten(self, link):
        return self.app.post("/api/shorten", data = dict(link = link))


    def test_shorten(self):
        resp = self.post_shorten(self.testurl_http1)
        self.assertEqual(resp.data[0], "1")

    def test_getbyid_ok(self):
        respPost = self.post_shorten(self.testurl_http1)
        gotid = respPost.data[0]
        respId = self.app.get('/api/' + gotid)
        self.assertEqual(respId.status_code, 301)
        self.assertEqual(respId.location, self.testurl_http1)

    def test_getbyid_appendscheme(self):
        respPost = self.post_shorten(self.testurl_bad)
        gotid = respPost.data[0]
        respId = self.app.get('/api/' + gotid)
        self.assertEqual(respId.status_code, 301)
        self.assertEqual(respId.location, "http://" + self.testurl_bad)

    def test_getbyid_noid(self):
        resp = self.app.get('/api/9000')
        self.assertEqual(resp.status_code, 404)
        resp = self.app.get('/api/nonexistentid')
        self.assertEqual(resp.status_code, 404)
Python
0.999964
@@ -641,17 +641,16 @@ link))%0A%0A -%0A def @@ -756,19 +756,44 @@ esp. -data%5B0%5D, %221 +headers%5B'Content-Type'%5D, %22text/plain %22)%0A%0A
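The ':S' in the subject follows from Flask's default: a bare string response is served as text/html; charset=utf-8 unless the view sets the header itself. A runnable sketch with a hypothetical minimal app (not summerurlapp):

from flask import Flask

app = Flask(__name__)

@app.route('/plain')
def plain():
    # explicit header, so the new assertion would pass for this route
    return '1', 200, {'Content-Type': 'text/plain'}

@app.route('/default')
def default():
    return '1'  # Flask defaults this to text/html; charset=utf-8

client = app.test_client()
assert client.get('/plain').headers['Content-Type'] == 'text/plain'
assert client.get('/default').headers['Content-Type'].startswith('text/html')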
06ef2e114949e1e733227fb6ddad56f1ff4cbaed
Bump the version to 0.0.2 and add a __version_info__ tuple
llic.py
llic.py
""" Low-Level iCalendar library. """ from __future__ import unicode_literals import pytz __version__ = "0.0.1" DEFAULT_ICAL_LINE_LENGTH = 75 CRLF = b"\r\n" CRLF_WRAP = b"\r\n " NAME_VALUE_SEPARATOR = b":" class BaseCalendarWriter(object): def __init__(self, output, line_length=DEFAULT_ICAL_LINE_LENGTH): self.output = output self.line_length = line_length self.line_position = 0 def write(self, octets): assert self.line_position <= self.line_length octets_len = len(octets) if octets_len + self.line_position <= self.line_length: self.output.write(octets) self.line_position += octets_len else: self.__wrap_write(octets) def __wrap_write(self, octets): out = self.output while octets: write_count = self.line_length - self.line_position out.write(octets[:write_count]) self.endline(True) octets = octets[write_count:] def endline(self, is_wrapping): out = self.output if is_wrapping: out.write(CRLF_WRAP) self.line_position = 1 else: out.write(CRLF) self.line_position = 0 def start_contentline(self, name): self.write(name) self.write(NAME_VALUE_SEPARATOR) def value(self, value): self.write(value) def end_contentline(self): self.endline(False) TEXT_DELETE_CHARS = b"".join(chr(c) for c in range(0x0, 0x20)) class TypesCalendarWriterHelperMixin(object): # The following range of chars cannot occur in iCalendar TEXT, so we # just delete them. def as_text(self, text): """ Write text escaped as an iCalendar TEXT value. """ if isinstance(text, unicode): text = text.encode("utf-8") # TEXT must be escaped as follows: # \\ encodes \, \N or \n encodes newline # \; encodes ;, \, encodes , text = text.replace(b"\\", b"\\\\") # escape \ text = text.replace(b"\n", b"\\n") text = text.replace(b";", b"\\;") text = text.replace(b",", b"\\,") text = text.translate(None, TEXT_DELETE_CHARS) return text def as_datetime(self, dt): if dt.tzinfo is None: raise ValueError("dt must have a tzinfo, got: {!r}".format(dt)) if dt.tzinfo != pytz.utc: dt = dt.astimezone(pytz.utc) return dt.strftime("%Y%m%dT%H%M%SZ") class CalendarWriterHelperMixin(object): def contentline(self, name, value): self.start_contentline(name) self.value(value) self.end_contentline() def begin(self, section): self.contentline("BEGIN", section) def end(self, section): self.contentline("END", section) class CalendarWriter(TypesCalendarWriterHelperMixin, CalendarWriterHelperMixin, BaseCalendarWriter): pass
Python
0.999962
@@ -108,10 +108,75 @@ 0.0. -1%22 +2%22%0A__version_info__ = tuple(int(n) for n in __version__.split(%22.%22)) %0A%0ADE
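The added tuple makes version comparisons ordering-safe; as a usage note:

__version__ = "0.0.2"
__version_info__ = tuple(int(n) for n in __version__.split("."))

assert __version_info__ == (0, 0, 2)
assert __version_info__ > (0, 0, 1)  # tuple comparison, no string pitfalls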
64a37bb9b758630c0f2c649d82e10e849c095d48
Test side effect (#1889)
tests/test_cli.py
tests/test_cli.py
# -*- coding: utf-8 -*-
"""
    tests.test_cli
    ~~~~~~~~~~~~~~

    :copyright: (c) 2016 by the Flask Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
#
# This file was part of Flask-CLI and was modified under the terms its license,
# the Revised BSD License.
# Copyright (C) 2015 CERN.
#
from __future__ import absolute_import, print_function


import click
import pytest
from click.testing import CliRunner

from flask import Flask, current_app
from flask.cli import AppGroup, FlaskGroup, NoAppException, ScriptInfo, \
    find_best_app, locate_app, with_appcontext, prepare_exec_for_file


def test_cli_name(test_apps):
    """Make sure the CLI object's name is the app's name and not the app
    itself"""
    from cliapp.app import testapp
    assert testapp.cli.name == testapp.name


def test_find_best_app(test_apps):
    """Test of find_best_app."""
    class mod:
        app = Flask('appname')
    assert find_best_app(mod) == mod.app

    class mod:
        application = Flask('appname')
    assert find_best_app(mod) == mod.application

    class mod:
        myapp = Flask('appname')
    assert find_best_app(mod) == mod.myapp

    class mod:
        myapp = Flask('appname')
        myapp2 = Flask('appname2')
    pytest.raises(NoAppException, find_best_app, mod)


def test_prepare_exec_for_file(test_apps):
    assert prepare_exec_for_file('test.py') == 'test'
    assert prepare_exec_for_file('/usr/share/__init__.py') == 'share'
    with pytest.raises(NoAppException):
        prepare_exec_for_file('test.txt')


def test_locate_app(test_apps):
    """Test of locate_app."""
    assert locate_app("cliapp.app").name == "testapp"
    assert locate_app("cliapp.app:testapp").name == "testapp"
    assert locate_app("cliapp.multiapp:app1").name == "app1"
    pytest.raises(RuntimeError, locate_app, "cliapp.app:notanapp")


def test_scriptinfo(test_apps):
    """Test of ScriptInfo."""
    obj = ScriptInfo(app_import_path="cliapp.app:testapp")
    assert obj.load_app().name == "testapp"
    assert obj.load_app().name == "testapp"

    def create_app(info):
        return Flask("createapp")

    obj = ScriptInfo(create_app=create_app)
    app = obj.load_app()
    assert app.name == "createapp"
    assert obj.load_app() == app


def test_with_appcontext():
    """Test of with_appcontext."""
    @click.command()
    @with_appcontext
    def testcmd():
        click.echo(current_app.name)

    obj = ScriptInfo(create_app=lambda info: Flask("testapp"))

    runner = CliRunner()
    result = runner.invoke(testcmd, obj=obj)
    assert result.exit_code == 0
    assert result.output == 'testapp\n'


def test_appgroup():
    """Test of with_appcontext."""
    @click.group(cls=AppGroup)
    def cli():
        pass

    @cli.command(with_appcontext=True)
    def test():
        click.echo(current_app.name)

    @cli.group()
    def subgroup():
        pass

    @subgroup.command(with_appcontext=True)
    def test2():
        click.echo(current_app.name)

    obj = ScriptInfo(create_app=lambda info: Flask("testappgroup"))

    runner = CliRunner()
    result = runner.invoke(cli, ['test'], obj=obj)
    assert result.exit_code == 0
    assert result.output == 'testappgroup\n'

    result = runner.invoke(cli, ['subgroup', 'test2'], obj=obj)
    assert result.exit_code == 0
    assert result.output == 'testappgroup\n'


def test_flaskgroup():
    """Test FlaskGroup."""
    def create_app(info):
        return Flask("flaskgroup")

    @click.group(cls=FlaskGroup, create_app=create_app)
    def cli(**params):
        pass

    @cli.command()
    def test():
        click.echo(current_app.name)

    runner = CliRunner()
    result = runner.invoke(cli, ['test'])
    assert result.exit_code == 0
    assert result.output == 'flaskgroup\n'
Python
0
@@ -379,16 +379,37 @@ function +%0Aimport os%0Aimport sys %0A%0Aimport @@ -1374,24 +1374,334 @@ test_apps):%0A + %22%22%22Expect the correct path to be set and the correct module name to be returned.%0A%0A :func:%60prepare_exec_for_file%60 has a side effect, where%0A the parent directory of given file is added to %60sys.path%60.%0A %22%22%22%0A realpath = os.path.realpath('/tmp/share/test.py')%0A dirname = os.path.dirname(realpath)%0A assert p @@ -1718,24 +1718,35 @@ c_for_file(' +/tmp/share/ test.py') == @@ -1753,16 +1753,163 @@ 'test'%0A + assert dirname in sys.path%0A%0A realpath = os.path.realpath('/tmp/share/__init__.py')%0A dirname = os.path.dirname(os.path.dirname(realpath))%0A asse @@ -1939,11 +1939,11 @@ e('/ -usr +tmp /sha @@ -1970,16 +1970,48 @@ 'share'%0A + assert dirname in sys.path%0A%0A with @@ -2073,16 +2073,27 @@ r_file(' +/tmp/share/ test.txt
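The new test pins a side effect: the helper also pushes the file's parent directory onto sys.path. A condensed, dependency-free sketch of that behaviour; this is not Flask's exact implementation (the real helper also handles __init__.py packages and rejects non-.py files):

import os
import sys

def prepare_exec_for_file(filename):
    # derive the module name and make its parent directory importable
    module = os.path.splitext(os.path.basename(filename))[0]
    dirname = os.path.dirname(os.path.realpath(filename))
    sys.path.insert(0, dirname)
    return module

assert prepare_exec_for_file('/tmp/share/test.py') == 'test'
assert os.path.dirname(os.path.realpath('/tmp/share/test.py')) in sys.path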
4f13f9cee13413e64095ddd5d283d92d1329f423
Add assertRaises(ArgumentError)
tests/test_cli.py
tests/test_cli.py
# -*- coding: utf-8 -*-

import unittest
from types import ModuleType

from pythainlp import __main__, cli


class TestMainPackage(unittest.TestCase):
    def test_cli_main(self):
        # call with no argument, should exit with 2
        with self.assertRaises(SystemExit) as ex:
            __main__.main()
        self.assertEqual(ex.exception.code, 2)
        self.assertIsNone(__main__.main(["thainlp", "data", "path"]))
        self.assertIsNone(__main__.main(["thainlp", "NOT_EXIST", "command"]))

    def test_cli_data(self):
        self.assertIsInstance(getattr(cli, "data"), ModuleType)
        self.assertIsNotNone(cli.data.App(["thainlp", "data", "catalog"]))
        self.assertIsNotNone(cli.data.App(["thainlp", "data", "path"]))
        self.assertIsNotNone(cli.data.App(["thainlp", "data", "info", "test"]))
        self.assertIsNotNone(
            cli.data.App(["thainlp", "data", "get", "NOT_EXIST"])
        )
        self.assertIsNotNone(
            cli.data.App(["thainlp", "data", "rm", "NOT_EXIST"])
        )

    def test_cli_soundex(self):
        self.assertIsInstance(getattr(cli, "soundex"), ModuleType)
        self.assertIsNotNone(cli.soundex.App(["thainlp", "soundex", "ทดสอบ"]))

    def test_cli_tag(self):
        self.assertIsInstance(getattr(cli, "tag"), ModuleType)
        self.assertIsNotNone(
            cli.tag.App(
                [
                    "thainlp",
                    "tag",
                    "pos",
                    "-s",
                    " ",
                    "มอเตอร์ไซค์ ความว่างเปล่า",
                ]
            )
        )
        self.assertIsNotNone(
            cli.tag.App(
                [
                    "thainlp",
                    "tag",
                    "role",
                    "-s",
                    " ",
                    "มอเตอร์ไซค์ ความว่างเปล่า",
                ]
            )
        )

    def test_cli_tokenize(self):
        self.assertIsInstance(getattr(cli, "data"), ModuleType)
        self.assertIsNotNone(
            cli.tokenize.App(
                [
                    "thainlp",
                    "tokenize",
                    "subword",
                    "-s",
                    "|",
                    "ถ้าฉันยิงกระต่ายได้ ฉันก็ยิงฟาสซิสต์ได้",
                ]
            )
        )
        self.assertIsNotNone(
            cli.tokenize.App(
                [
                    "thainlp",
                    "tokenize",
                    "syllable",
                    "-s",
                    "|",
                    "-w",
                    "ถ้าฉันยิงกระต่ายได้ ฉันก็ยิงฟาสซิสต์ได้",
                ]
            )
        )
        self.assertIsNotNone(
            cli.tokenize.App(
                [
                    "thainlp",
                    "tokenize",
                    "word",
                    "-nw",
                    "-a",
                    "newmm",
                    "-s",
                    "|",
                    "ถ้าฉันยิงกระต่ายได้ ฉันก็ยิงฟาสซิสต์ได้",
                ]
            )
        )
        self.assertIsNotNone(
            cli.tokenize.App(
                [
                    "thainlp",
                    "tokenize",
                    "sent",
                    "-s",
                    "|",
                    (
                        "ถ้าฉันยิงกระต่ายได้ ฉันก็ยิงฟาสซิสต์ได้"
                        "กระสุนสำหรับสมองของคุณวันนี้"
                        "แต่คุณก็จะลืมมันไปทั้งหมดอีกครั้ง"
                    ),
                ]
            )
        )
Python
0.000002
@@ -34,16 +34,51 @@ nittest%0A +from argparse import ArgumentError%0A from typ @@ -385,16 +385,17 @@ ode, 2)%0A +%0A @@ -448,32 +448,84 @@ ata%22, %22path%22%5D))%0A +%0A with self.assertRaises(ArgumentError):%0A self.ass @@ -526,32 +526,49 @@ lf.assertIsNone( +%0A __main__.main(%5B%22 @@ -601,16 +601,29 @@ mmand%22%5D) +%0A )%0A%0A d
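argparse's ArgumentError is an ordinary exception class, so assertRaises can pin it like any other; per the diff, the pythainlp CLI surfaces that type for an unknown command. A self-contained example using a case where argparse itself raises it (registering the same option twice):

import unittest
from argparse import ArgumentError, ArgumentParser

class TestArgumentError(unittest.TestCase):
    def test_duplicate_option(self):
        parser = ArgumentParser()
        parser.add_argument('--x')
        with self.assertRaises(ArgumentError):
            parser.add_argument('--x')  # conflicting option string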
d2a66d7251266e732246a67c4232e51464ef0952
Support NZ geocoding
load.py
load.py
import json
import os
from datetime import date

import boto3
import requests
from googlemaps import Client
from twilio.rest import TwilioRestClient

s3 = boto3.resource('s3')


def run_poll(event, context):
    """ Update S3 with latest trackpoints from Spot. """
    s3_history = get_history_from_s3()
    s3_latest_timeStamp = s3_history[-1]['timeStamp']
    spot_new = get_history_from_spot(
        s3_latest_timeStamp
    )
    if spot_new:
        combined_history = s3_history + spot_new
        write_to_s3(combined_history)
        notify_by_text(s3_history[-1], combined_history[-1])
    else:
        pass


def latest_status():
    """ Return a textual summary of the latest trackpoint. """
    latest = get_history_from_s3()[-1]
    return '{}\'s last tracked location was {} at {}.'.format(latest['name'],
                                                              latest['location'],
                                                              latest['timeStamp'])


def is_newer_than(s3_latest_timeStamp, spot_message):
    return s3_latest_timeStamp < str(spot_message['dateTime'])


def get_history_from_spot(s3_latest_timeStamp):
    gmaps = _get_gmaps()

    def _build_track_point(spot_message):
        def _reverse_geocode(lat, lon):
            try:
                for addr in gmaps.reverse_geocode((lat, lon))[0]['address_components']:
                    if 'postal_town' in addr['types']:
                        return addr['long_name']
            except:
                return '[Reverse Geocode Error]'

        track_point = dict()
        track_point['lat'] = spot_message['latitude']
        track_point['lon'] = spot_message['longitude']
        track_point['timeStamp'] = str(spot_message['dateTime'])
        track_point['messageType'] = str(spot_message['messageType'])
        track_point['location'] = _reverse_geocode(spot_message['latitude'],
                                                   spot_message['longitude'])
        track_point['name'] = str(spot_message['messengerName'])
        return track_point

    r = requests.get(
        'https://api.findmespot.com/spot-main-web/consumer/rest-api/2.0/public/feed/0kh77fpkuvgEaVFm0LklfeKXetFB6Iqgr/message.json')
    track = [_build_track_point(msg)
             for msg in r.json()['response']['feedMessageResponse']['messages']['message']
             if is_newer_than(s3_latest_timeStamp, msg)]
    track.reverse()
    return track


def get_history_from_s3():
    return json.loads(s3.Object('bikerid.es', 'track/history.json').get()['Body'].read())


def write_to_s3(history):
    def _write_file_to_s3( name, content ):
        s3.Object('bikerid.es', 'track/{}.json'.format(name)).put(Body=json.dumps(content),
                                                                  ContentType='application/json')
    _write_file_to_s3('latest',history[-1])
    _write_file_to_s3('history',history)


def notify_by_text(previous_trackpoint, trackpoint):
    if previous_trackpoint['timeStamp'].startswith(date.today().strftime('%Y-%m-%d')):
        return
    twilio = _get_twilio()
    message = 'New location: {} at {}.'.format(trackpoint['location'],
                                               trackpoint['timeStamp'])
    for number in os.environ.get('alerts_numbers', '').split(','):
        twilio.messages.create(body=message, to=number, from_='+441631402022')


def _get_twilio():
    return TwilioRestClient(os.environ['twilio_account'], os.environ['twilio_token'])


def _get_gmaps():
    return Client(key=os.environ.get('gmaps_key', None))


if __name__ == '__main__':
    run_poll(None, None)
Python
0
@@ -1156,19 +1156,42 @@ -for addr in +#Hack%0A googleResp = gma @@ -1249,45 +1249,225 @@ ts'%5D -:%0A if 'postal_town +%0A for addr in googleResp:%0A if 'postal_town' in addr%5B'types'%5D:%0A return addr%5B'long_name'%5D%0A for addr in googleResp:%0A if 'locality ' in
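The diff adds a second pass over the geocoder response: prefer the UK-style 'postal_town' component, then fall back to 'locality', which is what Google returns for New Zealand addresses. A self-contained distillation of that lookup, with fake response data instead of an API call:

def pick_town(address_components):
    for wanted in ('postal_town', 'locality'):
        for addr in address_components:
            if wanted in addr['types']:
                return addr['long_name']
    return '[Reverse Geocode Error]'

nz = [{'types': ['locality', 'political'], 'long_name': 'Wellington'}]
uk = [{'types': ['postal_town'], 'long_name': 'Oban'},
      {'types': ['locality'], 'long_name': 'Ignored'}]
assert pick_town(nz) == 'Wellington'
assert pick_town(uk) == 'Oban'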
a396332ad66d31ac5caa1fcbf92ed564615fac85
Use assert_raises in test_cli
tests/test_cli.py
tests/test_cli.py
import subprocess
import os
from nose.tools import eq_


# Get the filename of 'halibote.txt', which contains some mojibake about
# Harry Potter in Chinese
THIS_DIR = os.path.dirname(__file__)
TEST_FILENAME = os.path.join(THIS_DIR, 'halibote.txt')
CORRECT_OUTPUT = '【更新】《哈利波特》石堧卜才新婚娶初戀今痠逝\n'
FAILED_OUTPUT = '''ftfy error:
This input couldn't be decoded as 'windows-1252'. We got the following error:

'charmap' codec can't decode byte 0x90 in position 5: character maps to <undefined>

ftfy works best when its input is in a known encoding. You can use `ftfy -g`
to guess, if you're desperate. Otherwise, give the encoding name with the
`-e` option, such as `ftfy -e latin-1`.
'''


def get_command_output(args, stdin=None):
    return subprocess.check_output(args, stdin=stdin, stderr=subprocess.STDOUT,
                                   timeout=5).decode('utf-8')


def test_basic():
    output = get_command_output(['ftfy', TEST_FILENAME])
    eq_(output, CORRECT_OUTPUT)


def test_guess_bytes():
    output = get_command_output(['ftfy', '-g', TEST_FILENAME])
    eq_(output, CORRECT_OUTPUT)


def test_alternate_encoding():
    # The file isn't really in Windows-1252. But that's a problem ftfy
    # can fix, if it's allowed to be sloppy when reading the file.
    output = get_command_output(['ftfy', '-e', 'sloppy-windows-1252',
                                 TEST_FILENAME])
    eq_(output, CORRECT_OUTPUT)


def test_wrong_encoding():
    # It's more of a problem when the file doesn't actually decode.
    try:
        get_command_output(['ftfy', '-e', 'windows-1252', TEST_FILENAME])
        assert False, "Should have raised a CalledProcessError"
    except subprocess.CalledProcessError as e:
        eq_(e.output.decode('utf-8'), FAILED_OUTPUT)


def test_stdin():
    with open(TEST_FILENAME, 'rb') as infile:
        output = get_command_output(['ftfy'], stdin=infile)
        eq_(output, CORRECT_OUTPUT)
Python
0.000001
@@ -47,16 +47,31 @@ port eq_ +, assert_raises %0A%0A%0A# Get @@ -1469,11 +1469,68 @@ -try +with assert_raises(subprocess.CalledProcessError) as context :%0A @@ -1609,119 +1609,30 @@ - assert False, %22Should have raised a CalledProcessError%22%0A except subprocess.CalledProcessError as e:%0A +e = context.exception%0A
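nose's assert_raises doubles as a context manager and exposes the caught exception, which is what replaces the try/except/assert-False dance above. A small standalone demonstration; it assumes nose is installed and a `python` executable is on PATH:

import subprocess
from nose.tools import assert_raises, eq_

with assert_raises(subprocess.CalledProcessError) as context:
    subprocess.check_output(['python', '-c', 'raise SystemExit(3)'])
eq_(context.exception.returncode, 3)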
6379da01e44098d0a44bb99177e9b068b5488d87
Normalize one more path in test_check.py
test_check.py
test_check.py
#!/usr/bin/env python

import os
import shutil
import sys

import checkguard
import unittest

from collections import namedtuple
from StringIO import StringIO

Output = namedtuple('Output', ['stdout', 'stderr'])


def lines_counts(textblock):
	line_hash = {}
	for line in textblock.split('\n'):
		try:
			line_hash[line] += 1
		except KeyError:
			line_hash[line] = 1
	return line_hash


def np(path):
	"""
	np is a simple version of os.path.normpath, but for strings that are not
	guaranteed to just be a single path. Does not collapse empty path sections.
	"""
	return path.replace('/', os.sep)


def setupFileInDangerZone(fileName, permissions):
	output = np('danger_zone/default/')
	shutil.rmtree(output, True)
	os.makedirs(output)
	outputFileName = os.path.join(output, os.path.basename(fileName))
	shutil.copy(fileName, output)
	os.chmod(outputFileName, permissions)
	return outputFileName


def runCheckGuardWithArgstring(argstring):
	checkguard.main(argstring.split())
	return Output(sys.stdout.getvalue(), sys.stderr.getvalue())


def runCheckGuard(directory, exclusions=None):
	invokation = ['-r', directory]
	if exclusions is not None:
		invokation.append('--exclude=' + exclusions)
	checkguard.main(invokation)
	return Output(sys.stdout.getvalue(), sys.stderr.getvalue())


def runCheckGuardOnFile(inputFile):
	dirName, baseName = os.path.split(inputFile)
	checkFile = os.path.join(dirName, baseName)
	invokation = [checkFile]
	checkguard.main(invokation)
	return Output(sys.stdout.getvalue(), sys.stderr.getvalue())


class TestCheckGuard(unittest.TestCase):
	def setUp(self):
		self.saved_out = sys.stdout
		sys.stdout = StringIO()
		self.saved_err = sys.stderr
		sys.stderr = StringIO()

	def tearDown(self):
		sys.stdout = self.saved_out
		sys.stderr = self.saved_err

	def test_standard_guard_file(self):
		self.assertEqual(runCheckGuardOnFile(np('guard_tree/BasicHeader.h')).stdout, '')

	def test_mismatched_guard_file(self):
		self.assertEqual(runCheckGuardOnFile(np('guard_tree/mismatched_name.h')).stdout,
		np('guard_tree/mismatched_name.h\n'))

	def test_non_header_file(self):
		self.assertEqual(runCheckGuardOnFile(np('guard_tree/non_header_file.txt')).stdout,
		np('guard_tree/non_header_file.txt\n'))

	def test_multiple_header_files(self):
		self.assertEqual(lines_counts(
			runCheckGuardWithArgstring(np('guard_tree/BasicHeader.h '
			'guard_tree/non_header_file.txt guard_tree/mismatched_name.h')).stdout),
			lines_counts(np(
				'guard_tree/non_header_file.txt\n'
				'guard_tree/mismatched_name.h\n')))

	def test_once_tree(self):
		self.assertEqual(lines_counts(runCheckGuard(np('once_tree')).stdout),
			lines_counts(np(
				'once_tree/mismatched_name.h\n'
				'once_tree/BasicHeader.hpp\n'
				'once_tree/BasicHeader.h\n')))

	def test_guard_tree(self):
		self.assertEqual(runCheckGuard(np('guard_tree')).stdout,
		np('guard_tree/mismatched_name.h\n'))

	def test_exclusion_match(self):
		self.assertEqual(runCheckGuard(np('guard_tree'), np('*/mismatched_name.h')).stdout, '')

	def test_exclusion_no_match(self):
		self.assertEqual(runCheckGuard(np('guard_tree'), np('*/some_other_name.h')).stdout,
		'guard_tree/mismatched_name.h\n')

	def test_standard_guard_file_with_recursive_search(self):
		self.assertEqual(runCheckGuardWithArgstring(np('-r guard_tree/BasicHeader.h')).stdout, '')

	def test_mismatched_guard_file_with_recursive_search(self):
		self.assertEqual(runCheckGuardWithArgstring(np('-r guard_tree/mismatched_name.h')).stdout,
		np('guard_tree/mismatched_name.h\n'))

	def test_error_on_passing_directory_as_file(self):
		self.assertEqual(runCheckGuardWithArgstring(np('guard_tree')).stderr,
		np("'guard_tree' is a directory. Search it for headers with -r\n"))

	def test_read_only_mismatched_guard_file(self):
		fileName = setupFileInDangerZone(np('guard_tree/mismatched_name.h'), 0444)
		self.assertEqual(runCheckGuardOnFile(fileName).stdout, fileName + '\n')


if __name__ == '__main__':
	unittest.main()
Python
0.000726
@@ -3124,16 +3124,19 @@ dout,%0A%09%09 +np( 'guard_t @@ -3160,16 +3160,17 @@ me.h%5Cn') +) %0A%0A%09def t
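np() is the test suite's own separator normalizer; wrapping one more expected value in it keeps the assertion portable across platforms. For reference:

import os

def np(path):
    return path.replace('/', os.sep)

assert np('guard_tree/mismatched_name.h') == os.path.join(
    'guard_tree', 'mismatched_name.h')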
83dc9b5f80268a5bd23a737d66a219067353f2b7
change parameter handling
test_files.py
test_files.py
#!/usr/bin/env python

# Generate test directories to mess with from a list of filenames.

import argparse
import os
import sys

parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input')
parser.add_argument('-t', '--target')
args = parser.parse_args()

base_dir = args.target if args.target else 'testing'
input_file = args.input if args.input is not None else 'filenames.tsv'

with open(input_file) as fh:
    [os.makedirs(os.path.join(base_dir, row.split()[0]))
     for row in fh.readlines()]
Python
0.000003
@@ -343,38 +343,10 @@ put -if args.input is not None else +or 'fi
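The diff collapses the conditional default to the `or` idiom. The two forms differ only when the value is falsy but not None (here, an empty string), which is usually the desired fallback for a CLI argument:

def pick_input(value):
    # replaces: value if value is not None else 'filenames.tsv'
    return value or 'filenames.tsv'

assert pick_input(None) == 'filenames.tsv'
assert pick_input('') == 'filenames.tsv'       # the behavioural difference
assert pick_input('names.txt') == 'names.txt'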
13f797c35fd3d78d71dfb1b36d953a017c995034
Convert public and private key blob into string format as it is PEM
drift/core/resources/jwtsession.py
drift/core/resources/jwtsession.py
# -*- coding: utf-8 -*-
"""
JWT Session Management

By including this resource module in a Drift app, it will be able to accept
JWT from a list of trusted issuers and issue and sign new JWT's.

Custom attributes for top level registration:

    key_size:        <int> Size in bytes of private key.
    trusted_issuers: <list of deployable names> Default value is ['drift-base']
    expiry_days:     <int> Expiration in days, default is 365.
"""
import logging
import datetime

from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.backends import default_backend

DEFAULT_KEY_SIZE = 1024
DEFAULT_EXPIRY_DAYS = 365

log = logging.getLogger(__name__)


def register_deployable(ts, deployablename, attributes):
    """
    Deployable registration callback.
    'deployablename' is from table 'deployable-names'.
    """
    pass


def register_deployable_on_tier(ts, deployable, attributes):
    """
    Deployable registration callback for tier.
    'deployable' is from table 'deployables'.
    """
    pk = {'tier_name': deployable['tier_name'], 'deployable_name': deployable['deployable_name']}
    row = ts.get_table('public-keys').get(pk)
    if row is None:
        row = ts.get_table('public-keys').add(pk)

    # Generate RSA key pairs
    private_key = rsa.generate_private_key(
        public_exponent=65537,
        key_size=attributes.get('key_size', DEFAULT_KEY_SIZE),
        backend=default_backend()
    )
    public_key = private_key.public_key()

    private_pem = private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption()
    )

    public_pem = public_key.public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo
    )

    now = datetime.datetime.utcnow()
    expiry_days = attributes.get('expiry_days', DEFAULT_EXPIRY_DAYS)

    keypair = {
        'issued': now.isoformat() + "Z",
        'expires': (now + datetime.timedelta(days=expiry_days)).isoformat() + "Z",
        'public_key': public_pem,
        'private_key': private_pem,
    }

    # LEGACY SUPPORT! Make sure there is only one keypair registered. If one exists already
    # in the config then leave it as is. If not, add one.
    if 'keys' in row and len(row['keys']) > 0:
        # Just leave this as is
        log.warning("Legacy support: Key pair already registered, leaving it as is.")
        current_keypair = row['keys'][0]
    else:
        log.warning("Legacy support: Adding new key pair for this deployable.")
        current_keypair = keypair
        row.setdefault('keys', []).append(keypair)

    # LEGACY SUPPORT! Register drift-base as trusted issuer. Always.
    if deployable['deployable_name'] == 'drift-base':
        issuers = deployable.setdefault('jwt_trusted_issuers', [])
        for issuer in issuers:
            if issuer.get('iss') == 'drift-base':
                log.warning("Legacy support: drift-base already configured as trusted issuer.")
                break
        else:
            log.warning("Legacy support: Adding drift-base as trusted issuer.")
            issuers.append({
                'iss': 'drift-base',
                'iat': current_keypair['issued'],
                'exp': current_keypair['expires'],
                'pub_rsa': current_keypair['public_key'],
            })


def register_resource_on_tier(ts, tier, attributes):
    """
    Tier registration callback.
    'tier' is from table 'tiers'.
    'attributes' is a dict containing optional attributes for default values.
    """
    # LEGACY SUPPORT! Register the service user
    tier.setdefault('service_user', {"password": "SERVICE", "username": "user+pass:$SERVICE$"})


def register_deployable_on_tenant(ts, deployable_name, tier_name, tenant_name, resource_attributes):
    pass
Python
0.999999
@@ -2216,45 +2216,129 @@ _pem -,%0A 'private_key': private_pem, +.decode(), # PEM is actually a text format%0A 'private_key': private_pem.decode(), # PEM is actually a text format %0A
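PEM is a text encoding, so the diff stores `str` rather than the `bytes` that cryptography's serializers return. A runnable sketch; a 2048-bit key is used here purely to keep modern library defaults happy, and recent cryptography releases no longer need an explicit backend argument:

from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
private_pem = private_key.private_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PrivateFormat.TraditionalOpenSSL,
    encryption_algorithm=serialization.NoEncryption(),
).decode()  # private_bytes() returns bytes; PEM is text, so keep a str

assert private_pem.startswith('-----BEGIN RSA PRIVATE KEY-----')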
afea442758ebc60da443b45b9362040017c9f82b
rename variable to be more clear
corehq/apps/api/tests/test_auth.py
corehq/apps/api/tests/test_auth.py
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponse
from django.test import TestCase, RequestFactory

from corehq.apps.api.resources.auth import LoginAuthentication, LoginAndDomainAuthentication, \
    RequirePermissionAuthentication
from corehq.apps.domain.models import Domain
from corehq.apps.users.models import WebUser, HQApiKey, Permissions, UserRole


class AuthenticationTestBase(TestCase):

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.factory = RequestFactory()
        cls.domain = 'api-test'
        cls.project = Domain.get_or_create_with_name(cls.domain, is_active=True)
        cls.username = 'alice@example.com'
        cls.password = '***'
        cls.user = WebUser.create(cls.domain, cls.username, cls.password, None, None)
        cls.api_key, _ = HQApiKey.objects.get_or_create(user=WebUser.get_django_user(cls.user))

    @classmethod
    def tearDownClass(cls):
        cls.project.delete()
        super().tearDownClass()

    def _get_request_with_api_key(self, domain=None):
        return self._get_request(
            domain,
            HTTP_AUTHORIZATION=self._contruct_api_auth_header(self.username, self.api_key))

    def _contruct_api_auth_header(self, username, api_key):
        return f'ApiKey {username}:{api_key.key}'

    def _get_request(self, domain=None, **extras):
        path = self._get_domain_path() if domain else ''
        request = self.factory.get(path, **extras)
        request.user = AnonymousUser()  # this is required for HQ's permission classes to resolve
        request.domain = domain  # as is this for any domain-specific request
        return request

    def _get_domain_path(self):
        return f'/a/{self.domain}/'

    def assertAuthenticationSuccess(self, auth_instance, request):
        # we can't use assertTrue, because auth failures can return "truthy" HttpResponse objects
        self.assertEqual(True, auth_instance.is_authenticated(request))

    def assertAuthenticationFail(self, auth_instance, request):
        result = auth_instance.is_authenticated(request)
        # currently auth classes return a 401/403 response in some scenarios
        # this should likely be changed to always return False
        # more discussion here: https://github.com/dimagi/commcare-hq/pull/28201#discussion_r461082885
        if isinstance(result, HttpResponse):
            self.assertIn(result.status_code, (401, 403))
        else:
            self.assertFalse(result)


class LoginAuthenticationTest(AuthenticationTestBase):

    def test_login_no_auth(self):
        self.assertAuthenticationFail(LoginAuthentication(), self._get_request())

    def test_login_with_auth(self):
        self.assertAuthenticationSuccess(LoginAuthentication(), self._get_request_with_api_key())


class LoginAndDomainAuthenticationTest(AuthenticationTestBase):

    def test_login_no_auth_no_domain(self):
        self.assertAuthenticationFail(LoginAndDomainAuthentication(), self._get_request())

    def test_login_no_auth_with_domain(self):
        self.assertAuthenticationFail(LoginAndDomainAuthentication(),
                                      self._get_request(domain=self.domain))

    def test_login_with_domain(self):
        self.assertAuthenticationSuccess(LoginAndDomainAuthentication(),
                                         self._get_request_with_api_key(domain=self.domain))

    def test_login_with_wrong_domain(self):
        project = Domain.get_or_create_with_name('api-test-fail', is_active=True)
        self.addCleanup(project.delete)
        self.assertAuthenticationFail(LoginAndDomainAuthentication(),
                                      self._get_request_with_api_key(domain=project.name))


class RequirePermissionAuthenticationTest(AuthenticationTestBase):
    require_edit_data = RequirePermissionAuthentication(Permissions.edit_data)

    def test_login_no_auth_no_domain(self):
        self.assertAuthenticationFail(self.require_edit_data, self._get_request())

    def test_login_no_auth_with_domain(self):
        self.assertAuthenticationFail(self.require_edit_data,
                                      self._get_request(domain=self.domain))

    def test_login_with_wrong_domain(self):
        project = Domain.get_or_create_with_name('api-test-fail', is_active=True)
        self.addCleanup(project.delete)
        self.assertAuthenticationFail(self.require_edit_data,
                                      self._get_request_with_api_key(domain=project.name))

    def test_login_with_domain_no_permissions(self):
        self.assertAuthenticationFail(self.require_edit_data,
                                      self._get_request_with_api_key(domain=self.domain))

    def test_login_with_domain_admin(self):
        user_with_permission = WebUser.create(self.domain, 'domain_admin', '***', None, None,
                                              is_admin=True)
        api_key_with_permissions, _ = HQApiKey.objects.get_or_create(
            user=WebUser.get_django_user(user_with_permission)
        )
        self.addCleanup(lambda: user_with_permission.delete(None))
        self.assertAuthenticationSuccess(self.require_edit_data, self._get_request(
            domain=self.domain,
            HTTP_AUTHORIZATION=self._contruct_api_auth_header(
                user_with_permission.username,
                api_key_with_permissions
            )
        ))

    def test_login_with_explicit_permission(self):
        role = UserRole.get_or_create_with_permissions(
            self.domain, Permissions(edit_data=True), 'edit-data')
        self.addCleanup(role.delete)
        user_with_permission = WebUser.create(self.domain, 'permission', '***', None, None,
                                              role_id=role.get_id)
        api_key_with_permissions, _ = HQApiKey.objects.get_or_create(
            user=WebUser.get_django_user(user_with_permission)
        )
        self.addCleanup(lambda: user_with_permission.delete(None))
        self.assertAuthenticationSuccess(self.require_edit_data, self._get_request(
            domain=self.domain,
            HTTP_AUTHORIZATION=self._contruct_api_auth_header(
                user_with_permission.username,
                api_key_with_permissions
            )
        ))

    def test_login_with_wrong_permission(self):
        role = UserRole.get_or_create_with_permissions(
            self.domain, Permissions(edit_data=False), 'edit-data')
        self.addCleanup(role.delete)
        user_with_permission = WebUser.create(self.domain, 'permission', '***', None, None,
                                              role_id=role.get_id)
        api_key_with_permissions, _ = HQApiKey.objects.get_or_create(
            user=WebUser.get_django_user(user_with_permission)
        )
        self.addCleanup(lambda: user_with_permission.delete(None))
        self.assertAuthenticationFail(self.require_edit_data, self._get_request(
            domain=self.domain,
            HTTP_AUTHORIZATION=self._contruct_api_auth_header(
                user_with_permission.username,
                api_key_with_permissions
            )
        ))
Python
0.000006
@@ -6761,24 +6761,27 @@ ta=False), ' +no- edit-data')%0A @@ -6826,32 +6826,35 @@ user_with +out _permission = We @@ -6945,32 +6945,35 @@ api_key_with +out _permissions, _ @@ -7048,32 +7048,35 @@ o_user(user_with +out _permission)%0A @@ -7115,32 +7115,35 @@ ambda: user_with +out _permission.dele @@ -7473,32 +7473,35 @@ user_with +out _permission.user @@ -7556,32 +7556,35 @@ api_key_with +out _permissions%0A
37d81e68bf5799272b8afa646916da46f2cc8ac5
Introduce "compound filters", make `indices` one
corehq/apps/hqcase/api/get_list.py
corehq/apps/hqcase/api/get_list.py
import datetime
from base64 import b64decode, b64encode
from itertools import chain

from django.utils.http import urlencode
from django.http import QueryDict

from dateutil.parser import parse

from dimagi.utils.parsing import FALSE_STRINGS

from corehq.apps.case_search.filter_dsl import (
    CaseFilterError,
    build_filter_from_xpath,
)
from corehq.apps.es import case_search
from corehq.apps.es import cases as case_es

from .core import UserError, serialize_es_case

DEFAULT_PAGE_SIZE = 20
MAX_PAGE_SIZE = 5000
CUSTOM_PROPERTY_PREFIX = 'property.'
INDEX_PREFIX = 'indices.'


def _to_boolean(val):
    return not (val == '' or val.lower() in FALSE_STRINGS)


def _to_int(val, param_name):
    try:
        return int(val)
    except ValueError:
        raise UserError(f"'{val}' is not a valid value for '{param_name}'")


def _make_date_filter(date_filter, param):
    def filter_fn(val):
        try:
            # If it's only a date, don't turn it into a datetime
            val = datetime.datetime.strptime(val, '%Y-%m-%d').date()
        except ValueError:
            try:
                val = parse(val)
            except ValueError:
                raise UserError(f"Cannot parse datetime '{val}'")
        return date_filter(**{param: val})
    return filter_fn


def _to_date_filters(field, date_filter):
    return [
        (f'{field}.gt', _make_date_filter(date_filter, 'gt')),
        (f'{field}.gte', _make_date_filter(date_filter, 'gte')),
        (f'{field}.lte', _make_date_filter(date_filter, 'lte')),
        (f'{field}.lt', _make_date_filter(date_filter, 'lt')),
    ]


FILTERS = {
    'external_id': case_search.external_id,
    'case_type': case_es.case_type,
    'owner_id': case_es.owner,
    'case_name': case_es.case_name,
    'closed': lambda val: case_es.is_closed(_to_boolean(val)),
}
FILTERS.update(chain(*[
    _to_date_filters('last_modified', case_es.modified_range),
    _to_date_filters('server_last_modified', case_es.server_modified_range),
    _to_date_filters('date_opened', case_es.opened_range),
    _to_date_filters('date_closed', case_es.closed_range),
    _to_date_filters('indexed_on', case_search.indexed_on),
]))


def get_list(domain, params):
    if 'cursor' in params:
        params_string = b64decode(params['cursor']).decode('utf-8')
        params = QueryDict(params_string).dict()
    es_result = _run_query(domain, params)
    hits = es_result.hits

    ret = {
        "matching_records": es_result.total,
        "cases": [serialize_es_case(case) for case in hits],
    }
    cases_in_result = len(hits)
    if cases_in_result and es_result.total > cases_in_result:
        cursor = urlencode({**params, **{'indexed_on.gte': hits[-1]["@indexed_on"]}})
        ret['next'] = {'cursor': b64encode(cursor.encode('utf-8'))}
    return ret


def _run_query(domain, params):
    params = params.copy()
    page_size = _to_int(params.pop('limit', DEFAULT_PAGE_SIZE), 'limit')
    if page_size > MAX_PAGE_SIZE:
        raise UserError(f"You cannot request more than {MAX_PAGE_SIZE} cases per request.")

    query = (case_search.CaseSearchES()
             .domain(domain)
             .size(page_size)
             .sort("@indexed_on"))

    for key, val in params.items():
        if key.startswith(CUSTOM_PROPERTY_PREFIX):
            query = query.filter(_get_custom_property_filter(key, val))
        elif key.startswith(INDEX_PREFIX):
            query = query.filter(_get_index_filter(key, val))
        elif key == 'xpath':
            query = query.filter(_get_xpath_filter(domain, val))
        elif key in FILTERS:
            query = query.filter(FILTERS[key](val))
        else:
            raise UserError(f"'{key}' is not a valid parameter.")

    return query.run()


def _get_custom_property_filter(key, val):
    prop = key[len(CUSTOM_PROPERTY_PREFIX):]
    if val == "":
        return case_search.case_property_missing(prop)
    return case_search.exact_case_property_text_query(prop, val)


def _get_index_filter(key, case_id):
    identifier = key[len(INDEX_PREFIX):]
    return case_search.reverse_index_case_query(case_id, identifier)


def _get_xpath_filter(domain, xpath):
    try:
        return build_filter_from_xpath(domain, xpath)
    except CaseFilterError as e:
        raise UserError(f'Bad XPath: {e}')
Python
0.000023
@@ -554,34 +554,8 @@ y.'%0A -INDEX_PREFIX = 'indices.'%0A %0A%0Ade @@ -1574,16 +1574,135 @@ %5D%0A%0A%0A +def _index_filter(identifier, case_id):%0A return case_search.reverse_index_case_query(case_id, identifier)%0A%0A%0A%0ASIMPLE_ FILTERS @@ -1917,16 +1917,23 @@ al)),%0A%7D%0A +SIMPLE_ FILTERS. @@ -2270,16 +2270,69 @@ n),%0A%5D))%0A +COMPOUND_FILTERS = %7B%0A 'indices': _index_filter,%0A%7D%0A %0A%0Adef ge @@ -3533,33 +3533,19 @@ key -.startswith(INDEX_PREFIX) + == 'xpath' :%0A @@ -3576,37 +3576,37 @@ filter(_get_ -index +xpath _filter( key, val))%0A @@ -3585,35 +3585,38 @@ et_xpath_filter( -key +domain , val))%0A @@ -3620,34 +3620,41 @@ elif key -== 'xpath' +in SIMPLE_FILTERS :%0A @@ -3680,115 +3680,229 @@ ter( -_get_xpath_filter(domain, val))%0A elif key in FILTERS:%0A query = query.filter(FILTERS%5Bkey%5D( +SIMPLE_FILTERS%5Bkey%5D(val))%0A elif '.' in key and key.split(%22.%22)%5B0%5D in COMPOUND_FILTERS:%0A prefix, qualifier = key.split(%22.%22, maxsplit=1)%0A query = query.filter(COMPOUND_FILTERS%5Bprefix%5D(qualifier, val) @@ -4241,157 +4241,8 @@ )%0A%0A%0A -def _get_index_filter(key, case_id):%0A identifier = key%5Blen(INDEX_PREFIX):%5D%0A return case_search.reverse_index_case_query(case_id, identifier)%0A%0A%0A def
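The new dispatch splits a dotted key once into a registered prefix and a qualifier. A minimal self-contained sketch of that routing; the filter bodies are dummies, not the Elasticsearch queries from the module:

SIMPLE_FILTERS = {'case_type': lambda val: ('case_type', val)}
COMPOUND_FILTERS = {'indices': lambda qualifier, val: ('index', qualifier, val)}

def dispatch(key, val):
    if key in SIMPLE_FILTERS:
        return SIMPLE_FILTERS[key](val)
    if '.' in key and key.split('.')[0] in COMPOUND_FILTERS:
        prefix, qualifier = key.split('.', maxsplit=1)
        return COMPOUND_FILTERS[prefix](qualifier, val)
    raise ValueError("'%s' is not a valid parameter." % key)

assert dispatch('indices.parent', 'abc123') == ('index', 'parent', 'abc123')
assert dispatch('case_type', 'person') == ('case_type', 'person')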
48b41ed2b4bcacd6fc6facaaf1ef5ea3390b49a8
remove unnecessary checks for stock_result
corehq/form_processor/reprocess.py
corehq/form_processor/reprocess.py
from collections import defaultdict

from casexml.apps.case.exceptions import IllegalCaseId, InvalidCaseIndex, CaseValueError, PhoneDateValueError
from casexml.apps.case.exceptions import UsesReferrals
from casexml.apps.case.signals import case_post_save
from corehq.apps.commtrack.exceptions import MissingProductId
from corehq.blobs.mixin import bulk_atomic_blobs
from corehq.form_processor.backends.sql.dbaccessors import FormAccessorSQL, CaseAccessorSQL, LedgerAccessorSQL
from corehq.form_processor.change_publishers import publish_form_saved, publish_case_saved, publish_ledger_v2_saved
from corehq.form_processor.exceptions import XFormNotFound
from corehq.form_processor.interfaces.processor import FormProcessorInterface
from corehq.form_processor.models import XFormInstanceSQL, CaseTransaction, LedgerTransaction
from corehq.form_processor.submission_post import SubmissionPost, _transform_instance_to_error
from corehq.form_processor.utils.general import should_use_sql_backend
from corehq.form_processor.utils.xform import _get_form
from corehq.sql_db.util import get_db_alias_for_partitioned_doc
from couchforms.models import XFormInstance


def pre_process_unfinished_stub(stub):
    if stub.saved:
        # ignore for now
        return

    if not should_use_sql_backend(stub.domain):
        # ignore for couch domains
        stub.delete()
        return

    form_id = stub.xform_id
    try:
        form = FormAccessorSQL.get_form(form_id)
    except XFormNotFound:
        # form doesn't exist which means the failure probably happend during saving so
        # let mobile handle re-submitting it
        stub.delete()
        return

    _reprocess_form(form)

    stub.delete()


def reprocess_xform_error(form):
    """
    Attempt to re-process an error form. This was created specifically to address
    the issue of out of order forms and child cases (form creates child case before
    parent case has been created).

    See http://manage.dimagi.com/default.asp?250459
    :param form_id: ID of the error form to process
    """
    if not form:
        raise Exception('Form with ID {} not found'.format(form.form_id))

    if not form.is_error:
        raise Exception('Form was not an error form: {}={}'.format(form.form_id, form.doc_type))

    return _reprocess_form(form)


def reprocess_xform_error_by_id(form_id, domain=None):
    form = _get_form(form_id)
    if domain and form.domain != domain:
        raise Exception('Form not found')
    return reprocess_xform_error(form)


def _reprocess_form(form):
    # reset form state prior to processing
    if should_use_sql_backend(form.domain):
        form.state = XFormInstanceSQL.NORMAL
    else:
        form.doc_type = 'XFormInstance'

    form.initial_processing_complete = True
    form.problem = None

    interface = FormProcessorInterface(form.domain)
    cache = interface.casedb_cache(
        domain=form.domain, lock=True, deleted_ok=True, xforms=[form]
    )
    with cache as casedb:
        try:
            case_stock_result = SubmissionPost.process_xforms_for_cases([form], casedb)
        except (IllegalCaseId, UsesReferrals, MissingProductId,
                PhoneDateValueError, InvalidCaseIndex, CaseValueError) as e:
            instance = _transform_instance_to_error(interface, e, form)
            # this is usually just one document, but if an edit errored we want
            # to save the deprecated form as well
            interface.save_processed_models([form])
            return form

        if case_stock_result:
            stock_result = case_stock_result.stock_result
            if stock_result:
                assert stock_result.populated

            cases = case_stock_result.case_models
            if should_use_sql_backend(form.domain):
                cases = _filter_already_processed_cases(form, cases)
                for case in cases:
                    CaseAccessorSQL.save_case(case)

                ledgers = _filter_already_processed_ledgers(form, stock_result.models_to_save)
                LedgerAccessorSQL.save_ledger_values(ledgers)

                FormAccessorSQL.update_form_problem_and_state(form)
                publish_form_saved(form)

                for case in cases:
                    publish_case_saved(case)
                    case_post_save.send(case.__class__, case=case)

                for ledger in ledgers:
                    publish_ledger_v2_saved(ledger)
            else:
                with bulk_atomic_blobs([form] + cases):
                    XFormInstance.save(form)  # use this save to that we don't overwrite the doc_type
                    XFormInstance.get_db().bulk_save(cases)
                if stock_result:
                    stock_result.commit()

            case_stock_result.stock_result.finalize()
            case_stock_result.case_result.commit_dirtiness_flags()
    return form


def _filter_already_processed_cases(form, cases):
    """Remove any cases that already have a case transaction for this form"""
    cases_by_id = {
        case.case_id: case
        for case in cases
    }
    case_dbs = defaultdict(list)
    for case in cases:
        db_name = get_db_alias_for_partitioned_doc(case.case_id)
        case_dbs[db_name].append(case.case_id)

    for db_name, case_ids in case_dbs.items():
        transactions = CaseTransaction.objects.using(db_name).filter(case_id__in=case_ids, form_id=form.form_id)
        for trans in transactions:
            del cases_by_id[trans.case_id]

    return cases_by_id.values()


def _filter_already_processed_ledgers(form, ledgers):
    """Remove any ledgers that already have a ledger transaction for this form"""
    ledgers_by_id = {
        ledger.ledger_reference: ledger
        for ledger in ledgers
    }
    ledger_dbs = defaultdict(list)
    for ledger in ledgers_by_id.values():
        db_name = get_db_alias_for_partitioned_doc(ledger.case_id)
        ledger_dbs[db_name].append(ledger.case_id)

    for db_name, case_ids in ledger_dbs.items():
        transactions = LedgerTransaction.objects.using().filter(case_id__in=case_ids, form_id=form.form_id)
        for trans in transactions:
            del ledgers_by_id[trans.ledger_reference]

    return ledgers_by_id.values()
Python
0
@@ -3595,41 +3595,8 @@ ult%0A - if stock_result:%0A @@ -4639,45 +4639,8 @@ es)%0A - if stock_result:%0A
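Decoded (%0A stands for a newline throughout these diffs), the two hunks each delete an `if stock_result:` guard inside `_reprocess_form`, so the assert on `stock_result.populated` and the couch-branch `stock_result.commit()` now run unconditionally. A sketch of the patched region; the exact dedenting is inferred from the hunk lengths:

    stock_result = case_stock_result.stock_result
    assert stock_result.populated          # first guard removed
    # ... unchanged lines ...
    XFormInstance.get_db().bulk_save(cases)
    stock_result.commit()                  # second guard removed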
6e8c58608b85e10bc258b12dc83fb3dcdc0056a4
tweak so that no files are created in cwd
utest/api/test_run_and_rebot.py
utest/api/test_run_and_rebot.py
import unittest import sys import tempfile from os.path import abspath, dirname, join, exists from os import remove from StringIO import StringIO from robot.utils.asserts import assert_equals from robot import run, rebot ROOT = dirname(dirname(dirname(abspath(__file__)))) TEMP = tempfile.gettempdir() LOG_PATH = join(TEMP, 'log.html') LOG = 'Log: %s' % LOG_PATH class Base(unittest.TestCase): def setUp(self): self.orig__stdout__ = sys.__stdout__ self.orig__stderr__ = sys.__stderr__ self.orig_stdout = sys.stdout self.orig_stderr = sys.stderr sys.__stdout__ = StringIO() sys.__stderr__ = StringIO() sys.stdout = StringIO() sys.stderr = StringIO() if exists(LOG_PATH): remove(LOG_PATH) def tearDown(self): sys.__stdout__ = self.orig__stdout__ sys.__stderr__ = self.orig__stderr__ sys.stdout = self.orig_stdout sys.stderr = self.orig_stderr def _assert_outputs(self, stdout=None, stderr=None): self._assert_output(sys.__stdout__, stdout) self._assert_output(sys.__stderr__, stderr) self._assert_output(sys.stdout, None) self._assert_output(sys.stderr, None) def _assert_output(self, stream, expected): output = stream.getvalue() if expected: self._assert_output_contains(output, expected) else: self._assert_no_output(output) def _assert_no_output(self, output): if output: raise AssertionError('Expected output to be empty:\n%s' % output) def _assert_output_contains(self, output, expected_items): for expected in expected_items: content, count = expected if output.count(content) != count: raise AssertionError("'%s' not %d times in output:\n%s" % (content, count, output)) class TestRun(Base): data = join(ROOT, 'atest', 'testdata', 'misc', 'pass_and_fail.html') nonex = join(TEMP, 'non-existing-file-this-is.txt') def test_run_once(self): assert_equals(run(self.data, outputdir=TEMP, report='none'), 1) self._assert_outputs([('Pass And Fail', 2), (LOG, 1), ('Report:', 0)]) assert exists(LOG_PATH) def test_run_multiple_times(self): assert_equals(run(self.data, output='NONE', critical='nomatch'), 0) assert_equals(run(self.data, output='NONE', name='New Name'), 1) self._assert_outputs([('Pass And Fail', 2), ('New Name', 2), (LOG, 0)]) def test_run_fails(self): assert_equals(run(self.nonex), 252) assert_equals(run(self.data, outputdir=TEMP), 1) self._assert_outputs(stdout=[('Pass And Fail', 2), (LOG, 1)], stderr=[('[ ERROR ]', 1), (self.nonex, 1), ('--help', 1)]) def test_custom_stdout(self): stdout = StringIO() assert_equals(run(self.data, output='NONE', stdout=stdout), 1) self._assert_output(stdout, [('Pass And Fail', 2), ('Output:', 1), ('Log:', 0), ('Report:', 0)]) self._assert_outputs() def test_custom_stderr(self): stderr = StringIO() assert_equals(run(self.nonex, stderr=stderr), 252) assert_equals(run(self.data, output='NONE', stderr=stderr), 1) self._assert_output(stderr, [('[ ERROR ]', 1), (self.nonex, 1), ('--help', 1)]) self._assert_outputs([('Pass And Fail', 2), ('Output:', 1), ('Log:', 0), ('Report:', 0)]) class TestRebot(Base): data = join(ROOT, 'atest', 'testdata', 'rebot', 'created_normal.xml') nonex = join(TEMP, 'non-existing-file-this-is.xml') def test_run_once(self): assert_equals(rebot(self.data, outputdir=TEMP, report='NONE'), 1) self._assert_outputs([(LOG, 1), ('Report:', 0)]) assert exists(LOG_PATH) def test_run_multiple_times(self): assert_equals(rebot(self.data, outputdir=TEMP, critical='nomatch'), 0) assert_equals(rebot(self.data, outputdir=TEMP, name='New Name'), 1) self._assert_outputs([(LOG, 2)]) def test_run_fails(self): assert_equals(rebot(self.nonex), 252) assert_equals(rebot(self.data, 
outputdir=TEMP), 1) self._assert_outputs(stdout=[(LOG, 1)], stderr=[('[ ERROR ]', 1), (self.nonex, 2), ('--help', 1)]) def test_custom_stdout(self): stdout = StringIO() assert_equals(rebot(self.data, report='None', stdout=stdout), 1) self._assert_output(stdout, [('Log:', 1), ('Report:', 0)]) self._assert_outputs() def test_custom_stdout_and_stderr(self): output = StringIO() assert_equals(rebot(self.data, log='NONE', report='NONE', stdout=output, stderr=output), 252) assert_equals(rebot(self.data, report='NONE', stdout=output, stderr=output), 1) self._assert_output(output, [('[ ERROR ] No outputs created', 1), ('--help', 1), ('Log:', 1), ('Report:', 0)]) self._assert_outputs() if __name__ == '__main__': unittest.main()
Python
0
@@ -4526,32 +4526,76 @@ ', stdout=stdout +,%0A outputdir=TEMP ), 1)%0A se @@ -4996,16 +4996,32 @@ r=output +, outputdir=TEMP ), 1)%0A
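Decoded, the patch passes `outputdir=TEMP` to the two `rebot()` calls in `TestRebot` that previously wrote result files into the current working directory. The reconstructed call sites (line wrapping follows the hunks):

    # in test_custom_stdout
    assert_equals(rebot(self.data, report='None', stdout=stdout,
                        outputdir=TEMP), 1)
    # in test_custom_stdout_and_stderr (second call)
    assert_equals(rebot(self.data, report='NONE', stdout=output,
                        stderr=output, outputdir=TEMP), 1)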
b0ef91a827650a23be819d6d4f1a99d096af8dfe
fix missing header tests
course_grader/test/dao/test_csv.py
course_grader/test/dao/test_csv.py
# Copyright 2021 UW-IT, University of Washington # SPDX-License-Identifier: Apache-2.0 from django.test import TestCase, override_settings from course_grader.dao.csv import InsensitiveDictReader, GradeImportCSV from course_grader.dao.section import get_section_by_label from course_grader.dao.person import PWS from course_grader.exceptions import InvalidCSV from uw_pws.util import fdao_pws_override from uw_sws.util import fdao_sws_override import os @fdao_sws_override @fdao_pws_override class CVSDAOFunctionsTest(TestCase): def setUp(self): self.resource_path = os.path.abspath(os.path.join( os.path.dirname(__file__), "..", "..", "resources", "csv")) def test_is_true(self): grade_import = GradeImportCSV() self.assertTrue(grade_import.is_true("1")) self.assertTrue(grade_import.is_true("Y")) self.assertTrue(grade_import.is_true("Yes")) self.assertTrue(grade_import.is_true("TRUE")) self.assertFalse(grade_import.is_true("0")) self.assertFalse(grade_import.is_true("N")) self.assertFalse(grade_import.is_true("NO")) self.assertFalse(grade_import.is_true("F")) self.assertFalse(grade_import.is_true("")) def test_validate(self): grade_import = GradeImportCSV() fileobj = open(os.path.join(self.resource_path, "test1.csv")) r = grade_import.validate(fileobj) self.assertEqual(grade_import.has_header, True) self.assertEqual(grade_import.dialect.delimiter, ",") fileobj = open(os.path.join(self.resource_path, "missing_header.csv")) self.assertRaisesRegex( InvalidCSV, "Missing \"Class Grade\" header$", grade_import.validate, fileobj) fileobj = open(os.path.join(self.resource_path, "missing_grade.csv")) self.assertRaisesRegex( InvalidCSV, "Missing \"Class Grade\" header$", grade_import.validate, fileobj) fileobj = open(os.path.join(self.resource_path, "large_header.csv")) r = grade_import.validate(fileobj) self.assertEqual(grade_import.has_header, True) self.assertEqual(grade_import.dialect.delimiter, ",") fileobj = open(os.path.join(self.resource_path, "unk_delimiter.csv")) r = grade_import.validate(fileobj) self.assertEqual(grade_import.has_header, True) self.assertEqual(grade_import.dialect.delimiter, ",") def test_grades_for_section(self): # Section/user do not matter here section = get_section_by_label("2013,spring,A B&C,101/A") user = PWS().get_person_by_regid("FBB38FE46A7C11D5A4AE0004AC494FFE") f = open(os.path.join(self.resource_path, "test1.csv")) r = GradeImportCSV().grades_for_section(section, user, fileobj=f) self.assertEqual(len(r.get("grades")), 6) self.assertEqual( len([g for g in r["grades"] if g["is_incomplete"] is True]), 2) self.assertEqual( len([g for g in r["grades"] if g["is_writing"] is True]), 2) f.close() f = open(os.path.join(self.resource_path, "test2.csv")) r = GradeImportCSV().grades_for_section(section, user, fileobj=f) self.assertEqual(len(r.get("grades")), 6) f.close() class InsensitiveDictReaderTest(CVSDAOFunctionsTest): def test_insensitive_dict_reader(self): f = open(os.path.join(self.resource_path, "test3.csv")) reader = InsensitiveDictReader(f) row = next(reader) self.assertEqual(row.get("Field1"), "ök1") self.assertEqual(row.get("Field2"), "øk2") self.assertEqual(row.get("Field3"), "ok3") self.assertEqual(row.get("Field4"), "ok4") self.assertEqual(row.get("Field 5", "Field5"), "ok5") self.assertEqual(row.get("Field6", "field 6"), "") self.assertEqual(row.get("Field7"), "") self.assertEqual(row.get("Field8"), None) f.close()
Python
0.000005
@@ -1632,32 +1632,36 @@ ex(%0A + InvalidCSV, %22Mis @@ -1665,38 +1665,29 @@ Missing -%5C%22Class Grade%5C%22 he +header: gr ade -r $%22,%0A @@ -1841,32 +1841,36 @@ ex(%0A + InvalidCSV, %22Mis @@ -1878,30 +1878,21 @@ ing -%5C%22Class Grade%5C%22 he +header: gr ade -r $%22,%0A
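Here the hunks rewrite the expected error message in both validation assertions (the missing_header.csv and missing_grade.csv cases) from `Missing \"Class Grade\" header$` to `Missing header: grade$`, re-indenting the argument list. Reconstructed, with the new indentation approximated:

    self.assertRaisesRegex(
        InvalidCSV, "Missing header: grade$",
        grade_import.validate, fileobj)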
2ed7986375cb9acf0f32e61b466178081c75cc24
Fix test for Python 3
tests/test_net.py
tests/test_net.py
import unittest import urllib2 import oembed class EndpointTest(unittest.TestCase): def testInit(self): #plain init ep = oembed.OEmbedEndpoint('http://www.flickr.com/services/oembed') self.assertEqual(len(ep.getUrlSchemes()), 0) #init with schemes ep = oembed.OEmbedEndpoint('http://www.flickr.com/services/oembed',\ ['http://*.flickr.com/*',\ 'http://flickr.com/*']) self.assertEqual(len(ep.getUrlSchemes()), 2) def testUrlScheme(self): ep = oembed.OEmbedEndpoint('http://www.flickr.com/services/oembed') #add some schemes ep.addUrlScheme('http://flickr.com/*') ep.addUrlScheme('http://*.flickr.com/*') self.assertEqual(len(ep.getUrlSchemes()), 2) #add duplicated ep.addUrlScheme('http://*.flickr.com/*') self.assertEqual(len(ep.getUrlSchemes()), 2) #remove url ep.addUrlScheme('http://*.flickr.com/') ep.delUrlScheme('http://flickr.com/*') self.assertEqual(len(ep.getUrlSchemes()), 2) #clear all ep.clearUrlSchemes() self.assertEqual(len(ep.getUrlSchemes()), 0) class UrlSchemeTest(unittest.TestCase): def testInit(self): scheme = oembed.OEmbedUrlScheme('http://*.flickr.com/*') self.assertEqual(scheme.getUrl(), 'http://*.flickr.com/*') self.assertTrue(scheme.match('http://www.flickr.com/photos/wizardbt/2584979382/')) self.assertFalse(scheme.match('http://flickr.com/photos/wizardbt/2584979382/')) self.assertFalse(scheme.match('http://flick.com/photos/wizardbt/2584979382/')) self.assertFalse(scheme.match('https://flickr.com/photos/wizardbt/2584979382/')) self.assertFalse(scheme.match('flickr.com/photos/wizardbt/2584979382/')) self.assertFalse(scheme.match('http://flickr/photos/wizardbt/2584979382/')) self.assertFalse(scheme.match('http://conflickr.com/')) class ConsumerTest(unittest.TestCase): def testGettersAndSetters(self): consumer = oembed.OEmbedConsumer() ep1 = oembed.OEmbedEndpoint('http://www.flickr.com/services/oembed') ep2 = oembed.OEmbedEndpoint('http://api.pownce.com/2.1/oembed.{format}') ep3 = oembed.OEmbedEndpoint('http://www.vimeo.com/api/oembed.{format}') #adding consumer.addEndpoint(ep1) consumer.addEndpoint(ep2) consumer.addEndpoint(ep3) self.assertEqual(len(consumer.getEndpoints()), 3) #removing one consumer.delEndpoint(ep2) self.assertEqual(len(consumer.getEndpoints()), 2) #clearing all! 
consumer.clearEndpoints() self.assertEqual(len(consumer.getEndpoints()), 0) def testEmbed(self): consumer = oembed.OEmbedConsumer() ep1 = oembed.OEmbedEndpoint('http://www.flickr.com/services/oembed') ep1.addUrlScheme('http://*.flickr.com/*') ep2 = oembed.OEmbedEndpoint('http://api.pownce.com/2.1/oembed.{format}') ep2.addUrlScheme('http://*.pownce.com/*') consumer.addEndpoint(ep1) consumer.addEndpoint(ep2) #invalid format self.assertRaises(oembed.OEmbedInvalidRequest, consumer.embed, \ 'http://www.flickr.com/photos/wizardbt/2584979382/', \ format='text') #no matching endpoint for the url self.assertRaises(oembed.OEmbedNoEndpoint, consumer.embed, \ 'http://google.com/123456') def testResponses(self): consumer = oembed.OEmbedConsumer() ep = oembed.OEmbedEndpoint('http://www.flickr.com/services/oembed') ep.addUrlScheme('http://*.flickr.com/*') consumer.addEndpoint(ep) #json resp = consumer.embed('http://www.flickr.com/photos/wizardbt/2584979382/', format='json') #xml resp = consumer.embed('http://www.flickr.com/photos/wizardbt/2584979382/', format='xml') #resource not found self.assertRaises(urllib2.HTTPError, consumer.embed, \ 'http://www.flickr.com/photos/wizardbt/', \ format='json') def testNoEndpoints(self): consumer = oembed.OEmbedConsumer() self.assertRaises(oembed.OEmbedNoEndpoint, consumer.embed, \ 'http://www.flickr.com/photos/wizardbt/2584979382/') def testBrokenEndpoint(self): consumer = oembed.OEmbedConsumer() ep = oembed.OEmbedEndpoint('http://localhost') ep.addUrlScheme('http://localhost/*') consumer.addEndpoint(ep) self.assertRaises(urllib2.URLError, consumer.embed, \ 'http://localhost/test') def suite(): suite = unittest.TestSuite() suite.addTests(unittest.makeSuite(EndpointTest)) suite.addTests(unittest.makeSuite(UrlSchemeTest)) suite.addTests(unittest.makeSuite(ConsumerTest)) return suite if __name__ == '__main__': unittest.main()
Python
0.998138
@@ -20,29 +20,118 @@ ort -urllib2%0Aimport oembed +oembed%0A%0Atry:%0A import urllib.request as urllib2 # Python 3%0Aexcept ImportError:%0A import urllib2 # Python 2 %0A%0A%0Ac @@ -5387,8 +5387,9 @@ t.main() +%0A
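Applied, this diff replaces the bare `import urllib2` with a try/except shim so the name resolves on both interpreters, and adds a trailing newline after `unittest.main()`. The resulting module header:

    import unittest
    import oembed

    try:
        import urllib.request as urllib2  # Python 3
    except ImportError:
        import urllib2  # Python 2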
ba0923dc8a38bfa3072f74ed1943e9859874e3a8
use timezone in calculating current time
batch.py
batch.py
from datetime import datetime, timedelta import json import celery from emails import send_email from pytz import timezone import requests import stripe from salesforce import SalesforceConnection from config import STRIPE_KEYS from config import ACCOUNTING_MAIL_RECIPIENT from config import TIMEZONE zone = timezone(TIMEZONE) stripe.api_key = STRIPE_KEYS['secret_key'] class Log(object): """ This encapulates sending to the console/stdout and email all in one. """ def __init__(self): self.log = list() def it(self, string): """ Add something to the log. """ print(string) self.log.append(string) def send(self): """ Send the assembled log out as an email. """ body = '\n'.join(self.log) recipient = ACCOUNTING_MAIL_RECIPIENT subject = 'Batch run' send_email(body=body, recipient=recipient, subject=subject) def amount_to_charge(entry): """ Determine the amount to charge. This depends on whether the payer agreed to pay fees or not. If they did then we add that to the amount charged. Stripe charges 2.9% + $0.30. Stripe wants the amount to charge in cents. So we multiply by 100 and return that. """ amount = int(entry['Amount']) if entry['Stripe_Agreed_to_pay_fees__c']: fees = amount * .029 + .30 else: fees = 0 total = amount + fees total_in_cents = total * 100 return int(total_in_cents) def process_charges(query, log): print(query) sf = SalesforceConnection() response = sf.query(query) # TODO: check response code log.it('Found {} opportunities available to process.'.format( len(response))) for item in response: # print (item) amount = amount_to_charge(item) try: log.it("---- Charging ${} to {} ({})".format(amount / 100, item['Stripe_Customer_ID__c'], item['Name'])) charge = stripe.Charge.create( customer=item['Stripe_Customer_ID__c'], amount=amount, currency='usd', description=item['Description'], ) except stripe.error.CardError as e: log.it("The card has been declined: {} ({})".format(e, e.json_body['error']['decline_code'])) continue except stripe.error.InvalidRequestError as e: log.it("Problem: {}".format(e)) continue # print ('Charge: {}'.format(charge)) # TODO: check for success # TODO: catch other errors # print ("Charge id: {}".format(charge.id)) update = { 'Stripe_Transaction_Id__c': charge.id, 'Stripe_Card__c': charge.source.id, 'StageName': 'Closed Won', } path = item['attributes']['url'] url = '{}{}'.format(sf.instance_url, path) # print (url) resp = requests.patch(url, headers=sf.headers, data=json.dumps(update)) # TODO: check 'errors' and 'success' too # print (resp) if resp.status_code == 204: log.it("ok") else: log.it("problem") raise Exception('problem') @celery.task() def charge_cards(): log = Log() log.it('---Starting batch job...') three_days_ago = (datetime.now() - timedelta(days=3)).strftime('%Y-%m-%d') today = datetime.now().strftime('%Y-%m-%d') # regular (non Circle) pledges: log.it('---Processing regular charges...') query = """ SELECT Amount, Name, Stripe_Customer_Id__c, Description, Stripe_Agreed_to_pay_fees__c FROM Opportunity WHERE CloseDate <= {} AND CloseDate >= {} AND StageName = 'Pledged' AND Stripe_Customer_Id__c != '' AND Type != 'Giving Circle' """.format(today, three_days_ago) process_charges(query, log) # # Circle transactions are different from the others. The Close Dates for a # given Circle donation are all identical. That's so that the gift can be # recognized all at once on the donor wall. So we use another field to # determine when the card is actually charged: # Giving_Circle_Expected_Giving_Date__c. So we process charges separately # for Circles. 
# log.it('---Processing Circle charges...') query = """ SELECT Amount, Name, Stripe_Customer_Id__c, Description, Stripe_Agreed_to_pay_fees__c FROM Opportunity WHERE Giving_Circle_Expected_Giving_Date__c <= {} AND Giving_Circle_Expected_Giving_Date__c >= {} AND StageName = 'Pledged' AND Stripe_Customer_Id__c != '' AND Type = 'Giving Circle' """.format(today, three_days_ago) process_charges(query, log) log.send() if __name__ == '__main__': charge_cards()
Python
0.000155
@@ -3439,16 +3439,23 @@ ime.now( +tz=zone ) - time @@ -3460,16 +3460,25 @@ medelta( +%0A days=3)) @@ -3524,16 +3524,23 @@ ime.now( +tz=zone ).strfti
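Decoded, the patch threads the module-level `zone` into both `datetime.now()` calls in `charge_cards`, so the dates are computed in the configured timezone rather than server-local time. Reconstructed (the re-wrapped timedelta line follows the hunk's whitespace approximately):

    three_days_ago = (datetime.now(tz=zone) - timedelta(
        days=3)).strftime('%Y-%m-%d')
    today = datetime.now(tz=zone).strftime('%Y-%m-%d')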
158eea6914360f45b456c2a4a15f9c9bcd26beca
fix incorrect test name
tests/test_svm.py
tests/test_svm.py
import unittest from context import lily from lily import svm, optimizer from numpy import mat import logging logging.basicConfig(level=logging.WARNING, format="%(lineno)d\t%(message)s") class TestSvm(unittest.TestCase): def setUp(self): self.tolerance = 0.001 self.C = 0.6 self.iterations = 40 self.data_matrix = [] self.label_matrix = [] self.test_data = [[-0.017612, 14.053064, 0], [-1.395634, 4.662541, 1], [-0.752157, 6.538620, 0], [-1.322371, 7.152853, 0], [0.423363, 11.054677, 0], [0.406704, 7.067335, 1], [0.667394, 12.741452, 0], [-2.460150, 6.866805, 1], [0.569411, 9.548755, 0], [-0.026632, 10.427743, 0], [0.850433, 6.920334, 1], [1.347183, 13.175500, 0], [1.176813, 3.167020, 1], [-1.781871, 9.097953, 0], [-0.566606, 5.749003, 1]] for line in self.test_data: self.data_matrix.append([float(line[0]), float(line[1])]) self.label_matrix.append(float(line[2])) self.os = optimizer.Optimizer(mat(self.data_matrix), mat(self.label_matrix).transpose(), self.C, self.tolerance) def test_calculate_ek(self): """svm - calculate_ek calculates E value for a given alpha""" for k in range(len(self.test_data)): ek = svm.calculate_ek(self.os, k) self.assertEqual(ek, -1.0) def test_clip_alpha_greater_than_h(self): """svm - clip_alpha returns H when alpha greater than H""" alpha = 8 H = 6 L = 5 self.assertEqual(svm.clip_alpha(alpha, H, L), 6) def test_clip_alpha_less_than_l(self): """svm - clip_alpha returns L when alpha less than L""" alpha = 8 H = 6 L = 7 self.assertEqual(svm.clip_alpha(alpha, H, L), 7) def test_select_j_rand_doesnt_select_i(self): """svm - select_j_rand does not select i""" i = 4 m = 76 self.assertNotEqual(svm.select_j_rand(i, m), i) def test_needs_optimization_returns_false_for_low_ei(self): """ svm - needs_optimization returns false for small nonneg ei """ self.assertFalse(svm.needs_optimization(self.os, 5, 0.1)) def test_needs_optimization_returns_false_for_high_ei(self): """ svm - needs_optimization returns true for small neg ei """ self.assertTrue(svm.needs_optimization(self.os, 5, -5.1)) if __name__ == '__main__': unittest.main()
Python
0.9998
@@ -1412,16 +1412,21 @@ + mat(self @@ -1490,16 +1490,21 @@ + self.C,%0A @@ -1499,16 +1499,21 @@ self.C,%0A + @@ -2667,22 +2667,20 @@ rns_ -fals +tru e_for_ -high +neg _ei(
e94b2593424518632c704f4a440df3bc51cbcd3e
fix failing tests.
tests/test_uri.py
tests/test_uri.py
# encoding: utf-8 import unittest from resources import URI from resources import IRI class TestURISnowman(unittest.TestCase): def setUp(self): uri = "http://u:p@" + "www.\N{SNOWMAN}".encode('idna') + ":80/path" self.uri = URI(uri) def testFail(self): self.assertRaises(TypeError, URI, u"http://\u2603/") def test_repr(self): expect = "URI('http://www.xn--n3h/path', encoding='idna')".encode('ascii') self.assertEquals(repr(self.uri), expect) def test_netloc(self): expect = "u:p@www.xn--n3h:80".encode('ascii') self.assertEquals(self.uri.netloc, expect) def test_hostname(self): expect = "www.xn--n3h".encode('ascii') self.assertEquals(self.uri.hostname, expect) def test_port(self): expect = "80" self.assertEquals(self.uri.port, expect) def test_path(self): expect = "/path".encode('ascii') self.assertEquals(self.uri.path, expect)
Python
0.000001
@@ -157,66 +157,77 @@ -uri = %22http://u:p@%22 + %22www.%5CN%7BSNOWMAN%7D%22.encode('idna') + %22 +idna = u%22%5CN%7BSNOWMAN%7D%22.encode('idna')%0A uri = %22http://u:p@www.%25s :80/ @@ -231,16 +231,23 @@ 80/path%22 + %25 idna %0A
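Decoded, the patch rewrites `setUp` so the IDNA-encoded host is interpolated into the URI string instead of concatenated onto it (string-plus-bytes concatenation is the likely failure the subject refers to, though that motivation is an inference):

    def setUp(self):
        idna = u"\N{SNOWMAN}".encode('idna')
        uri = "http://u:p@www.%s:80/path" % idna
        self.uri = URI(uri)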
4104ea04d75b400e7a2a4d71c259ceb0957f8992
include the absolute url to the onsite page
crate_project/apps/packages/api.py
crate_project/apps/packages/api.py
from tastypie import fields from tastypie.resources import ModelResource from packages.models import Package, Release class PackageResource(ModelResource): releases = fields.ToManyField("packages.api.ReleaseResource", "releases") class Meta: allowed_methods = ["get"] queryset = Package.objects.all() resource_name = "package" class ReleaseResource(ModelResource): package = fields.ForeignKey(PackageResource, "package") class Meta: allowed_methods = ["get"] fields = [ "author", "author_email", "created", "description", "download_uri", "license", "maintainer", "maintainer_email", "package", "platform", "requires_python", "summary", "version" ] queryset = Release.objects.all() resource_name = "release"
Python
0
@@ -273,32 +273,68 @@ thods = %5B%22get%22%5D%0A + include_absolute_url = True%0A queryset @@ -816,16 +816,52 @@ %5D%0A + include_absolute_url = True%0A
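Here the patch sets tastypie's `include_absolute_url = True` on both resources' `Meta`, so each serialized object carries the absolute URL of its on-site page. For PackageResource the result is:

    class Meta:
        allowed_methods = ["get"]
        include_absolute_url = True
        queryset = Package.objects.all()
        resource_name = "package"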
ebcf003dfaa8b2c375dfbecfe179665a9d0b339c
Print arguments
bench.py
bench.py
#!/usr/bin/env python import argparse import uuid import subprocess import os import re import time class Failed(Exception): pass def launch_pachyderm(args, env): try: manifest = subprocess.check_output("make google-cluster-manifest", env=env, shell=True) except subprocess.CalledProcessError: raise Failed() # Use the user-specified images manifest = re.sub('"pachyderm/pachd:.+"', '"{}"'.format(args.pachd_image), manifest) manifest = re.sub('"pachyderm/job-shim:.+"', '"{}"'.format(args.job_shim_image), manifest) tmp_manifest = '/tmp/pachyderm_benchmark_manifest' with open(tmp_manifest, 'w') as f: f.write(manifest) # deploy pachyderm if subprocess.call('kubectl create -f {}'.format(tmp_manifest), shell=True) != 0: raise Failed() # scale pachd if subprocess.call('kubectl scale rc pachd --replicas={}'.format(args.cluster_size), shell=True) != 0: raise Failed() # wait for all pachd nodes to be ready while subprocess.call("etc/kube/check_pachd_ready.sh") != 0: time.sleep(5) def create_cluster(env): if subprocess.call("make google-cluster", env=env, shell=True) != 0: raise Failed() def clean_cluster(env): if subprocess.call("make clean-google-cluster", env=env, shell=True) != 0: raise Failed() def run_benchmark(env): if subprocess.call('kubectl run -t -i bench --image="{}" --restart=Never -- go test ./src/server -run=XXX -bench={}'.format(args.pachyderm_compile_image, args.benchmark), shell=True) != 0: raise Failed() def gce(args): env = os.environ.copy() env['CLUSTER_NAME'] = args.cluster_name env['CLUSTER_SIZE'] = str(args.cluster_size) env['BUCKET_NAME'] = args.bucket_name env['STORAGE_NAME'] = args.volume_name env['STORAGE_SIZE'] = '10' # defaults to 10GB try: create_cluster(env) launch_pachyderm(args, env) run_benchmark(env) clean_cluster(env) except Failed: print("something went wrong... 
removing the cluster...") clean_cluster(env) def aws(args): print('AWS benchmark is not currently supported') if __name__ == '__main__': parser = argparse.ArgumentParser(description='Run a Pachyderm benchmark on a cloud provider.', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--provider', default='GCE', choices=['GCE', 'AWS'], help='the cloud provider to run the benchmark on.') parser.add_argument('--cluster-name', default='pachyderm-benchmark-{}'.format(str(uuid.uuid4())[:8]), help="the name of the cluster to run the benchmark on; one will be created.") parser.add_argument('--cluster-size', default=4, type=int, help='the number of nodes to run the benchmark on.') parser.add_argument('--bucket-name', default='pachyderm-benchmark-bucket-{}'.format(uuid.uuid4()), help='the GCS/S3 bucket to use with the benchmark; one will be created.') parser.add_argument('--volume-name', default='pachyderm-benchmark-volume-{}'.format(uuid.uuid4()), help='the persistent volume to use with the benchmark; one will be created.') parser.add_argument('--benchmark', default='.', help='a regex expression that specifies the benchmark to run; runs all benchmarks by default.') parser.add_argument('--runs', default=1, type=int, help='how many times the benchmark runs.') parser.add_argument('--pachd-image', default="pachyderm/pachd:latest", help='the pachd image to use.') parser.add_argument('--job-shim-image', default="pachyderm/job-shim:latest", help='the job-shim image to use.') parser.add_argument('--pachyderm-compile-image', default="pachyderm/pachyderm-compile:latest", help='the pachyderm-compile image to use.') args = parser.parse_args() if args.provider == 'GCE': gce(args) elif args.provider == 'AWS': aws(args)
Python
0.000942
@@ -3771,24 +3771,106 @@ rse_args()%0A%0A + print(%22running the benchmark with the following arguments:%22)%0A print(args)%0A%0A if args.
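Decoded, the patch echoes the parsed arguments right after `parse_args()` in the `__main__` block:

    args = parser.parse_args()

    print("running the benchmark with the following arguments:")
    print(args)

    if args.provider == 'GCE':
        gce(args)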
6bc3e784828c1f339ab4fd5fe3ca6dc80a07bb46
Enable logs
crawler/tasks.py
crawler/tasks.py
from __future__ import absolute_import, unicode_literals from .celery import app # from celery.utils.log import get_task_logger from .crawler import crawl_url # logger = get_task_logger(__name__) @app.task(rate_limit="6/s", queue='crawler') def crawl_url_task(url, value): crawl_url(url, value) # response, status, redirected = crawl_url(url) # if response is not None: # logger.info(str(url) + " | " + str(response.status_code) + " | " + str(response.reason) + # " | " + str(response.headers['Content-Type']) + " | " + str(status) + " | Redirected: " + str(redirected)) # else: # logger.info(url + " | " + str(status) + " | Redirected: " + str(redirected))
Python
0.000001
@@ -74,18 +74,16 @@ ort app%0A -# from cel @@ -151,18 +151,16 @@ wl_url%0A%0A -# logger = @@ -294,18 +294,16 @@ lue)%0A - # respons @@ -344,18 +344,16 @@ url)%0A - # if resp @@ -369,26 +369,24 @@ ot None:%0A - # logger. @@ -467,26 +467,24 @@ eason) +%0A - # @@ -598,18 +598,16 @@ ed))%0A - # else:%0A @@ -612,10 +612,8 @@ %0A - #
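Every hunk here strips a leading `# `, turning the commented-out logging back into live code: the `get_task_logger` import, the module-level `logger` assignment, and the response-logging block inside `crawl_url_task` are all uncommented. The re-enabled header:

    from .celery import app
    from celery.utils.log import get_task_logger
    from .crawler import crawl_url

    logger = get_task_logger(__name__)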
89d70f5794969cb8d71201504b8645a8359f5b70
read config file strings as unicode
credentials/settings/production.py
credentials/settings/production.py
from os import environ import yaml from credentials.settings.base import * from credentials.settings.utils import get_env_setting, get_logger_config DEBUG = False TEMPLATE_DEBUG = DEBUG ALLOWED_HOSTS = ['*'] LOGGING = get_logger_config() # Keep track of the names of settings that represent dicts. Instead of overriding the values in base.py, # the values read from disk should UPDATE the pre-configured dicts. DICT_UPDATE_KEYS = ('JWT_AUTH',) # AMAZON S3 STORAGE CONFIGURATION # See: https://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings # This may be overridden by the yaml in CREDENTIALS_CFG, but it should # be here as a default. FILE_STORAGE_BACKEND = {} CONFIG_FILE = get_env_setting('CREDENTIALS_CFG') with open(CONFIG_FILE) as f: config_from_yaml = yaml.load(f) # Remove the items that should be used to update dicts, and apply them separately rather # than pumping them into the local vars. dict_updates = {key: config_from_yaml.pop(key, None) for key in DICT_UPDATE_KEYS} for key, value in list(dict_updates.items()): if value: vars()[key].update(value) vars().update(config_from_yaml) # Load the files storage backend settings for django storages vars().update(FILE_STORAGE_BACKEND) if 'EXTRA_APPS' in locals(): INSTALLED_APPS += EXTRA_APPS DB_OVERRIDES = dict( PASSWORD=environ.get('DB_MIGRATION_PASS', DATABASES['default']['PASSWORD']), ENGINE=environ.get('DB_MIGRATION_ENGINE', DATABASES['default']['ENGINE']), USER=environ.get('DB_MIGRATION_USER', DATABASES['default']['USER']), NAME=environ.get('DB_MIGRATION_NAME', DATABASES['default']['NAME']), HOST=environ.get('DB_MIGRATION_HOST', DATABASES['default']['HOST']), PORT=environ.get('DB_MIGRATION_PORT', DATABASES['default']['PORT']), ) for override, value in DB_OVERRIDES.items(): DATABASES['default'][override] = value
Python
0.000344
@@ -761,16 +761,34 @@ FIG_FILE +, encoding='utf-8' ) as f:%0A
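Decoded, the patch opens the YAML config with an explicit encoding so its contents are read as unicode strings:

    with open(CONFIG_FILE, encoding='utf-8') as f:
        config_from_yaml = yaml.load(f)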
8257411a58f03d8a353129f2813cbc516a0e40c6
Make sure API tests are registered
editorsnotes/api/tests/__init__.py
editorsnotes/api/tests/__init__.py
Python
0
@@ -0,0 +1,48 @@ +from .serializers import *%0Afrom .views import *%0A
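The old file is empty; applied, the diff makes the package `__init__.py` re-export the test modules so the runner discovers them:

    from .serializers import *
    from .views import *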
fa0b12cc00da83c5dfce903a0f118dd609c213dd
Fix for windows path to templates
imports/cisco_tools.py
imports/cisco_tools.py
# ############################### MODULE INFO ################################ # Author: Jamie Caesar # Email: jcaesar@presidio.com # # !!!! NOTE: THIS IS NOT A SCRIPT THAT CAN BE RUN IN SECURECRT. !!!! # # This is a Python module that contains commonly used functions for processing the output of Cisco show commands. # These functions do not require the session data to interact with SecureCRT sessions. # # # ################################# IMPORTS ################################## import re import os from imports.google import ipaddress from imports.google import textfsm def parse_with_textfsm(raw_output, template_path, add_header=True): # Create file object to the TextFSM template and create TextFSM object. with open(template_path, 'r') as template: fsm_table = textfsm.TextFSM(template) # Process our raw data vs the template with TextFSM output = fsm_table.ParseText(raw_output) # Insert a header row into the list, so that when output to a CSV there is a header row. if add_header: output.insert(0, fsm_table.header) return output def update_empty_interfaces(route_table): def recursive_lookup(nexthop): for network in connected: if nexthop in network: return connected[network] for network in statics: if nexthop in network: return recursive_lookup(statics[network]) return None connected = {} unknowns = {} statics = {} for route in route_table: if route['protocol'][0] == 'C' or 'direct' in route['protocol']: connected[route['network']] = route['interface'] if route['protocol'][0] == 'S' or 'static' in route['protocol']: statics[route['network']] = route['nexthop'] if route['nexthop'] and not route['interface']: unknowns[route['nexthop']] = None for nexthop in unknowns: unknowns[nexthop] = recursive_lookup(nexthop) for route in route_table: if not route['interface']: if route['nexthop'] in unknowns: route['interface'] = unknowns[route['nexthop']] def parse_routes(session, routes): """ This function will take the raw route table from a devices (and a supported OS), process it with TextFSM, which will return a list of lists. Each sub-list in the TextFSM output represents a route entry. Each of these entries will be converted into a dictionary so that each item can be referenced by name (used in nexthop_summary) :param session: Sessions data structure :param routes: raw 'show ip route' output :return: A list of dictionaries, with each dict representing a route. 
""" script_dir = session['settings']['script_dir'] if session['OS'] == "IOS": template_file = os.path.join(script_dir, "textfsm-templates/show-ip-route-ios") elif session['OS'] == "NX-OS": template_file = os.path.join(script_dir, "textfsm-templates/show-ip-route-nxos") else: return [] route_list = parse_with_textfsm(routes, template_file, add_header=False) route_table = [] for route in route_list: nexthop = route[6] if nexthop != '': print nexthop nexthop = ipaddress.ip_address(unicode(route[6])) else: nexthop = None route_entry = {"protocol": route[0], "network": ipaddress.ip_network(u"{0}{1}".format(route[2], route[3])), "AD": route[4], "metric": route[5], "nexthop": nexthop, "lifetime": route[8], "interface": route[7] } route_table.append(route_entry) update_empty_interfaces(route_table) return route_table def get_protocol(raw_protocol): if raw_protocol[0] == 'S' or "static" in raw_protocol: return 'Static' elif raw_protocol[0] == 'C' or 'direct' in raw_protocol: return 'Connected' elif raw_protocol[0] == 'D' or "eigrp" in raw_protocol: return 'EIGRP' elif raw_protocol[0] == 'O' or "ospf" in raw_protocol: return 'OSPF' elif raw_protocol[0] == 'B' or "bgp" in raw_protocol: return 'BGP' elif raw_protocol[0] == 'i' or "isis" in raw_protocol: return 'ISIS' elif raw_protocol[0] == 'R' or "rip" in raw_protocol: return 'RIP' else: return 'Other' def short_int_name(str): """ This function shortens the interface name for easier reading :param str: The input string (long interface name) :return: The shortened interface name """ replace_pairs = [ ('fortygigabitethernet', 'Fo'), ('tengigabitethernet', 'Te'), ('gigabitethernet', 'Gi'), ('fastethernet', 'F'), ('ethernet', 'e'), ('eth', 'e'), ('port-channel', 'Po') ] lower_str = str.lower() for pair in replace_pairs: if pair[0] in lower_str: return lower_str.replace(pair[0], pair[1]) else: return str def long_int_name(int_name): """ This function expands a short interface name to the full name :param str: The input string (short interface name) :return: The shortened interface name """ replace_pairs = [ ('Fo', 'FortyGigabitEthernet'), ('Te', 'TenGigabitEthernet'), ('Gi', 'gigabitethernet'), ('F', 'FastEthernet'), ('Eth', 'Ethernet'), ('e', 'Ethernet'), ('Po', 'port-channel') ] for pair in replace_pairs: if pair[0] in int_name: return int_name.replace(pair[0], pair[1]) else: return str def extract_system_name(device_id): cisco_serial_format = r'[A-Z]{3}[A-Z0-9]{8}' ip_format = r'\d{1-3}\.\d{1-3}\.\d{1-3}\.\d{1-3}' re_serial = re.compile(cisco_serial_format) re_ip = re.compile(ip_format) # If we find an open paren, then we either have "SYSTEM_NAME(SERIAL)" or "SERIAL(SYSTEM-NAME)" format. The latter # format is often seen in older devices. Determine which is the system_name by matching regex for a Cisco serial. if "(" in device_id: left, right = device_id.split('(') right = right.strip(')') left_serial = re_serial.match(left) right_serial = re_serial.match(right) if right_serial: system_name = left elif left_serial: system_name = right else: system_name = device_id else: system_name = device_id # If FQDN, only take the host portion, otherwise return what we have. if "." in system_name: is_ip = re_ip.match(system_name) # Some device return IP as device_id. In those cases, just return IP -- don't treat it like FQDN if is_ip: return system_name else: return system_name.split('.')[0] else: return system_name
Python
0
@@ -3035,16 +3035,163 @@ urn %5B%5D%0A%0A + # Normalize path before attempting to access (e.g. change slash to backslash for windows.)%0A template_file = os.path.normpath(template_file)%0A rout
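Decoded, the patch inserts a normalization step in `parse_routes` before the template file is used, so the forward-slash paths built above work on Windows:

    # Normalize path before attempting to access (e.g. change slash to backslash for windows.)
    template_file = os.path.normpath(template_file)
    route_list = parse_with_textfsm(routes, template_file, add_header=False)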
314de0fd750e582f5156c29d623d9362f5c037e3
Add missing "file=sys.stdout" [skip ci]
test_utils.py
test_utils.py
import _thread import docker import os import socket import sys from http.server import HTTPServer, SimpleHTTPRequestHandler class TestFixtureServer(object): def __init__(self): self.port = 9999 self.ip = self.get_python_server_ip() def get_python_server_ip(self): # https://stackoverflow.com/a/166589 s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(("8.8.8.8", 80)) host_ip = s.getsockname()[0] s.close() return host_ip def _start_server(self): server = HTTPServer((self.ip, self.port), SimpleHTTPRequestHandler) server.serve_forever() def start_server_in_background(self): print( "Starting Test Fixture Server on: http://{}:{}".format( self.ip, self.port ), file=sys.stdout ) # start the server in a background thread _thread.start_new_thread(self._start_server, ()) class TestContainerRunner(object): def __init__(self): self.client = docker.from_env() self.container_name = os.environ["CONTAINER_NAME"] self.image_name = "image-" + os.environ["STAMP"] self.repository = os.environ["REPO"] self.containers = [] self.test_fixture_server = TestFixtureServer() self.test_fixture_server.start_server_in_background() self.outer_volume_path = "/tmp/" + self.container_name self.inner_volume_path = "/refinery-data" self._pull_image() self._build_image() def __enter__(self): self.run() def __exit__(self, *args): if not os.environ.get("CONTINUOUS_INTEGRATION"): self.docker_cleanup() def _pull_image(self): print("Pulling image: {}".format(self.image_name), file=sys.stdout) self.client.images.pull(self.repository) def _build_image(self): print("Building image: {}".format(self.image_name)) self.client.images.build( path=".", tag=self.image_name, rm=True, forcerm=True, cache_from=[self.repository] ) def run(self): print("Creating container: {}".format(self.container_name), file=sys.stdout) container = self.client.containers.run( self.image_name, detach=True, name=self.container_name, environment={ "INPUT_JSON_URL": "http://{}:{}/test-data/input.json".format( self.test_fixture_server.ip, self.test_fixture_server.port ) }, ports={"80/tcp": None}, publish_all_ports=True, extra_hosts={socket.gethostname(): self.test_fixture_server.ip}, volumes={ self.outer_volume_path: { 'bind': self.inner_volume_path, 'mode': 'rw' } } ) self.containers.append(container) def docker_cleanup(self): print("Cleaning up TestContainerRunner containers/images...", file=sys.stdout) for container in self.containers: container.remove(force=True, v=True) self.client.images.remove(self.image_name)
Python
0.000001
@@ -1940,16 +1940,33 @@ ge_name) +, file=sys.stdout )%0A
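A one-keyword patch: `_build_image` gains the `file=sys.stdout` redirection that every other print in the class already has:

    def _build_image(self):
        print("Building image: {}".format(self.image_name), file=sys.stdout)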
e1c750b1e66e8eae4a8558a3dac6bb8038d84535
Fix auto
peewee_migrate/auto.py
peewee_migrate/auto.py
from collections import Hashable, OrderedDict import peewee as pw from playhouse.reflection import Column as VanilaColumn INDENT = ' ' NEWLINE = '\n' + INDENT FIELD_TO_PARAMS = { pw.CharField: lambda f: {'max_length': f.max_length}, pw.DecimalField: lambda f: { 'max_digits': f.max_digits, 'decimal_places': f.decimal_places, 'auto_round': f.auto_round, 'rounding': f.rounding}, pw.ForeignKeyField: lambda f: { 'related_name': f.related_name, 'to_field': f.to_field.name }, } class Column(VanilaColumn): def __init__(self, field, migrator=None): self.name = field.name self.field_class = type(field) self.nullable = field.null self.primary_key = field.primary_key self.db_column = field.db_column self.index = field.index self.unique = field.unique self.params = {} if field.default is not None and not callable(field.default): self.params['default'] = field.default if self.field_class in FIELD_TO_PARAMS: self.params.update(FIELD_TO_PARAMS[self.field_class](field)) self.rel_model = None self.related_name = None self.to_field = None if isinstance(field, pw.ForeignKeyField) and migrator and \ field.rel_model._meta.name in migrator.orm: self.rel_model = "migrator.orm['%s']" % field.rel_model._meta.name def get_field_parameters(self): params = super(Column, self).get_field_parameters() params.update({k: repr(v) for k, v in self.params.items()}) return params def get_field(self, space=' '): # Generate the field definition for this column. field_params = self.get_field_parameters() param_str = ', '.join('%s=%s' % (k, v) for k, v in sorted(field_params.items())) return '{name}{space}={space}pw.{classname}({params})'.format( name=self.name, space=space, classname=self.field_class.__name__, params=param_str) def diff_one(model1, model2, **kwargs): """Find difference between Peewee models.""" changes = [] fields1 = model1._meta.fields fields2 = model2._meta.fields # Add fields names1 = set(fields1) - set(fields2) if names1: fields = [fields1[name] for name in names1] changes.append(create_fields(model1, *fields, **kwargs)) # Drop fields names2 = set(fields2) - set(fields1) if names2: changes.append(drop_fields(model1, *names2)) # Change fields fields_ = [] for name in set(fields1) - names1 - names2: field1, field2 = fields1[name], fields2[name] if compare_fields(field1, field2): fields_.append(field1) if fields_: changes.append(change_fields(model1, *fields_, **kwargs)) return changes def diff_many(models1, models2, migrator=None, reverse=False): models1 = pw.sort_models_topologically(models1) models2 = pw.sort_models_topologically(models2) if reverse: models1 = reversed(models1) models2 = reversed(models2) models1 = OrderedDict([(m._meta.name, m) for m in models1]) models2 = OrderedDict([(m._meta.name, m) for m in models2]) changes = [] # Add models for name in [m for m in models1 if m not in models2]: changes.append(create_model(models1[name], migrator=migrator)) # Remove models for name in [m for m in models2 if m not in models1]: changes.append(remove_model(models2[name])) for name, model1 in models1.items(): if name not in models2: continue changes += diff_one(model1, models2[name], migrator=migrator) return changes def model_to_code(Model, **kwargs): template = """class {classname}(pw.Model): {fields} """ fields = INDENT + NEWLINE.join([ field_to_code(field, **kwargs) for field in Model._meta.sorted_fields if not (isinstance(field, pw.PrimaryKeyField) and field.name == 'id') ]) return template.format( classname=Model.__name__, fields=fields ) def create_model(Model, **kwargs): return 
'@migrator.create_model\n' + model_to_code(Model, **kwargs) def remove_model(Model, **kwargs): return "migrator.remove_model('%s')" % Model._meta.db_table def create_fields(Model, *fields, **kwargs): return "migrator.add_fields(%s'%s', %s)" % ( NEWLINE, Model._meta.db_table, NEWLINE + (',' + NEWLINE).join([field_to_code(field, False, **kwargs) for field in fields]) ) def drop_fields(Model, *fields, **kwargs): return "migrator.remove_fields('%s', %s)" % ( Model._meta.db_table, ', '.join(map(repr, fields)) ) def field_to_code(field, space=True, **kwargs): col = Column(field, **kwargs) return col.get_field(' ' if space else '') def compare_fields(field1, field2, **kwargs): field_cls1, field_cls2 = type(field1), type(field2) if field_cls1 != field_cls2: # noqa return True params1 = field_to_params(field1) params2 = field_to_params(field2) return set(params1.items()) - set(params2.items()) def field_to_params(field, **kwargs): params = FIELD_TO_PARAMS.get(type(field), lambda f: {})(field) if field.default is not None and \ not callable(field.default) and \ isinstance(field.default, Hashable): params['default'] = field.default return params def change_fields(Model, *fields, **kwargs): return "migrator.change_fields('%s', %s)" % ( Model._meta.db_table, (',' + NEWLINE).join([field_to_code(f, False) for f in fields]) )
Python
0.000002
@@ -1265,46 +1265,37 @@ eld) - and migrator and %5C%0A +:%0A if migrator and fi @@ -1336,16 +1336,20 @@ or.orm:%0A + @@ -1418,16 +1418,92 @@ eta.name +%0A else:%0A self.rel_model = field.rel_model.__name__ %0A%0A de
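Decoded, the patch restructures the foreign-key branch of `Column.__init__` so a related model that is not in `migrator.orm` falls back to the model's class name instead of leaving `self.rel_model` as None:

    if isinstance(field, pw.ForeignKeyField):
        if migrator and field.rel_model._meta.name in migrator.orm:
            self.rel_model = "migrator.orm['%s']" % field.rel_model._meta.name
        else:
            self.rel_model = field.rel_model.__name__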
46b9c0a1d4941c6d3dacb18f5bfc08c8fc09f2b7
add migration filter to multi reports
custom/enikshay/reports/generic.py
custom/enikshay/reports/generic.py
from collections import namedtuple from sqlagg.filters import IN, AND, GTE, LT, RawFilter, EQ from corehq.apps.reports.filters.dates import DatespanFilter from corehq.apps.reports.generic import GenericReportView from corehq.apps.reports.sqlreport import SqlTabularReport, SqlData from corehq.apps.reports.standard import CustomProjectReport, DatespanMixin from corehq.apps.reports.util import get_INFilter_bindparams from corehq.apps.userreports.util import get_table_name from corehq.sql_db.connections import UCR_ENGINE_ID from custom.enikshay.reports.filters import EnikshayLocationFilter, QuarterFilter, EnikshayMigrationFilter from custom.utils.utils import clean_IN_filter_value TABLE_ID = 'episode' EnikshayReportConfig = namedtuple('ReportConfig', ['domain', 'locations_id', 'is_migrated', 'start_date', 'end_date']) class MultiReport(CustomProjectReport, GenericReportView): report_template_path = 'enikshay/multi_report.html' @property def reports(self): raise NotImplementedError() @property def report_context(self): context = { 'reports': [] } for report in self.reports: report.fields = self.fields report_instance = report(self.request, domain=self.domain) report_context = report_instance.report_context report_table = report_context.get('report_table', {}) report_table['slug'] = report_instance.slug context['reports'].append({ 'report': report_instance.context.get('report', {}), 'report_table': report_table }) return context class EnikshayMultiReport(MultiReport): fields = (DatespanFilter, EnikshayLocationFilter) @property def export_table(self): export_table = [] for report in self.reports: report_instance = report(self.request, domain=self.domain) rows = [ [header.html for header in report_instance.headers.header] ] report_table = [ unicode(report.name[:28] + '...'), rows ] export_table.append(report_table) for row in report_instance.rows: row_formatted = [] for element in row: if isinstance(element, dict): row_formatted.append(element['sort_key']) else: row_formatted.append(unicode(element)) rows.append(row_formatted) return export_table class EnikshayReport(DatespanMixin, CustomProjectReport, SqlTabularReport): use_datatables = False @property def report_config(self): is_migrated = EnikshayMigrationFilter.get_value(self.request, self.domain) if is_migrated is not None: is_migrated = int(is_migrated) return EnikshayReportConfig( domain=self.domain, locations_id=EnikshayLocationFilter.get_value(self.request, self.domain), is_migrated=is_migrated, start_date=self.datespan.startdate, end_date=self.datespan.end_of_end_day ) class EnikshaySqlData(SqlData): @property def engine_id(self): return UCR_ENGINE_ID @property def table_name(self): return get_table_name(self.config.domain, TABLE_ID) @property def filter_values(self): filter_values = { 'start_date': self.config.start_date, 'end_date': self.config.end_date, 'locations_id': self.config.locations_id, 'is_migrated': self.config.is_migrated, } clean_IN_filter_value(filter_values, 'locations_id') return filter_values @property def group_by(self): return [] @property def filters(self): filters = [ AND([GTE('opened_on', 'start_date'), LT('opened_on', 'end_date')]) ] locations_id = filter(lambda x: bool(x), self.config.locations_id) if locations_id: filters.append( IN('person_owner_id', get_INFilter_bindparams('locations_id', locations_id)) ) is_migrated = self.config.is_migrated if is_migrated is not None: filters.append(EQ('case_created_by_migration', 'is_migrated')) return filters
Python
0
@@ -1730,16 +1730,41 @@ onFilter +, EnikshayMigrationFilter )%0A%0A @
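The single hunk lands inside `EnikshayMultiReport` (the import of EnikshayMigrationFilter already exists in old_contents) and appends the filter to the class's `fields` tuple:

    class EnikshayMultiReport(MultiReport):

        fields = (DatespanFilter, EnikshayLocationFilter, EnikshayMigrationFilter)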
79bd1165ece909ad8c302f7127c8c8fa1cd67794
Remove unused parameters. When calling, use named parameters to keep things clear.
api/base/filters.py
api/base/filters.py
import re import functools from modularodm import Q from rest_framework.filters import OrderingFilter from rest_framework import serializers as ser class ODMOrderingFilter(OrderingFilter): """Adaptation of rest_framework.filters.OrderingFilter to work with modular-odm.""" # override def filter_queryset(self, request, queryset, view): ordering = self.get_ordering(request, queryset, view) if ordering: return queryset.sort(*ordering) return queryset query_pattern = re.compile(r'filter\[\s*(?P<field>\S*)\s*\]\s*') def query_params_to_fields(query_params): return { query_pattern.match(key).groupdict()['field']: value for key, value in query_params.items() if query_pattern.match(key) } # Used to make intersection "reduce-able" def intersect(x, y): return x & y class ODMFilterMixin(object): """View mixin that adds a get_query_from_request method which converts query params of the form `filter[field_name]=value` into an ODM Query object. Subclasses must define `get_default_odm_query()`. Serializers that want to restrict which fields are used for filtering need to have a variable called filterable_fields which is a list of strings representing the field names as they appear in the serialization. """ # TODO Handle simple and complex non-standard fields TRUTHY = set(['true', 'True', 1, '1']) FALSY = set(['false', 'False', 0, '0']) DEFAULT_OPERATOR = 'eq' # For the field_comparison_operators, instances can be a class or a tuple of classes field_comparison_operators = [ { 'field_type': ser.CharField, 'comparison_operator': 'icontains' }, { 'field_type': ser.ListField, 'comparison_operator': 'in' } ] def get_comparison_operator(self, key, value): for operator in self.field_comparison_operators: if isinstance(self.serializer_class._declared_fields[key], operator['field_type']): return operator['comparison_operator'] return self.DEFAULT_OPERATOR def is_filterable_field(self, key, value): try: return key.strip() in self.serializer_class.filterable_fields except AttributeError: return key.strip() in self.serializer_class._declared_fields def get_default_odm_query(self): raise NotImplementedError('Must define get_default_odm_query') def get_query_from_request(self): query = self.query_params_to_odm_query(self.request.QUERY_PARAMS) if not query: query = self.get_default_odm_query() return query def query_params_to_odm_query(self, query_params): """Convert query params to a modularodm Query object.""" fields_dict = query_params_to_fields(query_params) if fields_dict: query_parts = [ Q(self.convert_key(key, value), self.get_comparison_operator(key, value), self.convert_value(key, value)) for key, value in fields_dict.items() if self.is_filterable_field(key, value) ] # TODO Ensure that if you try to filter on an invalid field, it returns a useful error. try: query = functools.reduce(intersect, query_parts) except TypeError: query = None else: query = None return query # Used so that that queries by _id will work def convert_key(self, key, value): key = key.strip() if self.serializer_class._declared_fields[key].source: return self.serializer_class._declared_fields[key].source return key # Used to convert string values from query params to Python booleans when necessary def convert_value(self, key, value): value = value.strip() if value in self.TRUTHY: return True elif value in self.FALSY: return False # Convert me to current user's pk elif value == 'me' and not self.request.user.is_anonymous(): return self.request.user.pk else: return value
Python
0
@@ -1881,26 +1881,19 @@ elf, key -, value ):%0A + %0A @@ -2161,39 +2161,32 @@ _field(self, key -, value ):%0A try:%0A @@ -2944,31 +2944,28 @@ vert_key(key -, value +=key ), self.get_ @@ -2987,23 +2987,20 @@ ator(key -, value +=key ), self. @@ -3013,21 +3013,22 @@ t_value( -key, +value= value))%0A @@ -3112,23 +3112,20 @@ ield(key -, value +=key )%0A @@ -3504,39 +3504,32 @@ rt_key(self, key -, value ):%0A key = @@ -3782,16 +3782,16 @@ cessary%0A + def @@ -3809,21 +3809,16 @@ ue(self, - key, value):
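Decoded, the patch drops the parameters these helpers never used -- leaving `get_comparison_operator(self, key)`, `is_filterable_field(self, key)`, `convert_key(self, key)` and `convert_value(self, value)` -- and switches the query-building call sites to keyword arguments. The reconstructed core of `query_params_to_odm_query` (minor whitespace in the hunks is approximated):

    query_parts = [
        Q(self.convert_key(key=key),
          self.get_comparison_operator(key=key),
          self.convert_value(value=value))
        for key, value in fields_dict.items()
        if self.is_filterable_field(key=key)
    ]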
2982d38d863e6f7654c4939a526d6e783525f8d6
refactor compare_players
cribbage/main.py
cribbage/main.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import, print_function from cribbage.game import Game from cribbage.randomplayer import RandomCribbagePlayer from cribbage.simpleplayer import SimpleCribbagePlayer # ------------------------------------------------------------ # Cribbage Game stats = [0,0] for i in range(1000): g = Game([RandomCribbagePlayer(), RandomCribbagePlayer()]) g.play() stats[g.winner] += 1 # stats # [487, 513] stats = [0,0] for i in range(500): g = Game([RandomCribbagePlayer(), SimpleCribbagePlayer()]) g.play() stats[g.winner] += 1 # with discard() # stats # [16, 484] # with play_card() # stats # [12, 488] # 0.976 success against random player # http://www.socscistatistics.com/tests/chisquare/Default2.aspx # The chi-square statistic is 0.5879. The p-value is .443236. stats = [0,0] for i in range(500): g = Game([RandomCribbagePlayer(), SimpleCribbagePlayer(estimate_discard=False)]) g.play() stats[g.winner] += 1 # stats # [161, 339] stats = [0,0] for i in range(500): g = Game([SimpleCribbagePlayer(), SimpleCribbagePlayer(estimate_playcard=False)]) g.play() stats[g.winner] += 1 # stats # [326, 174] # stats (after optimizing code) # [298, 202] # [325, 175] def myfunc(): stats = [0,0] for i in range(100): g = Game([SimpleCribbagePlayer(), SimpleCribbagePlayer(estimate_playcard=False)]) g.play() stats[g.winner] += 1 import cProfile cProfile.run('myfunc()', sort='time') # deck=make_deck() # random.shuffle(deck) # p=SimpleCribbagePlayer() # hand=deck[:6] # def wrap_discard(): # for i in range(1000): # p.discard(hand,False) # import hotshot # prof = hotshot.Profile("stones.prof") # prof.runcall(wrap_discard) # prof.close() # import hotshot.stats # stats = hotshot.stats.load("stones.prof") # stats.sort_stats('time', 'calls') # stats.print_stats(20) stats = [0,0] for i in range(500): g = Game([SimpleCribbagePlayer(estimate_discard=False), SimpleCribbagePlayer(estimate_playcard=False)]) g.play() stats[g.winner] += 1 # stats # [48, 452]
Python
0.000004
@@ -237,16 +237,202 @@ Player%0A%0A +def compare_players(players, num_games=1000):%0A stats = %5B0, 0%5D%0A for i in range(num_games):%0A g = Game(players)%0A g.play()%0A stats%5Bg.winner%5D += 1%0A return stats%0A%0A # ------ @@ -515,48 +515,23 @@ s = -%5B0,0%5D%0Afor i in range(1000):%0A g = Game +compare_players (%5BRa @@ -581,116 +581,54 @@ )%5D)%0A - g.play()%0A stats%5Bg.winner%5D += 1%0A%0A# stats%0A# %5B487, 513%5D%0A%0Astats = %5B0,0%5D%0Afor i in range(500):%0A g = Game +%0A# stats%0A# %5B487, 513%5D%0A%0Astats = compare_players (%5BRa @@ -676,47 +676,14 @@ r()%5D -)%0A g.play()%0A stats%5Bg.winner%5D += 1 +, 500) %0A%0A# @@ -935,72 +935,73 @@ s = -%5B0,0%5D%0Afor i in range(500):%0A g = Game(%5BRandomCribbagePlayer(), +compare_players(%5BRandomCribbagePlayer(),%0A Sim @@ -1046,143 +1046,135 @@ se)%5D -) +, %0A -g.play()%0A stats%5Bg.winner%5D += 1%0A%0A# stats%0A# %5B161, 339%5D%0A%0Astats = %5B0,0%5D%0Afor i in range(500):%0A g = Game(%5BSimpleCribbagePlayer(), + 500)%0A%0A# stats%0A# %5B161, 339%5D%0A%0Astats = compare_players(%5BSimpleCribbagePlayer(),%0A Sim @@ -1220,47 +1220,38 @@ se)%5D -) +, %0A -g.play()%0A stats%5Bg.winner%5D += 1 + 500) %0A%0A# @@ -1359,80 +1359,77 @@ s = -%5B0,0%5D%0A for i in range(100):%0A g = Game(%5BSimpleCribbagePlayer(), +compare_players(%5BSimpleCribbagePlayer(),%0A Sim @@ -1467,25 +1467,25 @@ card=False)%5D -) +, %0A g.p @@ -1485,45 +1485,32 @@ -g.play()%0A stats%5Bg.winner%5D += 1 + 100) %0A%0Aim @@ -1971,47 +1971,23 @@ s = -%5B0,0%5D%0Afor i in range(500):%0A g = Game +compare_players (%5BSi @@ -2029,16 +2029,41 @@ =False), +%0A SimpleC @@ -2105,47 +2105,38 @@ se)%5D -) +, %0A -g.play()%0A stats%5Bg.winner%5D += 1 + 500) %0A%0A#
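Here the patch extracts the repeated simulate-and-tally loop into a helper and collapses each experiment into one call (500 or 100 games where the script previously looped by hand):

    def compare_players(players, num_games=1000):
        stats = [0, 0]
        for i in range(num_games):
            g = Game(players)
            g.play()
            stats[g.winner] += 1
        return stats

    stats = compare_players([RandomCribbagePlayer(), RandomCribbagePlayer()])
    stats = compare_players([RandomCribbagePlayer(), SimpleCribbagePlayer()], 500)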
7ec191ce0b82827013485a98db84cd66aa2ca1b4
use the currently checked out branch
lib/spack/spack/cmd/bootstrap.py
lib/spack/spack/cmd/bootstrap.py
############################################################################## # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/llnl/spack # Please also see the LICENSE file for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## import os from subprocess import check_call import llnl.util.tty as tty from llnl.util.filesystem import join_path, mkdirp import spack from spack.util.executable import which _SPACK_UPSTREAM = 'https://github.com/llnl/spack' description = "Create a new installation of spack in another prefix" def setup_parser(subparser): subparser.add_argument('prefix', help="names of prefix where we should install spack") def get_origin_url(): git_dir = join_path(spack.prefix, '.git') git = which('git', required=True) try: origin_url = git( '--git-dir=%s' % git_dir, 'config', '--get', 'remote.origin.url', output=str) except ProcessError: origin_url = _SPACK_UPSTREAM tty.warn('No git repository found; ' 'using default upstream URL: %s' % origin_url) return origin_url.strip() def bootstrap(parser, args): origin_url = get_origin_url() prefix = args.prefix tty.msg("Fetching spack from origin: %s" % origin_url) if os.path.isfile(prefix): tty.die("There is already a file at %s" % prefix) mkdirp(prefix) if os.path.exists(join_path(prefix, '.git')): tty.die("There already seems to be a git repository in %s" % prefix) files_in_the_way = os.listdir(prefix) if files_in_the_way: tty.die("There are already files there! Delete these files before boostrapping spack.", *files_in_the_way) tty.msg("Installing:", "%s/bin/spack" % prefix, "%s/lib/spack/..." % prefix) os.chdir(prefix) git = which('git', required=True) git('init', '--shared', '-q') git('remote', 'add', 'origin', origin_url) git('fetch', 'origin', 'master:refs/remotes/origin/master', '-n', '-q') git('reset', '--hard', 'origin/master', '-q') tty.msg("Successfully created a new spack in %s" % prefix, "Run %s/bin/spack to use this installation." % prefix)
Python
0
@@ -1652,19 +1652,20 @@ _origin_ -url +info ():%0A @@ -1753,16 +1753,216 @@ try:%0A + branch = git('symbolic-ref', '--short', 'HEAD', output=str)%0A except ProcessError:%0A branch = 'develop'%0A tty.warn('No branch found; using default branch: %25s' %25 branch)%0A try:%0A @@ -2275,16 +2275,17 @@ return +( origin_u @@ -2294,16 +2294,33 @@ .strip() +, branch.strip()) %0A%0A%0Adef b @@ -2353,24 +2353,32 @@ origin_url +, branch = get_origi @@ -2379,19 +2379,20 @@ _origin_ -url +info ()%0A p @@ -3184,22 +3184,18 @@ igin', ' -master +%25s :refs/re @@ -3207,24 +3207,66 @@ /origin/ -master', +%25s' %25 (branch, branch),%0A '-n', ' @@ -3309,15 +3309,20 @@ gin/ -master' +%25s' %25 branch , '-
1c432cd4cfb0d6f8bb54727e2b16b58fd7feb390
Add more categorical data
tests/data.py
tests/data.py
import pytest import numpy as np @pytest.fixture def continuous_data(): x = np.array(range(5)) y = np.array(range(5)) return x, y @pytest.fixture def continuous_data_complicated(): x = np.array(range(10)) y = np.array(list(range(5)) + [6, 6.1, 6.2, 7, 7.1]) return x, y @pytest.fixture def categorical_data(): x = np.array(range(6)) y = np.array([0, 0, 0, 1, 1, 1]) return x, y @pytest.fixture def tall_matrix_data(): x = np.array([[1, 2, 3], [1.1, 2.05, 3], [0.99, 2, 3], [0.98, 2.1, 3]]) return x @pytest.fixture def categorical_2Dmatrix_data(): x = np.array([[1, 2], [3, 3], [5, 2], [1, 4], [9, 6], [8, 8]]) y = np.array([0, 0, 0, 0, 1, 1]) return x, y def categorical_2Dmatrix_data_big(): x = np.array([[1.1, 1.5], [1, 2], [3, 3], [5, 2], [1, 4], [9, 6], [8, 8], [8.1, 9], [7.7, 7.1], [6, 12], [10, 6]]) y = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]) return x, y @pytest.fixture def categorical_2Dmatrix_bernoulli_data(): x = np.array([[0, 0], [0, 0], [1, 1], [0, 0], [1, 1], [1, 1]]) y = np.array([0, 0, 0, 0, 1, 1]) return x, y
Python
0.000073
@@ -601,16 +601,239 @@ turn x%0A%0A +%0A@pytest.fixture%0Adef tall_matrix_data_2():%0A x = np.array(%5B%5B1, 2, 3%5D,%0A %5B1.1, 2.05, 3%5D,%0A %5B0.99, 2, 3%5D,%0A %5B0.98, 2.1, 3%5D%5D)%0A y = np.array(range(4))%0A return x, y%0A%0A @pytest. @@ -1085,16 +1085,32 @@ x, y%0A%0A%0A +@pytest.fixture%0A def cate
a0546aafb34d27b55a59fdf499727cb5ef44cca4
Print table-header
performance/routine.py
performance/routine.py
from performance import web import time import threading class Tool: def __init__(self, config): if not isinstance(config, Config): raise TypeError('No performance.routine.Config object') self.config = config def run(self): if self.config.is_valid(): run_event = threading.Event() run_event.set() finish_event = FinishEvent() clients = [] print(' > Starting tests') print(' > Stop tests with CTRL-C') for client_index in range(self.config.clients_count): client = web.Client( host=self.config.host, requests=self.config.requests, do_requests_counter=self.config.requests_per_client, event=run_event, finish_event=finish_event ) clients.append(client) client.start() try: while finish_event.finished < self.config.clients_count: time.sleep(.1) print(' > Finished tests') except KeyboardInterrupt: run_event.clear() for client in clients: client.join() print(' > Exited with CTRL-C') else: print('Invalid configuration') class Config: def __init__(self, host, requests_per_client=10, clients_count=1): self.host = host self.requests = [] self.requests_per_client = requests_per_client self.clients_count = clients_count def add_request(self, request): if not isinstance(request, web.Request): raise TypeError('No performance.web.Request object') self.requests.append(request) def is_valid(self): return not( not self.requests or self.clients_count < 1 or self.requests_per_client < 1 ) class FinishEvent: def __init__(self): self.finished = 0 def finish(self): self.finished = self.finished + 1
Python
0.999599
@@ -505,32 +505,74 @@ s with CTRL-C')%0A + print(' URL Time Code')%0A for
0415361dcd6171f0f407ee528fa0761bf1e914b0
Add proc name to gunicorn conf.
mezzanine/project_template/deploy/gunicorn.conf.py
mezzanine/project_template/deploy/gunicorn.conf.py
import os bind = "127.0.0.1:%(port)s" workers = (os.sysconf("SC_NPROCESSORS_ONLN") * 2) + 1 loglevel = "error"
Python
0
@@ -24,16 +24,25 @@ 0.0.1:%25( +gunicorn_ port)s%22%0A @@ -114,8 +114,36 @@ %22error%22%0A +proc_name = %22%25(proj_name)s%22%0A
56606d3234fbebc504feec201e4a99a3adcd5023
Fix code for pyflake8 convention
mgmtsystem_hazard_risk/models/mgmtsystem_hazard.py
mgmtsystem_hazard_risk/models/mgmtsystem_hazard.py
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2010 Savoir-faire Linux (<http://www.savoirfairelinux.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import models, fields, api from .common import _parse_risk_formula class MgmtsystemHazard(models.Model): _inherit = "mgmtsystem.hazard" risk_type_id = fields.Many2one( 'mgmtsystem.hazard.risk.type', 'Risk Type', required=True, ) risk = fields.Integer(compute="_compute_risk", string='Risk') residual_risk_ids = fields.One2many( 'mgmtsystem.hazard.residual_risk', 'hazard_id', 'Residual Risk Evaluations', ) @api.depends("probability_id", "severity_id", "usage_id") def _compute_risk(self): mycompany = self.env['res.users'].browse(self._uid).company_id for hazard in self: if hazard.probability_id and hazard.severity_id and hazard.usage_id: hazard.risk = _parse_risk_formula( mycompany.risk_computation_id.name, hazard.probability_id.value, hazard.severity_id.value, hazard.usage_id.value ) else: hazard.risk = False
Python
0.000009
@@ -1713,32 +1713,53 @@ obability_id and +%5C%0A hazard.severity @@ -1765,16 +1765,37 @@ y_id and +%5C%0A hazard. @@ -2065,17 +2065,16 @@ ) - %0A
7c70a7c4b8b4cb002bcb8d683268c91de26d11c4
formatname must be a string
tldp/doctypes/docbook4xml.py
tldp/doctypes/docbook4xml.py
#! /usr/bin/python from ..utils import logger from .common import SignatureChecker class Docbook4XML(SignatureChecker): formatname = ['DocBook 4.x XML'] extensions = ['.xml'] signatures = ['-//OASIS//DTD DocBook XML V4.1.2//EN', '-//OASIS//DTD DocBook XML V4.2//EN', '-//OASIS//DTD DocBook XML V4.2//EN', '-//OASIS//DTD DocBook XML V4.4//EN', '-//OASIS//DTD DocBook XML V4.5//EN', ] tools = ['xsltproc', 'html2text', 'fop', 'dblatex'] files = [''] def create_txt(self): logger.info("Creating txt for %s", self.source.stem) def create_pdf(self): logger.info("Creating PDF for %s", self.source.stem) def create_html(self): logger.info("Creating chunked HTML for %s", self.source.stem) def create_htmls(self): logger.info("Creating single page HTML for %s", self.source.stem) # # -- end of file # # -- end of file
Python
0.999577
@@ -134,17 +134,16 @@ tname = -%5B 'DocBook @@ -151,17 +151,16 @@ 4.x XML' -%5D %0A ext
96113152179ca81f24b85c19420fae7078907035
change to ipn ver
amazon_buttons/views.py
amazon_buttons/views.py
from django.views.decorators.csrf import csrf_exempt from django.http import HttpResponse from amazon_buttons import models import datetime from django.conf import settings import urllib from amazon_buttons import buttonconf from amazon_buttons import _crypt @csrf_exempt def ipn_handler(request): ipn = models.ipn_response() ipn.datetime = datetime.datetime.fromtimestamp(int(request.POST['transactionDate'])) for key, val in request.POST.iteritems(): attrib = getattr(ipn, key, None) if attrib: setattr(ipn, key, val) if settings.AMAZON_IPN_VERIFY: if settings.AMAZON_SANDBOX: ver_url = buttonconf.SANDBOX_VERIFY else: ver_url = buttonconf.LIVE_VERIFY prepd_data = buttonconf.DEFAULT_IPNVER_DATA prepd_data['UrlEndPoint'] = ver_url prepd_data['target_url'] = ver_url prepd_data['HttpParameters'] = urllib.urlencode(request.POST) prepd_data['AWSAccessKeyId'] = settings.AMAZON_ACCESS_KEY prepd_data['Timestamp'] = datetime.datetime.now().isoformat() s_key = settings.AMAZON_SECRET_KEY prepd_data['Signature'] = _crypt.sig_maker(s_key, prepd_data,'GET') del prepd_data['target_url'] fin_url = urllib.urlencode(prepd_data) print fin_url else: ipn.save() return HttpResponse('Done')
Python
0.999401
@@ -747,31 +747,78 @@ ndPoint'%5D = -ver_url +settings.DOMAIN_FOR_AMAZON_IPN + reverse('amazon_ipn') %0A%09%09prepd_dat
62367b8f6979dd90c5d8c4407580e958228792a5
Fix list+tuple error due to Django Upgrade to v2.2+
amy/workshops/fields.py
amy/workshops/fields.py
from dal_select2.widgets import ( Select2WidgetMixin as DALSelect2WidgetMixin, ) from dal.autocomplete import ( Select2 as DALSelect2, Select2Multiple as DALSelect2Multiple, ListSelect2 as DALListSelect2, ModelSelect2 as DALModelSelect2, ModelSelect2Multiple as DALModelSelect2Multiple, TagSelect2 as DALTagSelect2, ) from django.core.validators import RegexValidator, MaxLengthValidator from django.db import models from django import forms GHUSERNAME_MAX_LENGTH_VALIDATOR = MaxLengthValidator(39, message='Maximum allowed username length is 39 characters.', ) # according to https://stackoverflow.com/q/30281026, # GH username can only contain alphanumeric characters and # hyphens (but not consecutive), cannot start or end with # a hyphen, and can't be longer than 39 characters GHUSERNAME_REGEX_VALIDATOR = RegexValidator( # regex inspired by above StackOverflow thread regex=r'^([a-zA-Z\d](?:-?[a-zA-Z\d])*)$', message='This is not a valid GitHub username.', ) class NullableGithubUsernameField(models.CharField): def __init__(self, **kwargs): kwargs.setdefault('null', True) kwargs.setdefault('blank', True) kwargs.setdefault('default', '') # max length of the GH username is 39 characters kwargs.setdefault('max_length', 39) super().__init__(**kwargs) default_validators = [ GHUSERNAME_MAX_LENGTH_VALIDATOR, GHUSERNAME_REGEX_VALIDATOR, ] #------------------------------------------------------------ # "Rewrite" select2 widgets from Django Autocomplete Light so # that they don't use Django's admin-provided jQuery, which # causes errors with jQuery provided by us. class Select2WidgetMixin(DALSelect2WidgetMixin): @property def media(self): m = super().media js = list(m._js) # remove JS import that mess up jQuery jquery_messing_imports = ( 'admin/js/vendor/jquery/jquery.js', 'admin/js/vendor/jquery/jquery.min.js', # 'admin/js/jquery.init.js', # 'autocomplete_light/jquery.init.js', ) for import_ in jquery_messing_imports: try: js.remove(import_) except ValueError: pass # inject select2 bootstrap4 theme select2_bootstrap4_theme = ('@ttskch/select2-bootstrap4-theme/' 'dist/select2-bootstrap4.css') m._css['screen'] = m._css['screen'] + (select2_bootstrap4_theme, ) return forms.Media(css=m._css, js=js) def build_attrs(self, *args, **kwargs): """Set select2 bootstrap4 theme by default.""" attrs = super().build_attrs(*args, **kwargs) attrs.setdefault('data-theme', 'bootstrap4') return attrs class Select2(Select2WidgetMixin, DALSelect2): pass class Select2Multiple(Select2WidgetMixin, DALSelect2Multiple): pass class ListSelect2(Select2WidgetMixin, DALListSelect2): pass class ModelSelect2(Select2WidgetMixin, DALModelSelect2): pass class ModelSelect2Multiple(Select2WidgetMixin, DALModelSelect2Multiple): pass class TagSelect2(Select2WidgetMixin, DALTagSelect2): pass class RadioSelectWithOther(forms.RadioSelect): """A RadioSelect widget that should render additional field ('Other'). We have a number of occurences of two model fields bound together: one containing predefined set of choices, the other being a text input for other input user wants to choose instead of one of our predefined options. This widget should help with rendering two widgets in one table row.""" other_field = None # to be bound later def __init__(self, other_field_name, *args, **kwargs): super().__init__(*args, **kwargs) self.other_field_name = other_field_name class CheckboxSelectMultipleWithOthers(forms.CheckboxSelectMultiple): """A multiple choice widget that should render additional field ('Other'). 
We have a number of occurences of two model fields bound together: one containing predefined set of choices, the other being a text input for other input user wants to choose instead of one of our predefined options. This widget should help with rendering two widgets in one table row.""" other_field = None # to be bound later def __init__(self, other_field_name, *args, **kwargs): super().__init__(*args, **kwargs) self.other_field_name = other_field_name
Python
0
@@ -2495,17 +2495,17 @@ een'%5D + -( +%5B select2_ @@ -2524,11 +2524,9 @@ heme -, ) +%5D %0A%0A
15b0d333f434568b8c7a6f78b9773cd00d149638
update loadxl tests
src/encoded/tests/test_loadxl.py
src/encoded/tests/test_loadxl.py
import pytest from encoded import loadxl import json from unittest import mock from pkg_resources import resource_filename pytestmark = pytest.mark.working def test_gen_access_keys(testapp, admin): res = loadxl.generate_access_key(testapp, store_access_key='local', email=admin['email']) res = json.loads(res) assert res['default']['server'] == 'http://localhost:8000' assert res['default']['secret'] assert res['default']['key'] def test_gen_access_keys_on_server(testapp, admin): old_get = testapp.get def side_effect(path): from webtest.response import TestResponse if path == '/health?format=json': tr = TestResponse() tr.json_body = {"beanstalk_env": "fourfront-webprod"} tr.content_type = 'application/json' return tr else: return old_get(path) testapp.get = mock.Mock(side_effect=side_effect) with mock.patch('encoded.loadxl.get_beanstalk_real_url') as mocked_url: mocked_url.return_value = 'http://fourfront-hotseat' res = loadxl.generate_access_key(testapp, store_access_key='s3', email=admin['email']) res = json.loads(res) assert res['default']['server'] == 'http://fourfront-hotseat' assert res['default']['secret'] assert res['default']['key'] assert mocked_url.called_once() def test_load_data_endpoint(testapp): master_inserts = resource_filename('encoded', 'tests/data/master-inserts/') data = {} data['user'] = loadxl.read_single_sheet(master_inserts, 'user') with mock.patch('encoded.loadxl.get_app') as mocked_app: mocked_app.return_value = testapp.app res = testapp.post_json('/load_data', data, status=200) assert res.json['status'] == 'success' def test_load_data_endpoint_returns_error_if_incorrect_data(testapp): master_inserts = resource_filename('encoded', 'tests/data/master-inserts/') data = {} data['user'] = loadxl.read_single_sheet(master_inserts, 'user') data['lab'] = loadxl.read_single_sheet(master_inserts, 'lab') with mock.patch('encoded.loadxl.get_app') as mocked_app: mocked_app.return_value = testapp.app res = testapp.post_json('/load_data', data, status=422) assert res.json['status'] == 'error' assert res.json['@graph'] def test_load_data_user_specified_config(testapp): config_uri= 'test.ini' with mock.patch('encoded.loadxl.get_app') as mocked_app: mocked_app.return_value = testapp.app res = testapp.post_json('/load_data', {'config_uri': config_uri}, status=200) assert res.json['status'] == 'success' mocked_app.assert_called_once_with(config_uri, 'app') def test_load_data_local_dir(testapp): expected_dir = resource_filename('encoded', 'tests/data/perf-testing/') with mock.patch('encoded.loadxl.get_app') as mocked_app: with mock.patch('encoded.loadxl.load_all') as load_all: mocked_app.return_value = testapp.app load_all.return_value = None res = testapp.post_json('/load_data', {'local_dir': 'perf-testing'}, status=200) assert res.json['status'] == 'success' load_all.assert_called_once_with(mock.ANY, expected_dir, [])
Python
0
@@ -1482,17 +1482,16 @@ %5B'key'%5D%0A -%0A @@ -1570,66 +1570,28 @@ -master_inserts = resource_filename('encoded', 'tests/data/ +data = %7B'fdn_dir': ' mast @@ -1604,92 +1604,55 @@ erts -/') +', %0A -data = %7B%7D%0A data%5B'user'%5D = loadxl.read_single_sheet(master_inserts + 'itype': %5B'award', 'lab' , 'user' )%0A @@ -1643,25 +1643,26 @@ lab', 'user' -) +%5D%7D %0A with mo @@ -1925,20 +1925,23 @@ correct_ -data +keyword (testapp @@ -1951,66 +1951,28 @@ -master_inserts = resource_filename('encoded', 'tests/data/ +data = %7B'mdn_dir': ' mast @@ -1985,159 +1985,435 @@ erts -/') +', %0A -data = %7B%7D%0A data%5B'user'%5D = loadxl.read_single_sheet(master_inserts, 'user')%0A data%5B'lab'%5D = loadxl.read_single_sheet( + 'itype': %5B'user'%5D%7D%0A with mock.patch('encoded.loadxl.get_app') as mocked_app:%0A mocked_app.return_value = testapp.app%0A res = testapp.post_json('/load_data', data, status=422)%0A assert res.json%5B'status'%5D == 'error'%0A assert res.json%5B'@graph'%5D%0A%0A%0Adef test_load_data_endpoint_returns_error_if_incorrect_data(testapp):%0A data = %7B'fdn_dir': ' master -_ +- inserts -, 'lab') +',%0A 'itype': %5B'user'%5D%7D %0A @@ -2708,24 +2708,112 @@ g(testapp):%0A + data = %7B'fdn_dir': 'master-inserts',%0A 'itype': %5B'user', 'lab', 'award'%5D%7D%0A config_u @@ -2814,16 +2814,17 @@ nfig_uri + = 'test. @@ -2828,16 +2828,52 @@ st.ini'%0A + data%5B'config_uri'%5D = config_uri%0A with @@ -3021,34 +3021,12 @@ a', -%7B'config_uri': config_uri%7D +data , st @@ -3529,21 +3529,19 @@ ata', %7B' -local +fdn _dir': ' @@ -3692,8 +3692,39 @@ ir, -%5B%5D +None, itype=None, overwrite=False )%0A
38b5dc7ae5deb6eba676fb7315f2aafad474a1f4
Allow overriding es.authorization_enabled
annotator/annotation.py
annotator/annotation.py
from annotator import authz, document, es TYPE = 'annotation' MAPPING = { 'id': {'type': 'string', 'index': 'no'}, 'annotator_schema_version': {'type': 'string'}, 'created': {'type': 'date'}, 'updated': {'type': 'date'}, 'quote': {'type': 'string'}, 'tags': {'type': 'string', 'index_name': 'tag'}, 'text': {'type': 'string'}, 'uri': {'type': 'string', 'index': 'not_analyzed'}, 'user': {'type': 'string', 'index': 'not_analyzed'}, 'consumer': {'type': 'string', 'index': 'not_analyzed'}, 'ranges': { 'index_name': 'range', 'properties': { 'start': {'type': 'string', 'index': 'not_analyzed'}, 'end': {'type': 'string', 'index': 'not_analyzed'}, 'startOffset': {'type': 'integer'}, 'endOffset': {'type': 'integer'}, } }, 'permissions': { 'index_name': 'permission', 'properties': { 'read': {'type': 'string', 'index': 'not_analyzed'}, 'update': {'type': 'string', 'index': 'not_analyzed'}, 'delete': {'type': 'string', 'index': 'not_analyzed'}, 'admin': {'type': 'string', 'index': 'not_analyzed'} } }, 'document': { 'properties': document.MAPPING } } class Annotation(es.Model): __type__ = TYPE __mapping__ = MAPPING def save(self, *args, **kwargs): _add_default_permissions(self) # If the annotation includes document metadata look to see if we have # the document modeled already. If we don't we'll create a new one # If we do then we'll merge the supplied links into it. if 'document' in self: d = self['document'] uris = [link['href'] for link in d['link']] docs = document.Document.get_all_by_uris(uris) if len(docs) == 0: doc = document.Document(d) doc.save() else: doc = docs[0] links = d.get('link', []) doc.merge_links(links) doc.save() super(Annotation, self).save(*args, **kwargs) @classmethod def search_raw(cls, query=None, params=None, user=None, **kwargs): if query is None: query = {} if es.authorization_enabled: f = authz.permissions_filter(user) if not f: raise RunTimeError("Authorization filter creation failed") filtered_query = { 'filtered': { 'filter': f } } # Instert original query (if present) if 'query' in query: filtered_query['filtered']['query'] = query['query'] # Use the filtered query instead of the original query['query'] = filtered_query res = super(Annotation, cls).search_raw(query=query, params=params, **kwargs) return res @classmethod def _build_query(cls, query=None, offset=None, limit=None, user=None, **kwargs): if query is None: query = {} q = super(Annotation, cls)._build_query(query, offset, limit, **kwargs) # attempt to expand query to include uris for other representations # using information we may have on hand about the Document if 'uri' in query: term_filter = q['query']['filtered']['filter'] doc = document.Document.get_by_uri(query['uri']) if doc: new_terms = [] for term in term_filter['and']: if 'uri' in term['term']: term = {'or': []} for uri in doc.uris(): term['or'].append({'term': {'uri': uri}}) new_terms.append(term) term_filter['and'] = new_terms if es.authorization_enabled: # Apply a filter to the results. f = authz.permissions_filter(user) if not f: return False # Refuse to perform the query q['query'] = {'filtered': {'query': q['query'], 'filter': f}} return q def _add_default_permissions(ann): if 'permissions' not in ann: ann['permissions'] = {'read': [authz.GROUP_CONSUMER]}
Python
0.000032
@@ -2196,32 +2196,79 @@ None, user=None, +%0A authorization_enabled=None, **kwargs):%0A @@ -2315,35 +2315,135 @@ = %7B%7D%0A if -es. +authorization_enabled is None:%0A authorization_enabled = es.authorization_enabled%0A if authorization_en
804d5808d38c5cb66fa2cfdd91e888fa5b3c8155
Remove keystoneclient deprecation
aodh/keystone_client.py
aodh/keystone_client.py
# # Copyright 2015 eNovance <licensing@enovance.com> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from keystoneauth1 import exceptions as ka_exception from keystoneauth1.identity.generic import password from keystoneauth1 import loading as ka_loading from keystoneclient import session from keystoneclient.v3 import client as ks_client_v3 from oslo_config import cfg CFG_GROUP = "service_credentials" def get_session(conf): """Get an aodh service credentials auth session.""" auth_plugin = ka_loading.load_auth_from_conf_options(conf, CFG_GROUP) return ka_loading.load_session_from_conf_options( conf, CFG_GROUP, auth=auth_plugin ) def get_client(conf): """Return a client for keystone v3 endpoint.""" sess = get_session(conf) return ks_client_v3.Client(session=sess) def get_trusted_client(conf, trust_id): # Ideally we would use load_session_from_conf_options, but we can't do that # *and* specify a trust, so let's create the object manually. auth_plugin = password.Password( username=conf[CFG_GROUP].username, password=conf[CFG_GROUP].password, auth_url=conf[CFG_GROUP].auth_url, user_domain_id=conf[CFG_GROUP].user_domain_id, trust_id=trust_id) sess = session.Session(auth=auth_plugin) return ks_client_v3.Client(session=sess) def get_auth_token(client): return client.session.auth.get_access(client.session).auth_token def get_client_on_behalf_user(auth_plugin): """Return a client for keystone v3 endpoint.""" sess = session.Session(auth=auth_plugin) return ks_client_v3.Client(session=sess) def create_trust_id(conf, trustor_user_id, trustor_project_id, roles, auth_plugin): """Create a new trust using the aodh service user.""" admin_client = get_client(conf) trustee_user_id = admin_client.session.get_user_id() client = get_client_on_behalf_user(auth_plugin) trust = client.trusts.create(trustor_user=trustor_user_id, trustee_user=trustee_user_id, project=trustor_project_id, impersonation=True, role_names=roles) return trust.id def delete_trust_id(trust_id, auth_plugin): """Delete a trust previously setup for the aodh user.""" client = get_client_on_behalf_user(auth_plugin) try: client.trusts.delete(trust_id) except ka_exception.NotFound: pass OPTS = [ cfg.StrOpt('region-name', default=os.environ.get('OS_REGION_NAME'), deprecated_name="os-region-name", help='Region name to use for OpenStack service endpoints.'), cfg.StrOpt('interface', default=os.environ.get( 'OS_INTERFACE', os.environ.get('OS_ENDPOINT_TYPE', 'public')), deprecated_name="os-endpoint-type", choices=('public', 'internal', 'admin', 'auth', 'publicURL', 'internalURL', 'adminURL'), help='Type of endpoint in Identity service catalog to use for ' 'communication with OpenStack services.'), ] def register_keystoneauth_opts(conf): ka_loading.register_auth_conf_options(conf, CFG_GROUP) ka_loading.register_session_conf_options( conf, CFG_GROUP, deprecated_opts={'cacert': [ cfg.DeprecatedOpt('os-cacert', group=CFG_GROUP), cfg.DeprecatedOpt('os-cacert', group="DEFAULT")] })
Python
0.000005
@@ -771,22 +771,21 @@ keystone -client +auth1 import
0ea044dc978b6356cc49b9e5a47058beef5f38e4
update transformer mean
tools/predict_worker/main.py
tools/predict_worker/main.py
import os import time import numpy as np import caffe import gear import cStringIO as StringIO import json import urllib2 from caffe.io import resize_image # import logging # logging.basicConfig(level=logging.DEBUG) caffe.set_mode_cpu() MAX_PREDICT_LENGTH = 5 RAW_SCALE = 255. def load_binaryproto(fn): blob = caffe.proto.caffe_pb2.BlobProto() data = open(fn, 'rb').read() blob.ParseFromString(data) arr = np.array( caffe.io.blobproto_to_array(blob) ) return arr[0] class Classifier(object): def __init__(self, resoursesPath): # mean_file = resoursesPath + "/mean.binaryproto" model_def_file = resoursesPath + "/deploy.prototxt" pretrained_model_file = resoursesPath + "/models/huabot-brain.caffemodel" self.net = caffe.Classifier( model_def_file, pretrained_model_file, raw_scale=RAW_SCALE, # mean=load_binaryproto(mean_file), channel_swap=(2, 1, 0) ) def classify_image(self, image): try: starttime = time.time() scores = self.net.predict([image], oversample=True).flatten() endtime = time.time() indices = (-scores).argsort()[:MAX_PREDICT_LENGTH] print(indices) meta = [{'id':i, 'score': float(scores[i])} for i in indices] return (True, meta, endtime - starttime) except Exception as err: print(err) return (False, 'Something went wrong when classifying the ' 'image. Maybe try another one?') class Brain(object): def __init__(self, resoursesPath): self._clf = Classifier(resoursesPath) self._funcs = {} self._worker = gear.Worker('huaban-brain') def _add_func(self, func_name, callback): self._worker.registerFunction(func_name) self._funcs[func_name] = callback def add_server(self, host='localhost', port=4730): self._worker.addServer(host, port) def process(self): self._add_func('CAFFE:PREDICT', self.classify_image) self._add_func('CAFFE:PREDICT:URL', self.classify_image_url) while 1: print('wait...') job = self._worker.getJob() func = self._funcs.get(job.name) if func: try: func(job) except Exception as e: job.sendWorkComplete(json.dumps({'err': str(e)})) print('process %s error: %s'%(job.name, e)) def classify_image(self, job): self._classify_image(job, job.arguments) def classify_image_url(self, job): url = job.arguments rsp = urllib2.urlopen(url, timeout=10) data = rsp.read() self._classify_image(job, data) def _classify_image(self, job, data): image = caffe.io.load_image(StringIO.StringIO(data)) result = self._clf.classify_image(image) if result[0]: result = {'bet_result': result[1], 'time': result[2]} else: result = {'err': result[1]} print(result) job.sendWorkComplete(json.dumps(result)) def main(scripts, resoursesPath='resourses'): GEARMAND_PORT = os.environ.get('GEARMAND_PORT', 'tcp://127.0.0.1:4730')[6:].split(':') brain = Brain(resoursesPath) brain.add_server(GEARMAND_PORT[0], int(GEARMAND_PORT[1])) print("brain process") brain.process() if __name__ == "__main__": import sys main(*sys.argv)
Python
0.000001
@@ -149,18 +149,16 @@ e_image%0A -# import l @@ -550,26 +550,24 @@ th):%0A - # mean_file = @@ -745,16 +745,59 @@ emodel%22%0A +%0A mean=load_binaryproto(mean_file)%0A%0A @@ -892,120 +892,382 @@ -raw_scale=RAW_SCALE,%0A # mean=load_binaryproto(mean_file),%0A channel_swap=(2, 1, 0)%0A +image_dims=(256, 256),%0A raw_scale=RAW_SCALE,%0A channel_swap=(2, 1, 0)%0A )%0A%0A in_shape = self.net.transformer.inputs%5Bself.net.inputs%5B0%5D%5D%0A if mean.shape%5B1:%5D != in_shape%5B2:%5D:%0A mean = caffe.io.resize_image(mean.transpose((1,2,0)), in_shape%5B2:%5D).transpose((2,0,1))%0A%0A self.net.transformer.set_mean(self.net.inputs%5B0%5D, mean )%0A%0A @@ -1527,35 +1527,8 @@ TH%5D%0A - print(indices)%0A @@ -1699,19 +1699,33 @@ -print + logging.exception (err)%0A + @@ -2445,37 +2445,8 @@ 1:%0A - print('wait...')%0A @@ -2707,16 +2707,17 @@ r(e)%7D))%0A +
38b36bcf3ba639faff7563f96db9ed45ce9ae5a4
normalize qgis jobname
eventkit_cloud/ui/helpers.py
eventkit_cloud/ui/helpers.py
from __future__ import absolute_import from contextlib import contextmanager import os from django.conf import settings from django.utils import timezone from django.template.loader import get_template, render_to_string from celery.utils.log import get_task_logger logger = get_task_logger(__name__) @contextmanager def cd(newdir): prevdir = os.getcwd() os.chdir(newdir) try: yield finally: os.chdir(prevdir) def get_style_files(): """ :return: A list of all of the static files used for styles (e.g. icons) """ style_dir = os.path.join(os.path.dirname(__file__), 'static', 'ui', 'styles') return get_file_paths(style_dir) def generate_qgs_style(run_uid=None, export_provider_task=None): """ Task to create QGIS project file with styles for osm. """ from eventkit_cloud.tasks.models import ExportRun from ..tasks.export_tasks import TaskStates run = ExportRun.objects.get(uid=run_uid) stage_dir = os.path.join(settings.EXPORT_STAGING_ROOT, str(run_uid)) job_name = run.job.name.lower() provider_tasks = run.provider_tasks.all() provider_details = [] if export_provider_task: provider_slug = export_provider_task.slug provider_detail = {'provider_slug': provider_slug, 'file_path': ''} provider_details += [provider_detail] else: for provider_task in provider_tasks: if TaskStates[provider_task.status] not in TaskStates.get_incomplete_states(): provider_slug = provider_task.slug for export_task in provider_task.tasks.all(): try: filename = export_task.result.filename except Exception: continue full_file_path = os.path.join(settings.EXPORT_STAGING_ROOT, str(run_uid), provider_task.slug, filename) if not os.path.isfile(full_file_path): logger.error("Could not find file {0} for export {1}.".format(full_file_path, export_task.name)) continue # Exclude zip files created by zip_export_provider if not full_file_path.endswith(".zip"): provider_detail = {'provider_slug': provider_slug, 'file_path': full_file_path} provider_details += [provider_detail] style_file = os.path.join(stage_dir, '{0}-{1}.qgs'.format(job_name, timezone.now().strftime("%Y%m%d"))) with open(style_file, 'w') as open_file: open_file.write(render_to_string('styles/Style.qgs', context={'job_name': job_name, 'job_date_time': '{0}'.format( timezone.now().strftime("%Y%m%d%H%M%S%f")[ :-3]), 'provider_details': provider_details, 'bbox': run.job.extents})) return style_file def get_file_paths(directory): paths = {} with cd(directory): for dirpath, _, filenames in os.walk('./'): for f in filenames: paths[os.path.abspath(os.path.join(dirpath, f))] = os.path.join(dirpath, f) return paths
Python
0.999998
@@ -920,16 +920,68 @@ kStates%0A + from ..tasks.task_runners import normalize_name%0A run @@ -2653,24 +2653,39 @@ .format( +normalize_name( job_name ,%0A @@ -2668,32 +2668,33 @@ ze_name(job_name +) ,%0A @@ -2905,24 +2905,39 @@ _name': +normalize_name( job_name ,%0A @@ -2928,16 +2928,17 @@ job_name +) ,%0A
a48085bc8a84f417fad4ae5c358a71dfa0d0570b
remove extra reshape
csaps/_sspndg.py
csaps/_sspndg.py
# -*- coding: utf-8 -*- """ ND-Gridded cubic smoothing spline implementation """ import collections.abc as c_abc import typing as ty import numpy as np from ._base import SplinePPFormBase, ISmoothingSpline from ._types import UnivariateDataType, NdGridDataType from ._sspumv import SplinePPForm, CubicSmoothingSpline def ndgrid_prepare_data_sites(data, name) -> ty.Tuple[np.ndarray, ...]: if not isinstance(data, c_abc.Sequence): raise TypeError(f"'{name}' must be a sequence of the vectors.") data = list(data) for i, di in enumerate(data): di = np.array(di, dtype=np.float64) if di.ndim > 1: raise ValueError(f"All '{name}' elements must be a vector.") if di.size < 2: raise ValueError(f"'{name}' must contain at least 2 data points.") data[i] = di return tuple(data) class NdGridSplinePPForm(SplinePPFormBase[ty.Sequence[np.ndarray], ty.Tuple[int, ...]]): """N-D grid spline representation in PP-form Parameters ---------- breaks : np.ndarray Breaks values 1-D array coeffs : np.ndarray Spline coefficients 2-D array """ def __init__(self, breaks: ty.Sequence[np.ndarray], coeffs: np.ndarray) -> None: self._breaks = breaks self._coeffs = coeffs self._pieces = tuple(x.size - 1 for x in breaks) self._order = tuple(s // p for s, p in zip(coeffs.shape, self._pieces)) self._ndim = len(breaks) @property def breaks(self) -> ty.Sequence[np.ndarray]: return self._breaks @property def coeffs(self) -> np.ndarray: return self._coeffs @property def pieces(self) -> ty.Tuple[int, ...]: return self._pieces @property def order(self) -> ty.Tuple[int, ...]: return self._order @property def ndim(self) -> int: return self._ndim def evaluate(self, xi: ty.Sequence[np.ndarray]) -> np.ndarray: yi = self.coeffs.copy() sizey = list(yi.shape) nsize = tuple(x.size for x in xi) d = self.ndim - 1 permute_axes = (d, *range(d)) for i in reversed(range(self.ndim)): ndim = int(np.prod(sizey[:d])) coeffs = yi.reshape((ndim * self.pieces[i], self.order[i]), order='F') spp = SplinePPForm(self.breaks[i], coeffs, ndim=ndim, shape=(ndim, xi[i].size)) yi = spp.evaluate(xi[i]) yi = yi.reshape((*sizey[:d], nsize[i]), order='F') yi = yi.transpose(permute_axes) sizey = list(yi.shape) return yi.reshape(nsize, order='F') class NdGridCubicSmoothingSpline(ISmoothingSpline[NdGridSplinePPForm, ty.Tuple[float, ...], NdGridDataType]): """ND-Gridded cubic smoothing spline Class implements ND-gridded data smoothing (piecewise tensor product polynomial). Parameters ---------- xdata : list, tuple, Sequence[vector-like] X data site vectors for each dimensions. These vectors determine ND-grid. 
For example:: # 2D grid x = [ np.linspace(0, 5, 21), np.linspace(0, 6, 25), ] ydata : np.ndarray Y data ND-array with shape equal ``xdata`` vector sizes weights : [*Optional*] list, tuple, Sequence[vector-like] Weights data vector(s) for all dimensions or each dimension with size(s) equal to ``xdata`` sizes smooth : [*Optional*] float, Sequence[float] The smoothing parameter (or a sequence of parameters for each dimension) in range ``[0, 1]`` where: - 0: The smoothing spline is the least-squares straight line fit - 1: The cubic spline interpolant with natural condition """ def __init__(self, xdata: NdGridDataType, ydata: np.ndarray, weights: ty.Optional[ty.Union[UnivariateDataType, NdGridDataType]] = None, smooth: ty.Optional[ty.Union[float, ty.Sequence[ty.Optional[float]]]] = None) -> None: (self._xdata, self._ydata, self._weights, _smooth) = self._prepare_data(xdata, ydata, weights, smooth) self._ndim = len(self._xdata) self._spline, self._smooth = self._make_spline(_smooth) @property def smooth(self) -> ty.Tuple[float, ...]: """Returns a tuple of smoothing parameters for each axis Returns ------- smooth : Tuple[float, ...] The smoothing parameter in the range ``[0, 1]`` for each axis """ return self._smooth @property def spline(self) -> NdGridSplinePPForm: """Returns the spline description in 'NdGridSplinePPForm' instance Returns ------- spline : NdGridSplinePPForm The spline description in :class:`NdGridSplinePPForm` instance """ return self._spline @classmethod def _prepare_data(cls, xdata, ydata, weights, smooth): xdata = ndgrid_prepare_data_sites(xdata, 'xdata') data_ndim = len(xdata) if ydata.ndim != data_ndim: raise ValueError(f'ydata must have dimension {data_ndim} according to xdata') for yd, xs in zip(ydata.shape, map(len, xdata)): if yd != xs: raise ValueError(f'ydata ({yd}) and xdata ({xs}) dimension size mismatch') if not weights: weights = [None] * data_ndim else: weights = ndgrid_prepare_data_sites(weights, 'weights') if len(weights) != data_ndim: raise ValueError(f'weights ({len(weights)}) and xdata ({data_ndim}) dimensions mismatch') for w, x in zip(weights, xdata): if w is not None: if w.size != x.size: raise ValueError(f'weights ({w}) and xdata ({x}) dimension size mismatch') if not smooth: smooth = [None] * data_ndim if not isinstance(smooth, c_abc.Sequence): smooth = [float(smooth)] * data_ndim else: smooth = list(smooth) if len(smooth) != data_ndim: raise ValueError( f'Number of smoothing parameter values must ' f'be equal number of dimensions ({data_ndim})') return xdata, ydata, weights, smooth def __call__(self, xi: NdGridDataType) -> np.ndarray: """Evaluate the spline for given data """ xi = ndgrid_prepare_data_sites(xi, 'xi') if len(xi) != self._ndim: # pragma: no cover raise ValueError(f'xi ({len(xi)}) and xdata ({self._ndim}) dimensions mismatch') return self._spline.evaluate(xi) def _make_spline(self, smooth: ty.List[ty.Optional[float]]) -> ty.Tuple[NdGridSplinePPForm, ty.Tuple[float, ...]]: sizey = list(self._ydata.shape) coeffs = self._ydata.reshape(sizey, order='F').copy() smooths = [] permute_axes = (self._ndim - 1, *range(self._ndim - 1)) # computing coordinatewise smoothing spline for i in reversed(range(self._ndim)): shape_i = (np.prod(sizey[:-1]), sizey[-1]) ydata_i = coeffs.reshape(shape_i, order='F') s = CubicSmoothingSpline( self._xdata[i], ydata_i, weights=self._weights[i], smooth=smooth[i]) smooths.append(s.smooth) sizey[-1] = s.spline.pieces * s.spline.order coeffs = s.spline.coeffs.reshape(sizey, order='F') if self._ndim > 1: coeffs = 
coeffs.transpose(permute_axes) sizey = list(coeffs.shape) return NdGridSplinePPForm(breaks=self._xdata, coeffs=coeffs), tuple(smooths)
Python
0.000048
@@ -6865,34 +6865,8 @@ ata. -reshape(sizey, order='F'). copy
859f97e2ba209479b0e882946afdf235ccd9e648
Fix #1 Busy loop
pigv2/backends/glue.py
pigv2/backends/glue.py
import threading import ipaddr #Hub: takes one message from the input queue and replicates it across all output queues class hub(object): def __init__(self,input,output): #Input and output functions (usually q1.get and [q2.put,q3.put....]) self.input = input; self.output = output; self.x=threading.Thread(target=self.process) self.x.daemon=True self.x.start() def process(self): while True: data = self.input() for i in self.output: i(data) #Network range gate: takes an IP packet from the input queue and passes it to the output queue if and only if the IP source is within a list of dymanically changing networks. #Takes an input function, an output function and an update function (which returns a list of addresses, usually database.ip_network_table.ip_list) class network_range_gate(object): def __init__(self,input,output,update): self.input = input; self.output = output; self.addresses = [] self.db_semaphore = threading.Semaphore() self.passed = [] self.update_function = update self.x=threading.Thread(target=self.process) self.x.daemon=True self.x.start() self.y=threading.Thread(target=self.update_addresses) self.y.daemon=True self.y.start() # def debug_data(self): # print "Gating list", self.addresses # print "Recently passed", self.passed # self.passed = [] def process(self): while True: data = self.input() self.db_semaphore.acquire() try: for i in self.addresses: if i.Contains(data['source']): self.output(data) #self.passed.append(data['source']) break except: pass self.db_semaphore.release() def update_addresses(self): while True: self.db_semaphore.acquire() self.addresses = self.update_function() self.db_semaphore.release()
Python
0
@@ -23,16 +23,28 @@ t ipaddr +%0Aimport time %0A%0A#Hub: @@ -871,16 +871,37 @@ t,update +,update_frequency=0.1 ):%0A%09%09sel @@ -1058,16 +1058,59 @@ = update +%0A%09%09self.update_frequency = update_frequency %0A%0A%09%09self @@ -1743,16 +1743,58 @@ e True:%0A +%09%09%09time.sleep(0.1) # to avoid a busy loop%0A %09%09%09self. @@ -1894,10 +1894,11 @@ e()%0A%09%09%09%0A - %09%09 +%0A
ce73fe56375bef32a0997bdbe4ab305f232d605e
rename variable
daemon/rpcservice/systemservice.py
daemon/rpcservice/systemservice.py
import psutil import json from rpcservice.rpcservice import RPCService from decorator.serialize import json_decorate from decorator.singleton import singleton @singleton @json_decorate class SystemService(RPCService): def get_server_status(self): cpu_status = { "cpu": psutil.cpu_percent(), "memory": psutil.virtual_memory().percent, } json_obj = [] json_obj.append(cpu_status) return json_obj def get_server_version(self): pass
Python
0.000018
@@ -257,19 +257,22 @@ -cpu +system _status @@ -427,19 +427,22 @@ .append( -cpu +system _status)
3586c3634a6e7ab21a32fcd3c62c33d6786aaaaa
Support -Xuse-fir (#717)
src/main/starlark/rkt_1_6/kotlin/opts.bzl
src/main/starlark/rkt_1_6/kotlin/opts.bzl
def _map_optin_class_to_flag(values): return ["-opt-in=%s" % v for v in values] _KOPTS = { "warn": struct( args = dict( default = "report", doc = "Control warning behaviour.", values = ["off", "report", "error"], ), type = attr.string, value_to_flag = { "off": ["-nowarn"], "report": None, "error": ["-Werror"], }, ), "include_stdlibs": struct( args = dict( default = "all", doc = "Don't automatically include the Kotlin standard libraries into the classpath (stdlib and reflect).", values = ["all", "stdlib", "none"], ), type = attr.string, value_to_flag = { "all": None, "stdlib": ["-no-reflect"], "none": ["-no-stdlib"], }, ), "x_skip_prerelease_check": struct( args = dict( default = False, doc = "Suppress errors thrown when using pre-release classes.", ), type = attr.bool, value_to_flag = { True: ["-Xskip-prerelease-check"], }, ), "x_inline_classes": struct( args = dict( default = False, doc = "Enable experimental inline classes", ), type = attr.bool, value_to_flag = { True: ["-Xinline-classes"], }, ), "x_allow_result_return_type": struct( args = dict( default = False, doc = "Enable kotlin.Result as a return type", ), type = attr.bool, value_to_flag = { True: ["-Xallow-result-return-type"], }, ), "x_jvm_default": struct( args = dict( default = "off", doc = "Specifies that a JVM default method should be generated for non-abstract Kotlin interface member.", values = ["off", "enable", "disable", "compatibility", "all-compatibility", "all"], ), type = attr.string, value_to_flag = { "off": None, "enable": ["-Xjvm-default=enable"], "disable": ["-Xjvm-default=disable"], "compatibility": ["-Xjvm-default=compatibility"], "all-compatibility": ["-Xjvm-default=all-compatibility"], "all": ["-Xjvm-default=all"], }, ), "x_no_optimized_callable_references": struct( args = dict( default = False, doc = "Do not use optimized callable reference superclasses. 
Available from 1.4.", ), type = attr.bool, value_to_flag = { True: ["-Xno-optimized-callable-reference"], }, ), "x_explicit_api_mode": struct( args = dict( default = "off", doc = "Enable explicit API mode for Kotlin libraries.", values = ["off", "warning", "strict"], ), type = attr.string, value_to_flag = { "off": None, "warning": ["-Xexplicit-api=warning"], "strict": ["-Xexplicit-api=strict"], }, ), "java_parameters": struct( args = dict( default = False, doc = "Generate metadata for Java 1.8+ reflection on method parameters.", ), type = attr.bool, value_to_flag = { True: ["-java-parameters"], }, ), "x_multi_platform": struct( args = dict( default = False, doc = "Enable experimental language support for multi-platform projects", ), type = attr.bool, value_to_flag = { True: ["-Xmulti-platform"], }, ), "x_sam_conversions": struct( args = dict( default = "class", doc = "Change codegen behavior of SAM/functional interfaces", values = ["class", "indy"], ), type = attr.string, value_to_flag = { "class": ["-Xsam-conversions=class"], "indy": ["-Xsam-conversions=indy"], }, ), "x_lambdas": struct( args = dict( default = "class", doc = "Change codegen behavior of lambdas", values = ["class", "indy"], ), type = attr.string, value_to_flag = { "class": ["-Xlambdas=class"], "indy": ["-Xlambdas=indy"], }, ), "x_optin": struct( args = dict( default = [], doc = "Define APIs to opt-in to.", ), type = attr.string_list, value_to_flag = None, map_value_to_flag = _map_optin_class_to_flag, ), } KotlincOptions = provider( fields = { name: o.args["doc"] for name, o in _KOPTS.items() }, ) def _kotlinc_options_impl(ctx): return struct( providers = [ KotlincOptions(**{n: getattr(ctx.attr, n, None) for n in _KOPTS}), ], ) kt_kotlinc_options = rule( implementation = _kotlinc_options_impl, doc = "Define kotlin compiler options.", provides = [KotlincOptions], attrs = { n: o.type(**o.args) for n, o in _KOPTS.items() }, ) def kotlinc_options_to_flags(kotlinc_options): """Translate KotlincOptions to worker flags Args: kotlinc_options maybe containing KotlincOptions Returns: list of flags to add to the command line. """ if not kotlinc_options: return "" flags = [] for n, o in _KOPTS.items(): value = getattr(kotlinc_options, n, None) flag = o.value_to_flag.get(value, None) if o.value_to_flag else o.map_value_to_flag(value) if flag: flags.extend(flag) return flags
Python
0
@@ -4668,16 +4668,298 @@ %0A ),%0A + %22x_use_fir%22: struct(%0A args = dict(%0A default = False,%0A doc = %22Compile using the experimental Kotlin Front-end IR. Available from 1.6.%22,%0A ),%0A type = attr.bool,%0A value_to_flag = %7B%0A True: %5B%22-Xuse-fir%22%5D,%0A %7D,%0A ),%0A %7D%0A%0AKotli
6e8be0bf525d386cfd83ac1c0c3f66475e308234
fix id tag
examples/RooUnfoldExample.py
examples/RooUnfoldExample.py
# ============================================================================== # File and Version Information: # $Id: RooUnfoldExample.py 248 2010-10-04 22:18:19Z T.J.Adye $ # # Description: # Simple example usage of the RooUnfold package using toy MC. # # Author: Tim Adye <T.J.Adye@rl.ac.uk> # # ============================================================================== from ROOT import gRandom, TH1, TH1D, cout from ROOT import RooUnfoldResponse from ROOT import RooUnfold from ROOT import RooUnfoldBayes # from ROOT import RooUnfoldSvd # from ROOT import RooUnfoldTUnfold # ============================================================================== # Gaussian smearing, systematic translation, and variable inefficiency # ============================================================================== def smear(xt): xeff= 0.3 + (1.0-0.3)/20*(xt+10.0); # efficiency x= gRandom.Rndm(); if x>xeff: return None; xsmear= gRandom.Gaus(-2.5,0.2); # bias and smear return xt+xsmear; # ============================================================================== # Example Unfolding # ============================================================================== print "==================================== TRAIN ====================================" response= RooUnfoldResponse (40, -10.0, 10.0); # Train with a Breit-Wigner, mean 0.3 and width 2.5. for i in xrange(100000): xt= gRandom.BreitWigner (0.3, 2.5); x= smear (xt); if x!=None: response.Fill (x, xt); else: response.Miss (xt); print "==================================== TEST =====================================" hTrue= TH1D ("true", "Test Truth", 40, -10.0, 10.0); hMeas= TH1D ("meas", "Test Measured", 40, -10.0, 10.0); # Test with a Gaussian, mean 0 and width 2. for i in xrange(10000): xt= gRandom.Gaus (0.0, 2.0) x= smear (xt); hTrue.Fill(xt); if x!=None: hMeas.Fill(x); print "==================================== UNFOLD ===================================" unfold= RooUnfoldBayes (response, hMeas, 4); # OR # unfold= RooUnfoldSvd (response, hMeas, 20); # OR # unfold= RooUnfoldTUnfold (response, hMeas); hReco= unfold.Hreco(); unfold.PrintTable (cout, hTrue); hReco.Draw(); hMeas.Draw("SAME"); hTrue.SetLineColor(8); hTrue.Draw("SAME");
Python
0.000005
@@ -177,17 +177,16 @@ Adye $%0A# - %0A# Desc @@ -263,17 +263,16 @@ oy MC.%0A# - %0A# Auth @@ -305,17 +305,16 @@ ac.uk%3E%0A# - %0A# =====
61aea6972b9bc36e36bca9b7e971c798166c4dae
Revert "Support multiple Vagrant VM limit (comma separated)."
pyinfra/api/connectors/vagrant.py
pyinfra/api/connectors/vagrant.py
import json from os import path from threading import Thread from six.moves.queue import Queue from pyinfra import local, logger from pyinfra.api.exceptions import InventoryError from pyinfra.progress import progress_spinner VAGRANT_CONFIG = None VAGRANT_OPTIONS = None def _get_vagrant_ssh_config(queue, progress, target): logger.debug('Loading SSH config for {0}'.format(target)) queue.put(local.shell( 'vagrant ssh-config {0}'.format(target), splitlines=True, )) progress(target) def _get_vagrant_config(limit=None): if limit: limit = limit.split(',') # accept multiple comma separated VM names with progress_spinner({'vagrant status'}) as progress: output = local.shell( 'vagrant status --machine-readable', splitlines=True, ) progress('vagrant status') targets = [] for line in output: _, target, type_, data = line.split(',', 3) # Skip anything not in the limit if limit is not None and target not in limit: continue # For each running container - fetch it's SSH config in a thread - this # is because Vagrant *really* slow to run each command. if type_ == 'state' and data == 'running': targets.append(target) threads = [] config_queue = Queue() with progress_spinner(targets) as progress: for target in targets: thread = Thread( target=_get_vagrant_ssh_config, args=(config_queue, progress, target), ) threads.append(thread) thread.start() for thread in threads: thread.join() queue_items = list(config_queue.queue) lines = [] for output in queue_items: lines.extend(output) return lines def get_vagrant_config(limit=None): global VAGRANT_CONFIG if VAGRANT_CONFIG is None: logger.info('Getting vagrant config...') VAGRANT_CONFIG = _get_vagrant_config(limit=limit) return VAGRANT_CONFIG def get_vagrant_options(): global VAGRANT_OPTIONS if VAGRANT_OPTIONS is None: if path.exists('@vagrant.json'): with open('@vagrant.json', 'r') as f: VAGRANT_OPTIONS = json.loads(f.read()) else: VAGRANT_OPTIONS = {} return VAGRANT_OPTIONS def _make_name_data(host): vagrant_options = get_vagrant_options() vagrant_host = host['Host'] # Build data data = { 'ssh_hostname': host['HostName'], 'ssh_port': host['Port'], 'ssh_user': host['User'], 'ssh_key': host['IdentityFile'], } # Update any configured JSON data if vagrant_host in vagrant_options.get('data', {}): data.update(vagrant_options['data'][vagrant_host]) # Work out groups groups = vagrant_options.get('groups', {}).get(vagrant_host, []) if '@vagrant' not in groups: groups.append('@vagrant') return '@vagrant/{0}'.format(host['Host']), data, groups def make_names_data(limit=None): vagrant_ssh_info = get_vagrant_config(limit) logger.debug('Got Vagrant SSH info: \n{0}'.format(vagrant_ssh_info)) hosts = [] current_host = None for line in vagrant_ssh_info: # Vagrant outputs an empty line between each host if not line: if current_host: hosts.append(_make_name_data(current_host)) current_host = None continue key, value = line.split(' ', 1) if key == 'Host': if current_host: hosts.append(_make_name_data(current_host)) # Set the new host current_host = { key: value, } elif current_host: current_host[key] = value else: logger.debug('Extra Vagrant SSH key/value ({0}={1})'.format( key, value, )) if current_host: hosts.append(_make_name_data(current_host)) if not hosts: raise InventoryError('No running Vagrant instances found!') return hosts
Python
0
@@ -571,86 +571,65 @@ imit -:%0A limit = limit.split(',') # accept multiple comma separated VM names + and not isinstance(limit, list):%0A limit = %5Blimit%5D %0A%0A
4f8fe4c584f3dd4f5cc612c43534f6b2dc149a11
Fix a bug that caused the from_config to ignore the base_path when method is set to local
simplefsabstraction/interface.py
simplefsabstraction/interface.py
import uuid class SimpleFS: class BadExtensionError(Exception): def __init__(self): super().__init__('Extension not allowed') def exists(self, file_name): """ Check whether a file exists in the file system :param file_name: the name of the file :return: true if the file exists, false otherwise """ raise NotImplementedError def save(self, source_file, dest_name, randomize=False): """ Save a file to the file system :param source_file: the source file :param dest_name: the destination name :param randomize: use a random file name :return the generated filename """ raise NotImplementedError @staticmethod def _check_extension(filename, extensions): """ Check is a filename has an allowed extension :param filename: the filename :return: true if allowed extension, false otherwise """ return any(filename.endswith(".{}".format(ext)) for ext in extensions) @staticmethod def _random_filename(): """ Generate a random filename """ return str(uuid.uuid4()) @staticmethod def from_config(config): from simplefsabstraction import S3FS, LocalFS def s3_from_config(config): """ Create an instance of S3FS from the config """ if 'access_key' in config and 'secret_key' in config: credentials = {'access_key': config['access_key'], 'secret_key': config['secret_key']} else: credentials = None try: bucket_name = config['bucket_name'] except KeyError: raise Exception('Please specify the bucket name in the config') allowed_extensions = config['allowed_extensions'] if 'allowed_extensions' in config else None return S3FS(bucket_name, allowed_extensions=allowed_extensions, credentials=credentials) def local_from_config(config): """ Create an instance of LocalFS from the config """ allowed_extensions = config['allowed_extensions'] if 'allowed_extensions' in config else None return LocalFS(allowed_extensions=allowed_extensions) try: method = config['method'].lower() except KeyError: raise Exception('Please specify the key "method" in the config') if method == 's3': return s3_from_config(config) elif method == 'local': return local_from_config(config) else: raise Exception('Method "{}" not known'.format(method))
Python
0.000001
@@ -2287,32 +2287,193 @@ onfig else None%0A + if 'base_path' in config:%0A return LocalFS(allowed_extensions=allowed_extensions, base_path=config%5B'base_path'%5D)%0A else:%0A retu
b193a4035a0a77ba2555c41d977cf31975ac3b47
Disable destructive action challenge for codelab. (#1059)
pylib/spinnaker/codelab_config.py
pylib/spinnaker/codelab_config.py
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from spinnaker.yaml_util import YamlBindings def configure_codelab_igor_jenkins(): """Configures Igor to be enabled and to point to the codelab jenkins instance. """ YamlBindings.update_yml_source( '/opt/spinnaker/config/spinnaker-local.yml', { 'jenkins': { 'defaultMaster': { 'name': 'CodelabJenkins', 'baseUrl': 'http://localhost:9090', 'username': 'admin', 'password': 'admin' } }, 'igor': { 'enabled': 'true' } } ) if __name__ == '__main__': configure_codelab_igor_jenkins()
Python
0
@@ -1120,16 +1120,304 @@ %7D%0A )%0A%0A +def disable_destructive_action_challenge():%0A %22%22%22Disables destructive action challenge for codelab.%0A%0A %22%22%22%0A YamlBindings.update_yml_source(%0A '/opt/spinnaker/config/clouddriver.yml',%0A %7B%0A 'credentials': %7B%0A 'challengeDestructiveActionsEnvironments': ''%0A %7D%0A %7D%0A )%0A%0A if __nam @@ -1470,8 +1470,49 @@ nkins()%0A + disable_destructive_action_challenge()%0A
1926e8d45017ed81228d79d43a03d5ef821ce341
use pickle to serialize session
everyclass/server/utils/session.py
everyclass/server/utils/session.py
# https://github.com/SaintFlipper/EncryptedSession/blob/master/main.py
import base64
import json
import zlib
from json import JSONDecoder, JSONEncoder

from Crypto.Cipher import AES
from flask.sessions import SessionInterface, SessionMixin
from werkzeug.datastructures import CallbackDict


class EncryptedSession(CallbackDict, SessionMixin):
    def __init__(self, initial=None):
        def on_update(self):
            self.modified = True

        CallbackDict.__init__(self, initial, on_update)
        self.modified = False


class EncryptedSessionInterface(SessionInterface):
    session_class = EncryptedSession
    compress_threshold = 1024
    session_cookie_name = "e_session"  # use special cookie name to avoid historical compatibility issues

    def open_session(self, app, request):
        """
        @param app: Flask app
        @param request: Flask HTTP Request
        @summary: Sets the current session from the request's session cooke. This overrides the default
        Flask implementation, adding AES decryption of the client-side session cookie.
        """

        # Get the session cookie
        session_cookie = request.cookies.get(self.session_cookie_name)
        if not session_cookie:
            return self.session_class()

        # Get the crypto key
        crypto_key = app.config['SESSION_CRYPTO_KEY'] if 'SESSION_CRYPTO_KEY' in app.config else app.crypto_key

        # Split the session cookie : <z|u>.<base64 cipher text>.<base64 mac>.<base64 nonce>
        itup = session_cookie.split(".")
        if len(itup) is not 4:
            return self.session_class()  # Session cookie not in the right format

        try:
            # Compressed data?
            if itup[0] == 'z':  # session cookie for compressed data starts with "z."
                is_compressed = True
            else:
                is_compressed = False

            # Decode the cookie parts from base64
            ciphertext = base64.b64decode(bytes(itup[1], 'utf-8'))
            mac = base64.b64decode(bytes(itup[2], 'utf-8'))
            nonce = base64.b64decode(bytes(itup[3], 'utf-8'))

            # Decrypt
            cipher = AES.new(crypto_key, AES.MODE_EAX, nonce)
            data = cipher.decrypt_and_verify(ciphertext, mac)

            # Convert back to a dict and pass that onto the session
            if is_compressed:
                data = zlib.decompress(data)
            session_dict = json.loads(str(data, 'utf-8'), cls=BinaryAwareJSONDecoder)

            return self.session_class(session_dict)
        except ValueError:
            return self.session_class()

    def save_session(self, app, session, response):
        """
        @param app: Flask app
        @param session: Flask / Werkzeug Session
        @param response: Flask HTTP Response
        @summary: Saves the current session. This overrides the default Flask implementation, adding
        AES encryption of the client-side session cookie.
        """

        domain = self.get_cookie_domain(app)
        if not session:
            if session.modified:
                response.delete_cookie(app.session_cookie_name, domain=domain)
            return

        expires = self.get_expiration_time(app, session)

        # Decide whether to compress
        bdict = bytes(json.dumps(dict(session), cls=BinaryAwareJSONEncoder), 'utf-8')
        if len(bdict) > self.compress_threshold:
            prefix = "z"  # session cookie for compressed data starts with "z."
            bdict = zlib.compress(bdict)
        else:
            prefix = "u"  # session cookie for uncompressed data starts with "u."

        # Get the crypto key
        crypto_key = app.config['SESSION_CRYPTO_KEY'] if 'SESSION_CRYPTO_KEY' in app.config else app.crypto_key

        # Encrypt using AES in EAX mode
        cipher = AES.new(crypto_key, AES.MODE_EAX)
        ciphertext, mac = cipher.encrypt_and_digest(bdict)
        nonce = cipher.nonce

        # Convert the ciphertext, mac, and nonce to base64
        b64_ciphertext = base64.b64encode(ciphertext)
        b64_mac = base64.b64encode(mac)
        b64_nonce = base64.b64encode(nonce)

        # Create the session cookie as <u|z>.<base64 cipher text>.<base64 mac>.<base64 nonce>
        tup = [prefix, b64_ciphertext.decode(), b64_mac.decode(), b64_nonce.decode()]
        session_cookie = ".".join(tup)

        # Set the session cookie
        response.set_cookie(self.session_cookie_name, session_cookie, expires=expires, httponly=True, domain=domain)


class BinaryAwareJSONEncoder(JSONEncoder):
    """
    Converts a python object, where binary data is converted into an object
    that can be decoded using the BinaryAwareJSONDecoder.
    """

    def default(self, obj):
        if isinstance(obj, bytes):
            return {
                '__type__': 'bytes',
                'b' : base64.b64encode(obj).decode()
            }
        else:
            return JSONEncoder.default(self, obj)


class BinaryAwareJSONDecoder(JSONDecoder):
    """
    Converts a json string, where binary data was converted into objects form using the
    BinaryAwareJSONEncoder, back into a python object.
    """

    def __init__(self):
        JSONDecoder.__init__(self, object_hook=self.dict_to_object)

    def dict_to_object(self, d):
        if '__type__' not in d:
            return d

        typ = d.pop('__type__')
        if typ == 'bytes':
            return base64.b64decode(bytes(d['b'], 'utf-8'))
        else:
            # Oops... better put this back together.
            d['__type__'] = typ
            return d
Python
0
@@ -85,20 +85,22 @@ %0Aimport -json +pickle %0Aimport @@ -2434,65 +2434,25 @@ t = -json +pickle .loads( -str(data, 'utf-8'), cls=BinaryAwareJSONDecoder +data )%0A%0A @@ -3229,24 +3229,26 @@ ress%0A + # bdict = byt @@ -3314,16 +3314,60 @@ utf-8')%0A + bdict = pickle.dumps(dict(session))%0A
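
The diff above replaces the JSON round-trip, and with it the BinaryAware encoder/decoder pair, with pickle, which serializes bytes values natively. A minimal sketch of the resulting serialize-then-encrypt flow, assuming pycryptodome as in the module being patched; the session contents and key here are made up for illustration:

# pickle round-trips bytes values without a custom codec, which is the point of
# the change; pycryptodome's EAX mode supplies the authentication tag.
import pickle

from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes

session = {'user': 'alice', 'token': b'\x00\x01binary-safe'}  # bytes need no special handling
key = get_random_bytes(16)

# Serialize with pickle, then encrypt-and-authenticate with AES-EAX.
cipher = AES.new(key, AES.MODE_EAX)
ciphertext, mac = cipher.encrypt_and_digest(pickle.dumps(session))

# Verify the MAC *before* unpickling: decrypt_and_verify raises ValueError on
# tampering, so pickle.loads only ever sees data this server produced.
cipher = AES.new(key, AES.MODE_EAX, cipher.nonce)
restored = pickle.loads(cipher.decrypt_and_verify(ciphertext, mac))
assert restored == session

The trade-off is worth naming: unpickling attacker-controlled bytes can execute arbitrary code, so the scheme is only sound because the cookie is authenticated with a server-side key before pickle.loads ever runs.
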
555236f769c13518db70f5df36e5688d63486bd5
Remove -detect_leaks=0 from test/fuzz/test_runner.py - no longer needed
test/fuzz/test_runner.py
test/fuzz/test_runner.py
#!/usr/bin/env python3 # Copyright (c) 2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Run fuzz test targets. """ import argparse import configparser import os import sys import subprocess import logging # Fuzzers known to lack a seed corpus in https://github.com/bitcoin-core/qa-assets/tree/master/fuzz_seed_corpus FUZZERS_MISSING_CORPORA = [ "addr_info_deserialize", "asmap", "base_encode_decode", "block", "block_file_info_deserialize", "block_filter_deserialize", "block_header_and_short_txids_deserialize", "decode_tx", "fee_rate_deserialize", "flat_file_pos_deserialize", "hex", "integer", "key_origin_info_deserialize", "merkle_block_deserialize", "out_point_deserialize", "parse_hd_keypath", "parse_numbers", "parse_script", "parse_univalue", "partial_merkle_tree_deserialize", "partially_signed_transaction_deserialize", "prefilled_transaction_deserialize", "psbt_input_deserialize", "psbt_output_deserialize", "pub_key_deserialize", "script_deserialize", "strprintf", "sub_net_deserialize", "tx_in", "tx_in_deserialize", "tx_out", ] def main(): parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( "-l", "--loglevel", dest="loglevel", default="INFO", help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console.", ) parser.add_argument( '--export_coverage', action='store_true', help='If true, export coverage information to files in the seed corpus', ) parser.add_argument( '--valgrind', action='store_true', help='If true, run fuzzing binaries under the valgrind memory error detector', ) parser.add_argument( 'seed_dir', help='The seed corpus to run on (must contain subfolders for each fuzz target).', ) parser.add_argument( 'target', nargs='*', help='The target(s) to run. Default is to run all targets.', ) args = parser.parse_args() # Set up logging logging.basicConfig( format='%(message)s', level=int(args.loglevel) if args.loglevel.isdigit() else args.loglevel.upper(), ) # Read config generated by configure. 
config = configparser.ConfigParser() configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini" config.read_file(open(configfile, encoding="utf8")) if not config["components"].getboolean("ENABLE_FUZZ"): logging.error("Must have fuzz targets built") sys.exit(1) # Build list of tests test_list_all = parse_test_list(makefile=os.path.join(config["environment"]["SRCDIR"], 'src', 'Makefile.test.include')) if not test_list_all: logging.error("No fuzz targets found") sys.exit(1) logging.info("Fuzz targets found: {}".format(test_list_all)) args.target = args.target or test_list_all # By default run all test_list_error = list(set(args.target).difference(set(test_list_all))) if test_list_error: logging.error("Unknown fuzz targets selected: {}".format(test_list_error)) test_list_selection = list(set(test_list_all).intersection(set(args.target))) if not test_list_selection: logging.error("No fuzz targets selected") logging.info("Fuzz targets selected: {}".format(test_list_selection)) try: help_output = subprocess.run( args=[ os.path.join(config["environment"]["BUILDDIR"], 'src', 'test', 'fuzz', test_list_selection[0]), '-help=1', ], timeout=10, check=True, stderr=subprocess.PIPE, universal_newlines=True, ).stderr if "libFuzzer" not in help_output: logging.error("Must be built with libFuzzer") sys.exit(1) except subprocess.TimeoutExpired: logging.error("subprocess timed out: Currently only libFuzzer is supported") sys.exit(1) run_once( corpus=args.seed_dir, test_list=test_list_selection, build_dir=config["environment"]["BUILDDIR"], export_coverage=args.export_coverage, use_valgrind=args.valgrind, ) def run_once(*, corpus, test_list, build_dir, export_coverage, use_valgrind): for t in test_list: corpus_path = os.path.join(corpus, t) if t in FUZZERS_MISSING_CORPORA: os.makedirs(corpus_path, exist_ok=True) args = [ os.path.join(build_dir, 'src', 'test', 'fuzz', t), '-runs=1', '-detect_leaks=0', corpus_path, ] if use_valgrind: args = ['valgrind', '--quiet', '--error-exitcode=1'] + args logging.debug('Run {} with args {}'.format(t, args)) result = subprocess.run(args, stderr=subprocess.PIPE, universal_newlines=True) output = result.stderr logging.debug('Output: {}'.format(output)) result.check_returncode() if not export_coverage: continue for l in output.splitlines(): if 'INITED' in l: with open(os.path.join(corpus, t + '_coverage'), 'w', encoding='utf-8') as cov_file: cov_file.write(l) break def parse_test_list(makefile): with open(makefile, encoding='utf-8') as makefile_test: test_list_all = [] read_targets = False for line in makefile_test.readlines(): line = line.strip().replace('test/fuzz/', '').replace(' \\', '') if read_targets: if not line: break test_list_all.append(line) continue if line == 'FUZZ_TARGETS =': read_targets = True return test_list_all if __name__ == '__main__': main()
Python
0.000001
@@ -4867,39 +4867,8 @@ 1',%0A - '-detect_leaks=0',%0A
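
The one-line diff drops '-detect_leaks=0' from the libFuzzer argument list, so LeakSanitizer's leak detection runs at its default (enabled) setting again. A trimmed sketch of the resulting invocation; the paths are placeholders, not a real build tree:

# Minimal model of run_once() after the change; build_dir and corpus_path are
# hypothetical stand-ins for the values the runner reads from its config.
import os
import subprocess

build_dir = '/path/to/bitcoin'                           # hypothetical
corpus_path = '/path/to/qa-assets/fuzz_seed_corpus/hex'  # hypothetical

args = [
    os.path.join(build_dir, 'src', 'test', 'fuzz', 'hex'),
    '-runs=1',    # one pass over the seed corpus; leak checking now uses the sanitizer default
    corpus_path,
]
result = subprocess.run(args, stderr=subprocess.PIPE, universal_newlines=True)
result.check_returncode()  # a detected leak makes the target exit non-zero
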
c2c846b2e658e184e46a7e4d8375d4dcffbe04c3
edit doc strings
src/ezdxf/math/transformtools.py
src/ezdxf/math/transformtools.py
# Created: 02.05.2020
# Copyright (c) 2020, Manfred Moitzi
# License: MIT License
from typing import TYPE_CHECKING, Tuple
import math

from .matrix44 import Matrix44
from .vector import Vector, X_AXIS, Y_AXIS
from .ucs import OCS

if TYPE_CHECKING:
    from ezdxf.eztypes import DXFGraphic, Vertex


class TransformError(Exception):
    pass


class NonUniformScalingError(TransformError):
    pass


def transform_thickness_and_extrusion_without_ocs(entity: 'DXFGraphic', m: Matrix44) -> None:
    if entity.dxf.hasattr('thickness'):
        thickness = m.transform_direction(entity.dxf.extrusion * entity.dxf.thickness)
        entity.dxf.thickness = thickness.magnitude
        entity.dxf.extrusion = thickness.normalize()
    elif entity.dxf.hasattr('extrusion'):  # without thickness?
        extrusion = m.transform_direction(entity.dxf.extrusion)
        entity.dxf.extrusion = extrusion.normalize()


def transform_extrusion(extrusion: 'Vertex', m: Matrix44) -> Tuple[Vector, bool]:
    """
    Transforms the old `extrusion` vector into a new extrusion vector. Returns the new extrusion vector and a
    boolean value: ``True`` if the new OCS established by the new extrusion vector has a uniform scaled xy-plane,
    else ``False``.

    The new extrusion vector is perpendicular to plane defined by the transformed x- and y-axis.

    Args:
        extrusion: extrusion vector of the old OCS
        m: transformation matrix

    Returns:

    """
    ocs = OCS(extrusion)
    ocs_x_axis_in_wcs = ocs.to_wcs(X_AXIS)
    ocs_y_axis_in_wcs = ocs.to_wcs(Y_AXIS)
    x_axis, y_axis = m.transform_directions((ocs_x_axis_in_wcs, ocs_y_axis_in_wcs))

    # Not sure if this is the correct test for a uniform scaled xy-plane
    is_uniform = math.isclose(x_axis.magnitude_square, y_axis.magnitude_square, abs_tol=1e-9)
    new_extrusion = x_axis.cross(y_axis).normalize()
    return new_extrusion, is_uniform


class OCSTransform:
    def __init__(self, extrusion: Vector, m: Matrix44):
        self.m = m
        self.old_extrusion = extrusion
        self.old_ocs = OCS(extrusion)
        self.new_extrusion, self.scale_uniform = transform_extrusion(extrusion, m)
        self.new_ocs = OCS(self.new_extrusion)

    def transform_length(self, length: 'Vertex') -> float:
        """ Returns length of transformed `length` vector.

        Args:
            length: length vector in old OCS

        """
        return self.m.transform_direction(self.old_ocs.to_wcs(length)).magnitude

    transform_scale_factor = transform_length

    def transform_vertex(self, vertex: 'Vertex'):
        """ Returns vertex transformed from old OCS into new OCS by transformation matrix `m`.
        """
        return self.new_ocs.from_wcs(self.m.transform(self.old_ocs.to_wcs(vertex)))

    def transform_direction(self, direction: 'Vertex'):
        """ Returns direction transformed from old OCS into new OCS by transformation matrix `m`.
        """
        return self.new_ocs.from_wcs(self.m.transform_direction(self.old_ocs.to_wcs(direction)))

    def transform_angle(self, angle: float) -> float:
        """ Returns new angle in radians.

        Transform old `angle` from old OCS to a WCS vector, transforms this WCS vector by transformation matrix `m` and
        calculates the angle in the OCS established by the new `extrusion` vector between to the new OCS x-axis and the
        transformed angle vector.

        Args:
            angle: old angle in radians

        """
        new_angle_vec = self.m.transform_direction(self.old_ocs.to_wcs(Vector.from_angle(angle)))
        return self.new_extrusion.angle_about(X_AXIS, new_angle_vec)

    def transform_deg_angle(self, angle: float) -> float:
        return math.degrees(self.transform_angle(math.radians(angle)))
Python
0.000001
@@ -2292,107 +2292,85 @@ rns -length of transformed %60length%60 vector.%0A%0A Args:%0A length: length vector in old OCS%0A +magnitude of %60length%60 direction vector transformed form old OCS into new OCS. %0A @@ -2589,34 +2589,34 @@ ex transformed f -r o +r m old OCS into n @@ -2625,37 +2625,8 @@ OCS - by transformation matrix %60m%60 .%0A @@ -2847,37 +2847,8 @@ OCS - by transformation matrix %60m%60 .%0A @@ -3033,18 +3033,15 @@ rns -new angle +( in r @@ -3050,340 +3050,48 @@ ians -.%0A%0A Transform old %60angle%60 from old OCS to a WCS vector, transforms this WCS vector by transformation matrix %60m%60 and%0A calculates the angle in the OCS established by the new %60extrusion%60 vector between to the new OCS x-axis and the%0A transformed angle vector.%0A%0A Args:%0A angle: old angle in radians%0A +) from old OCS transformed into new OCS. %0A @@ -3267,16 +3267,16 @@ e_vec)%0A%0A - def @@ -3317,32 +3317,122 @@ loat) -%3E float:%0A + %22%22%22 Returns angle (in degrees) from old OCS transformed into new OCS.%0A %22%22%22%0A return m
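
Since the diff above only rewrites docstrings, the module's behavior is unchanged and can be exercised directly. A usage sketch, assuming the ezdxf version captured in this record (the Vector name and these import paths moved in later ezdxf releases):

# Demonstrates transform_extrusion's uniform-scaling test and OCSTransform's
# angle mapping for the plain WCS-aligned extrusion (0, 0, 1).
import math

from ezdxf.math import Matrix44, Vector
from ezdxf.math.transformtools import OCSTransform, transform_extrusion

extrusion = Vector(0, 0, 1)

# Uniform scaling keeps the OCS xy-plane uniformly scaled ...
_, uniform = transform_extrusion(extrusion, Matrix44.scale(2, 2, 2))
assert uniform

# ... while non-uniform in-plane scaling does not.
_, uniform = transform_extrusion(extrusion, Matrix44.scale(2, 1, 1))
assert not uniform

# A 90-degree rotation about the z-axis shifts an OCS angle of 0 to 90 degrees.
t = OCSTransform(extrusion, Matrix44.z_rotate(math.pi / 2))
assert math.isclose(t.transform_deg_angle(0.0), 90.0)
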