column        type           min       max
commit        stringlengths  40        40
subject       stringlengths  1         3.25k
old_file      stringlengths  4         311
new_file      stringlengths  4         311
old_contents  stringlengths  0         26.3k
lang          stringclasses  3 values
proba         float64        0         1
diff          stringlengths  0         7.82k
7c683cbb4a9a15e15bc522c90e018c1fb5707418
Fix "upload" command garbling and truncating files on Windows. If it's a binary file, use 'rb'!
command/upload.py
command/upload.py
"""distutils.command.upload Implements the Distutils 'upload' subcommand (upload package to PyPI).""" from distutils.errors import * from distutils.core import Command from distutils.spawn import spawn from distutils import log from md5 import md5 import os import socket import platform import ConfigParser import httplib import base64 import urlparse import cStringIO as StringIO class upload(Command): description = "upload binary package to PyPI" DEFAULT_REPOSITORY = 'http://www.python.org/pypi' user_options = [ ('repository=', 'r', "url of repository [default: %s]" % DEFAULT_REPOSITORY), ('show-response', None, 'display full response text from server'), ('sign', 's', 'sign files to upload using gpg'), ] boolean_options = ['show-response', 'sign'] def initialize_options(self): self.username = '' self.password = '' self.repository = '' self.show_response = 0 self.sign = False def finalize_options(self): if os.environ.has_key('HOME'): rc = os.path.join(os.environ['HOME'], '.pypirc') if os.path.exists(rc): self.announce('Using PyPI login from %s' % rc) config = ConfigParser.ConfigParser({ 'username':'', 'password':'', 'repository':''}) config.read(rc) if not self.repository: self.repository = config.get('server-login', 'repository') if not self.username: self.username = config.get('server-login', 'username') if not self.password: self.password = config.get('server-login', 'password') if not self.repository: self.repository = self.DEFAULT_REPOSITORY def run(self): if not self.distribution.dist_files: raise DistutilsOptionError("No dist file created in earlier command") for command, pyversion, filename in self.distribution.dist_files: self.upload_file(command, pyversion, filename) def upload_file(self, command, pyversion, filename): # Sign if requested if self.sign: spawn(("gpg", "--detach-sign", "-a", filename), dry_run=self.dry_run) # Fill in the data content = open(filename).read() data = { ':action':'file_upload', 'protcol_version':'1', 'name':self.distribution.get_name(), 'version':self.distribution.get_version(), 'content':(os.path.basename(filename),content), 'filetype':command, 'pyversion':pyversion, 'md5_digest':md5(content).hexdigest(), } comment = '' if command == 'bdist_rpm': dist, version, id = platform.dist() if dist: comment = 'built for %s %s' % (dist, version) elif command == 'bdist_dumb': comment = 'built for %s' % platform.platform(terse=1) data['comment'] = comment if self.sign: data['gpg_signature'] = (os.path.basename(filename) + ".asc", open(filename+".asc").read()) # set up the authentication auth = "Basic " + base64.encodestring(self.username + ":" + self.password).strip() # Build up the MIME payload for the POST data boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' sep_boundary = '\n--' + boundary end_boundary = sep_boundary + '--' body = StringIO.StringIO() for key, value in data.items(): # handle multiple entries for the same name if type(value) != type([]): value = [value] for value in value: if type(value) is tuple: fn = ';filename="%s"' % value[0] value = value[1] else: fn = "" value = str(value) body.write(sep_boundary) body.write('\nContent-Disposition: form-data; name="%s"'%key) body.write(fn) body.write("\n\n") body.write(value) if value and value[-1] == '\r': body.write('\n') # write an extra newline (lurve Macs) body.write(end_boundary) body.write("\n") body = body.getvalue() self.announce("Submitting %s to %s" % (filename, self.repository), log.INFO) # build the Request # We can't use urllib2 since we need to send the Basic # auth right with 
the first request schema, netloc, url, params, query, fragments = \ urlparse.urlparse(self.repository) assert not params and not query and not fragments if schema == 'http': http = httplib.HTTPConnection(netloc) elif schema == 'https': http = httplib.HTTPSConnection(netloc) else: raise AssertionError, "unsupported schema "+schema data = '' loglevel = log.INFO try: http.connect() http.putrequest("POST", url) http.putheader('Content-type', 'multipart/form-data; boundary=%s'%boundary) http.putheader('Content-length', str(len(body))) http.putheader('Authorization', auth) http.endheaders() http.send(body) except socket.error, e: self.announce(e.msg, log.ERROR) return r = http.getresponse() if r.status == 200: self.announce('Server response (%s): %s' % (r.status, r.reason), log.INFO) else: self.announce('Upload failed (%s): %s' % (r.status, r.reason), log.ERROR) if self.show_response: print '-'*75, r.read(), '-'*75
Python
0
@@ -2421,16 +2421,21 @@ filename +,'rb' ).read()
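The fix above is essentially one token: the old code read the package with `open(filename).read()`, and on Windows text mode both translates `\r\n` to `\n` and treats a stray Ctrl-Z (0x1A) byte as end-of-file, which is exactly the garbling and truncation the subject describes. A minimal sketch of the corrected read (the archive name here is hypothetical):

```python
# Binary mode returns the bytes verbatim; text mode on Windows collapses
# '\r\n' to '\n' and stops reading at the first 0x1A (Ctrl-Z) byte.
with open("dist/example-1.0.tar.gz", "rb") as f:  # hypothetical filename
    content = f.read()
```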
c1682bf0f795d74d376fb22364b691ae7db97995
Fix python test to work on Windows
src/com/facebook/buck/parser/buck_test.py
src/com/facebook/buck/parser/buck_test.py
from buck import glob_internal, LazyBuildEnvPartial from pathlib import Path, PurePosixPath import os import shutil import sys import tempfile import unittest class FakePath(PurePosixPath): def glob(self, pattern): return self.glob_results.get(pattern) def is_file(self): return True def fake_path(path, glob_results={}): # Path does magic in __new__ with its args; it's hard to add more without # changing that class. So we use a wrapper function to diddle with # FakePath's members. result = FakePath(path) result.glob_results = {} for pattern, paths in glob_results.iteritems(): result.glob_results[pattern] = [result / FakePath(p) for p in paths] return result def split_path(path): """Splits /foo/bar/baz.java into ['', 'foo', 'bar', 'baz.java'].""" return path.split('/') class TestBuck(unittest.TestCase): def test_split_path(self): self.assertEqual( ['', 'foo', 'bar', 'baz.java'], split_path('/foo/bar/baz.java')) self.assertEqual( ['foo', 'bar', 'baz.java'], split_path('foo/bar/baz.java')) self.assertEqual(['', 'foo', 'bar', ''], split_path('/foo/bar/')) def test_glob_includes_simple(self): search_base = fake_path( 'foo', glob_results={'*.java': ['A.java', 'B.java']}) self.assertEqual( ['A.java', 'B.java'], glob_internal( includes=['*.java'], excludes=[], include_dotfiles=False, search_base=search_base)) def test_glob_includes_sort(self): search_base = fake_path( 'foo', glob_results={'*.java': ['A.java', 'E.java', 'D.java', 'C.java', 'B.java']}) self.assertEqual( ['A.java', 'B.java', 'C.java', 'D.java', 'E.java'], glob_internal( includes=['*.java'], excludes=[], include_dotfiles=False, search_base=search_base)) def test_glob_includes_multi(self): search_base = fake_path( 'foo', glob_results={ 'bar/*.java': ['bar/A.java', 'bar/B.java'], 'baz/*.java': ['baz/C.java', 'baz/D.java'], }) self.assertEqual( ['bar/A.java', 'bar/B.java', 'baz/C.java', 'baz/D.java'], glob_internal( includes=['bar/*.java', 'baz/*.java'], excludes=[], include_dotfiles=False, search_base=search_base)) def test_glob_excludes_double_star(self): search_base = fake_path( 'foo', glob_results={ '**/*.java': ['A.java', 'B.java', 'Test.java'], }) self.assertEqual( ['A.java', 'B.java'], glob_internal( includes=['**/*.java'], excludes=['**/*Test.java'], include_dotfiles=False, search_base=search_base)) def test_glob_excludes_multi(self): search_base = fake_path( 'foo', glob_results={ 'bar/*.java': ['bar/A.java', 'bar/B.java'], 'baz/*.java': ['baz/C.java', 'baz/D.java'], }) self.assertEqual( ['bar/B.java', 'baz/D.java'], glob_internal( includes=['bar/*.java', 'baz/*.java'], excludes=['*/[AC].java'], include_dotfiles=False, search_base=search_base)) def test_glob_excludes_relative(self): search_base = fake_path( 'foo', glob_results={ '**/*.java': ['foo/A.java', 'foo/bar/B.java', 'bar/C.java'], }) self.assertEqual( ['foo/A.java', 'foo/bar/B.java'], glob_internal( includes=['**/*.java'], excludes=['bar/*.java'], include_dotfiles=False, search_base=search_base)) def test_glob_includes_skips_dotfiles(self): search_base = fake_path( 'foo', glob_results={'*.java': ['A.java', '.B.java']}) self.assertEqual( ['A.java'], glob_internal( includes=['*.java'], excludes=[], include_dotfiles=False, search_base=search_base)) def test_glob_includes_does_not_skip_dotfiles_if_include_dotfiles(self): search_base = fake_path( 'foo', glob_results={'*.java': ['A.java', '.B.java']}) self.assertEqual( ['.B.java', 'A.java'], glob_internal( includes=['*.java'], excludes=[], include_dotfiles=True, search_base=search_base)) def 
test_glob_double_star_integration(self): d = tempfile.mkdtemp() try: subdir = os.path.join(d, 'b', 'a', 'c', 'a') os.makedirs(subdir) f = open(os.path.join(subdir, 'A.java'), 'w') f.close() f = open(os.path.join(subdir, 'B.java'), 'w') f.close() f = open(os.path.join(subdir, 'Test.java'), 'w') f.close() f = open(os.path.join(subdir, '.tmp.java'), 'w') f.close() os.makedirs(os.path.join(subdir, 'NotAFile.java')) self.assertEqual( ['b/a/c/a/A.java', 'b/a/c/a/B.java'], glob_internal( includes=['b/a/**/*.java'], excludes=['**/*Test.java'], include_dotfiles=False, search_base=Path(d))) finally: shutil.rmtree(d) def test_lazy_build_env_partial(self): def cobol_binary( name, deps=[], build_env=None): return (name, deps, build_env) testLazy = LazyBuildEnvPartial(cobol_binary) testLazy.build_env = {} self.assertEqual( ('HAL', [1, 2, 3], {}), testLazy.invoke(name='HAL', deps=[1, 2, 3])) testLazy.build_env = {'abc': 789} self.assertEqual( ('HAL', [1, 2, 3], {'abc': 789}), testLazy.invoke(name='HAL', deps=[1, 2, 3])) if __name__ == '__main__': unittest.main()
Python
0.000002
@@ -5490,42 +5490,153 @@ [ -'b/a/c/a/A.java', 'b/a/c/a/B.java' +
 os.path.join('b', 'a', 'c', 'a', 'A.java'),
 os.path.join('b', 'a', 'c', 'a', 'B.java'),
 ],
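The patch replaces hard-coded `/`-separated expected paths with `os.path.join`, which builds paths with the host platform's separator, so the assertions match what `glob_internal` actually returns on Windows. A small illustration, not taken from the patch itself:

```python
import os

# os.path.join uses os.sep, so the same call yields
# 'b/a/c/a/A.java' on POSIX and 'b\\a\\c\\a\\A.java' on Windows.
expected = os.path.join('b', 'a', 'c', 'a', 'A.java')
```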
24b86c78a6420006eabf6c27535f946edc612385
Handle no tags in repository better
git_gutter_compare.py
git_gutter_compare.py
import sublime import sublime_plugin ST3 = int(sublime.version()) >= 3000 if ST3: from GitGutter.view_collection import ViewCollection else: from view_collection import ViewCollection class GitGutterCompareCommit(sublime_plugin.WindowCommand): def run(self): self.view = self.window.active_view() key = ViewCollection.get_key(self.view) self.handler = ViewCollection.views[key] self.results = self.commit_list() self.window.show_quick_panel(self.results, self.on_select) def commit_list(self): result = self.handler.git_commits().decode("utf-8") return [r.split('\a', 2) for r in result.strip().split('\n')] def item_to_commit(self, item): return item[1].split(' ')[0] def on_select(self, selected): if 0 > selected < len(self.results): return item = self.results[selected] commit = self.item_to_commit(item) ViewCollection.set_compare(commit) ViewCollection.clear_git_time(self.view) ViewCollection.add(self.view) class GitGutterCompareBranch(GitGutterCompareCommit): def commit_list(self): result = self.handler.git_branches().decode("utf-8") return [self.parse_result(r) for r in result.strip().split('\n')] def parse_result(self, result): pieces = result.split('\a') message = pieces[0] branch = pieces[1].split("/")[2] commit = pieces[2][0:7] return [branch, commit + " " + message] class GitGutterCompareTag(GitGutterCompareCommit): def commit_list(self): result = self.handler.git_tags().decode("utf-8") return [self.parse_result(r) for r in result.strip().split('\n')] def parse_result(self, result): if not result: sublime.message_dialog("No tags found in repository") return pieces = result.split(' ') commit = pieces[0] tag = pieces[1].replace("refs/tags/", "") return [tag, commit] def item_to_commit(self, item): return item[1] class GitGutterCompareHead(sublime_plugin.WindowCommand): def run(self): self.view = self.window.active_view() ViewCollection.set_compare("HEAD") ViewCollection.clear_git_time(self.view) ViewCollection.add(self.view) class GitGutterShowCompare(sublime_plugin.WindowCommand): def run(self): comparing = ViewCollection.get_compare() sublime.message_dialog("GitGutter is comparing against: " + comparing)
Python
0
@@ -452,16 +452,45 @@ _list()
 + if self.results:
 @@ -1660,32 +1660,55 @@ decode("utf-8")
 + if result:
 return [ @@ -1769,66 +1769,20 @@ ')]
 -
 -def parse_result(self, result):
 if not result + else :
 @@ -1845,34 +1845,52 @@ itory")
 - +
 - return +def parse_result(self, result):
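The gotcha behind this fix is that splitting an empty string still yields one element, so a repository with no tags produced a single bogus entry instead of an empty list; the patch checks the command output before parsing it. A quick demonstration of the Python behaviour involved:

```python
result = ""                        # `git tag` printed nothing: no tags exist
print(result.strip().split("\n"))  # [''] -- one empty entry, not an empty list
if result:                         # guard first, as the patched commit_list does
    entries = [r.split(" ") for r in result.strip().split("\n")]
```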
0e627888f214947b8e021542cb3f2fc0fb07e087
correct testing
testing/test_sct_segment_graymatter.py
testing/test_sct_segment_graymatter.py
#!/usr/bin/env python ######################################################################################### # # Test function sct_segment_graymatter # # --------------------------------------------------------------------------------------- # Copyright (c) 2014 Polytechnique Montreal <www.neuro.polymtl.ca> # Author: Sara Dupont # modified: 2015/08/31 # # About the license: see the file LICENSE.TXT ######################################################################################### # import commands # import sys import os from pandas import DataFrame import sct_segment_graymatter # from msct_image import Image import sct_utils as sct from numpy import sum, mean # import time from sct_warp_template import get_file_label # append path that contains scripts, to be able to load modules # status, path_sct = commands.getstatusoutput('echo $SCT_DIR') # sys.path.append(path_sct + '/scripts') def test(path_data, parameters=''): if not parameters: # get file name of vertebral labeling from template # file_vertfile = get_file_label(path_data+'mt/label/template', 'vertebral', output='file') parameters = '-i t2s/t2s.nii.gz -s t2s/t2s_seg.nii.gz -vertfile t2s/MNI-Poly-AMU_level_crop.nii.gz -ref t2s/t2s_gmseg_manual.nii.gz -qc 0' parser = sct_segment_graymatter.get_parser() dict_param = parser.parse(parameters.split(), check_file_exist=False) dict_param_with_path = parser.add_path_to_file(dict_param, path_data, input_file=True) #if -model is used : do not add the path before. if '-model' in dict_param_with_path.keys(): dict_param_with_path['-model'] = dict_param_with_path['-model'][len(path_data):] param_with_path = parser.dictionary_to_string(dict_param_with_path) # Check if input files exist if not (os.path.isfile(dict_param_with_path['-i']) and os.path.isfile(dict_param_with_path['-s']) and os.path.isfile(dict_param_with_path['-vertfile'])): # and os.path.isfile(dict_param_with_path['-ref'])): status = 200 output = 'ERROR: the file(s) provided to test function do not exist in folder: ' + path_data return status, output, DataFrame(data={'status': status, 'output': output, 'dice_gm': float('nan'), 'dice_wm': float('nan'), 'hausdorff': float('nan'), 'med_dist': float('nan'), 'duration_[s]': float('nan')}, index=[path_data]) import time, random subject_folder = path_data.split('/') if subject_folder[-1] == '' and len(subject_folder) > 1: subject_folder = subject_folder[-2] else: subject_folder = subject_folder[-1] path_output = sct.slash_at_the_end('sct_segment_graymatter_' + subject_folder + '_' + time.strftime("%y%m%d%H%M%S") + '_'+str(random.randint(1, 1000000)), slash=1) param_with_path += ' -ofolder ' + path_output cmd = 'sct_segment_graymatter ' + param_with_path time_start = time.time() status, output = sct.run(cmd, 0) duration = time.time() - time_start # initialization of results: must be NaN if test fails result_dice_gm, result_dice_wm, result_hausdorff, result_median_dist = float('nan'), float('nan'), float('nan'), float('nan') if status == 0 and "-ref" in dict_param_with_path.keys() : target_name = sct.extract_fname(dict_param_with_path["-i"])[1] dice_fname = path_output+'dice_coefficient_'+target_name+'.txt' hausdorff_fname = path_output+'hausdorff_dist_'+target_name+'.txt' # Extracting dice results: dice = open(dice_fname, 'r') dice_lines = dice.readlines() dice.close() gm_start = dice_lines.index('Dice coefficient on the Gray Matter segmentation:\n') wm_start = dice_lines.index('Dice coefficient on the White Matter segmentation:\n') # extracting dice on GM gm_dice_lines = 
dice_lines[gm_start:wm_start-1] gm_dice_lines = gm_dice_lines[gm_dice_lines.index('2D Dice coefficient by slice:\n')+1:-1] null_slices = [] gm_dice = [] for line in gm_dice_lines: n_slice, dc = line.split(' ') # remove \n from dice result dc = dc[:-1] dc = dc[:-4] if '[0m' in dc else dc if dc == '0' or dc == 'nan': null_slices.append(n_slice) else: try: gm_dice.append(float(dc)) except ValueError: gm_dice.append(float(dc[:-4])) result_dice_gm = mean(gm_dice) # extracting dice on WM wm_dice_lines = dice_lines[wm_start:] wm_dice_lines = wm_dice_lines[wm_dice_lines.index('2D Dice coefficient by slice:\n')+1:] wm_dice = [] for line in wm_dice_lines: n_slice, dc = line.split(' ') # remove \n from dice result if line is not wm_dice_lines[-1]: dc = dc[:-1] if n_slice not in null_slices: try: wm_dice.append(float(dc)) except ValueError: wm_dice.append(float(dc[:-4])) result_dice_wm = mean(wm_dice) # Extracting hausdorff distance results hd = open(hausdorff_fname, 'r') hd_lines = hd.readlines() hd.close() # remove title of columns and last empty/non important lines hd_lines = hd_lines[1:-4] hausdorff = [] max_med = [] for line in hd_lines: slice_id, res = line.split(':') slice, n_slice = slice_id.split(' ') if n_slice not in null_slices: hd, med1, med2 = res[:-1].split(' - ') hd, med1, med2 = float(hd), float(med1), float(med2) hausdorff.append(hd) max_med.append(max(med1, med2)) result_hausdorff = mean(hausdorff) result_median_dist = mean(max_med) # Integrity check hd_threshold = 3 # in mm wm_dice_threshold = 0.8 if result_hausdorff > hd_threshold or result_dice_wm < wm_dice_threshold: status = 99 output += '\nResulting segmentation is too different from manual segmentation:\n' \ 'WM dice: '+str(result_dice_wm)+'\n' \ 'Hausdorff distance: '+str(result_hausdorff)+'\n' # transform results into Pandas structure results = DataFrame(data={'status': status, 'output': output, 'dice_gm': result_dice_gm, 'dice_wm': result_dice_wm, 'hausdorff': result_hausdorff, 'med_dist': result_median_dist, 'duration_[s]': duration}, index=[path_data]) return status, output, results if __name__ == "__main__": # call main function test(path_sct+'/data')
Python
0
@@ -1193,16 +1193,29 @@ ertfile +'+path_data+' t2s/MNI-
b5ea2179f9c89badf50fdd51217d4e0f70a5cb88
Remove unnecessary try/except functools.update_wrapper import.
django_extensions/admin/__init__.py
django_extensions/admin/__init__.py
# coding=utf-8 # # Autocomplete feature for admin panel # import six import operator from six.moves import reduce import django from django.http import HttpResponse, HttpResponseNotFound from django.conf import settings from django.db import models from django.db.models.query import QuerySet from django.utils.encoding import smart_str from django.utils.translation import ugettext as _ from django.utils.text import get_text_list from django.contrib.admin import ModelAdmin from django_extensions.admin.widgets import ForeignKeySearchInput from django_extensions.compat import get_model_compat try: from functools import update_wrapper assert update_wrapper except ImportError: from django.utils.functional import update_wrapper class ForeignKeyAutocompleteAdmin(ModelAdmin): """Admin class for models using the autocomplete feature. There are two additional fields: - related_search_fields: defines fields of managed model that have to be represented by autocomplete input, together with a list of target model fields that are searched for input string, e.g.: related_search_fields = { 'author': ('first_name', 'email'), } - related_string_functions: contains optional functions which take target model instance as only argument and return string representation. By default __unicode__() method of target object is used. And also an optional additional field to set the limit on the results returned by the autocomplete query. You can set this integer value in your settings file using FOREIGNKEY_AUTOCOMPLETE_LIMIT or you can set this per ForeignKeyAutocompleteAdmin basis. If any value is set the results will not be limited. """ related_search_fields = {} related_string_functions = {} autocomplete_limit = getattr(settings, 'FOREIGNKEY_AUTOCOMPLETE_LIMIT', None) def get_urls(self): from django.conf.urls import url def wrap(view): def wrapper(*args, **kwargs): return self.admin_site.admin_view(view)(*args, **kwargs) return update_wrapper(wrapper, view) # model._meta.module_name is deprecated in django version 1.7 and removed in django version 1.8. # It is replaced by model._meta.model_name if django.VERSION < (1, 7): info = self.model._meta.app_label, self.model._meta.module_name else: info = self.model._meta.app_label, self.model._meta.model_name _url = url(r'foreignkey_autocomplete/$', wrap(self.foreignkey_autocomplete), name='%s_%s_autocomplete' % info) # django.conf.urls.patterns is deprecated in django version 1.9 and removed in django version 1.10. 
# It is replaced by a simple Python list if django.VERSION < (1, 9): from django.conf.urls import patterns urlpatterns = patterns('', _url) else: urlpatterns = [_url] urlpatterns += super(ForeignKeyAutocompleteAdmin, self).get_urls() return urlpatterns def foreignkey_autocomplete(self, request): """ Searches in the fields of the given related model and returns the result as a simple string to be used by the jQuery Autocomplete plugin """ query = request.GET.get('q', None) app_label = request.GET.get('app_label', None) model_name = request.GET.get('model_name', None) search_fields = request.GET.get('search_fields', None) object_pk = request.GET.get('object_pk', None) try: to_string_function = self.related_string_functions[model_name] except KeyError: if six.PY3: to_string_function = lambda x: x.__str__() else: to_string_function = lambda x: x.__unicode__() if search_fields and app_label and model_name and (query or object_pk): def construct_search(field_name): # use different lookup methods depending on the notation if field_name.startswith('^'): return "%s__istartswith" % field_name[1:] elif field_name.startswith('='): return "%s__iexact" % field_name[1:] elif field_name.startswith('@'): return "%s__search" % field_name[1:] else: return "%s__icontains" % field_name # As of Django 1.7 the 'get_model' method was moved to 'apps' model = get_model_compat(app_label, model_name) queryset = model._default_manager.all() data = '' if query: for bit in query.split(): or_queries = [models.Q(**{construct_search(smart_str(field_name)): smart_str(bit)}) for field_name in search_fields.split(',')] other_qs = QuerySet(model) other_qs.query.select_related = queryset.query.select_related other_qs = other_qs.filter(reduce(operator.or_, or_queries)) queryset = queryset & other_qs additional_filter = self.get_related_filter(model, request) if additional_filter: queryset = queryset.filter(additional_filter) if self.autocomplete_limit: queryset = queryset[:self.autocomplete_limit] data = ''.join([six.u('%s|%s\n') % (to_string_function(f), f.pk) for f in queryset]) elif object_pk: try: obj = queryset.get(pk=object_pk) except: pass else: data = to_string_function(obj) return HttpResponse(data) return HttpResponseNotFound() def get_related_filter(self, model, request): """Given a model class and current request return an optional Q object that should be applied as an additional filter for autocomplete query. If no additional filtering is needed, this method should return None.""" def get_help_text(self, field_name, model_name): searchable_fields = self.related_search_fields.get(field_name, None) if searchable_fields: help_kwargs = { 'model_name': model_name, 'field_list': get_text_list(searchable_fields, _('and')), } return _('Use the left field to do %(model_name)s lookups in the fields %(field_list)s.') % help_kwargs return '' def formfield_for_dbfield(self, db_field, **kwargs): """ Overrides the default widget for Foreignkey fields if they are specified in the related_search_fields class attribute. """ if isinstance(db_field, models.ForeignKey) and db_field.name in self.related_search_fields: model_name = db_field.rel.to._meta.object_name help_text = self.get_help_text(db_field.name, model_name) if kwargs.get('help_text'): help_text = six.u('%s %s' % (kwargs['help_text'], help_text)) kwargs['widget'] = ForeignKeySearchInput(db_field.rel, self.related_search_fields[db_field.name]) kwargs['help_text'] = help_text return super(ForeignKeyAutocompleteAdmin, self).formfield_for_dbfield(db_field, **kwargs)
Python
0
@@ -78,16 +78,53 @@ perator
 +from functools import update_wrapper
 from six @@ -633,156 +633,8 @@ at

 -try:
 from functools import update_wrapper
 assert update_wrapper
except ImportError:
 from django.utils.functional import update_wrapper

 
cla
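`functools.update_wrapper` has shipped with the standard library since Python 2.5, so the try/except fallback to `django.utils.functional` was dead code and the patch imports it directly. A short sketch of what the helper does for the wrapped admin view (the view itself is hypothetical):

```python
from functools import update_wrapper

def view(request):
    """A hypothetical admin view."""

def wrapper(*args, **kwargs):
    return view(*args, **kwargs)

# Copies __name__, __doc__, __module__, etc. from view onto wrapper, so URL
# resolution and introspection see the original view's metadata.
update_wrapper(wrapper, view)
assert wrapper.__name__ == "view"
```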
920872db456987e5bd5002b3bf3fc2168dcbdff4
fix name
django_extra_tools/conf/defaults.py
django_extra_tools/conf/defaults.py
"""Default configuration""" # auth.backends.SuperUserAuthenticateMixin username separator AUTH_BACKEND_USERNAME_SEPARATOR = ':' XHR_MIDDLEWARE_ALLOWED_ORIGINS = '*' XHR_MIDDLEWARE_ALLOWED_METHODS = ['POST', 'GET', 'OPTIONS', 'PUT', 'DELETE'] XHR_MIDDLEWARE_ALLOWED_HEADERS = ['Content-Type', 'Authorization', 'Location', '*'] XHR_MIDDLEWARE_ALLOWED_CREDENTIALS = 'true' XHR_MIDDLEWARE_EXPOSE_HEADERS = ['Location'] PRIVATE_IPS_PREFIX = ('10.', '172.', '192.', )
Python
0.019891
@@ -42,34 +42,36 @@ nds. +Through Super -UserAuthenticateMixin +userModelBackend use
a4a01c466c916f5c4ff44d40bc5e052e98951f1d
Bump version
sqlitebiter/__version__.py
sqlitebiter/__version__.py
__author__ = "Tsuyoshi Hombashi"
__copyright__ = "Copyright 2016, {}".format(__author__)
__license__ = "MIT License"
__version__ = "0.29.0"
__maintainer__ = __author__
__email__ = "tsuyoshi.hombashi@gmail.com"
Python
0
@@ -130,17 +130,17 @@ = "0.29. -0 +1 "
__main
8ac2103bdef4bc3fe0c81dbbc0015ebad1a1c624
fix syntax
doc/examples/edges/plot_skeleton.py
doc/examples/edges/plot_skeleton.py
""" =========== Skeletonize =========== Skeletonization reduces binary objects to 1 pixel wide representations. This can be useful for feature extraction, and/or representing an object's topology. ``skeletonize`` works by making successive passes of the image. On each pass, border pixels are identified and removed on the condition that they do not break the connectivity of the corresponding object. """ from skimage.morphology import skeletonize from skimage import draw import numpy as np import matplotlib.pyplot as plt # an empty image image = np.zeros((400, 400)) # foreground object 1 image[10:-10, 10:100] = 1 image[-100:-10, 10:-10] = 1 image[10:-10, -100:-10] = 1 # foreground object 2 rs, cs = draw.line(250, 150, 10, 280) for i in range(10): image[rs + i, cs] = 1 rs, cs = draw.line(10, 150, 250, 280) for i in range(20): image[rs + i, cs] = 1 # foreground object 3 ir, ic = np.indices(image.shape) circle1 = (ic - 135)**2 + (ir - 150)**2 < 30**2 circle2 = (ic - 135)**2 + (ir - 150)**2 < 20**2 image[circle1] = 1 image[circle2] = 0 # perform skeletonization skeleton = skeletonize(image) # display results fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 4), sharex=True, sharey=True, subplot_kw={'adjustable': 'box-forced'}) ax = axes.ravel() ax[0].imshow(image, cmap=plt.cm.gray) ax[0].axis('off') ax[0].set_title('original', fontsize=20) ax[1].imshow(skeleton, cmap=plt.cm.gray) ax[1].axis('off') ax[1].set_title('skeleton', fontsize=20) fig.tight_layout() plt.show() ###################################################################### # **skeletonize vs skeletonize 3d** # # ``skeletonize`` [Zha84]_ works by making successive passes of # the image, removing pixels on object borders. This continues until no # more pixels can be removed. The image is correlated with a # mask that assigns each pixel a number in the range [0...255] # corresponding to each possible pattern of its 8 neighbouring # pixels. A look up table is then used to assign the pixels a # value of 0, 1, 2 or 3, which are selectively removed during # the iterations. # # ``skeletonize_3d`` [Lee94]_ uses an octree data # structure to examine a 3x3x3 neighborhood of a pixel. The algorithm # proceeds by iteratively sweeping # over the image, and removing pixels at each iteration until the image # stops changing. Each iteration consists of two steps: first, a list of # candidates for removal is assembled; then pixels from this list are # rechecked sequentially, to better preserve connectivity of the image. 
import numpy as np from scipy import ndimage as ndi import matplotlib.pyplot as plt from skimage.morphology import skeletonize, skeletonize_3d from skimage.data import binary_blobs data = binary_blobs(200, blob_size_fraction=.2, volume_fraction=.35, seed=1) skeleton = skeletonize(data) skeleton3d = skeletonize_3d(data) fig, axes = plt.subplots(1, 3, figsize=(8, 4), sharex=True, sharey=True, subplot_kw={'adjustable': 'box-forced'}) ax = axes.ravel() ax[0].imshow(data, cmap=plt.cm.gray, interpolation='nearest') ax[0].set_title('original') ax[0].axis('off') ax[1].imshow(skeleton, cmap=plt.cm.gray, interpolation='nearest') ax[1].set_title('skeletonize') ax[1].axis('off') ax[2].imshow(skeleton3d, cmap=plt.cm.gray, interpolation='nearest') ax[2].set_title('skeletonize_3d') ax[2].axis('off') fig.tight_layout() plt.show() ###################################################################### # **Medial axis skeletonization** # # The medial axis of an object is the set of all points having more than one # closest point on the object's boundary. It is often called the *topological # skeleton*, because it is a 1-pixel wide skeleton of the object, with the same # connectivity as the original object. # # Here, we use the medial axis transform to compute the width of the foreground # objects. As the function ``medial_axis`` returns the distance transform in # addition to the medial axis (with the keyword # argument ``return_distance=True``), it is possible to compute the distance to # the background for all points of the medial axis with this function. This gives # an estimate of the local width of the objects. # # For a skeleton with fewer branches, ``skeletonize`` or ``skeletonize_3d`` must # be preferred. from skimage.morphology import medial_axis, skeletonize, skeletonize_3d # Generate the data data = binary_blobs(200, blob_size_fraction=.2, volume_fraction=.35, seed=1) # Compute the medial axis (skeleton) and the distance transform skel, distance = medial_axis(data, return_distance=True) # Compare with other skeletonization algorithms skeleton = skeletonize(data) skeleton3d = skeletonize_3d(data) # Distance to the background for pixels of the skeleton dist_on_skel = distance * skel fig, axes = plt.subplots(2, 2, figsize=(8, 8), sharex=True, sharey=True, subplot_kw={'adjustable': 'box-forced'}) ax = axes.ravel() ax[0].imshow(data, cmap=plt.cm.gray, interpolation='nearest') ax[0].set_title('original') ax[0].axis('off') ax[1].imshow(dist_on_skel, cmap=plt.cm.spectral, interpolation='nearest') ax[1].contour(data, [0.5], colors='w') ax[1].set_title('medial_axis') ax[1].axis('off') ax[2].imshow(skeleton, cmap=plt.cm.gray, interpolation='nearest') ax[2].set_title('skeletonize') ax[2].axis('off') ax[3].imshow(skeleton3d, cmap=plt.cm.gray, interpolation='nearest') ax[3].set_title('skeletonize_3d') ax[3].axis('off') fig.tight_layout() plt.show()
Python
0.000023
@@ -397,17 +397,16 @@ object.
 -
 """
from @@ -2133,17 +2133,16 @@ ons.
#
# - ``skele
05458457f12618cc69970cd2bda87e25e29384a4
simplify the code (Thx Stefan)
doc/examples/plot_peak_local_max.py
doc/examples/plot_peak_local_max.py
""" ==================== Finding local maxima ==================== The ``peak_local_max`` function returns the coordinates of local peaks (maxima) in an image. A maximum filter is used for finding local maxima. This operation dilates the original image and merges neighboring local maxima closer than the size of the dilation. Locations where the original image is equal to the dilated image are returned as local maxima. """ from scipy import ndimage import matplotlib.pyplot as plt from skimage.feature import peak_local_max from skimage import data, img_as_float im = img_as_float(data.coins()) # image_max is the dilation of im with a 20*20 structuring element # It is used within peak_local_max function image_max = ndimage.maximum_filter(im, size=20, mode='constant') # Comparison between image_max and im to find the coordinates of local maxima coordinates = peak_local_max(im, min_distance=20) # display results fig, ax = plt.subplots(1, 3, figsize=(8, 3)) ax1, ax2, ax3 = ax.ravel() ax1.imshow(im, cmap=plt.cm.gray) ax1.axis('off') ax1.set_title('Original') ax2.imshow(image_max, cmap=plt.cm.gray) ax2.axis('off') ax2.set_title('Maximum filter') ax3.imshow(im, cmap=plt.cm.gray) ax3.autoscale(False) ax3.plot([p[1] for p in coordinates], [p[0] for p in coordinates], 'r.') ax3.axis('off') ax3.set_title('Peak local max') fig.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9, bottom=0.02, left=0.02, right=0.98) plt.show()
Python
0.000001
@@ -1223,51 +1223,26 @@ lot( -[p[1] for p in coordinates], [p[0] for p in +coordinates[:, 1], coo @@ -1249,16 +1249,21 @@ rdinates +[:, 0 ], 'r.')
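The simplification works because `peak_local_max` returns an `(N, 2)` NumPy array of `(row, col)` coordinates, so a column slice replaces each per-element list comprehension. A toy illustration with made-up coordinates:

```python
import numpy as np

coordinates = np.array([[10, 25], [40, 5]])  # hypothetical (row, col) peaks
xs = coordinates[:, 1]  # all columns -> x values: array([25,  5])
ys = coordinates[:, 0]  # all rows    -> y values: array([10, 40])
```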
c19ed101135b090de15bb13e940a62a2b28dd5b3
Add style and image as allowed tags in markdown
dpb/settings.py
dpb/settings.py
""" Django settings for dpb project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ import os SECRET_KEY = os.getenv("SECRET_KEY", "CHANGE_ME") DEBUG = os.getenv("DEBUG", "").lower() != "false" # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # ALLOWED_HOSTS = ['.deutscher-pfadfinderbund.de', 'deutscher-pfadfinderbund.de', '.jungenbund.de', '.maedchenbund.de', # '127.0.0.1'] ALLOWED_HOSTS = ['*'] SITE_ID = 1 LOGIN_URL = '/login/' LOGOUT_REDIRECT_URL = 'index' # Application definition INSTALLED_APPS = ( 'django.contrib.contenttypes', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.postgres', 'django.contrib.staticfiles', 'django.contrib.sites', 'django.contrib.sitemaps', # 3rd party 'dpb.apps.MyFilerConfig', # Use Django-Filer with own config for verbose name 'easy_thumbnails', 'django_forms_bootstrap', 'autoslug', 'pagedown', 'markdownify', # Own apps 'pages', 'archive', 'contact', 'feedback', 'intern', 'blog', 'links', 'evening_program', 'maps', ) MIDDLEWARE = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) MIDDLEWARE_CLASSES = MIDDLEWARE ROOT_URLCONF = 'dpb.urls' WSGI_APPLICATION = 'dpb.wsgi.application' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': os.getenv("POSTGRES_DB", "dpb"), 'USER': os.getenv("POSTGRES_USER", "dpb"), 'PASSWORD': os.getenv("POSTGRES_PASSWORD", "razupaltuff"), 'HOST': os.getenv("DB_HOST", "localhost"), 'PORT': os.getenv("DB_PORT", "5432"), } } # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'de' TIME_ZONE = 'Europe/Berlin' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) FILE_UPLOAD_PERMISSIONS = 0o777 STATIC_ROOT = os.path.join(BASE_DIR, 'static') STATIC_URL = '/static/' STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ) STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'templates/dpb'), os.path.join(BASE_DIR, 'contact/templates/contact'), ("bootstrap", os.path.join(BASE_DIR, 'node_modules/bootstrap/dist/js')), ("jquery", os.path.join(BASE_DIR, 'node_modules/jquery/dist')), ("styles", os.path.join(BASE_DIR, 'styles')), ) MEDIA_ROOT = os.path.join(BASE_DIR, 'media') MEDIA_URL = '/media/' # Configure Templates TEMPLATES = [{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], 'debug': DEBUG, }, }, ] # Filer Settings FILER_CANONICAL_URL = 'filer/' # Configure Easy Thumbnails THUMBNAIL_ALIASES = { '': { 'pages': {'size': (350, 350), 'crop': "scale", 'quality': 100}, }, } MARKDOWNIFY_WHITELIST_TAGS = [ 'table', 'thead', 'tbody', 
'th', 'tr', 'td', 'a', 'abbr', 'acronym', 'b', 'blockquote', 'em', 'i', 'li', 'ol', 'p', 'strong', 'ul', ] MARKDOWNIFY_MARKDOWN_EXTENSIONS = [ 'markdown.extensions.fenced_code', 'markdown.extensions.extra', ] MARKDOWNIFY_WHITELIST_ATTRS = [ 'href', 'src', 'alt', 'class', 'id', ]
Python
0
@@ -4091,16 +4091,40 @@ 'ul',
 + 'img',
 'style',
 ]
MARKDO
43cbfcab71b0ec92f39e97c87edc2e7c60516784
Fix to allow Overlay.from_values to work with nested containers
holoviews/core/overlay.py
holoviews/core/overlay.py
""" Supplies Layer and related classes that allow overlaying of Views, including Overlay. A Layer is the final extension of View base class that allows Views to be overlayed on top of each other. Also supplies ViewMap which is the primary multi-dimensional Map type for indexing, slicing and animating collections of Views. """ from functools import reduce import numpy as np import param from .dimension import Dimension, Dimensioned, ViewableElement from .ndmapping import UniformNdMapping from .layout import Composable, Layout from .util import sanitize_identifier class Overlayable(object): """ Overlayable provides a mix-in class to support the mul operation for overlaying multiple elements. """ def __mul__(self, other): if isinstance(other, UniformNdMapping) and not isinstance(other, CompositeOverlay): items = [(k, self * v) for (k, v) in other.items()] return other.clone(items) self_item = [((self.group, self.label if self.label else 'I'), self)] other_items = (other.items() if isinstance(other, Overlay) else [((other.group, other.label if other.label else 'I'), other)]) return Overlay(items=Overlay.relabel_item_paths(list(self_item) + list(other_items))) class CompositeOverlay(ViewableElement, Composable): """ CompositeOverlay provides a common baseclass for Overlay classes. """ _deep_indexable = True def hist(self, index=0, adjoin=True, **kwargs): valid_ind = isinstance(index, int) and (0 <= index < len(self)) valid_label = index in [el.label for el in self] if not any([valid_ind, valid_label]): raise TypeError("Please supply a suitable index or label for the histogram data") hist = self[index].hist(adjoin=False, **kwargs) if adjoin: layout = self << hist layout.main_layer = index return layout else: return hist def dimension_values(self, dimension): values = [] found = False for el in self: if dimension in el.dimensions(label=True): values.append(el.dimension_values(dimension)) found = True if not found: return super(CompositeOverlay, self).dimension_values(dimension) values = [v for v in values if v is not None and len(v)] return np.concatenate(values) if len(values) else [] class Overlay(Layout, CompositeOverlay): """ An Overlay consists of multiple Views (potentially of heterogeneous type) presented one on top each other with a particular z-ordering. Overlays along with Views constitute the only valid leaf types of a Layout and in fact extend the Layout structure. Overlays are constructed using the * operator (building an identical structure to the + operator) and are the only objects that inherit both from Layout and CompositeOverlay. 
""" @classmethod def _from_values(cls, val): return reduce(lambda x,y: x*y, val).display('auto') def __init__(self, items=None, group=None, label=None, **params): view_params = ViewableElement.params().keys() self.__dict__['_fixed'] = False self.__dict__['_group'] = group self.__dict__['_label'] = label Layout.__init__(self, items, **{k:v for k,v in params.items() if k not in view_params}) ViewableElement.__init__(self, self.data, **{k:v for k,v in params.items() if k in view_params}) def __add__(self, other): return Layout.from_values(self) + Layout.from_values(other) def __mul__(self, other): if isinstance(other, Overlay): items = list(self.data.items()) + list(other.data.items()) elif isinstance(other, ViewableElement): label = other.label if other.label else 'I' items = list(self.data.items()) + [((other.group, label), other)] elif isinstance(other, UniformNdMapping): raise NotImplementedError return Overlay(items=self.relabel_item_paths(items)).display('all') def collapse(self, function): """ Collapses all the Elements in the Overlay using the supplied function if they share a common type and group. """ elements = list(self) types = [type(el) for el in elements] values = [el.group for el in elements] if not len(set(types)) == 1 and len(set(values)) == 1: raise Exception("Overlay is not homogenous in type or group " "and cannot be collapsed.") else: return elements[0].clone(types[0].collapse_data([el.data for el in elements], function)) @property def group(self): if self._group: return self._group elements = [el for el in self if not el._auxiliary_component] values = {el.group for el in elements} types = {type(el) for el in elements} if values: group = list(values)[0] vtype = list(types)[0].__name__ else: group, vtype = [], '' if len(values) == 1 and group != vtype: return group else: return type(self).__name__ @group.setter def group(self, group): if not sanitize_identifier.allowable(group): raise ValueError("Supplied group %s contains invalid characters." % group) else: self._group = group @property def label(self): if self._label: return self._label labels = {el.label for el in self if not el._auxiliary_component} if len(labels) == 1: return list(labels)[0] else: return '' @label.setter def label(self, label): if not sanitize_identifier.allowable(label): raise ValueError("Supplied group %s contains invalid characters." % label) self._label = label @property def deep_dimensions(self): dimensions = [] dimension_names = [] for el in self: for dim in el.dimensions(): if dim.name not in dimension_names: dimensions.append(dim) dimension_names.append(dim.name) return dimensions @property def shape(self): raise NotImplementedError class NdOverlay(UniformNdMapping, CompositeOverlay, Overlayable): """ An NdOverlay allows a group of NdOverlay to be overlaid together. NdOverlay can be indexed out of an overlay and an overlay is an iterable that iterates over the contained layers. 
""" key_dimensions = param.List(default=[Dimension('Element')], constant=True, doc="""List of dimensions the NdOverlay can be indexed by.""") _deep_indexable = True def __init__(self, overlays=None, **params): super(NdOverlay, self).__init__(overlays, **params) def hist(self, num_bins=20, bin_range=None, adjoin=True, individually=True, **kwargs): from ..operation import histogram return histogram(self, num_bins=num_bins, bin_range=bin_range, adjoin=adjoin, individually=individually, **kwargs) __all__ = list(set([_k for _k, _v in locals().items() if isinstance(_v, type) and issubclass(_v, Dimensioned)])) + ['Overlayable']
Python
0
@@ -3073,16 +3073,32 @@ y, val). +map(lambda x: x. display( @@ -3104,16 +3104,28 @@ ('auto') +, [Overlay])


 d
e00a82a31de820f28474cb5de47c5715dafd8d18
use the largest remainder method for distributing change in ratio_split()
hordak/utilities/money.py
hordak/utilities/money.py
from decimal import Decimal


def ratio_split(amount, ratios):
    """
    Split in_value according to the ratios specified in `ratios`

    This is special in that it ensures the returned values always sum to
    in_value (i.e. we avoid losses or gains due to rounding errors). As a
    result, this method returns a list of `Decimal` values with length equal
    to that of `ratios`.

    Examples:

        .. code-block:: python

            >>> from hordak.utilities.money import ratio_split
            >>> from decimal import Decimal
            >>> ratio_split(Decimal('10'), [Decimal('1'), Decimal('2')])
            [Decimal('3.33'), Decimal('6.67')]

        Note the returned values sum to the original input of ``10``. If we
        were to do this calculation in a naive fashion then the returned
        values would likely be ``3.33`` and ``6.66``, which would sum to
        ``9.99``, thereby loosing ``0.01``.

    Args:
        amount (Decimal): The amount to be split
        ratios (list[Decimal]): The ratios that will determine the split

    Returns: list(Decimal)
    """
    ratio_total = sum(ratios)
    divided_value = amount / ratio_total
    values = []
    for ratio in ratios:
        value = divided_value * ratio
        values.append(value)

    # Now round the values, keeping track of the bits we cut off
    rounded = [v.quantize(Decimal("0.01")) for v in values]
    remainders = [v - rounded[i] for i, v in enumerate(values)]
    remainder = sum(remainders)

    # Give the last person the (positive or negative) remainder
    rounded[-1] = (rounded[-1] + remainder).quantize(Decimal("0.01"))

    assert sum(rounded) == amount

    return rounded
Python
0
@@ -22,16 +22,59 @@ ecimal

 +from hordak.defaults import DECIMAL_PLACES
 
def rat @@ -1138,120 +1138,196 @@ -ratio_total = sum(ratios)
 divided_value = amount / ratio_total
 values = []
 for ratio in ratios:
 +precision = Decimal(10) ** Decimal(-DECIMAL_PLACES)
 assert amount == amount.quantize(precision)
 
 # Distribute the amount according to the ratios:
 ratio_total = sum(ratios)
 @@ -1335,127 +1335,131 @@ alue +s = -divided_value * ratio
 values.append(value)

 # Now round the values, keeping track of the bits we cut off +[amount * ratio / ratio_total for ratio in ratios]

 # Now round the values to the desired number of decimal places:
 @@ -1481,31 +1481,25 @@ uantize( -Decimal("0.01") +precision ) for v @@ -1517,234 +1517,534 @@ -remainders = [v - rounded[i] for i, v in enumerate(values)]
 remainder = sum(remainders)
 # Give the last person the (positive or negative) remainder
 rounded[-1] = (rounded[-1] + remainder).quantize(Decimal("0.01"))
 +
 # The rounded values may not add up to the exact amount.
 # Use the Largest Remainder algorithm to distribute the
 # difference between participants with non-zero ratios:
 participants = [i for i in range(len(ratios)) if ratios[i] != Decimal(0)]
 for p in sorted(participants, key=lambda i: rounded[i] - values[i]):
 total = sum(rounded)
 if total < amount:
 rounded[p] += precision
 elif total > amount:
 rounded[p] -= precision
 else:
 break
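The patch swaps "hand the whole rounding difference to the last participant" for the largest remainder method: round every share to the target precision, then distribute the leftover one precision step at a time, starting with the shares that lost the most to rounding. A self-contained toy version of the idea (the function name and structure are mine, not Hordak's):

```python
from decimal import Decimal

def largest_remainder_split(amount, ratios, precision=Decimal("0.01")):
    total_ratio = sum(ratios)
    exact = [amount * r / total_ratio for r in ratios]
    rounded = [v.quantize(precision) for v in exact]
    # Shares rounded down the most (rounded - exact smallest) adjust first.
    for i in sorted(range(len(ratios)), key=lambda i: rounded[i] - exact[i]):
        diff = amount - sum(rounded)
        if diff == 0:
            break
        rounded[i] += precision if diff > 0 else -precision
    return rounded

print(largest_remainder_split(Decimal("10"), [Decimal(1), Decimal(2)]))
# [Decimal('3.33'), Decimal('6.67')] -- sums exactly to 10
```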
535e646a66186b04347e1ada5c82c957df5e7cff
Add test for invalid ignore entries.
test/runner/lib/sanity/validate_modules.py
test/runner/lib/sanity/validate_modules.py
"""Sanity test using validate-modules.""" from __future__ import absolute_import, print_function import collections import json import os from lib.sanity import ( SanitySingleVersion, SanityMessage, SanityFailure, SanitySuccess, SanitySkipped, ) from lib.util import ( SubprocessError, display, run_command, read_lines_without_comments, ) from lib.ansible_util import ( ansible_environment, ) from lib.config import ( SanityConfig, ) from lib.test import ( calculate_confidence, calculate_best_confidence, ) VALIDATE_SKIP_PATH = 'test/sanity/validate-modules/skip.txt' VALIDATE_IGNORE_PATH = 'test/sanity/validate-modules/ignore.txt' UNSUPPORTED_PYTHON_VERSIONS = ( '2.6', '2.7', ) class ValidateModulesTest(SanitySingleVersion): """Sanity test using validate-modules.""" def test(self, args, targets): """ :type args: SanityConfig :type targets: SanityTargets :rtype: TestResult """ if args.python_version in UNSUPPORTED_PYTHON_VERSIONS: display.warning('Skipping validate-modules on unsupported Python version %s.' % args.python_version) return SanitySkipped(self.name) skip_paths = read_lines_without_comments(VALIDATE_SKIP_PATH) skip_paths_set = set(skip_paths) env = ansible_environment(args, color=False) paths = sorted([i.path for i in targets.include if i.module and i.path not in skip_paths_set]) if not paths: return SanitySkipped(self.name) cmd = [ args.python_executable, 'test/sanity/validate-modules/validate-modules', '--format', 'json', '--arg-spec', ] + paths invalid_ignores = [] ignore_entries = read_lines_without_comments(VALIDATE_IGNORE_PATH) ignore = collections.defaultdict(dict) line = 0 for ignore_entry in ignore_entries: line += 1 if not ignore_entry: continue if ' ' not in ignore_entry: invalid_ignores.append((line, 'Invalid syntax')) continue path, code = ignore_entry.split(' ', 1) ignore[path][code] = line if args.base_branch: cmd.extend([ '--base-branch', args.base_branch, ]) else: display.warning('Cannot perform module comparison against the base branch. Base branch not detected when running locally.') try: stdout, stderr = run_command(args, cmd, env=env, capture=True) status = 0 except SubprocessError as ex: stdout = ex.stdout stderr = ex.stderr status = ex.status if stderr or status not in (0, 3): raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout) if args.explain: return SanitySuccess(self.name) messages = json.loads(stdout) errors = [] for filename in messages: output = messages[filename] for item in output['errors']: errors.append(SanityMessage( path=filename, line=int(item['line']) if 'line' in item else 0, column=int(item['column']) if 'column' in item else 0, level='error', code='E%s' % item['code'], message=item['msg'], )) filtered = [] for error in errors: if error.code in ignore[error.path]: ignore[error.path][error.code] = None # error ignored, clear line number of ignore entry to track usage else: filtered.append(error) # error not ignored errors = filtered for invalid_ignore in invalid_ignores: errors.append(SanityMessage( code='A201', message=invalid_ignore[1], path=VALIDATE_IGNORE_PATH, line=invalid_ignore[0], column=1, confidence=calculate_confidence(VALIDATE_IGNORE_PATH, line, args.metadata) if args.metadata.changes else None, )) line = 0 for path in skip_paths: line += 1 if not path: continue if not os.path.exists(path): # Keep files out of the list which no longer exist in the repo. 
errors.append(SanityMessage( code='A101', message='Remove "%s" since it does not exist' % path, path=VALIDATE_SKIP_PATH, line=line, column=1, confidence=calculate_best_confidence(((VALIDATE_SKIP_PATH, line), (path, 0)), args.metadata) if args.metadata.changes else None, )) for path in paths: if path not in ignore: continue for code in ignore[path]: line = ignore[path][code] if not line: continue errors.append(SanityMessage( code='A102', message='Remove since "%s" passes "%s" test' % (path, code), path=VALIDATE_IGNORE_PATH, line=line, column=1, confidence=calculate_best_confidence(((VALIDATE_IGNORE_PATH, line), (path, 0)), args.metadata) if args.metadata.changes else None, )) if errors: return SanityFailure(self.name, messages=errors) return SanitySuccess(self.name)
Python
0
@@ -4925,32 +4925,704 @@ ))

 + for path in sorted(ignore.keys()):
 if os.path.exists(path):
 continue

 for line in sorted(ignore[path].values()):
 # Keep files out of the list which no longer exist in the repo.
 errors.append(SanityMessage(
 code='A101',
 message='Remove "%s" since it does not exist' % path,
 path=VALIDATE_IGNORE_PATH,
 line=line,
 column=1,
 confidence=calculate_best_confidence(((VALIDATE_IGNORE_PATH, line), (path, 0)), args.metadata) if args.metadata.changes else None,
 ))

 for path
36427387c053fa77b8fef1f529235f6f7107f467
move working tree assertion after veryfing dir
dvc/utils/fs.py
dvc/utils/fs.py
import errno import logging import os import shutil import stat import nanotime from shortuuid import uuid from dvc.exceptions import DvcException from dvc.scm.tree import is_working_tree from dvc.system import System from dvc.utils import dict_md5 from dvc.utils import fspath from dvc.utils import fspath_py35 from dvc.utils import relpath logger = logging.getLogger(__name__) def fs_copy(src, dst): if os.path.isdir(src): shutil.copytree(src, dst) else: shutil.copy2(src, dst) def get_inode(path): inode = System.inode(path) logger.debug("Path {} inode {}".format(path, inode)) return inode def get_mtime_and_size(path, tree): assert is_working_tree(tree) if os.path.isdir(fspath_py35(path)): size = 0 files_mtimes = {} for file_path in tree.walk_files(path): try: stat = os.stat(file_path) except OSError as exc: # NOTE: broken symlink case. if exc.errno != errno.ENOENT: raise continue size += stat.st_size files_mtimes[file_path] = stat.st_mtime # We track file changes and moves, which cannot be detected with simply # max(mtime(f) for f in non_ignored_files) mtime = dict_md5(files_mtimes) else: base_stat = os.stat(fspath_py35(path)) size = base_stat.st_size mtime = base_stat.st_mtime mtime = int(nanotime.timestamp(mtime)) # State of files handled by dvc is stored in db as TEXT. # We cast results to string for later comparisons with stored values. return str(mtime), str(size) class BasePathNotInCheckedPathException(DvcException): def __init__(self, path, base_path): msg = "Path: {} does not overlap with base path: {}".format( path, base_path ) super().__init__(msg) def contains_symlink_up_to(path, base_path): base_path = fspath(base_path) path = fspath(path) if base_path not in path: raise BasePathNotInCheckedPathException(path, base_path) if path == base_path: return False if System.is_symlink(path): return True if os.path.dirname(path) == path: return False return contains_symlink_up_to(os.path.dirname(path), base_path) def move(src, dst, mode=None): """Atomically move src to dst and chmod it with mode. Moving is performed in two stages to make the whole operation atomic in case src and dst are on different filesystems and actual physical copying of data is happening. """ src = fspath_py35(src) dst = fspath_py35(dst) dst = os.path.abspath(dst) tmp = "{}.{}".format(dst, uuid()) if os.path.islink(src): shutil.copy(os.readlink(src), tmp) os.unlink(src) else: shutil.move(src, tmp) if mode is not None: os.chmod(tmp, mode) shutil.move(tmp, dst) def _chmod(func, p, excinfo): perm = os.lstat(p).st_mode perm |= stat.S_IWRITE try: os.chmod(p, perm) except OSError as exc: # broken symlink or file is not owned by us if exc.errno not in [errno.ENOENT, errno.EPERM]: raise func(p) def remove(path): path = fspath_py35(path) logger.debug("Removing '{}'".format(relpath(path))) try: if os.path.isdir(path): shutil.rmtree(path, onerror=_chmod) else: _chmod(os.unlink, path, None) except OSError as exc: if exc.errno != errno.ENOENT: raise def path_isin(child, parent): """Check if given `child` path is inside `parent`.""" def normalize_path(path): return os.path.normpath(fspath_py35(path)) parent = os.path.join(normalize_path(parent), "") child = normalize_path(child) return child != parent and child.startswith(parent)
Python
0.000001
@@ -672,41 +672,8 @@ ee): -
 assert is_working_tree(tree)

 @@ -711,16 +711,54 @@ path)):
 + assert is_working_tree(tree)

29d151366d186ed75da947f2861741ed87af902b
Add missing import to settings
website/addons/badges/settings/__init__.py
website/addons/badges/settings/__init__.py
from .defaults import *  # noqa

logger = logging.getLogger(__name__)

try:
    from .local import *  # noqa
except ImportError as error:
    logger.warn('No local.py settings file found')
Python
0.000001
@@ -1,12 +1,52 @@ +# -*- coding: utf-8 -*-
import logging

 from .defaul
29aad4894a31f5b7b58cf8434fd3d88b253b2609
Add tests for min/maxValue ranges for Histogram
test/src/unittests/stats/test_histogram.py
test/src/unittests/stats/test_histogram.py
#!/usr/bin/env python # Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra # # This file is part of Essentia # # Essentia is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the Free # Software Foundation (FSF), either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the Affero GNU General Public License # version 3 along with this program. If not, see http://www.gnu.org/licenses/ from essentia_test import * class TestHistogram(TestCase): def testZero(self): histogram, binEdges = Histogram(normalize="none", maxValue=1., minValue=0., numberBins=10)(zeros(1000)) self.assertEqualVector(histogram, [1000., 0., 0., 0., 0., 0., 0., 0., 0., 0.]) self.assertAlmostEqualVector(binEdges, [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], 0.0001) def testOutOfRangeConfiguration(self): self.assertConfigureFails(Histogram(), {'normalize' : 'y'}) self.assertConfigureFails(Histogram(), {'maxValue' : -1}) self.assertConfigureFails(Histogram(), {'minValue' : -1}) self.assertConfigureFails(Histogram(), {'numberBins' : 0}) self.assertConfigureFails(Histogram(), {'numberBins' : -1}) def testInvalidConfigurationCombination(self): self.assertConfigureFails(Histogram(), {'minValue' : 1, 'maxValue' : 0}) self.assertConfigureFails(Histogram(), {'minValue' : 1, 'maxValue' : 1, 'numberBins' : 2}) def testRegression(self): inputArray = readVector(join(filedir(), 'stats/input.txt')) expectedEdges = [ 0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1. ] expectedHistogramNone = [113, 87, 98, 104, 114, 86, 99, 88, 102, 109] expectedHistogramUnitSum = [ 0.113, 0.087, 0.098, 0.104, 0.114, 0.086, 0.099, 0.088, 0.102, 0.109] expectedHistogramUnitMax = [ 0.99122807, 0.76315789, 0.85964912, 0.9122807 , 1., 0.75438596, 0.86842105, 0.77192982, 0.89473684, 0.95614035] (outputHistogramNone, outputEdgesNone) = Histogram(normalize="none", numberBins=10, minValue=0., maxValue=1.)(inputArray) (outputHistogramUnitSum, outputEdgesUnitSum) = Histogram(normalize="unit_sum", numberBins=10, minValue=0., maxValue=1.)(inputArray) (outputHistogramUnitMax, outputEdgesUnitMax) = Histogram(normalize="unit_max", numberBins=10, minValue=0., maxValue=1.)(inputArray) self.assertAlmostEqualVector(outputEdgesNone, expectedEdges, 0.001) self.assertAlmostEqualVector(outputHistogramNone, expectedHistogramNone, 0.001) self.assertAlmostEqualVector(outputEdgesUnitSum, expectedEdges, 0.001) self.assertAlmostEqualVector(outputHistogramUnitSum, expectedHistogramUnitSum, 0.001) self.assertAlmostEqualVector(outputEdgesUnitMax, expectedEdges, 0.001) self.assertAlmostEqualVector(outputHistogramUnitMax, expectedHistogramUnitMax, 0.001) suite = allTests(TestHistogram) if __name__ == '__main__': TextTestRunner(verbosity=2).run(suite)
Python
0
@@ -1168,18 +1168,17 @@ 0.0001) - %0A + %0A def t @@ -2118,18 +2118,16 @@ 0.109%5D - %0A exp @@ -2271,17 +2271,16 @@ 5614035%5D - %0A%0A (o @@ -3060,32 +3060,32 @@ edEdges, 0.001)%0A - self.assertA @@ -3159,16 +3159,871 @@ 0.001)%0A%0A + # Test minValue/maxValue ranges.%0A expectedEdgesNoneMin = expectedEdges%5B5:%5D%0A expectedHistogramNoneMin = expectedHistogramNone%5B5:%5D%0A expectedEdgesNoneMax = expectedEdges%5B:6%5D%0A expectedHistogramNoneMax = expectedHistogramNone%5B:5%5D%0A%0A (outputHistogramNoneMin, outputEdgesNoneMin) = Histogram(normalize=%22none%22, numberBins=5, minValue=0.5, maxValue=1.)(inputArray)%0A (outputHistogramNoneMax, outputEdgesNoneMax) = Histogram(normalize=%22none%22, numberBins=5, minValue=0.0, maxValue=0.5)(inputArray)%0A%0A self.assertAlmostEqualVector(outputEdgesNoneMin, expectedEdgesNoneMin, 0.001)%0A self.assertAlmostEqualVector(outputHistogramNoneMin, expectedHistogramNoneMin, 0.001)%0A%0A self.assertAlmostEqualVector(outputEdgesNoneMax, expectedEdgesNoneMax, 0.001)%0A self.assertAlmostEqualVector(outputHistogramNoneMax, expectedHistogramNoneMax, 0.001)%0A%0A%0A suite =
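For readability, a decoding of the URL-encoded diff above: the commit appends the following block to testRegression, checking that restricting minValue/maxValue to the lower or upper half of the range reproduces the matching slice of the full-range histogram (indentation restored by hand, so treat this as a sketch of the patch body rather than a byte-exact copy):

```python
# Test minValue/maxValue ranges.
expectedEdgesNoneMin = expectedEdges[5:]
expectedHistogramNoneMin = expectedHistogramNone[5:]
expectedEdgesNoneMax = expectedEdges[:6]
expectedHistogramNoneMax = expectedHistogramNone[:5]

# Same input data, but histograms computed over half the original range.
(outputHistogramNoneMin, outputEdgesNoneMin) = Histogram(normalize="none", numberBins=5, minValue=0.5, maxValue=1.)(inputArray)
(outputHistogramNoneMax, outputEdgesNoneMax) = Histogram(normalize="none", numberBins=5, minValue=0.0, maxValue=0.5)(inputArray)

self.assertAlmostEqualVector(outputEdgesNoneMin, expectedEdgesNoneMin, 0.001)
self.assertAlmostEqualVector(outputHistogramNoneMin, expectedHistogramNoneMin, 0.001)

self.assertAlmostEqualVector(outputEdgesNoneMax, expectedEdgesNoneMax, 0.001)
self.assertAlmostEqualVector(outputHistogramNoneMax, expectedHistogramNoneMax, 0.001)
```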
6c64674447bd988eef80a4a927acde2eabe04236
Modify error message
googkit/lib/plugin.py
googkit/lib/plugin.py
import os import googkit.lib.path from googkit.lib.error import GoogkitError INIT_FILE = '__init__.py' COMMAND_FILE = 'command.py' def load(tree): base_dir = googkit.lib.path.plugin() for filename in os.listdir(base_dir): plugin_dir = os.path.join(base_dir, filename) if not os.path.isdir(plugin_dir): continue init_path = os.path.join(plugin_dir, INIT_FILE) if not os.path.exists(init_path): raise GoogkitError('{init_path} is not found.'.format(init_path=init_path)) command_path = os.path.join(plugin_dir, COMMAND_FILE) if not os.path.exists(command_path): continue module_name = 'plugins.{filename}.command'.format(filename=filename) module = __import__(module_name, fromlist=['command']) if not hasattr(module, 'register'): msg = 'Invalid plugin {module_name} do not have register method.'.format( module_name=module_name) raise GoogkitError(msg) module.register(tree)
Python
0.000001
@@ -863,157 +863,80 @@ -msg = 'Invalid plugin %7Bmodule_name%7D do not have register method.'.format(%0A module_name=module_name)%0A raise GoogkitError(msg +raise GoogkitError('No register method found for plugin: ' + module_name )%0A%0A
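Decoded, the patch replaces the two-step message construction with a single raise carrying a clearer message:

```python
# Before:
msg = 'Invalid plugin {module_name} do not have register method.'.format(
    module_name=module_name)
raise GoogkitError(msg)

# After:
raise GoogkitError('No register method found for plugin: ' + module_name)
```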
5da59174363eacc49a08587a93701242c833cfd9
change 'types' to 'type' in line 85 to remove an error
hs_modelinstance/forms.py
hs_modelinstance/forms.py
__author__ = 'Mohamed' from django.forms import ModelForm from django import forms from crispy_forms.layout import * from crispy_forms.bootstrap import * from models import * from hs_core.forms import BaseFormHelper from hs_core.hydroshare import users class ModelOutputFormHelper(BaseFormHelper): def __init__(self, allow_edit=True, res_short_id=None, element_id=None, element_name=None, *args, **kwargs): # the order in which the model fields are listed for the FieldSet is the order these fields will be displayed # for ModelOutput we have only one field includes_output field_width = 'form-control input-sm' layout = Layout( Field('includes_output', css_class=field_width), ) kwargs['element_name_label'] = 'Includes output files?' super(ModelOutputFormHelper, self).__init__(allow_edit, res_short_id, element_id, element_name, layout, *args, **kwargs) class ModelOutputForm(ModelForm): includes_output = forms.TypedChoiceField(choices=((True, 'Yes'), (False, 'No')), widget=forms.RadioSelect) def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs): super(ModelOutputForm, self).__init__(*args, **kwargs) self.helper = ModelOutputFormHelper(allow_edit, res_short_id, element_id, element_name='ModelOutput') self.fields['includes_output'].widget.attrs['style'] = "width:auto;margin-top:-5px" # if len(self.initial) == 0: # self.initial['includes_output'] = False class Meta: model = ModelOutput fields = ['includes_output'] exclude = ['content_object'] class ModelOutputValidationForm(forms.Form): includes_output = forms.TypedChoiceField(choices=((True, 'Yes'), (False, 'No'))) def clean_includes_output(self): data = self.cleaned_data['includes_output'] if data == u'False': return False else: return True # ExecutedBy element forms class ExecutedByFormHelper(BaseFormHelper): def __init__(self, allow_edit=True, res_short_id=None, element_id=None, element_name=None, *args, **kwargs): # the order in which the model fields are listed for the FieldSet is the order these fields will be displayed field_width = 'form-control input-sm' layout = Layout( Field('model_name', css_class=field_width), HTML(""" <div id=progam_details_div style="display:none"> <table id="program_details_table" class="modelprogram"> <tr><td>Description: </td><td></td></tr> <tr><td>Release Date: </td><td></td></tr> <tr><td>Version: </td><td></td></tr> <tr><td>Language: </td><td></td></tr> <tr><td>Operating System: </td><td></td></tr> <tr><td>Url: </td><td></td></tr> </table> </div> """), ) kwargs['element_name_label'] = 'Model Program used for execution' super(ExecutedByFormHelper, self).__init__(allow_edit, res_short_id, element_id, element_name, layout, *args, **kwargs) class ExecutedByForm(ModelForm): def __init__(self, allow_edit=True, res_short_id=None, element_id=None, *args, **kwargs): super(ExecutedByForm, self).__init__(*args, **kwargs) self.helper = ExecutedByFormHelper(allow_edit, res_short_id, element_id, element_name='ExecutedBy') # get all model program resources mp_resource = users.get_resource_list(types=['ModelProgramResource']) # set model programs resources in choice list CHOICES = (('Unknown', 'Unknown'),) + tuple((r.short_id, r.title) for r in mp_resource.values()[0]) # Set the choice lists as the file names in the content model self.fields['model_name'].choices = CHOICES class Meta: model = ExecutedBy exclude = ['content_object', 'model_program_fk'] class ExecutedByValidationForm(forms.Form): model_name = forms.CharField(max_length=200) model_program_fk = forms
Python
0.000008
@@ -3637,17 +3637,16 @@ ist(type -s =%5B'Model
1b84cc660848fdee7ed68c17772542956f47e89d
Add `lower` parameter to grab.tools.russian::slugify method
grab/tools/russian.py
grab/tools/russian.py
# coding: utf-8 from __future__ import absolute_import from ..tools.encoding import smart_unicode from pytils.translit import translify import re MONTH_NAMES = u'января февраля марта апреля мая июня июля августа '\ u'сентября октября ноября декабря'.split() RE_NOT_ENCHAR = re.compile(u'[^-a-zA-Z0-9]', re.U) RE_NOT_ENRUCHAR = re.compile(u'[^-a-zA-Zа-яА-ЯёЁ0-9]', re.U) RE_RUSSIAN_CHAR = re.compile(u'[а-яА-ЯёЁ]', re.U) RE_DASH = re.compile(r'-+') def slugify(value, limit=None, default=''): value = smart_unicode(value) # Replace all non russian/english chars with "-" char # to help pytils not to crash value = RE_NOT_ENRUCHAR.sub('-', value) # Do transliteration value = translify(value) # Replace trash with safe "-" char value = RE_NOT_ENCHAR.sub('-', value).strip('-').lower() # Replace sequences of dashes value = RE_DASH.sub('-', value) if limit is not None: value = value[:limit] if value != "": return value else: return default def get_month_number(name): return MONTH_NAMES.index(name) + 1
Python
0.000001
@@ -500,16 +500,28 @@ fault='' +, lower=True ):%0A v @@ -832,16 +832,52 @@ rip('-') +%0A if lower:%0A value = value .lower()
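Decoded, the diff adds a lower keyword (defaulting to True, so existing callers keep their behavior) and moves the lowercasing behind it; a sketch of the resulting function, with the unchanged lines taken from the old_contents above:

```python
def slugify(value, limit=None, default='', lower=True):
    value = smart_unicode(value)
    # Replace all non russian/english chars with "-" char
    # to help pytils not to crash
    value = RE_NOT_ENRUCHAR.sub('-', value)
    # Do transliteration
    value = translify(value)
    # Replace trash with safe "-" char; lowercase only when asked to
    value = RE_NOT_ENCHAR.sub('-', value).strip('-')
    if lower:
        value = value.lower()
    # Replace sequences of dashes
    value = RE_DASH.sub('-', value)
    if limit is not None:
        value = value[:limit]
    if value != "":
        return value
    else:
        return default
```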
88f0faa73beeafc30248210c4c6b99b7a9ccbdba
Add AUTOMATIC_REVERSE_PTR option to cfg
config_template.py
config_template.py
import os basedir = os.path.abspath(os.path.dirname(__file__)) # BASIC APP CONFIG WTF_CSRF_ENABLED = True SECRET_KEY = 'We are the world' BIND_ADDRESS = '127.0.0.1' PORT = 9393 LOGIN_TITLE = "PDNS" # TIMEOUT - for large zones TIMEOUT = 10 # LOG CONFIG LOG_LEVEL = 'DEBUG' LOG_FILE = 'logfile.log' # For Docker, leave empty string #LOG_FILE = '' # Upload UPLOAD_DIR = os.path.join(basedir, 'upload') # DATABASE CONFIG #You'll need MySQL-python SQLA_DB_USER = 'powerdnsadmin' SQLA_DB_PASSWORD = 'powerdnsadminpassword' SQLA_DB_HOST = 'mysqlhostorip' SQLA_DB_NAME = 'powerdnsadmin' #MySQL SQLALCHEMY_DATABASE_URI = 'mysql://'+SQLA_DB_USER+':'\ +SQLA_DB_PASSWORD+'@'+SQLA_DB_HOST+'/'+SQLA_DB_NAME #SQLite #SQLALCHEMY_DATABASE_URI = 'sqlite:////path/to/your/pdns.db' SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository') SQLALCHEMY_TRACK_MODIFICATIONS = True # LDAP CONFIG LDAP_TYPE = 'ldap' LDAP_URI = 'ldaps://your-ldap-server:636' LDAP_USERNAME = 'cn=dnsuser,ou=users,ou=services,dc=duykhanh,dc=me' LDAP_PASSWORD = 'dnsuser' LDAP_SEARCH_BASE = 'ou=System Admins,ou=People,dc=duykhanh,dc=me' # Additional options only if LDAP_TYPE=ldap LDAP_USERNAMEFIELD = 'uid' LDAP_FILTER = '(objectClass=inetorgperson)' ## AD CONFIG #LDAP_TYPE = 'ad' #LDAP_URI = 'ldaps://your-ad-server:636' #LDAP_USERNAME = 'cn=dnsuser,ou=Users,dc=domain,dc=local' #LDAP_PASSWORD = 'dnsuser' #LDAP_SEARCH_BASE = 'dc=domain,dc=local' ## You may prefer 'userPrincipalName' instead #LDAP_USERNAMEFIELD = 'sAMAccountName' ## AD Group that you would like to have accesss to web app #LDAP_FILTER = 'memberof=cn=DNS_users,ou=Groups,dc=domain,dc=local' # Github Oauth GITHUB_OAUTH_ENABLE = False GITHUB_OAUTH_KEY = 'G0j1Q15aRsn36B3aD6nwKLiYbeirrUPU8nDd1wOC' GITHUB_OAUTH_SECRET = '0WYrKWePeBDkxlezzhFbDn1PBnCwEa0vCwVFvy6iLtgePlpT7WfUlAa9sZgm' GITHUB_OAUTH_SCOPE = 'email' GITHUB_OAUTH_URL = 'http://127.0.0.1:5000/api/v3/' GITHUB_OAUTH_TOKEN = 'http://127.0.0.1:5000/oauth/token' GITHUB_OAUTH_AUTHORIZE = 'http://127.0.0.1:5000/oauth/authorize' #Default Auth BASIC_ENABLED = True SIGNUP_ENABLED = True # POWERDNS CONFIG PDNS_STATS_URL = 'http://172.16.214.131:8081/' PDNS_API_KEY = 'you never know' PDNS_VERSION = '3.4.7' # RECORDS ALLOWED TO EDIT RECORDS_ALLOW_EDIT = ['A', 'AAAA', 'CNAME', 'SPF', 'PTR', 'MX', 'TXT'] # EXPERIMENTAL FEATURES PRETTY_IPV6_PTR = False
Python
0
@@ -2341,16 +2341,133 @@ PV6_PTR = False%0A +%0A# Create reverse lookup domain if not exists and PTR record from%0A# A and AAAA records%0AAUTOMATIC_REVERSE_PTR = False%0A
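Decoded, the patch simply appends one documented flag to the end of the config template:

```python
# Create reverse lookup domain if not exists and PTR record from
# A and AAAA records
AUTOMATIC_REVERSE_PTR = False
```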
0cc0d4a5ddf938f176c2384503ef88bb31c91898
Transform new schema to old schema, to keep share_v1 up to date
scrapi/processing/elasticsearch.py
scrapi/processing/elasticsearch.py
from __future__ import absolute_import import logging from elasticsearch import Elasticsearch from elasticsearch.exceptions import NotFoundError from elasticsearch.exceptions import ConnectionError from scrapi import settings from scrapi.processing.base import BaseProcessor logger = logging.getLogger(__name__) logging.getLogger('urllib3').setLevel(logging.WARN) logging.getLogger('requests').setLevel(logging.WARN) logging.getLogger('elasticsearch').setLevel(logging.FATAL) logging.getLogger('elasticsearch.trace').setLevel(logging.FATAL) try: # If we cant connect to elastic search dont define this class es = Elasticsearch(settings.ELASTIC_URI, request_timeout=settings.ELASTIC_TIMEOUT) # body = { # 'mappings': { # harvester: settings.ES_SEARCH_MAPPING # for harvester in registry.keys() # } # } # es.cluster.health(wait_for_status='yellow') es.indices.create(index=settings.ELASTIC_INDEX, body={}, ignore=400) except ConnectionError: # pragma: no cover logger.error('Could not connect to Elasticsearch, expect errors.') if 'elasticsearch' in settings.NORMALIZED_PROCESSING or 'elasticsearch' in settings.RAW_PROCESSING: raise class ElasticsearchProcessor(BaseProcessor): NAME = 'elasticsearch' def process_normalized(self, raw_doc, normalized, index=settings.ELASTIC_INDEX): normalized['releaseDate'] = self.version(raw_doc, normalized) data = { key: value for key, value in normalized.attributes.items() if key in settings.FRONTEND_KEYS } es.index( body=data, refresh=True, index=index, doc_type=raw_doc['source'], id=raw_doc['docID'], ) def version(self, raw, normalized): try: old_doc = es.get_source( index=settings.ELASTIC_INDEX, doc_type=raw['source'], id=raw['docID'] ) except NotFoundError: # pragma: no cover # Normally I don't like exception-driven logic, # but this was the best way to handle missing # types, indices and documents together date = normalized['releaseDate'] else: date = old_doc.get('releaseDate') or normalized['releaseDate'] return date
Python
0
@@ -270,16 +270,68 @@ ocessor%0A +from scrapi.base.transformer import JSONTransformer%0A %0A%0Alogger @@ -1032,16 +1032,68 @@ re=400)%0A + es.indices.create(index='share_v1', ignore=400)%0A except C @@ -1865,16 +1865,72 @@ ) +%0A self.process_normalized_v1(raw_doc, normalized) %0A%0A de @@ -2500,16 +2500,16 @@ Date'%5D%0A%0A - @@ -2520,8 +2520,1085 @@ rn date%0A +%0A def process_normalized_v1(self, raw_doc, normalized):%0A index = 'share_v1'%0A transformer = PreserveOldSchema()%0A data = transformer.transform(normalized.attributes)%0A es.index(%0A body=data,%0A refresh=True,%0A index=index,%0A doc_type=raw_doc%5B'source'%5D,%0A id=raw_doc%5B'docID'%5D%0A )%0A%0A%0Aclass PreserveOldContributors(JSONTransformer):%0A schema = %7B%0A 'given': '/givenName',%0A 'family': '/familyName',%0A 'middle': '/additionalName',%0A 'email': '/email'%0A %7D%0A%0A def process_contributors(self, contributors):%0A return %5Bself.transform(contributor) for contributor in contributors%5D%0A%0A%0Aclass PreserveOldSchema(JSONTransformer):%0A schema = %7B%0A 'title': '/title',%0A 'description': '/description',%0A 'tags': '/otherProperties/tags',%0A 'contributors': ('/contributor', PreserveOldContributors().process_contributors),%0A 'dateUpdated': '/releaseDate',%0A 'source': '/source',%0A 'id': %7B%0A 'url': '/directLink'%0A %7D%0A %7D%0A
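Decoded, the commit double-writes every normalized document into a share_v1 index after mapping it back to the old field names; the added code (indentation restored, relying on scrapi's JSONTransformer exactly as the diff does):

```python
from scrapi.base.transformer import JSONTransformer

# Added to ElasticsearchProcessor, called at the end of process_normalized():
def process_normalized_v1(self, raw_doc, normalized):
    index = 'share_v1'
    transformer = PreserveOldSchema()
    data = transformer.transform(normalized.attributes)
    es.index(
        body=data,
        refresh=True,
        index=index,
        doc_type=raw_doc['source'],
        id=raw_doc['docID']
    )


class PreserveOldContributors(JSONTransformer):
    # Map new contributor field names back to the old ones.
    schema = {
        'given': '/givenName',
        'family': '/familyName',
        'middle': '/additionalName',
        'email': '/email'
    }

    def process_contributors(self, contributors):
        return [self.transform(contributor) for contributor in contributors]


class PreserveOldSchema(JSONTransformer):
    # Map new top-level field names back to the old schema.
    schema = {
        'title': '/title',
        'description': '/description',
        'tags': '/otherProperties/tags',
        'contributors': ('/contributor', PreserveOldContributors().process_contributors),
        'dateUpdated': '/releaseDate',
        'source': '/source',
        'id': {
            'url': '/directLink'
        }
    }
```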
400df6a27da9225cd6ae68d064fafb588b25d503
Change computation for numerical stability
libs/boxes/bbox_transform.py
libs/boxes/bbox_transform.py
# -------------------------------------------------------- # Fast R-CNN # Copyright (c) 2015 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Ross Girshick # -------------------------------------------------------- import numpy as np import warnings def bbox_transform(ex_rois, gt_rois): """ computes the distance from ground-truth boxes to the given boxes, normed by their size :param ex_rois: n * 4 numpy array, given boxes :param gt_rois: n * 4 numpy array, ground-truth boxes :return: deltas: n * 4 numpy array, ground-truth boxes """ ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0 ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0 ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights # assert np.min(ex_widths) > 0.1 and np.min(ex_heights) > 0.1, \ # 'Invalid boxes found: {} {}'. \ # format(ex_rois[np.argmin(ex_widths), :], ex_rois[np.argmin(ex_heights), :]) gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0 gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0 gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights # warnings.catch_warnings() # warnings.filterwarnings('error') targets_dx = 10.0 * (gt_ctr_x - ex_ctr_x) / ex_widths targets_dy = 10.0 * (gt_ctr_y - ex_ctr_y) / ex_heights targets_dw = 5.0 * np.log(gt_widths / ex_widths) targets_dh = 5.0 * np.log(gt_heights / ex_heights) targets = np.vstack( (targets_dx, targets_dy, targets_dw, targets_dh)).transpose() return targets def bbox_transform_inv(boxes, deltas): if boxes.shape[0] == 0: return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype) boxes = boxes.astype(deltas.dtype, copy=False) widths = boxes[:, 2] - boxes[:, 0] + 1.0 heights = boxes[:, 3] - boxes[:, 1] + 1.0 ctr_x = boxes[:, 0] + 0.5 * widths ctr_y = boxes[:, 1] + 0.5 * heights dx = deltas[:, 0::4] * 0.1 dy = deltas[:, 1::4] * 0.1 dw = deltas[:, 2::4] * 0.2 dh = deltas[:, 3::4] * 0.2 pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis] pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis] pred_w = np.exp(dw) * widths[:, np.newaxis] pred_h = np.exp(dh) * heights[:, np.newaxis] pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype) # x1 pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w # y1 pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h # x2 pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w - 1 # y2 pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h - 1 return pred_boxes def clip_boxes(boxes, im_shape): """ Clip boxes to image boundaries. """ # x1 >= 0 boxes[:, 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - 1), 0) # y1 >= 0 boxes[:, 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - 1), 0) # x2 < im_shape[1] boxes[:, 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - 1), 0) # y2 < im_shape[0] boxes[:, 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - 1), 0) return boxes
Python
0.000001
@@ -2223,24 +2223,26 @@ newaxis%5D%0A + # pred_w = np @@ -2273,24 +2273,26 @@ newaxis%5D%0A + # pred_h = np @@ -2326,16 +2326,131 @@ waxis%5D%0A%0A + pred_w = np.exp(dw + np.log(widths%5B:, np.newaxis%5D))%0A pred_h = np.exp(dh + np.log(heights%5B:, np.newaxis%5D))%0A%0A%0A pred
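Decoded, the change folds the width/height factor into the exponent — exp(dw + log(w)) instead of exp(dw) * w, which is algebraically identical but, per the commit message, numerically steadier:

```python
# Old lines are kept as comments in the patch:
# pred_w = np.exp(dw) * widths[:, np.newaxis]
# pred_h = np.exp(dh) * heights[:, np.newaxis]

pred_w = np.exp(dw + np.log(widths[:, np.newaxis]))
pred_h = np.exp(dh + np.log(heights[:, np.newaxis]))
```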
909087e5badca063b6124ebda19c4315a1e6a3a8
fix dynamoDB doc test
tests/functional/docs/test_dynamodb.py
tests/functional/docs/test_dynamodb.py
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. from tests.functional.docs import BaseDocsFunctionalTests from boto3.session import Session from boto3.docs.service import ServiceDocumenter class TestDynamoDBCustomizations(BaseDocsFunctionalTests): def setUp(self): self.documenter = ServiceDocumenter( 'dynamodb', session=Session(region_name='us-east-1')) self.generated_contents = self.documenter.document_service() self.generated_contents = self.generated_contents.decode('utf-8') def test_batch_writer_is_documented(self): self.assert_contains_lines_in_order([ '.. py:class:: DynamoDB.Table(name)', ' * :py:meth:`batch_writer()`', ' .. py:method:: batch_writer()'], self.generated_contents ) def test_document_interface_is_documented(self): contents = self.get_class_document_block( 'DynamoDB.Table', self.generated_contents) # Take an arbitrary method that uses the customization. method_contents = self.get_method_document_block('put_item', contents) # Make sure the request syntax is as expected. request_syntax_contents = self.get_request_syntax_document_block( method_contents) self.assert_contains_lines_in_order([ 'response = table.put_item(', 'Item={', ('\'string\': \'string\'|123|Binary(b\'bytes\')' '|True|None|set([\'string\'])|set([123])|' 'set([Binary(b\'bytes\')])|[]|{}'), '},', 'Expected={', '\'string\': {', ('\'Value\': \'string\'|123' '|Binary(b\'bytes\')|True|None|set([\'string\'])' '|set([123])|set([Binary(b\'bytes\')])|[]|{},'), '\'AttributeValueList\': [', ('\'string\'|123|Binary(b\'bytes\')' '|True|None|set([\'string\'])|set([123])|' 'set([Binary(b\'bytes\')])|[]|{},')], request_syntax_contents) # Make sure the response syntax is as expected. response_syntax_contents = self.get_response_syntax_document_block( method_contents) self.assert_contains_lines_in_order([ '{', '\'Attributes\': {', ('\'string\': \'string\'|123|' 'Binary(b\'bytes\')|True|None|set([\'string\'])|' 'set([123])|set([Binary(b\'bytes\')])|[]|{}'), '},'], response_syntax_contents) # Make sure the request parameter is documented correctly. request_param_contents = self.get_request_parameter_document_block( 'Item', method_contents) self.assert_contains_lines_in_order([ ':type Item: dict', ':param Item: **[REQUIRED]**', '- *(string) --*', ('- *(valid DynamoDB type) --* - The value of the ' 'attribute. The valid value types are listed in the ' ':ref:`DynamoDB Reference Guide<ref_valid_dynamodb_types>`.')], request_param_contents ) # Make sure the response parameter is documented correctly. response_param_contents = self.get_response_parameter_document_block( 'Attributes', method_contents) self.assert_contains_lines_in_order([ '- **Attributes** *(dict) --*', '- *(string) --*', ('- *(valid DynamoDB type) --* - The value of ' 'the attribute. The valid value types are listed in the ' ':ref:`DynamoDB Reference Guide<ref_valid_dynamodb_types>`.')], response_param_contents) def test_conditions_is_documented(self): contents = self.get_class_document_block( 'DynamoDB.Table', self.generated_contents) # Take an arbitrary method that uses the customization. method_contents = self.get_method_document_block('query', contents) # Make sure the request syntax is as expected. request_syntax_contents = self.get_request_syntax_document_block( method_contents) self.assert_contains_lines_in_order([ 'response = table.query(', ('FilterExpression=Attr(\'myattribute\').' 'eq(\'myvalue\'),'), ('KeyConditionExpression=Key(\'mykey\')' '.eq(\'myvalue\'),')], request_syntax_contents) # Make sure the request parameter is documented correctly. self.assert_contains_lines_in_order([ (':type FilterExpression: condition from ' ':py:class:`boto3.dynamodb.conditions.Attr` method'), (':param FilterExpression: The condition(s) an ' 'attribute(s) must meet. Valid conditions are listed in ' 'the :ref:`DynamoDB Reference Guide<ref_dynamodb_conditions>`.'), (':type KeyConditionExpression: condition from ' ':py:class:`boto3.dynamodb.conditions.Key` method'), (':param KeyConditionExpression: The condition(s) a ' 'key(s) must meet. Valid conditions are listed in the ' ':ref:`DynamoDB Reference Guide<ref_dynamodb_conditions>`.')], method_contents)
Python
0
@@ -1266,16 +1266,32 @@ _writer( +auto_dedup=False )'%5D,%0A
959897478bbda18f02aa6e38f2ebdd837581f1f0
Fix test for changed SctVerificationResult
tests/test_sct_verify_signature.py
tests/test_sct_verify_signature.py
from os.path import join, dirname from utlz import flo from ctutlz.sct.verification import verify_signature def test_verify_signature(): basedir = join(dirname(__file__), 'data', 'test_sct_verify_signature') signature_input = \ open(flo('{basedir}/signature_input_valid.bin'), 'rb').read() signature = open(flo('{basedir}/signature.der'), 'rb').read() pubkey = open(flo('{basedir}/pubkey.pem'), 'rb').read() got_verified, got_output, got_cmd_res = \ verify_signature(signature_input, signature, pubkey) assert got_verified is True assert got_output == 'Verified OK\n' assert got_cmd_res.exitcode == 0 signature_input = b'some invalid signature input' got_verified, got_output, got_cmd_res = \ verify_signature(signature_input, signature, pubkey) assert got_verified is False assert got_output == 'Verification Failure\n' assert got_cmd_res.exitcode == 1
Python
0.000001
@@ -439,57 +439,14 @@ -got_verified, got_output, got_cmd_res = %5C%0A +assert ver @@ -498,119 +498,16 @@ key) -%0A%0A assert got_verified is True%0A assert got_output == 'Verified OK%5Cn'%0A assert got_cmd_res.exitcode == 0 + is True %0A%0A @@ -567,57 +567,14 @@ -got_verified, got_output, got_cmd_res = %5C%0A +assert ver @@ -626,126 +626,14 @@ key) -%0A%0A assert got_verified is False%0A assert got_output == 'Verification Failure%5Cn'%0A assert got_cmd_res.exitcode == 1 + is False %0A
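Decoded, the test stops unpacking a (verified, output, cmd_res) triple and asserts directly on the return value of the changed verify_signature API:

```python
# Valid signature input:
assert verify_signature(signature_input, signature, pubkey) is True

signature_input = b'some invalid signature input'

# Invalid signature input:
assert verify_signature(signature_input, signature, pubkey) is False
```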
f72277113ce8155a1725bb69929c83cb95183bd8
order events by room
pyconca2017/pycon_schedule/models.py
pyconca2017/pycon_schedule/models.py
from datetime import datetime from django.db import models """ Presentation """ class Speaker(models.Model): """ Who """ email = models.EmailField(unique=True) full_name = models.CharField(max_length=255) bio = models.TextField(default='') twitter_username = models.CharField(max_length=255, null=True, blank=True) company_name = models.CharField(max_length=255, null=True, blank=True) url = models.URLField(max_length=2048, null=True, blank=True) shirt_size = models.CharField(max_length=255) location = models.CharField(max_length=255, null=True, blank=True) is_keynote = models.BooleanField(default=False) def __str__(self): return self.full_name @property def twitter_url(self): if not self.twitter_username: return None return 'https://twitter.com/{}'.format(self.twitter_username) class Presentation(models.Model): """ What """ papercall_id = models.IntegerField(null=True, blank=True, unique=True) title = models.CharField(max_length=255) description = models.TextField(default='') notes = models.TextField(default='') abstract = models.TextField(default='') audience_level = models.CharField(max_length=255) presentation_format = models.CharField(max_length=255) speaker = models.ForeignKey(Speaker) def __str__(self): return self.title class Meta: ordering = ('title',) """ Schedule """ class Schedule(models.Model): """ When (what day) """ day = models.DateField(unique=True) def __str__(self): return self.day.strftime('%b %d') class Location(models.Model): """ Where """ name = models.CharField(max_length=255) order = models.PositiveIntegerField(default=0) capacity = models.PositiveIntegerField(default=0) notes = models.TextField(default='', blank=True) def __str__(self): return self.name class ScheduleSlot(models.Model): """ When (what time) """ schedule = models.ForeignKey(Schedule, related_name='slots') start_time = models.TimeField() end_time = models.TimeField() def __str__(self): return '{} - {} ({})'.format(self.start_time, self.end_time, self.schedule) class Meta: unique_together = (('schedule', 'start_time', 'end_time'),) ordering = ('schedule', 'start_time', 'end_time') @property def duration(self): return datetime.combine(self.schedule.day, self.end_time) - datetime.combine(self.schedule.day, self.start_time) @property def start_events(self): return SlotEvent.objects.filter(slot__schedule=self.schedule, slot__start_time=self.start_time) class SlotEvent(models.Model): """ Glue what with when and where """ slot = models.ForeignKey(ScheduleSlot, related_name='events') location = models.ForeignKey(Location, null=True, blank=True) content = models.TextField(blank=True) presentation = models.OneToOneField(Presentation, null=True, blank=True) def __str__(self): return self.title class Meta: unique_together = ( ('slot', 'location'), ) @property def title(self): if self.presentation: return self.presentation.title return self.content @property def is_presentation(self): return bool(self.presentation) @property def duration(self): return self.slot.duration @property def duration_str(self): return ':'.join(str(self.duration).split(':')[:2]) @property def presenter(self): if self.presentation: return self.presentation.speaker
Python
0.998462
@@ -3144,16 +3144,55 @@ ) +%0A ordering = ('location__name',) %0A%0A @p
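Decoded, the diff adds a single line so SlotEvent.Meta now orders events by their room (location) name:

```python
class Meta:
    unique_together = (
        ('slot', 'location'),
    )
    ordering = ('location__name',)
```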
b174dbb9cdc352bb83cbedf4ffce54bf29c22729
Remove debug print statement from history_parser Chromium import
lib/history_parser.py
lib/history_parser.py
#!/usr/bin/env python # Copyright 2009 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Parse the history file for just about anything.""" import glob import operator import os import re import sys class HistoryParser(object): """Parse the history file from files and web browsers and such.""" # At 32MB, switch from replay format to sorted_unique MAX_REPLAY_SIZE = 33554432 INTERNAL_RE = re.compile('\.prod|\.corp|\.bor|internal|dmz') TYPES = {} def __init__(self): self.TYPES = { 'chrome': self.GoogleChromeHistoryPath, 'chromium': self.ChromiumHistoryPath, 'epiphany': self.EpiphanyHistoryPath, 'opera': self.OperaHistoryPath, 'safari': self.SafariHistoryPath, 'firefox': self.FirefoxHistoryPath, 'internet_explorer': self.InternetExplorerHistoryPath, 'squid': self.SquidLogPath } def Parse(self, path_or_type): if path_or_type.lower() in self.TYPES: return self.ParseByType(path_or_type.lower()) else: return self.ParseByFilename(path_or_type) def ReadHistoryFile(self, filename): # Only matches http://host.domain type entries (needs at least one sub) parse_re = re.compile('\w+://([\-\w]+\.[\-\w\.]+)') print '- Reading history from %s' % filename # binary mode is necessary for running under Windows return parse_re.findall(open(filename, 'rb').read()) def _HostnameMayBeInternal(self, hostname): if self.INTERNAL_RE.search(hostname): return True def GenerateTestData(self, hosts, sorted_unique=False): """Given a set of hosts, generate test data. Args: hosts: A list of hostnames sorted_unique: Return a sorted unique list of tests. Useful for large data sets. Returns: A list of strings representing DNS requests to test with. """ history = [] hits = {} last_host = None for host in hosts: if not host.endswith('.'): host = host + '.' if self._HostnameMayBeInternal(host): continue if host != last_host: if sorted_unique: hits[host] = hits.get(host, 0) + 1 else: history.append('A %s' % host) last_host = host if sorted_unique: for (hit, count) in sorted(hits.items(), key=operator.itemgetter(1), reverse=True): history.append('A %s # %s hits' % (hit, count)) return history def ParseByFilename(self, filename): """Parse a history file, returning a history. Args: filename: duh Returns: a list of hosts If the filename passed is greater than MAX_REPLAY_SIZE, we return a unique list of hosts, sorted by descending popularity. If there are multiple subsequent records for a host, only the first one is parsed. """ if os.path.getsize(filename) > self.MAX_REPLAY_SIZE: sorted_unique = True else: sorted_unique = False return self.GenerateTestData(self.ReadHistoryFile(filename), sorted_unique=sorted_unique) def ParseByType(self, source): (history_file_path, tried) = self.FindGlobPath(self.TYPES[source]()) if not history_file_path: print "* Could not find data for '%s'. Tried:" for path in tried: print path return None return self.ParseByFilename(history_file_path) def FindGlobPath(self, paths): """Given a list of glob paths, return the first one with a real file. 
Returns: A tuple with (file path (str), list of paths checked) """ tried = [] for path_elements in paths: path = os.path.join(*path_elements) tried.append(path) for filename in glob.glob(path): if os.path.getsize(filename) > 1: return (filename, tried) return (None, tried) def GoogleChromeHistoryPath(self): paths = ( (os.getenv('HOME', ''), 'Library', 'Application Support', 'Google', 'Chrome', 'Default', 'History'), (os.getenv('HOME', ''), '.config', 'google-chrome', 'Default', 'History'), (os.getenv('APPDATA', ''), 'Google', 'Chrome', 'User Data', 'Default', 'History'), (os.getenv('USERPROFILE', ''), 'Local Settings', 'Application Data', 'Google', 'Chrome', 'User Data', 'Default', 'History'), ) return paths def ChromiumHistoryPath(self): """It's like Chrome, but with the branding stripped out.""" # TODO(tstromberg): Find a terser way to do this. paths = [] for path in self.GoogleChromeHistoryPath(): new_path = list(path) if 'Google' in new_path: new_path.remove('Google') for (index, part) in enumerate(new_path): if part == 'Chrome': new_path[index] = 'Chromium' elif part == 'chrome': new_path[index] = 'chromium' print new_path paths.append(new_path) return paths def OperaHistoryPath(self): paths = ( (os.getenv('HOME', ''), 'Library', 'Preferences', 'Opera Preferences', 'global_history.dat'), ) return paths def SafariHistoryPath(self): paths = ( (os.getenv('HOME', ''), 'Library', 'Safari', 'History.plist'), (os.getenv('APPDATA', ''), 'Apple Computer', 'Safari', 'History.plist') ) return paths def FirefoxHistoryPath(self): paths = ( (os.getenv('HOME', ''), 'Library', 'Application Support', 'Firefox', 'Profiles', '*', 'places.sqlite'), (os.getenv('HOME', ''), '.mozilla', 'firefox', '*', 'places.sqlite'), (os.getenv('APPDATA', ''), 'Mozilla', 'Firefox', 'Profiles', '*', 'places.sqlite') ) return paths def InternetExplorerHistoryPath(self): paths = ( # XP (os.getenv('USERPROFILE', ''), 'Local Settings', 'History', 'History.IE5', 'index.dat'), # ? (os.getenv('APPDATA', ''), 'Microsoft', 'Windows', 'History', 'History.IE5', 'index.dat'), ) return paths def EpiphanyHistoryPath(self): paths = ( (os.getenv('HOME', ''), '.gnome2', 'epiphany', 'ephy-history.xml'), ) return paths def SquidLogPath(self): paths = ( ('/usr/local/squid/logs/access.log',), ('/var/log/squid/access_log',) ) return paths if __name__ == '__main__': parser = HistoryParser() types_str = ', '.join(parser.TYPES.keys()) if len(sys.argv) < 2: print 'You must provide a filename or history file type (%s)' % types_str sys.exit(1) records = parser.Parse(sys.argv[1]) for record in records: print record
Python
0.000003
@@ -5381,29 +5381,8 @@ um'%0A - print new_path%0A
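Decoded, this is a pure deletion: the stray `print new_path` debug line inside ChromiumHistoryPath() is removed, leaving the surrounding loop untouched.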
9cb554c13ae3cec85fd2a3bf0afd9ae2b6cca96a
Refactor target.py
construi/target.py
construi/target.py
import construi.console as console from compose.project import Project from compose.cli.docker_client import docker_client import dockerpty import sys class Target(object): def __init__(self, config): self.config = config self.project = Project.from_dicts( 'construi', config.services, docker_client()) def run(self): try: self.setup() service = self.project.get_service(self.config.construi['name']) for cmd in self.config.construi['run']: console.progress("> %s" % cmd) container = service.create_container( one_off=True, command=cmd, tty=False, stdin_open=True, detach=False ) dockerpty.start( self.project.client, container.id, interactive=False) exit_code = container.wait() self.project.client.remove_container(container.id, force=True) if exit_code != 0: console.error("\nBuild Failed.") sys.exit(1) console.progress('Done.') except KeyboardInterrupt: console.warn("\nBuild Interrupted.") sys.exit(1) finally: self.cleanup() def setup(self): console.progress('Building Images...') self.project.build() console.progress('Pulling Images...') self.project.pull() def cleanup(self): console.progress('Cleaning up...') self.project.kill() self.project.remove_stopped(None, v=True)
Python
0.000002
@@ -342,161 +342,127 @@ -def run(self):%0A try:%0A self.setup()%0A%0A service = self.project.get_service(self.config.construi%5B'name'%5D)%0A%0A for cmd i +@property%0A def client(self):%0A return self.project.client%0A%0A @property%0A def commands(self):%0A retur n se @@ -490,649 +490,318 @@ un'%5D -: +%0A %0A - console.progress(%22%3E %25s%22 %25 cmd)%0A%0A container = service.create_container(%0A one_off=True,%0A command=cmd,%0A tty=False,%0A stdin_open=True,%0A detach=False%0A )%0A%0A dockerpty.start(%0A self.project.client, container.id, interactive=Fals +@property%0A def name(self):%0A return self.config.construi%5B'name'%5D%0A%0A @property%0A def service(self):%0A return self.project.get_service(self.nam e)%0A +%0A - exit_code = container.wait()%0A self.project.client.remove_container(container.id, force=True)%0A%0A if exit_code != 0:%0A console.error(%22%5CnBuild Failed.%22)%0A sys.exit(1 +def run(self):%0A try:%0A self.setup()%0A%0A for command in self.commands:%0A self.run_command(command )%0A%0A @@ -833,24 +833,25 @@ ss('Done.')%0A +%0A exce @@ -987,24 +987,587 @@ .cleanup()%0A%0A + def run_command(self, command):%0A console.progress(%22%3E %25s%22 %25 command)%0A%0A container = self.service.create_container(%0A one_off=True,%0A command=command,%0A tty=False,%0A stdin_open=True,%0A detach=False%0A )%0A%0A try:%0A dockerpty.start(self.client, container.id, interactive=False)%0A%0A if container.wait() != 0:%0A console.error(%22%5CnBuild Failed.%22)%0A sys.exit(1)%0A%0A finally:%0A self.client.remove_container(container.id, force=True)%0A%0A def setu
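Decoded, the refactor lifts the collaborators into properties and moves the per-command body of run() into a new run_command(), with container removal now guarded by try/finally (indentation restored by hand):

```python
@property
def client(self):
    return self.project.client

@property
def commands(self):
    return self.config.construi['run']

@property
def name(self):
    return self.config.construi['name']

@property
def service(self):
    return self.project.get_service(self.name)

def run(self):
    try:
        self.setup()

        for command in self.commands:
            self.run_command(command)

        console.progress('Done.')

    except KeyboardInterrupt:
        console.warn("\nBuild Interrupted.")
        sys.exit(1)
    finally:
        self.cleanup()

def run_command(self, command):
    console.progress("> %s" % command)

    container = self.service.create_container(
        one_off=True,
        command=command,
        tty=False,
        stdin_open=True,
        detach=False
    )

    try:
        dockerpty.start(self.client, container.id, interactive=False)

        if container.wait() != 0:
            console.error("\nBuild Failed.")
            sys.exit(1)

    finally:
        self.client.remove_container(container.id, force=True)
```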
b6a5dcef6a612098dc6abddec831980792c23ddf
Allow API key to be set in config for rottentomatoes_list
flexget/plugins/input/rottentomatoes_list.py
flexget/plugins/input/rottentomatoes_list.py
from __future__ import unicode_literals, division, absolute_import import logging from flexget import plugin from flexget.entry import Entry from flexget.event import event from flexget.utils.cached_input import cached try: from flexget.plugins.api_rottentomatoes import lists except ImportError: raise plugin.DependencyError(issued_by='rottentomatoes_lookup', missing='api_rottentomatoes', message='rottentomatoes_lookup requires the `api_rottentomatoes` plugin') log = logging.getLogger('rottentomatoes_list') class RottenTomatoesList(object): """ Emits an entry for each movie in a Rotten Tomatoes list. Configuration: dvds: - top_rentals - upcoming movies: - box_office Possible lists are * dvds: top_rentals, current_releases, new_releases, upcoming * movies: box_office, in_theaters, opening, upcoming """ def __init__(self): # We could pull these from the API through lists.json but that's extra web/API key usage self.dvd_lists = ['top_rentals', 'current_releases', 'new_releases', 'upcoming'] self.movie_lists = ['box_office', 'in_theaters', 'opening', 'upcoming'] def validator(self): from flexget import validator root = validator.factory('dict') root.accept('list', key='dvds').accept('choice').accept_choices(self.dvd_lists) root.accept('list', key='movies').accept('choice').accept_choices(self.movie_lists) return root @cached('rottentomatoes_list', persist='2 hours') def on_task_input(self, task, config): entries = [] for l_type, l_names in config.items(): for l_name in l_names: results = lists(list_type=l_type, list_name=l_name) if results: for movie in results['movies']: if [entry for entry in entries if movie['title'] == entry.get('title')]: continue imdb_id = movie.get('alternate_ids', {}).get('imdb') if imdb_id: imdb_id = 'tt' + str(imdb_id) entries.append(Entry(title=movie['title'], rt_id=movie['id'], imdb_id=imdb_id, rt_name=movie['title'], url=movie['links']['alternate'])) else: log.critical('Failed to fetch Rotten tomatoes %s list: %s. List doesn\'t exist?' % (l_type, l_name)) return entries @event('plugin.register') def register_plugin(): plugin.register(RottenTomatoesList, 'rottentomatoes_list', api_ver=2)
Python
0
@@ -1534,32 +1534,75 @@ lf.movie_lists)%0A + root.accept('text', key='api_key')%0A return r @@ -1724,16 +1724,62 @@ es = %5B%5D%0A + api_key = config.get('api_key', None)%0A @@ -1813,24 +1813,105 @@ ig.items():%0A + if type(l_names) is not list: %0A continue%0A %0A @@ -1999,16 +1999,33 @@ e=l_name +, api_key=api_key )%0A
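Decoded, the patch accepts an optional api_key in the validator, skips config keys whose value is not a list (so api_key itself is not mistaken for a list of list names), and passes the key through to lists():

```python
# In validator():
root.accept('text', key='api_key')

# In on_task_input():
entries = []
api_key = config.get('api_key', None)
for l_type, l_names in config.items():
    if type(l_names) is not list:
        continue

    for l_name in l_names:
        results = lists(list_type=l_type, list_name=l_name, api_key=api_key)
```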
e73d69d258ab595ee8353efd85a6f37829b47b2b
update docstring
pysat/instruments/methods/general.py
pysat/instruments/methods/general.py
# -*- coding: utf-8 -*- """Provides generalized routines for integrating instruments into pysat. """ from __future__ import absolute_import, division, print_function import pandas as pds import pysat import logging logger = logging.getLogger(__name__) def list_files(tag=None, sat_id=None, data_path=None, format_str=None, supported_tags=None, fake_daily_files_from_monthly=False, two_digit_year_break=None): """Return a Pandas Series of every file for chosen satellite data. This routine is intended to be used by pysat instrument modules supporting a particular NASA CDAWeb dataset. Parameters ----------- tag : (string or NoneType) Denotes type of file to load. Accepted types are <tag strings>. (default=None) sat_id : (string or NoneType) Specifies the satellite ID for a constellation. Not used. (default=None) data_path : (string or NoneType) Path to data directory. If None is specified, the value previously set in Instrument.files.data_path is used. (default=None) format_str : (string or NoneType) User specified file format. If None is specified, the default formats associated with the supplied tags are used. (default=None) supported_tags : (dict or NoneType) keys are sat_id, each containing a dict keyed by tag where the values file format template strings. (default=None) fake_daily_files_from_monthly : bool Some CDAWeb instrument data files are stored by month, interfering with pysat's functionality of loading by day. This flag, when true, appends daily dates to monthly files internally. These dates are used by load routine in this module to provide data by day. two_digit_year_break : int If filenames only store two digits for the year, then '1900' will be added for years >= two_digit_year_break and '2000' will be added for years < two_digit_year_break. Returns -------- pysat.Files.from_os : (pysat._files.Files) A class containing the verified available files Examples -------- :: fname = 'cnofs_vefi_bfield_1sec_{year:04d}{month:02d}{day:02d}_v05.cdf' supported_tags = {'dc_b': fname} list_files = functools.partial(nasa_cdaweb.list_files, supported_tags=supported_tags) fname = 'cnofs_cindi_ivm_500ms_{year:4d}{month:02d}{day:02d}_v01.cdf' supported_tags = {'': fname} list_files = functools.partial(mm_gen.list_files, supported_tags=supported_tags) """ if data_path is not None: if format_str is None: try: format_str = supported_tags[sat_id][tag] except KeyError as estr: raise ValueError('Unknown sat_id or tag: ' + estr) out = pysat.Files.from_os(data_path=data_path, format_str=format_str) if (not out.empty) and fake_daily_files_from_monthly: out.loc[out.index[-1] + pds.DateOffset(months=1) - pds.DateOffset(days=1)] = out.iloc[-1] out = out.asfreq('D', 'pad') out = out + '_' + out.index.strftime('%Y-%m-%d') return out return out else: estr = ''.join(('A directory must be passed to the loading routine ', 'for <Instrument Code>')) raise ValueError(estr)
Python
0
@@ -531,106 +531,65 @@ ine -is intended to be used by pysat instrument modules supporting%0A a particular NASA CDAWeb dataset +provides a standard interfacefor pysat instrument modules .%0A%0A
6b6d3779cd23c188c808387b9f4095ea75da3284
Add a way to get the resources depended on by an output
heat/engine/output.py
heat/engine/output.py
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import six from heat.engine import function class OutputDefinition(object): """A definition of a stack output, independent of any template format.""" def __init__(self, name, value, description=None): self.name = name self._value = value self._resolved_value = None self._description = description def validate(self, path=''): """Validate the output value without resolving it.""" function.validate(self._value, path) def dep_attrs(self, resource_name): """Iterate over attributes of a given resource that this references. Return an iterator over dependent attributes for specified resource_name in the output's value field. """ return function.dep_attrs(self._value, resource_name) def get_value(self): """Resolve the value of the output.""" if self._resolved_value is None: self._resolved_value = function.resolve(self._value) return self._resolved_value def description(self): """Return a description of the output.""" if self._description is None: return 'No description given' return six.text_type(self._description) def render_hot(self): def items(): if self._description is not None: yield 'description', self._description yield 'value', copy.deepcopy(self._value) return dict(items())
Python
0.99996
@@ -592,16 +592,50 @@ rt six%0A%0A +from heat.common import exception%0A from hea @@ -659,16 +659,16 @@ unction%0A - %0A%0Aclass @@ -955,16 +955,42 @@ cription +%0A self._deps = None %0A%0A de @@ -1074,24 +1074,24 @@ ving it.%22%22%22%0A - func @@ -1124,16 +1124,539 @@ path)%0A%0A + def required_resource_names(self):%0A if self._deps is None:%0A try:%0A required_resources = function.dependencies(self._value)%0A self._deps = set(six.moves.map(lambda rp: rp.name,%0A required_resources))%0A except (exception.InvalidTemplateAttribute,%0A exception.InvalidTemplateReference):%0A # This output ain't gonna work anyway%0A self._deps = set()%0A return self._deps%0A%0A def
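Decoded, the new method lazily computes and caches the set of resource names the output's value depends on, swallowing reference errors since such an output cannot resolve anyway:

```python
from heat.common import exception

# __init__ gains: self._deps = None

def required_resource_names(self):
    if self._deps is None:
        try:
            required_resources = function.dependencies(self._value)
            self._deps = set(six.moves.map(lambda rp: rp.name,
                                           required_resources))
        except (exception.InvalidTemplateAttribute,
                exception.InvalidTemplateReference):
            # This output ain't gonna work anyway
            self._deps = set()
    return self._deps
```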
0f43efc9c611f4b8bd93a42f85db3e14106915ba
Fix bug where burst setting did not work
pyspider/database/local/projectdb.py
pyspider/database/local/projectdb.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- # vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8: # Author: Binux<roy@binux.me> # http://binux.me # Created on 2015-01-17 12:32:17 import os import re import six import logging from pyspider.database.base.projectdb import ProjectDB as BaseProjectDB class ProjectDB(BaseProjectDB): """ProjectDB loading scripts from local file.""" def __init__(self, files): self.projects = {} for filename in files: project = self._build_project(filename) if not project: continue self.projects[project['name']] = project rate_re = re.compile(r'^\s*#\s*rate.*(\d+(\.\d+)?)', re.I) burst_re = re.compile(r'^\s*#\s*burst.*(\d+(\.\d+)?)', re.I) def _build_project(self, filename): try: with open(filename) as fp: script = fp.read() m = self.rate_re.match(script) if m: rate = float(m.group(1)) else: rate = 1 m = self.burst_re.match(script) if m: burst = float(m.group(1)) else: burst = 3 return { 'name': os.path.splitext(os.path.basename(filename))[0], 'group': None, 'status': 'RUNNING', 'script': script, 'comments': None, 'rate': rate, 'burst': burst, 'updatetime': os.path.getmtime(filename), } except OSError as e: logging.error('loading project script error: %s', e) return None def get_all(self, fields=None): for projectname in self.projects: yield self.get(projectname, fields) def get(self, name, fields=None): if name not in self.projects: return None project = self.projects[name] result = {} for f in fields or project: if f in project: result[f] = project[f] else: result[f] = None return result def check_update(self, timestamp, fields=None): for projectname, project in six.iteritems(self.projects): if project['updatetime'] > timestamp: yield self.get(projectname, fields)
Python
0.000001
@@ -676,16 +676,17 @@ s*rate.* +? (%5Cd+(%5C.%5C @@ -697,16 +697,23 @@ )', re.I + %7C re.M )%0A bu @@ -749,16 +749,17 @@ *burst.* +? (%5Cd+(%5C.%5C @@ -770,16 +770,23 @@ )', re.I + %7C re.M )%0A%0A d @@ -932,27 +932,28 @@ elf.rate_re. -mat +sear ch(script)%0A @@ -1088,11 +1088,12 @@ _re. -mat +sear ch(s
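Decoded, the fix has three parts: re.M lets ^ anchor at any line of the script instead of only its start, the non-greedy .*? stops the leading wildcard from swallowing digits of the number, and search() replaces match() (which only ever tries position 0, so a # rate / # burst comment below the first line was never found):

```python
rate_re = re.compile(r'^\s*#\s*rate.*?(\d+(\.\d+)?)', re.I | re.M)
burst_re = re.compile(r'^\s*#\s*burst.*?(\d+(\.\d+)?)', re.I | re.M)

# ...and in _build_project():
m = self.rate_re.search(script)
# ...
m = self.burst_re.search(script)
```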
74e4d69a6ab501e11ff266d1ad77992d0203729f
Include os stuff
thumbor_rackspace/loaders/cloudfiles_loader.py
thumbor_rackspace/loaders/cloudfiles_loader.py
#!/usr/bin/python # -*- coding: utf-8 -*- # thumbor imaging service # https://github.com/globocom/thumbor/wiki # Licensed under the MIT license: # http://www.opensource.org/licenses/mit-license # Copyright (c) 2013 theiconic.com.au development@theiconic.com.au import pyrax def load(context, path, callback): if(context.config.RACKSPACE_PYRAX_REGION): pyrax.set_default_region(context.config.RACKSPACE_PYRAX_REGION) pyrax.set_credential_file(expanduser(context.config.RACKSPACE_PYRAX_CFG)) cf = pyrax.connect_to_cloudfiles(public=context.config.RACKSPACE_PYRAX_PUBLIC) cont = cf.get_container(context.config.RACKSPACE_LOADER_CONTAINER) file_abspath = normalize_path(context) try: logger.debug("[LOADER] getting from %s/%s" % (context.config.RACKSPACE_LOADER_CONTAINER, file_abspath)) obj = cont.get_object(file_abspath) if obj: logger.debug("[LOADER] Found object at %s/%s" % (context.config.RACKSPACE_LOADER_CONTAINER, file_abspath)) else: logger.warning("[LOADER] Unable to find object %s/%s" % (context.config.RACKSPACE_LOADER_CONTAINER, file_abspath )) except: callback(None) callback(obj.get()) def normalize_path(context): path = join(context.config.RACKSPACE_LOADER_CONTAINER_ROOT.rstrip('/'), contenxt.request.url.lstrip('/')) path = path.replace('http://', '') return path
Python
0
@@ -257,16 +257,53 @@ com.au%0A%0A +from os.path import join, expanduser%0A import p
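Decoded, the one-line fix imports the os.path helpers the module already calls — expanduser() in load() and join() in normalize_path() were previously undefined names:

```python
from os.path import join, expanduser

import pyrax
```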
1fe57b534a200e771f0698de9e1910635e4f3222
allow specifying table
custom/icds_reports/management/commands/migrate_tables.py
custom/icds_reports/management/commands/migrate_tables.py
from __future__ import absolute_import, print_function from __future__ import unicode_literals import argparse import inspect import logging import subprocess import sys from datetime import datetime, date import csv342 as csv from six.moves import input logger = logging.getLogger(__name__) def parse_date(s, default=None): if not s: return default try: return datetime.strptime(s, "%Y-%m-%d").date().replace(day=1) except ValueError: msg = "Not a valid date: '{0}'.".format(s) raise argparse.ArgumentTypeError(msg) def run_migration( table_path, source_db, target_db, target_host, target_user, start_date, end_date, confirm, dry_run ): with open(table_path, 'r') as file: tables = [ (source_table, parse_date(date_str, date.max), target_table) for source_table, date_str, target_table in csv.reader(file) ] filtered_tables = filter_tables_by_date(tables, start_date, end_date) if dry_run or _confirm('Preparing to migrate {} tables.'.format(len(filtered_tables))): migrate_tables( filtered_tables, source_db, target_db, target_host, target_user, confirm, dry_run ) def migrate_tables(tables, source_db, target_db, target_host, target_user, confirm, dry_run): commands = get_dump_load_commands(tables, source_db, target_db, target_host, target_user) for source_table, target_table, cmd_parts in commands: print(' '.join(cmd_parts)) if not dry_run and (not confirm or _confirm('Migrate {} to {}'.format(source_table, target_table))): code = subprocess.call(cmd_parts, shell=True) if code != 0: sys.exit(code) def filter_tables_by_date(tables, start_date, end_date): return [ (source_table, target_table) for source_table, table_date, target_table in tables if (not start_date or table_date >= start_date) and (not end_date or table_date < end_date) ] def get_dump_load_commands(tables, source_db, target_db, target_host, target_user): dump_opts = ['--data-only', '--no-acl'] load_opts = ['-h', target_host, '-U', target_user, target_db] for source_table, target_table in tables: cmd_parts = ['pg_dump', '-t', source_table, source_db] + dump_opts if target_table: cmd_parts += ['|', 'sed', '"s/{}/{}/g"'.format(source_table, target_table)] cmd_parts += ['|', 'psql'] + load_opts yield source_table, target_table, cmd_parts def _confirm(msg): confirm_update = input(msg + ' [yes / no] ') return confirm_update == 'yes' def main(): parser = argparse.ArgumentParser(description="Migrate DB tables from one DB to another using pg_dump") parser.add_argument( 'table_path', help=inspect.cleandoc(""" Path to list file containing list of tables formatted as CSV. File should have 3 columns in this order: source_table_name,table_date,target_table_name source_table_name: name of table in source DB table_date: For tables partitioned by month this should be the month of the data in the table e.g. 2018-03-01 target_table_name: name of the table to load the data into in the target database """) ) parser.add_argument( '-d', '--source-db', required=True, help='Name for source database' ) parser.add_argument( '-t', '--target-db', required=True, help='Name for target database' ) parser.add_argument( '-o', '--target-host', required=True, help='Name for target database' ) parser.add_argument( '-u', '--target-user', required=True, help='PG user to connect to target DB as. This user should be able to connect to the target' 'DB without a password.', ) parser.add_argument( '--start-date', type=parse_date, help='Only migrate tables with date on or after this date. Format YYYY-MM-DD', ) parser.add_argument( '--end-date', type=parse_date, help='Only migrate tables with date before this date. Format YYYY-MM-DD', ) parser.add_argument( '--confirm', action='store_true', help='Confirm before each table.', ) parser.add_argument( '--dry-run', action='store_true', help='Only output the commands.', ) args = parser.parse_args() run_migration( args.table_path, args.source_db, args.target_db, args.target_host, args.target_user, args.start_date, args.end_date, args.confirm, args.dry_run ) if __name__ == "__main__": main()
Python
0.000001
@@ -674,24 +674,36 @@ nd_date,%0A + only_table, confirm, dr @@ -1000,16 +1000,221 @@ nd_date) +%0A if only_table:%0A filtered_tables = %5B%0A table for table in filtered_tables if table%5B0%5D == only_table%0A %5D%0A if not filtered_tables:%0A raise Exception(%22No table to migrate%22) %0A%0A if @@ -4475,32 +4475,122 @@ r.add_argument(%0A + '--table',%0A help='Only migrate this table',%0A )%0A parser.add_argument(%0A '--confi @@ -4991,24 +4991,36 @@ ate,%0A + args.table, args.confir
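Decoded, the patch threads a new only_table argument through run_migration(), fails fast when the named table is not in the filtered set, and exposes it as --table on the CLI:

```python
# In run_migration(), after the date filtering:
if only_table:
    filtered_tables = [
        table for table in filtered_tables if table[0] == only_table
    ]
    if not filtered_tables:
        raise Exception("No table to migrate")

# In main():
parser.add_argument(
    '--table',
    help='Only migrate this table',
)
```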
61ad96b4a0ba15cfdf9600c15ce8366eda7ef4f4
Allow overriding of list filter properties (mainly used for changing title)
ixxy_admin_utils/list_filters.py
ixxy_admin_utils/list_filters.py
import datetime from django.contrib.admin import SimpleListFilter, FieldListFilter from django.utils import timezone from django.utils.translation import ugettext_lazy as _ def makeRangeFieldListFilter(lookups, nullable=False, title=None): """Mostly based on https://djangosnippets.org/snippets/2779/ Modified to work with date ranges A factory for ListFilter's. Example Usage: list_filter = ( ('chapters', makeRangeFieldListFilter([ ('1', 1, 2), ('2 to 10', 2, 10), ('11 to 30', 11, 30), ('31 to 100', 31, 100), ('At least 100', 100, None), ], nullable=True)), ('word_count', makeRangeFieldListFilter([ ('Less than 1000', None, 1000), ('1K to 5K', 1000, 5000), ('5K to 10K', 5000, 10000), ('10K to 75K', 10000, 75000), ('75K to 150K', 75000, 150000), ('150K to 300K', 150000, 300000), ('At least 300K', 300000, None), ], nullable=True)), ('derivatives_count', makeRangeFieldListFilter([ ('None', 0, 1), ('1 to 5', 1, 5), ('5 to 50', 5, 50), ('50 to 1000', 50, 1000), ('At least 1000', 1000, None), ])), )""" class RangeFieldListFilter(FieldListFilter): def __init__(self, field, request, params, model, model_admin, field_path, title=title): self.field_generic = '%s__' % field_path self.range_params = dict( [(k, v) for k, v in params.items() if k.startswith(self.field_generic)] ) self.lookup_kwarg_start = '%s__gte' % field_path self.lookup_kwarg_stop = '%s__lt' % field_path self.lookup_kwarg_null = '%s__isnull' % field_path self.links = [(_('Any'), {}), ] for name, start, stop in lookups: # If we pass in a timedelta then assume we want date filtering # relative to now # TODO This only supports date not datetime is_date_based = False if isinstance(start, datetime.timedelta): start = timezone.now() + start is_date_based = True if isinstance(stop, datetime.timedelta): stop = timezone.now() + stop is_date_based = True query_params = {} if is_date_based: if start is not None: start = str(start.date()) if stop is not None: stop = str(stop.date()) if start is not None: query_params[self.lookup_kwarg_start] = start if stop is not None: query_params[self.lookup_kwarg_stop] = stop self.links.append((name, query_params)) if nullable: self.links.append((_('Unknown'), { self.lookup_kwarg_null: 'True' })) super(RangeFieldListFilter, self).__init__( field, request, params, model, model_admin, field_path, ) # Allow a custom title if title is not None: self.title = title def expected_parameters(self): return [ self.lookup_kwarg_start, self.lookup_kwarg_stop, self.lookup_kwarg_null ] def choices(self, cl): for title, param_dict in self.links: yield { 'selected': self.range_params == param_dict, 'query_string': cl.get_query_string(param_dict, [self.field_generic]), 'display': title, } return RangeFieldListFilter
Python
0
@@ -168,16 +168,400 @@ as _%0A%0A%0A +def custom_field_list_filter(title=None):%0A%0A # Allow overriding of%0A # ('events__type', custom_field_list_filter(title='event type')),%0A%0A class Wrapper(FieldListFilter):%0A def __new__(cls, *args, **kwargs):%0A instance = FieldListFilter.create(*args, **kwargs)%0A instance.title = title or instance.title%0A return instance%0A return Wrapper%0A%0A%0A def make
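Decoded, the new factory wraps FieldListFilter so a custom title can be injected where the filter is declared, e.g. ('events__type', custom_field_list_filter(title='event type')):

```python
def custom_field_list_filter(title=None):

    class Wrapper(FieldListFilter):
        def __new__(cls, *args, **kwargs):
            # FieldListFilter.create picks the concrete filter class,
            # then the chosen title is stamped onto the instance.
            instance = FieldListFilter.create(*args, **kwargs)
            instance.title = title or instance.title
            return instance
    return Wrapper
```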
adcf722f41d47f0adb7b7b4a1a5f04a153dabd53
Test the new pipeline method call
tests/python/pipeline/test-pipeline.py
tests/python/pipeline/test-pipeline.py
#!/usr/bin/env python #ckwg +5 # Copyright 2011 by Kitware, Inc. All Rights Reserved. Please refer to # KITWARE_LICENSE.TXT for licensing information, or contact General Counsel, # Kitware, Inc., 28 Corporate Drive, Clifton Park, NY 12065. def log(msg): import sys sys.stderr.write("%s\n" % msg) def test_import(): try: import vistk.pipeline.pipeline except: log("Error: Failed to import the pipeline module") def test_create(): from vistk.pipeline import config from vistk.pipeline import pipeline c = config.empty_config() pipeline.Pipeline(c) def test_api_calls(): from vistk.pipeline import config from vistk.pipeline import edge from vistk.pipeline import modules from vistk.pipeline import pipeline from vistk.pipeline import process from vistk.pipeline import process_registry c = config.empty_config() p = pipeline.Pipeline(c) proc_type1 = 'numbers' proc_type2 = 'print_number' proc_name1 = 'src' proc_name2 = 'snk' port_name1 = 'number' port_name2 = 'number' group_name = 'group' group_iport = 'iport' group_oport = 'oport' modules.load_known_modules() reg = process_registry.ProcessRegistry.self() c.set_value(process_registry.Process.config_name, proc_name1) proc1 = reg.create_process(proc_type1, c) conf_name = 'output' c.set_value(process_registry.Process.config_name, proc_name2) c.set_value(conf_name, 'test-python-pipeline-api_calls-print_number.txt') proc2 = reg.create_process(proc_type2, c) p.add_process(proc1) p.add_process(proc2) p.add_group(group_name) p.connect(proc_name1, port_name1, proc_name2, port_name2) p.map_input_port(group_name, group_iport, proc_name2, port_name2, process.PortFlags()) p.map_output_port(group_name, group_oport, proc_name1, port_name1, process.PortFlags()) p.process_names() p.process_by_name(proc_name1) p.upstream_for_process(proc_name2) p.upstream_for_port(proc_name2, port_name2) p.downstream_for_process(proc_name1) p.downstream_for_port(proc_name1, port_name1) p.sender_for_port(proc_name2, port_name2) p.receivers_for_port(proc_name1, port_name1) p.input_edges_for_process(proc_name2) p.input_edge_for_port(proc_name2, port_name2) p.output_edges_for_process(proc_name1) p.output_edges_for_port(proc_name1, port_name1) p.groups() p.input_ports_for_group(group_name) p.output_ports_for_group(group_name) p.mapped_group_input_port_flags(group_name, group_iport) p.mapped_group_output_port_flags(group_name, group_oport) p.mapped_group_input_ports(group_name, group_iport) p.mapped_group_output_port(group_name, group_oport) p.setup_pipeline() def main(testname): if testname == 'import': test_import() elif testname == 'create': test_create() elif testname == 'api_calls': test_api_calls() else: log("Error: No such test '%s'" % testname) if __name__ == '__main__': import os import sys if not len(sys.argv) == 4: log("Error: Expected three arguments") sys.exit(1) testname = sys.argv[1] os.chdir(sys.argv[2]) sys.path.append(sys.argv[3]) main(testname)
Python
0
@@ -2322,24 +2322,124 @@ port_name1)%0A + p.edge_for_connection(proc_name1, port_name1,%0A proc_name2, port_name2)%0A p.input_
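Decoded, the hunk splices a single edge_for_connection() call into test_api_calls, between the two lines shown here for context:

    p.receivers_for_port(proc_name1, port_name1)
    p.edge_for_connection(proc_name1, port_name1,
                          proc_name2, port_name2)
    p.input_edges_for_process(proc_name2)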
d55210495fde133b8b76ee1f55e593dd43389e0e
Update to use new HTTP APIs.
src/livestreamer/plugins/ongamenet.py
src/livestreamer/plugins/ongamenet.py
from livestreamer.compat import str, bytes from livestreamer.exceptions import PluginError, NoStreamsError from livestreamer.plugin import Plugin from livestreamer.stream import RTMPStream from livestreamer.utils import urlget import re class Ongamenet(Plugin): StreamURL = "http://dostream.lab.so/stream.php" SWFURL = "http://www.ongamenet.com/front/ongame/live/CJPlayer.swf" PageURL = "http://www.ongamenet.com" @classmethod def can_handle_url(self, url): return "ongamenet.com" in url def _get_streams(self): res = urlget(self.StreamURL, data={"from": "ongamenet"}) match = re.search("var stream = \"(.+?)\";", res.text) if not match: raise NoStreamsError(self.url) stream = match.group(1) match = re.search("var server = \"(.+?)\";", res.text) if not match: raise NoStreamsError(self.url) server = match.group(1) streams = {} streams["live"] = RTMPStream(self.session, { "rtmp": server, "playpath": stream, "swfUrl": self.SWFURL, "pageUrl": self.PageURL, "live": True, }) return streams __plugin__ = Ongamenet
Python
0
@@ -1,47 +1,4 @@ -from livestreamer.compat import str, bytes%0A from @@ -32,21 +32,8 @@ port - PluginError, NoS @@ -105,32 +105,30 @@ mer. -stream import RTMPStream +plugin.api import http %0Afro @@ -146,21 +146,22 @@ mer. -utils +stream import urlg @@ -156,22 +156,26 @@ import -urlget +RTMPStream %0A%0Aimport @@ -179,16 +179,17 @@ ort re%0A%0A +%0A class On @@ -503,19 +503,21 @@ res = -url +http. get(self
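Tracing the hunks through old_contents by hand, the plugin's header after this commit comes out roughly as follows (blank-line placement is an assumption):

from livestreamer.exceptions import NoStreamsError
from livestreamer.plugin import Plugin
from livestreamer.plugin.api import http
from livestreamer.stream import RTMPStream

import re

# ...and in _get_streams(), the final hunk swaps the old helper for the new API:
#     res = http.get(self.StreamURL, data={"from": "ongamenet"})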
1ab7829903e377033d439052337d98176cd16ae7
Version checking
treeinterpreter/treeinterpreter.py
treeinterpreter/treeinterpreter.py
# -*- coding: utf-8 -*- import numpy as np from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier, _tree from sklearn.ensemble import RandomForestRegressor from sklearn.ensemble import RandomForestClassifier def _get_tree_paths(tree, node_id, depth=0): """ Returns all paths through the tree as list of node_ids """ if node_id == _tree.TREE_LEAF: raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF) left_child = tree.children_left[node_id] right_child = tree.children_right[node_id] if left_child != _tree.TREE_LEAF: left_paths = _get_tree_paths(tree, left_child, depth=depth + 1) right_paths = _get_tree_paths(tree, right_child, depth=depth + 1) for path in left_paths: path.append(node_id) for path in right_paths: path.append(node_id) paths = left_paths + right_paths else: paths = [[node_id]] return paths def _predict_tree(model, X): """ For a given DecisionTreeRegressor or DecisionTreeClassifier, returns a triple of [prediction, bias and feature_contributions], such that prediction ≈ bias + feature_contributions. """ leaves = model.apply(X) paths = _get_tree_paths(model.tree_, 0) for path in paths: path.reverse() # remove the single-dimensional inner arrays values = model.tree_.value.squeeze() if type(model) == DecisionTreeRegressor: contributions = np.zeros(X.shape) biases = np.zeros(X.shape[0]) line_shape = X.shape[1] elif type(model) == DecisionTreeClassifier: # scikit stores category counts, we turn them into probabilities normalizer = values.sum(axis=1)[:, np.newaxis] normalizer[normalizer == 0.0] = 1.0 values /= normalizer biases = np.zeros((X.shape[0], model.n_classes_)) contributions = np.zeros((X.shape[0], X.shape[1], model.n_classes_)) line_shape = (X.shape[1], model.n_classes_) for row, leaf in enumerate(leaves): for path in paths: if leaf == path[-1]: break biases[row] = values[path[0]] contribs = np.zeros(line_shape) for i in range(len(path) - 1): contrib = values[path[i+1]] - \ values[path[i]] contribs[model.tree_.feature[path[i]]] += contrib contributions[row] = contribs direct_prediction = values[leaves] return direct_prediction, biases, contributions def _predict_forest(model, X): """ For a given RandomForestRegressor or RandomForestClassifier, returns a triple of [prediction, bias and feature_contributions], such that prediction ≈ bias + feature_contributions. """ biases = [] contributions = [] predictions = [] for tree in model.estimators_: pred, bias, contribution = _predict_tree(tree, X) biases.append(bias) contributions.append(contribution) predictions.append(pred) return (np.mean(predictions, axis=0), np.mean(biases, axis=0), np.mean(contributions, axis=0)) def predict(model, X): """ Returns a triple (prediction, bias, feature_contributions), such that prediction ≈ bias + feature_contributions. Parameters ---------- model : DecisionTreeRegressor, DecisionTreeClassifier or RandomForestRegressor, RandomForestClassifier Scikit-learn model on which the prediction should be decomposed. X : array-like, shape = (n_samples, n_features) Test samples. 
Returns ------- decomposed prediction : triple of * prediction, shape = (n_samples) for regression and (n_samples, n_classes) for classification * bias, shape = (n_samples) for regression and (n_samples, n_classes) for classification * contributions, shape = (n_samples, n_features) for regression or shape = (n_samples, n_features, n_classes) for classification """ # Only single output response variable supported. if model.n_outputs_ > 1: raise ValueError("Multilabel classification trees not supported") if (type(model) == DecisionTreeRegressor or type(model) == DecisionTreeClassifier): return _predict_tree(model, X) elif (type(model) == RandomForestRegressor or type(model) == RandomForestClassifier): return _predict_forest(model, X) else: raise ValueError("Wrong model type. Base learner needs to be \ DecisionTreeClassifier or DecisionTreeRegressor.") if __name__ == "__main__": # test from sklearn.datasets import load_iris iris = load_iris() idx = range(len(iris.data)) np.random.shuffle(idx) X = iris.data[idx] Y = iris.target[idx] dt = RandomForestClassifier(max_depth=3) dt.fit(X[:len(X)/2], Y[:len(X)/2]) testX = X[len(X)/2:len(X)/2+5] base_prediction = dt.predict_proba(testX) pred, bias, contrib = _predict_forest(dt, testX) assert(np.allclose(base_prediction, pred)) assert(np.allclose(pred, bias + np.sum(contrib, axis=1)))
Python
0
@@ -217,16 +217,210 @@ ssifier%0A +from distutils.version import LooseVersion%0Aimport sklearn%0Aif LooseVersion(sklearn.__version__) %3C LooseVersion(%220.17%22):%0A raise Exception(%22treeinterpreter requires scikit-learn 0.17 or later%22)%0A %0A%0Adef _g
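Decoded (%3C is <, %22 is a double quote), the guard added directly after the sklearn imports is:

from distutils.version import LooseVersion
import sklearn
if LooseVersion(sklearn.__version__) < LooseVersion("0.17"):
    raise Exception("treeinterpreter requires scikit-learn 0.17 or later")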
e78d613f66df5f10b59e47b6cfce619182d1297f
Update run.py
src/main/app-resources/py-ndvi/run.py
src/main/app-resources/py-ndvi/run.py
#!/usr/bin/env python import site import os import sys site.addsitedir('/application/share/python/lib/python2.6/site-packages') #print sys.path #os.environ['PYTHONUSERBASE'] = '/application/share/python' #print 'Base:', site.USER_BASE #print 'Site:', site.USER_SITE import ndvi sys.path.append('/usr/lib/ciop/python/') import cioppy as ciop ciop.log('INFO', 'Hello World') #myvar = ciop.getparam('param1') #ciop.log('DEBUG', 'value is: ' + myvar) # input comes from STDIN (standard input) for line in sys.stdin: ciop.log('INFO', 'input: ' + line) res = ciop.copy(line, os.environ['TMPDIR']) ciop.log('DEBUG', 'local path:' + res[0].rstrip('\n')) obj = ndvi.GDALCalcNDVI() obj.calc_ndvi(res[0].rstrip(), '/tmp/pippo.tif') pub = ciop.publish('/tmp/pippo.tif') metadata = [ "ical:dtstart=2001-01-10T14:00:00", "ical:dtend=2001-01-10T14:05:00", "dc:identifier=mydataset", "dct:spatial=MULTIPOLYGON(((25.55215 36.97701,24.740512 37.091395,24.496927 35.950137,25.284346 35.839142,25.55215 36.97701)))", "dclite4g:onlineResource=" + pub[0].rstrip()] metadata = [ "ical:dtstart=2001-01-10T14:00:00", "ical:dtend=2001-01-10T14:05:00", "dc:identifier=mydataset", "dct:spatial=MULTIPOLYGON(((25.55215 36.97701,24.740512 37.091395,24.496927 35.950137,25.284346 35.839142,25.55215 36.97701)))", "dclite4g:onlineResource=http://some.host.com/myproduct.tif"] ciop.log('DEBUG', 'Going to register') ciop.register('http://localhost/catalogue/sandbox/rdf', 'file:///application/py-ndvi/etc/series.rdf', metadata) ciop.publish('/tmp/pippo.tif', metalink = True) ciop.log('INFO', 'Done my share of the work!')
Python
0.000001
@@ -362,96 +362,141 @@ ', ' -Hello World')%0A%0A#myvar = ciop.getparam('param1')%0A%0A#ciop.log('DEBUG', 'value is: ' + myvar +Calculating NDVI')%0A%0A# create an output folder for the results%0Aoutput.path = os.environ%5B'TMPDIR'%5D + '/output' %0Aos.makedirs(output.path )%0A%0A#
0048794fd6e71f58bf88d84ddefb1e9a0194efca
Fix the mock-image used in test-steps unittests.
tests/test_steps/test_source_extraction.py
tests/test_steps/test_source_extraction.py
import unittest from tkp.testutil import db_subs, data from ConfigParser import SafeConfigParser from tkp.config import parse_to_dict from tkp.testutil.data import default_job_config from tkp.testutil import Mock import tkp.steps.source_extraction import tkp.accessors class MockImage(Mock): def extract(self, *args, **kwargs): return self.__call__(*args, **kwargs) class TestSourceExtraction(unittest.TestCase): @classmethod def setUpClass(cls): cls.dataset_id = db_subs.create_dataset_8images() config = SafeConfigParser() config.read(default_job_config) config = parse_to_dict(config) cls.parset = config['source_extraction'] def test_extract_sources(self): image_path = data.fits_file tkp.steps.source_extraction.extract_sources(image_path, self.parset) def test_for_appropriate_arguments(self): # sourcefinder_image_from_accessor() should get a single positional # argument, which is the accessor, and four kwargs: back_sizex, # back_sizey, margin and radius. # The object it returns has an extract() method, which should have # been called with det, anl, force_beam and deblend_nthresh kwargs. image_path = data.fits_file mock_method = Mock(MockImage([])) orig_method = tkp.steps.source_extraction.sourcefinder_image_from_accessor tkp.steps.source_extraction.sourcefinder_image_from_accessor = mock_method tkp.steps.source_extraction.extract_sources(image_path, self.parset) tkp.steps.source_extraction.sourcefinder_image_from_accessor = orig_method # Arguments to sourcefinder_image_from_accessor() self.assertIn('radius', mock_method.callvalues[0][1]) self.assertIn('margin', mock_method.callvalues[0][1]) self.assertIn('back_size_x', mock_method.callvalues[0][1]) self.assertIn('back_size_y', mock_method.callvalues[0][1]) # Arguments to extract() self.assertIn('det', mock_method.returnvalue.callvalues[0][1]) self.assertIn('anl', mock_method.returnvalue.callvalues[0][1]) self.assertIn('force_beam', mock_method.returnvalue.callvalues[0][1]) self.assertIn('deblend_nthresh', mock_method.returnvalue.callvalues[0][1])
Python
0
@@ -9,16 +9,35 @@ nittest%0A +import numpy as np%0A from tkp @@ -282,16 +282,17 @@ essors%0A%0A +%0A class Mo @@ -392,16 +392,99 @@ kwargs)%0A + @property%0A def rmsmap(self, *args, **kwargs):%0A return np.zeros((1))%0A%0A %0Aclass T
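Decoded, the diff imports numpy and gives MockImage the rmsmap attribute that the extraction step reads; the class afterwards looks like:

import numpy as np

class MockImage(Mock):
    def extract(self, *args, **kwargs):
        return self.__call__(*args, **kwargs)

    @property
    def rmsmap(self, *args, **kwargs):
        # kept verbatim from the hunk; a property getter never actually
        # receives *args/**kwargs
        return np.zeros((1))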
a019f2f5f2dd18761b2c6bce252232dd680d65f1
order my tasks by lastedit as well
python/mod_python/firefox/firefox.py
python/mod_python/firefox/firefox.py
from mod_python import apache import sys, os sys.path.append("/usr/lib") import feedparser, time, pickle feeds = None dumpfile = "/home/vmiklos/ftp/vmiklos.hu/htdocs/startup/feeds" def dumpcache(): global feeds try: socket = open(dumpfile, "w") pickle.dump(feeds, socket) socket.close() except TypeError: os.remove(dumpfile) def fetchfeed(url): global feeds feed = feedparser.parse(url) feed.updated = time.time() feeds[url] = feed return feed def dumpfeed(url): global feeds ret = [] if not feeds: try: socket = open(dumpfile, "r") feeds = pickle.load(socket) socket.close() except IOError: feeds = {} if url in feeds.keys(): feed = feeds[url] # check if the feed is outdated if (time.time() - feed.updated) > 1800: feed = fetchfeed(url) else: feed = fetchfeed(url) try: ret.append('<div id="right" class="sideboxpadding">') ret.append('<div class="boxheader">%s<br /></div>' % (feed.feed.title.encode('ascii', 'xmlcharrefreplace'))) ret.append('<div class="sidecontent">') for i in feed.entries: ret.append('<a href="%s">%s</a><br />' % (i.link, i.title.encode('ascii', 'xmlcharrefreplace'))) ret.append('</div></div>') ret.append('<div id="right" class="dummybox">') ret.append('</div>') except AttributeError: feed.feed = None ret = [] return "\n".join(ret) if __name__ == "firefox": def handler(req): out = [] req.content_type = "text/html; charset=utf-8" out.append(""" <html> <head> <link rel="stylesheet" type="text/css" href="firefox.css" /> <title>Firefox Startup Page</title> </head> <body> <div class="header"> <a href="http://frugalware.org/">frugalware</a> | <a href="http://vmiklos.hu/blog">blog</a> | <a href="http://blogs.frugalware.org/vmiklos">fwblog</a> | <a href="http://bugs.frugalware.org/?dev=vmiklos&order=dateopened&sort=desc">my tasks</a> | <a href="http://bugs.frugalware.org/?string=[SEC]&order=lastedit&sort=desc">sec tasks</a> | <a href="http://bugs.frugalware.org/?do=roadmap">roadmap</a> | <a href="http://bugs.frugalware.org/?sev[]=5">critical bugs</a> | <a href="http://rss.gmane.org/gmane.linux.frugalware.cvs">-darcs rss</a> | <a href="http://rss.gmane.org/gmane.linux.frugalware.scm">-git rss</a> | <a href="http://ftp.frugalware.org/pub/other/people/vmiklos/">devspace</a> | <a href="http://vmiklos.hu">homepage</a> </div> <div class="dummybox"></div> <div id="main"> """) out.append(dumpfeed("http://feeds.feedburner.com/HUP")) out.append(dumpfeed("http://frugalware.org/~vmiklos/rss/irc/irc.py")) out.append(dumpfeed("http://rss.slashdot.org/Slashdot/slashdot")) out.append(dumpfeed("http://frugalware.org/~vmiklos/rss/legalja/legalja.py")) out.append(""" </div> </body> </html> """) req.write("".join(out)) dumpcache() return apache.OK
Python
0
@@ -1835,18 +1835,16 @@ der= -dateopened +lastedit &sor
edf1e96e56272a10ad767f13e6e8cc886f98055c
Test consecutive Coordinator.heartbeat calls #17
tests/unit/test_stream/test_coordinator.py
tests/unit/test_stream/test_coordinator.py
import functools from bloop.stream.shard import Shard from . import build_get_records_responses def test_coordinator_repr(coordinator): coordinator.stream_arn = "repr-stream-arn" assert repr(coordinator) == "<Coordinator[repr-stream-arn]>" def test_heartbeat_latest(coordinator, session): find_records_id = "id-find-records" no_records_id = "id-no-records" has_sequence_id = "id-has-sequence" # When "id-finds-records" gets a response, it should only advance once and return 3 records. records = build_get_records_responses(3, 1)[0] def mock_get_records(iterator_id): return { find_records_id: records, no_records_id: {}, has_sequence_id: {} }[iterator_id] session.get_stream_records.side_effect = mock_get_records make_shard = functools.partial(Shard, stream_arn=coordinator.stream_arn, shard_id="shard-id", session=session) coordinator.active.extend([ # Has a sequence number, should not be called during a heartbeat make_shard(iterator_id=has_sequence_id, iterator_type="at_sequence", sequence_number="sequence-number"), # No sequence number, should find records during a heartbeat make_shard(iterator_id=find_records_id, iterator_type="trim_horizon"), # No sequence number, should not find records during a heartbeat make_shard(iterator_id=no_records_id, iterator_type="latest"), ]) coordinator.heartbeat() assert session.get_stream_records.call_count == 2 session.get_stream_records.assert_any_call(find_records_id) session.get_stream_records.assert_any_call(no_records_id) assert len(coordinator.buffer) == 3 pairs = [coordinator.buffer.pop() for _ in range(len(coordinator.buffer))] sequence_numbers = [record["meta"]["sequence_number"] for (record, _) in pairs] assert sequence_numbers == [0, 1, 2]
Python
0
@@ -267,15 +267,8 @@ beat -_latest (coo @@ -1883,8 +1883,829 @@ , 1, 2%5D%0A +%0A%0Adef test_heartbeat_until_sequence_number(coordinator, session):%0A    %22%22%22After heartbeat() finds records for a shard, the shard doesn't check during the next heartbeat.%22%22%22%0A    shard = Shard(stream_arn=coordinator.stream_arn, shard_id=%22shard-id%22, session=session,%0A                  iterator_id=%22iterator-id%22, iterator_type=%22latest%22)%0A    coordinator.active.append(shard)%0A%0A    session.get_stream_records.side_effect = build_get_records_responses(1)%0A%0A    # First call fetches records from DynamoDB%0A    coordinator.heartbeat()%0A    assert coordinator.buffer%0A    assert shard.sequence_number is not None%0A    session.get_stream_records.assert_called_once_with(%22iterator-id%22)%0A%0A    # Second call skips the shard, since it now has a sequence_number.%0A    coordinator.heartbeat()%0A    assert session.get_stream_records.call_count == 1%0A
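The first hunk renames test_heartbeat_latest to plain test_heartbeat; decoded, the test the second hunk appends is:

def test_heartbeat_until_sequence_number(coordinator, session):
    """After heartbeat() finds records for a shard, the shard doesn't check during the next heartbeat."""
    shard = Shard(stream_arn=coordinator.stream_arn, shard_id="shard-id", session=session,
                  iterator_id="iterator-id", iterator_type="latest")
    coordinator.active.append(shard)

    session.get_stream_records.side_effect = build_get_records_responses(1)

    # First call fetches records from DynamoDB
    coordinator.heartbeat()
    assert coordinator.buffer
    assert shard.sequence_number is not None
    session.get_stream_records.assert_called_once_with("iterator-id")

    # Second call skips the shard, since it now has a sequence_number.
    coordinator.heartbeat()
    assert session.get_stream_records.call_count == 1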
a0c3465b6e8a91da1865971ff97ecf44360fd290
Set filter threshold as a parameter for function build_dict of dataset imikolov
python/paddle/v2/dataset/imikolov.py
python/paddle/v2/dataset/imikolov.py
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ imikolov's simple dataset. This module will download dataset from http://www.fit.vutbr.cz/~imikolov/rnnlm/ and parse training set and test set into paddle reader creators. """ import paddle.v2.dataset.common import collections import tarfile __all__ = ['train', 'test', 'build_dict'] URL = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz' MD5 = '30177ea32e27c525793142b6bf2c8e2d' def word_count(f, word_freq=None): if word_freq is None: word_freq = collections.defaultdict(int) for l in f: for w in l.strip().split(): word_freq[w] += 1 word_freq['<s>'] += 1 word_freq['<e>'] += 1 return word_freq def build_dict(): """ Build a word dictionary from the corpus, Keys of the dictionary are words, and values are zero-based IDs of these words. """ train_filename = './simple-examples/data/ptb.train.txt' test_filename = './simple-examples/data/ptb.valid.txt' with tarfile.open( paddle.v2.dataset.common.download( paddle.v2.dataset.imikolov.URL, 'imikolov', paddle.v2.dataset.imikolov.MD5)) as tf: trainf = tf.extractfile(train_filename) testf = tf.extractfile(test_filename) word_freq = word_count(testf, word_count(trainf)) if '<unk>' in word_freq: # remove <unk> for now, since we will set it as last index del word_freq['<unk>'] TYPO_FREQ = 50 word_freq = filter(lambda x: x[1] > TYPO_FREQ, word_freq.items()) word_freq_sorted = sorted(word_freq, key=lambda x: (-x[1], x[0])) words, _ = list(zip(*word_freq_sorted)) word_idx = dict(zip(words, xrange(len(words)))) word_idx['<unk>'] = len(words) return word_idx def reader_creator(filename, word_idx, n): def reader(): with tarfile.open( paddle.v2.dataset.common.download( paddle.v2.dataset.imikolov.URL, 'imikolov', paddle.v2.dataset.imikolov.MD5)) as tf: f = tf.extractfile(filename) UNK = word_idx['<unk>'] for l in f: l = ['<s>'] + l.strip().split() + ['<e>'] if len(l) >= n: l = [word_idx.get(w, UNK) for w in l] for i in range(n, len(l) + 1): yield tuple(l[i - n:i]) return reader def train(word_idx, n): """ imikolov training set creator. It returns a reader creator, each sample in the reader is a word ID tuple. :param word_idx: word dictionary :type word_idx: dict :param n: sliding window size :type n: int :return: Training reader creator :rtype: callable """ return reader_creator('./simple-examples/data/ptb.train.txt', word_idx, n) def test(word_idx, n): """ imikolov test set creator. It returns a reader creator, each sample in the reader is a word ID tuple. :param word_idx: word dictionary :type word_idx: dict :param n: sliding window size :type n: int :return: Test reader creator :rtype: callable """ return reader_creator('./simple-examples/data/ptb.valid.txt', word_idx, n) def fetch(): paddle.v2.dataset.common.download(URL, "imikolov", MD5)
Python
0
@@ -1297,16 +1297,28 @@ ld_dict( +typo_freq=50 ):%0A %22 @@ -2059,31 +2059,8 @@ '%5D%0A%0A - TYPO_FREQ = 50%0A @@ -2103,17 +2103,17 @@ %5D %3E -TYPO_FREQ +typo_freq , wo
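Decoded, the hard-coded constant becomes a keyword argument; the two touched spots in build_dict are (unchanged body elided):

def build_dict(typo_freq=50):
    ...
    word_freq = filter(lambda x: x[1] > typo_freq, word_freq.items())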
213d6a42d505fb7ca320873cafdc187cf65d10ed
add unit tests for escaping curlies
tests/unit/pypyr/format/string_test.py
tests/unit/pypyr/format/string_test.py
""""string.py unit tests.""" import pypyr.format.string import pytest def test_string_interpolate_works(): context = {'key1': 'down', 'key2': 'valleys', 'key3': 'value3'} input_string = 'Piping {key1} the {key2} wild' output = pypyr.format.string.get_interpolated_string(input_string, context) assert output == 'Piping down the valleys wild', ( "string interpolation incorrect") def test_string_interpolate_works_with_no_swaps(): context = {'key1': 'down', 'key2': 'valleys', 'key3': 'value3'} input_string = 'Piping down the valleys wild' output = pypyr.format.string.get_interpolated_string(input_string, context) assert output == 'Piping down the valleys wild', ( "string interpolation incorrect") def test_tag_not_in_context_should_throw(): """pycode error should raise up to caller.""" with pytest.raises(KeyError): context = {'key1': 'value1'} input_string = '{key1} this is {key2} string' pypyr.format.string.get_interpolated_string(input_string, context)
Python
0
@@ -741,32 +741,1040 @@ n incorrect%22)%0A%0A%0A +def test_string_interpolate_escapes_double_curly():%0A context = %7B'key1': 'down', 'key2': 'valleys', 'key3': 'value3'%7D%0A input_string = 'Piping %7B%7B down the valleys wild'%0A output = pypyr.format.string.get_interpolated_string(input_string, context)%0A assert output == 'Piping %7B down the valleys wild', (%0A %22string interpolation incorrect%22)%0A%0A%0Adef test_string_interpolate_escapes_double_curly_pair():%0A context = %7B'key1': 'down', 'key2': 'valleys', 'key3': 'value3'%7D%0A input_string = 'Piping %7B%7Bdown%7D%7D the valleys wild'%0A output = pypyr.format.string.get_interpolated_string(input_string, context)%0A assert output == 'Piping %7Bdown%7D the valleys wild', (%0A %22string interpolation incorrect%22)%0A%0A%0Adef test_single_curly_should_throw():%0A %22%22%22pycode error should raise up to caller.%22%22%22%0A with pytest.raises(ValueError):%0A context = %7B'key1': 'value1'%7D%0A input_string = '%7Bkey1%7D this %7B is %7Bkey2%7D string'%0A pypyr.format.string.get_interpolated_string(input_string, context)%0A%0A%0A def test_tag_not
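Decoded, the first of the three added tests shows the escaping rule under test: a doubled brace collapses to a literal brace instead of being interpolated.

def test_string_interpolate_escapes_double_curly():
    context = {'key1': 'down', 'key2': 'valleys', 'key3': 'value3'}
    input_string = 'Piping {{ down the valleys wild'
    output = pypyr.format.string.get_interpolated_string(input_string, context)
    assert output == 'Piping { down the valleys wild', (
        "string interpolation incorrect")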
e84b2e11088878d44433bfc767b8abba79eca0a7
use environment variable for config folder
litleSdkPython/Configuration.py
litleSdkPython/Configuration.py
#Copyright (c) 2011-2012 Litle & Co. # #Permission is hereby granted, free of charge, to any person #obtaining a copy of this software and associated documentation #files (the "Software"), to deal in the Software without #restriction, including without limitation the rights to use, #copy, modify, merge, publish, distribute, sublicense, and/or sell #copies of the Software, and to permit persons to whom the #Software is furnished to do so, subject to the following #conditions: # #The above copyright notice and this permission notice shall be #included in all copies or substantial portions of the Software. # #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, #EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES #OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND #NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT #HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, #WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR #OTHER DEALINGS IN THE SOFTWARE. import os class Configuration(object): def __init__(self): self.version = 8.25 self.reportGroup = 'Default Report Group' self._url = 'Sandbox' self.proxy = None self.timeout = 65 self.printXml = False self.configFolder = os.path.expanduser('~') self.__LITLE_SDK_CONFIG = '.litle_Python_SDK_config' @property def url(self): return self._urlMapper(self._url) @url.setter def url(self, value): self._url = value def getConfigFileName(self): return self.__LITLE_SDK_CONFIG def _urlMapper(self,target): if (target == "Cert"): return 'https://cert.litle.com/vap/communicator/online' elif(target == "Sandbox"): return 'https://www.testlitle.com/sandbox/communicator/online' elif(target == "Precert"): return 'https://precert.litle.com/vap/communicator/online' elif(target == "Prod"): return 'https://production.litle.com/vap/communicator/online' else: return target
Python
0.000001
@@ -1358,16 +1358,101 @@ Folder = + os.environ%5B'LITLE_SDK_CONFIG'%5D%5C%0A if 'LITLE_SDK_CONFIG' in os.environ else os.path
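Decoded (%5B and %5D are square brackets, %5C a line-continuation backslash), the assignment becomes an environment lookup with the old home directory as the fallback:

self.configFolder = os.environ['LITLE_SDK_CONFIG']\
    if 'LITLE_SDK_CONFIG' in os.environ else os.path.expanduser('~')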
24e42d5d4a21c1f3ffd36a163b89ee7f39375945
Update P05_trafficLight: add assertion to check for red light
books/AutomateTheBoringStuffWithPython/Chapter10/P05_trafficLight.py
books/AutomateTheBoringStuffWithPython/Chapter10/P05_trafficLight.py
# This program emulates traffic lights at intersections with assertions market_2nd = {"ns": "green", "ew": "red"} mission_16th = {"ns": "red", "ew": "green"} def switchLights(stoplight): for key in stoplight.keys(): if stoplight[key] == "green": stoplight[key] = "yellow" elif stoplight[key] == "yellow": stoplight[key] = "red" elif stoplight[key] == "red": stoplight[key] = "green" switchLights(market_2nd)
Python
0
@@ -443,16 +443,98 @@ %22green%22%0A + assert %22red%22 in stoplight.values(), %22Neither light is red! %22 + str(stoplight)%0A %0A%0Aswitch
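Decoded, the assertion lands right after the loop inside switchLights:

    assert "red" in stoplight.values(), "Neither light is red! " + str(stoplight)

Starting from market_2nd ({"ns": "green", "ew": "red"}), one switch leaves ns yellow and ew green, so the final switchLights(market_2nd) call now raises AssertionError, which is the point of the exercise.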
dbd74718645cec4aaec419b48c2ab0c0e15a9821
Fix up entity indexed at
ingestors/manager.py
ingestors/manager.py
import magic import logging import balkhash from pprint import pprint # noqa from tempfile import mkdtemp from followthemoney import model from banal import ensure_list from normality import stringify from pantomime import normalize_mimetype from balkhash.utils import safe_fragment from servicelayer.archive import init_archive from servicelayer.archive.util import ensure_path from servicelayer.extensions import get_extensions from followthemoney.helpers import entity_filename from ingestors.directory import DirectoryIngestor from ingestors.exc import ProcessingException from ingestors.util import filter_text, remove_directory from ingestors import settings log = logging.getLogger(__name__) class Manager(object): """Handles the lifecycle of an ingestor. This can be subclassed to embed it into a larger processing framework.""" #: Indicates that during the processing no errors or failures occured. STATUS_SUCCESS = u'success' #: Indicates occurance of errors during the processing. STATUS_FAILURE = u'failure' MAGIC = magic.Magic(mime=True) def __init__(self, stage, context): self.stage = stage self.context = context self.work_path = ensure_path(mkdtemp(prefix='ingestor-')) self.emitted = set() self._writer = None self._dataset = None @property def archive(self): if not hasattr(settings, '_archive'): settings._archive = init_archive() return settings._archive @property def dataset(self): if self._dataset is None: self._dataset = self.get_dataset(self.stage, self.context) return self._dataset @classmethod def get_dataset(cls, stage, context): dataset = stage.job.dataset.name name = context.get('balkhash_name', dataset) return balkhash.init(name) @property def writer(self): if self._writer is None: self._writer = self.dataset.bulk() return self._writer def make_entity(self, schema, parent=None): schema = model.get(schema) prefix = self.stage.job.dataset.name entity = model.make_entity(schema, key_prefix=prefix) self.make_child(parent, entity) return entity def make_child(self, parent, child): if parent is not None and child is not None: child.add('parent', parent.id) child.add('ancestors', parent.get('ancestors')) child.add('ancestors', parent.id) def emit_entity(self, entity, fragment=None): # pprint(entity.to_dict()) self.writer.put(entity.to_dict(), fragment) self.emitted.add(entity.id) def emit_text_fragment(self, entity, texts, fragment): texts = [t for t in ensure_list(texts) if filter_text(t)] if len(texts): doc = self.make_entity(entity.schema) doc.id = entity.id doc.add('indexText', texts) self.emit_entity(doc, fragment=safe_fragment(fragment)) def auction(self, file_path, entity): if not entity.has('mimeType'): if file_path.is_dir(): entity.add('mimeType', DirectoryIngestor.MIME_TYPE) return DirectoryIngestor entity.add('mimeType', self.MAGIC.from_file(file_path.as_posix())) best_score, best_cls = 0, None for cls in get_extensions('ingestors'): score = cls.match(file_path, entity) if score > best_score: best_score = score best_cls = cls if best_cls is None: raise ProcessingException("Format not supported") return best_cls def queue_entity(self, entity): log.debug("Queue: %r", entity) self.stage.queue(entity.to_dict(), self.context) def store(self, file_path, mime_type=None): file_path = ensure_path(file_path) mime_type = normalize_mimetype(mime_type) if file_path is not None and file_path.is_file(): return self.archive.archive_file(file_path, mime_type=mime_type) def load(self, content_hash, file_name=None): # log.info("Local archive name: %s", file_name) return 
self.archive.load_file(content_hash, file_name=file_name, temp_path=self.work_path) def ingest_entity(self, entity): for content_hash in entity.get('contentHash', quiet=True): file_name = entity_filename(entity) file_path = self.load(content_hash, file_name=file_name) if file_path is None or not file_path.exists(): continue self.ingest(file_path, entity) return self.finalize(entity) def ingest(self, file_path, entity, **kwargs): """Main execution step of an ingestor.""" file_path = ensure_path(file_path) if file_path.is_file() and not entity.has('fileSize'): entity.add('fileSize', file_path.stat().st_size) entity.set('processingStatus', self.STATUS_FAILURE) try: ingestor_class = self.auction(file_path, entity) log.info("Ingestor [%r]: %s", entity, ingestor_class.__name__) self.delegate(ingestor_class, file_path, entity) entity.set('processingStatus', self.STATUS_SUCCESS) except ProcessingException as pexc: entity.set('processingError', stringify(pexc)) log.error("[%r] Failed to process: %s", entity, pexc) finally: self.finalize(entity) def finalize(self, entity): self.emit_entity(entity) self.writer.flush() remove_directory(self.work_path) def delegate(self, ingestor_class, file_path, entity): ingestor_class(self).ingest(file_path, entity) def close(self): self.writer.flush() self.dataset.close() remove_directory(self.work_path)
Python
0.000042
@@ -2295,16 +2295,86 @@ child):%0A + %22%22%22Derive entity properties by knowing it's parent folder.%22%22%22%0A @@ -2414,24 +2414,56 @@ s not None:%0A + # Folder hierarchy:%0A @@ -2594,24 +2594,165 @@ , parent.id) +%0A # Avoid re-ingest re-notification:%0A prop = 'indexUpdatedAt'%0A child.add(prop, parent.get(prop), quiet=True) %0A%0A def em
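Decoded, make_child after this commit reads as below (docstring kept verbatim from the hunk, including its "it's" slip):

def make_child(self, parent, child):
    """Derive entity properties by knowing it's parent folder."""
    if parent is not None and child is not None:
        # Folder hierarchy:
        child.add('parent', parent.id)
        child.add('ancestors', parent.get('ancestors'))
        child.add('ancestors', parent.id)
        # Avoid re-ingest re-notification:
        prop = 'indexUpdatedAt'
        child.add(prop, parent.get(prop), quiet=True)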
304a220e99694ec6b41a31db8150c7f4604f6ef5
Remove old logging import
flexget/components/notify/notifiers/gotify.py
flexget/components/notify/notifiers/gotify.py
import logging from http import HTTPStatus from requests.exceptions import RequestException from urllib.parse import urljoin from flexget import plugin from flexget.event import event from flexget.plugin import PluginWarning from flexget.utils.requests import Session as RequestSession, TimedLimiter plugin_name = 'gotify' log = logging.getLogger(plugin_name) requests = RequestSession(max_retries=3) class GotifyNotifier(object): """ Example:: notify: entries: via: - gotify: url: <GOTIFY_SERVER_URL> token: <GOTIFY_TOKEN> priority: <PRIORITY> Configuration parameters are also supported from entries (eg. through set). """ schema = { 'type': 'object', 'properties': { 'url': {'format': 'url'}, 'token': {'type': 'string'}, 'priority': {'type': 'integer', 'default': 4}, }, 'required': [ 'token', 'url', ], 'additionalProperties': False, } def notify(self, title, message, config): """ Send a Gotify notification """ base_url = config['url'] api_endpoint = '/message' url = urljoin(base_url, api_endpoint) params = {'token': config['token']} priority = config['priority'] notification = {'title': title, 'message': message, 'priority': priority} # Make the request try: response = requests.post(url, params=params, json=notification) except RequestException as e: if e.response is not None: if e.response.status_code in (HTTPStatus.UNAUTHORIZED, HTTPStatus.FORBIDDEN): message = 'Invalid Gotify access token' else: message = e.response.json()['error']['message'] else: message = str(e) raise PluginWarning(message) @event('plugin.register') def register_plugin(): plugin.register(GotifyNotifier, plugin_name, api_ver=2, interfaces=['notifiers'])
Python
0.000001
@@ -1,20 +1,4 @@ -import logging%0A%0A from @@ -280,17 +280,16 @@ imiter%0A%0A -%0A plugin_n @@ -306,45 +306,8 @@ ify' -%0Alog = logging.getLogger(plugin_name) %0A%0Are
77ac4b3cc97731c0fcb387a10fadd1509e057a6d
update with main function and header
controller/test.py
controller/test.py
#!/usr/bin/python2.7 import constants import sys sys.path.insert(0, "./view/") import viewAssessment import viewQuestion import viewTopic import viewSection import viewCourse import viewUser sys.path.insert(0, "./edit/") import editAssessment import editQuestion import editTopic import editSection import editCourse sys.path.insert(0, "./objects/") from assessment import Assessment from question import Question from topic import Topic from section import Section from course import Course from user import User print(viewAssessment.byID(1).sortByTopic())
Python
0
@@ -14,16 +14,158 @@ thon2.7%0A +%22%22%22%0Acreated_by: Micah Halter%0Acreated_date: 2/28/2015%0Alast_modified_by: Micah Halter%0Alast_modified_date: 3/2/2015%0A%22%22%22%0A%0A#imports %0Aimport @@ -654,16 +654,44 @@ t User%0A%0A +# functions%0Adef main():%0A print(vi @@ -726,8 +726,62 @@ opic())%0A +%0A# running code%0Aif __name__ == %22__main__%22:%0A main()%0A
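Decoded, the commit prepends a metadata docstring and moves the module's one statement behind the usual entry-point guard:

"""
created_by: Micah Halter
created_date: 2/28/2015
last_modified_by: Micah Halter
last_modified_date: 3/2/2015
"""

# functions
def main():
    print(viewAssessment.byID(1).sortByTopic())

# running code
if __name__ == "__main__":
    main()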
d7006769cb868a368c40e5408671088278581ece
add DD_HISTOGRAM_PERCENTILES to config_builder.py
config_builder.py
config_builder.py
#!/opt/datadog-agent/embedded/bin/python ''' This script is used to generate the configuration of the datadog agent. ''' from os import getenv, environ import logging from urllib2 import urlopen, URLError, HTTPError from socket import getdefaulttimeout, setdefaulttimeout from ConfigParser import ConfigParser class ConfBuilder(object): ''' This class manages the configuration files ''' def __init__(self): # excludes from the generic variables parsing the ones that have a # certain logic warpped around them self.exclude_from_generic = [ 'DD_API_KEY', 'DD_API_KEY_FILE', 'DD_HOME', 'DD_START_AGENT', 'DD_LOGS_STDOUT' ] dd_agent_root = '/etc/dd-agent' dd_home = getenv('DD_HOME') if dd_home is not None: dd_agent_root = '{}/agent'.format(dd_home) self.datadog_conf_file = '{}/datadog.conf'.format(dd_agent_root) # This will store the config parser object that is used in the different functions self.config = None def load_config(self, config_file): ''' Loads a config file using ConfigParser ''' self.config = ConfigParser() # import existing config from file with open(config_file, 'rb') as cfd: self.config.readfp(cfd) def save_config(self, config_file): ''' Saves a ConfigParser object (self.config) to the given file ''' if self.config is None: logging.error('config object needs to be created before saving anything') exit(1) with open(config_file, 'wb') as cfd: self.config.write(cfd) def build_datadog_conf(self): ''' Builds the datadog.conf based on the environment variables ''' self.load_config(self.datadog_conf_file) ##### Core config ##### self.set_api_key() self.set_from_env_mapping('DD_HOSTNAME', 'hostname') self.set_from_env_mapping('EC2_TAGS', 'collect_ec2_tags') # The TAGS env variable superseeds DD_TAGS self.set_from_env_mapping('DD_TAGS', 'tags') self.set_from_env_mapping('TAGS', 'tags') # The LOG_LEVEL env variable superseeds DD_LOG_LEVEL self.set_from_env_mapping('DD_LOG_LEVEL', 'log_level') self.set_from_env_mapping('LOG_LEVEL', 'log_level') self.set_from_env_mapping('NON_LOCAL_TRAFFIC', 'non_local_traffic') self.set_from_env_mapping('DD_URL', 'dd_url') self.set_from_env_mapping('STATSD_METRIC_NAMESPACE', 'statsd_metric_namespace') self.set_from_env_mapping('USE_DOGSTATSD', 'use_dogstatsd') ##### Proxy config ##### self.set_from_env_mapping('PROXY_HOST', 'proxy_host') self.set_from_env_mapping('PROXY_PORT', 'proxy_port') self.set_from_env_mapping('PROXY_USER', 'proxy_user') self.set_from_env_mapping('PROXY_PASSWORD', 'proxy_password') ##### Service discovery ##### self.set_from_env_mapping('SD_BACKEND', 'service_discovery_backend') self.set_sd_backend_host() self.set_from_env_mapping('SD_BACKEND_PORT', 'sd_backend_port') self.set_from_env_mapping('SD_TEMPLATE_DIR', 'sd_template_dir') self.set_from_env_mapping('SD_CONSUL_TOKEN', 'consul_token') self.set_from_env_mapping('SD_BACKEND_USER', 'sd_backend_username') self.set_from_env_mapping('SD_BACKEND_PASSWORD', 'sd_backend_password') # Magic trick to automatically add properties not yet define in the doc self.set_generics('DD_CONF_') self.save_config(self.datadog_conf_file) def set_api_key(self): ''' Used for building datadog.conf Gets the API key from the environment or the key file and sets it in the configuration ''' api_key = getenv('DD_API_KEY', getenv('API_KEY', '')) keyfile = getenv('DD_API_KEY_FILE') if keyfile is not None: try: with open(keyfile, 'r') as kfile: api_key = kfile.read() except IOError: logging.warning('Unable to read the content of they key file specified in DD_API_KEY_FILE') if len(api_key) <= 0: logging.error('You 
must set API_KEY environment variable or include a DD_API_KEY_FILE to run the Datadog Agent container') exit(1) self.set_property('api_key', api_key) def set_from_env_mapping(self, env_var_name, property_name, section='Main', action=None): ''' Sets a property using the corresponding environment variable if it exists It also returns the value in case you want to play with it If action is specified to 'store_true', whatever the content of the env variable is (if exists), the value of the property will be true ''' _val = getenv(env_var_name) if _val is not None: if action == 'store_true': _val = 'true' self.set_property(property_name, _val, section) return _val return None def set_sd_backend_host(self): ''' Used for building datadog.conf Sets sd_config_backend and sd_backend_host depending on the environment ''' _config_backend = getenv('SD_CONFIG_BACKEND') if _config_backend is not None: self.set_property('sd_config_backend', _config_backend) _backend_host = getenv('SD_BACKEND_HOST') if _backend_host is not None: self.set_property('sd_backend_host', _backend_host) else: _timeout = getdefaulttimeout() try: setdefaulttimeout(1) _ec2_ip = urlopen('http://169.254.169.254/latest/meta-data/local-ipv4') self.set_property('sd_backend_host', _ec2_ip.read()) except (URLError, HTTPError): pass # silent fail on purpose setdefaulttimeout(_timeout) def set_generics(self, prefix='DD_CONF_'): ''' Looks for environment variables starting by the given prefix and consider that the rest of the variable name is the name of the property to set ''' for dd_var in environ: if dd_var.startswith(prefix) and dd_var.upper() not in self.exclude_from_generic: if len(dd_var) > 0: self.set_property(dd_var[len(prefix):].lower(), environ[dd_var]) def set_property(self, property_name, property_value, section='Main'): ''' Sets the given property to the given value in the configuration ''' if self.config is None: logging.error('config object needs to be created before setting properties') exit(1) self.config.set(section, property_name, property_value) if __name__ == '__main__': cfg = ConfBuilder() cfg.build_datadog_conf()
Python
0.000001
@@ -2654,24 +2654,111 @@ dogstatsd')%0A + self.set_from_env_mapping('DD_HISTOGRAM_PERCENTILES', 'histogram_percentiles')%0A ####
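Decoded, the new mapping slots in right after the use_dogstatsd line in build_datadog_conf:

        self.set_from_env_mapping('USE_DOGSTATSD', 'use_dogstatsd')
        self.set_from_env_mapping('DD_HISTOGRAM_PERCENTILES', 'histogram_percentiles')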
0353392f7c597354c79f8a13cff33e35881d02b4
Remove @expectedFailure from test_html_import()
future/tests/test_standard_library_renames.py
future/tests/test_standard_library_renames.py
""" Tests for the future.standard_library module """ from __future__ import absolute_import, unicode_literals, print_function from future import standard_library from future import six import sys import textwrap import unittest from subprocess import check_output from future.standard_library import RENAMES, REPLACED_MODULES class TestStandardLibraryRenames(unittest.TestCase): def setUp(self): self.interpreter = 'python' @unittest.skipIf(six.PY3, 'generic import tests are for Py2 only') def test_all(self): """ Tests whether all of the old imports in RENAMES are accessible under their new names. """ for (oldname, newname) in RENAMES.items(): if newname == 'winreg' and sys.platform not in ['win32', 'win64']: continue if newname in REPLACED_MODULES: # Skip this check for e.g. the stdlib's ``test`` module, # which we have replaced completely. continue oldmod = __import__(oldname) newmod = __import__(newname) if '.' not in oldname: self.assertEqual(oldmod, newmod) def test_import_from_module(self): """ Tests whether e.g. "import socketserver" succeeds in a module imported by another module. """ code1 = ''' from future import standard_library import importme2 ''' code2 = ''' import socketserver print('Import succeeded!') ''' with open('importme1.py', 'w') as f: f.write(textwrap.dedent(code1)) with open('importme2.py', 'w') as f: f.write(textwrap.dedent(code2)) output = check_output([self.interpreter, 'importme1.py']) print(output) def test_configparser(self): import configparser def test_copyreg(self): import copyreg def test_pickle(self): import pickle def test_profile(self): import profile def test_stringio(self): from io import StringIO s = StringIO('test') for method in ['tell', 'read', 'seek', 'close', 'flush']: self.assertTrue(hasattr(s, method)) def test_bytesio(self): from io import BytesIO s = BytesIO(b'test') for method in ['tell', 'read', 'seek', 'close', 'flush', 'getvalue']: self.assertTrue(hasattr(s, method)) def test_queue(self): import queue q = queue.Queue() q.put('thing') self.assertFalse(q.empty()) def test_reprlib(self): import reprlib def test_socketserver(self): import socketserver @unittest.skip("Not testing tkinter import (it may be installed separately from Python)") def test_tkinter(self): import tkinter def test_builtins(self): import builtins self.assertTrue(hasattr(builtins, 'tuple')) # @unittest.skip("skipping in case there's no net connection") @unittest.expectedFailure def test_urllib_request(self): import urllib.request from pprint import pprint URL = 'http://pypi.python.org/pypi/{}/json' package = 'future' r = urllib.request.urlopen(URL.format(package)) pprint(r.read().decode('utf-8')) @unittest.expectedFailure def test_html_import(self): import html import html.entities import html.parser @unittest.expectedFailure def test_http_import(self): import http import http.server import http.client import http.cookies import http.cookiejar @unittest.expectedFailure def test_urllib_imports(self): import urllib import urllib.parse import urllib.request import urllib.robotparser import urllib.error import urllib.response @unittest.expectedFailure def test_urllib_parse(self): import urllib.parse URL = 'http://pypi.python.org/test_url/spaces oh no/' assertEqual(urllib.parse.quote(URL.format(package)), 'http%3A//pypi.python.org/test_url/spaces%20oh%20no/') @unittest.expectedFailure # currently fails on Py2 def test_sys_intern(self): """ intern() has been moved to the sys module. 
""" from future import standard_library from sys import intern intern('mystring') self.assertTrue(True) def test_underscore_prefixed_modules(self): import _thread import _dummy_thread import _markupbase self.assertTrue(True) def test_reduce(self): """ reduce has been moved to the functools module """ import functools self.assertEqual(functools.reduce(lambda x, y: x+y, range(1, 6)), 15) def test_reload(self): """ reload has been moved to the imp module """ import imp imp.reload(imp) self.assertTrue(True) if __name__ == '__main__': unittest.main()
Python
0
@@ -3366,38 +3366,8 @@ ))%0A%0A - @unittest.expectedFailure%0A @@ -3386,32 +3386,32 @@ l_import(self):%0A + import h @@ -3469,16 +3469,46 @@ l.parser +%0A self.assertTrue(True) %0A%0A @u @@ -3694,16 +3694,46 @@ ookiejar +%0A self.assertTrue(True) %0A%0A @u @@ -3963,16 +3963,46 @@ response +%0A self.assertTrue(True) %0A%0A @u @@ -4148,24 +4148,24 @@ oh no/'%0A - assertEq @@ -4156,16 +4156,21 @@ +self. assertEq
b8cec88e733237b94fafb2aa978dcb6b758c954f
Add string representation of Log
irclogview/models.py
irclogview/models.py
from django.db import models from django.core.urlresolvers import reverse from picklefield.fields import PickledObjectField from . import utils class Channel(models.Model): name = models.SlugField(max_length=50, unique=True) updated = models.DateTimeField(auto_now=True) class Meta: ordering = ['name'] def __unicode__(self): return u'#%s' % self.name def get_absolute_url(self): return reverse('irclogview_channel', args=[self.name]) class Log(models.Model): channel = models.ForeignKey(Channel, db_index=True) date = models.DateField(db_index=True) mtime = models.DateTimeField() updated = models.DateTimeField(auto_now=True) content = PickledObjectField() class Meta: ordering = ['-date'] unique_together = ('channel', 'date') def get_absolute_url(self): date = self.date return reverse('irclogview_show', args=[self.channel.name, '%04d' % date.year, '%02d' % date.month, '%02d' % date.day]) def content_dict(self): colors = utils.RainbowColor() for data in self.content: item = dict(zip(['time', 'type', 'name', 'text'], data)) item['name_color'] = item['type'] == 'act' \ and 'inherit' \ or colors.get_color(item['name']) yield item
Python
0.999989
@@ -816,16 +816,155 @@ date')%0A%0A + def __unicode__(self):%0A return u'#%25s - %25s' %25 (self.channel.name,%0A self.date.strftime('%25Y-%25m-%25d'))%0A%0A def
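Decoded (%25 is an escaped percent sign), the method added to Log is:

    def __unicode__(self):
        return u'#%s - %s' % (self.channel.name,
                              self.date.strftime('%Y-%m-%d'))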
28fd64565ea2e7b1e40e88e6121936d03a77b444
Fix task set generator.
generic-8-link/generated/generate_task_set.py
generic-8-link/generated/generate_task_set.py
#!/usr/bin/python template = ''' environment {{ robot_filename: "../robot/setup.robot" environment_filename: "../environment/obstacles_{1_difficulty}.stl" max_underestimate: 20.0 }} generator {{ type: {2_generator_type} seed: {6_seed} keys: 2 keys: 3 keys: 5 keys: 7 keys: 11 keys: 13 }} index {{ type: {3_index_type} index_params {{ trees: 8 }} search_params {{ checks: 128 use_heap: false }} }} tree {{ type: {4_tree_type} attempt_connect: {5_attempt_connect} }} source {{ {source_q_string} }} destination {{ {destination_q_string} }} ''' src_q_string = { 'trivial': ''' q: 10 q: 10 q: 10 q: 10 q: 10 q: 10 q: 10 q: 10 ''', 'easy': ''' q: 10 q: 10 q: 10 q: 10 q: 10 q: 10 q: 10 q: 10 ''', 'hard': ''' q: 0 q: 0 q: 0 q: 90 q: 0 q: 0 q: 90 q: 0 ''', } dst_q_string = { 'trivial': ''' q: -10 q: -10 q: -10 q: -10 q: -10 q: -10 q: -10 q: -10 ''', 'easy': ''' q: -10 q: -10 q: -10 q: -10 q: -10 q: -10 q: -10 q: -10 ''', 'hard': ''' q: 0 q: 0 q: 0 q: -90 q: 0 q: 0 q: -90 q: 0 ''', } def parameter_provider(): for difficulty in ['trivial', 'easy', 'hard']: for generator_type in ['SIMPLE', 'HALTON']: #for index_type in ['LINEAR', 'KD_TREE', 'AUTOTUNED']: for index_type in ['KD_TREE']: for tree_type in ['BUBBLE', 'CLASSIC']: # for attempt_connect in ['true', 'false']: for attempt_connect in ['true']: for seed in range(0, 100): yield { '1_difficulty': difficulty, '2_generator_type': generator_type, '6_seed': str(400 * seed), '3_index_type': index_type, '4_tree_type': tree_type, '5_attempt_connect': attempt_connect, } counter = 0 for mapping in parameter_provider(): counter += 1 print 'At: {}'.format(counter) with open('generated_' + ('_'.join(zip(*sorted(mapping.items()))[1])) + '.task', 'w') as f: mapping['source_q_string'] = src_q_string[mapping['1_difficulty']] mapping['destination_q_string'] = dst_q_string[mapping['1_difficulty']] f.write(template.format(**mapping))
Python
0.000006
@@ -301,16 +301,38 @@ eys: 13%0A + keys: 17%0A keys: 19%0A %7D%7D%0Aindex @@ -737,33 +737,33 @@ : '''%0A q: -1 +2 0%0A q: 10%0A q: 10%0A @@ -746,33 +746,33 @@ : 20%0A q: -1 +2 0%0A q: 10%0A q: 10%0A @@ -755,33 +755,33 @@ : 20%0A q: -1 +2 0%0A q: 10%0A q: 10%0A @@ -764,33 +764,33 @@ : 20%0A q: -1 +2 0%0A q: 10%0A q: 10%0A @@ -773,33 +773,33 @@ : 20%0A q: -1 +2 0%0A q: 10%0A q: 10%0A @@ -782,33 +782,33 @@ : 20%0A q: -1 +2 0%0A q: 10%0A q: 10%0A @@ -791,33 +791,33 @@ : 20%0A q: -1 +2 0%0A q: 10%0A''',%0A @@ -796,33 +796,33 @@ %0A q: 20%0A q: -1 +2 0%0A''',%0A 'hard @@ -1039,33 +1039,33 @@ : '''%0A q: - -1 +2 0%0A q: - 10%0A q: -10%0A @@ -1048,33 +1048,33 @@ : -20%0A q: - -1 +2 0%0A q: - 10%0A q: -10%0A @@ -1057,33 +1057,33 @@ : -20%0A q: - -1 +2 0%0A q: - 10%0A q: -10%0A @@ -1066,33 +1066,33 @@ : -20%0A q: - -1 +2 0%0A q: - 10%0A q: -10%0A @@ -1075,33 +1075,33 @@ : -20%0A q: - -1 +2 0%0A q: - 10%0A q: -10%0A @@ -1084,33 +1084,33 @@ : -20%0A q: - -1 +2 0%0A q: - 10%0A q: -10%0A @@ -1093,33 +1093,33 @@ : -20%0A q: - -1 +2 0%0A q: - 10%0A''',%0A @@ -1098,33 +1098,33 @@ %0A q: -20%0A q: - -1 +2 0%0A''',%0A 'hard
b18f23adba22641910e19d056cf3e3a97477540a
update trace
geoconnect/apps/dv_notify/metadata_updater.py
geoconnect/apps/dv_notify/metadata_updater.py
from __future__ import print_function import os import json import requests # for POST if __name__=='__main__': import sys CURRENT_DIR = os.path.dirname(os.path.dirname(__file__)) sys.path.append(os.path.join(CURRENT_DIR, '../')) os.environ.setdefault("DJANGO_SETTINGS_MODULE", "geoconnect.settings.local") from geo_utils.key_checker import KeyChecker from geo_utils.json_field_reader import MessageHelperJSON from django.conf import settings from apps.dv_notify.models import DATAVERSE_REQUIRED_KEYS import logging logger = logging.getLogger('geoconnect') class MetadataUpdater: """Send a metadata update to Dataverse. Specifically, update the GIS Metadata block for a given file.""" #METADATA_UPDATE_API_PATH = 'geo-api/singlefile/update-gis-metadata/' REQUIRED_PARAM_KEYS = DATAVERSE_REQUIRED_KEYS # ['datafile_id', 'layer_name', 'layer_link', 'embed_map_link', 'worldmap_username', 'dv_session_token']#, 'bbox_min_lng', 'bbox_min_lat', 'bbox_max_lng', 'bbox_max_lat'] def __init__(self, dataverse_server_url, timeout_seconds=240, return_type_json=False): """ Use data in a python dict to POST data to the Dataverse API, specifically the GeographicMetadataUpdateForm :param dv_metadata_params: dict containing information necessary for contacting dataverse """ self.api_update_url = dataverse_server_url + settings.DATAVERSE_METADATA_UPDATE_API_PATH self.timeout_seconds = timeout_seconds self.return_type_json = return_type_json def get_result_msg(self, success=False, msg='', data_dict=None): if type(data_dict) is dict: print ('YES') d = MessageHelperJSON.get_dict_msg(success=success, msg=msg, data_dict=data_dict) else: d = MessageHelperJSON.get_dict_msg(success=success, msg=msg) if not self.return_type_json: return d return MessageHelperJSON.get_json_msg_from_dict(d) def send_info_to_dataverse(self, dv_metadata_params): """ Go through the process of sending params to dataverse :param dv_metadata_params: python dict used to POST to dataverse :returns: JSON with "success" flag and either error or data :rtype: JSON string """ logger.info('send_params_to_dataverse') print('1) send_params_to_dataverse') print (dv_metadata_params) key_check_response = KeyChecker.has_required_keys(self.REQUIRED_PARAM_KEYS, dv_metadata_params) if not key_check_response.success: logger.error(key_check_response.err_msg + ' Info not in "layer_params"') return self.get_result_msg(False, key_check_response.err_msg) print('2) passed key check') try: print ('params to send: %s' % dv_metadata_params) req = requests.post(self.api_update_url, data=json.dumps(dv_metadata_params), timeout=self.timeout_seconds) print('3) req: %s' % req) if not req.status_code == 200: logger.error('Metadata update failed. Status code: %s\nResponse:%s' % (req.status_code, req.text)) return self.get_result_msg(False, 'Sorry! The update failed.') print (req.text) #open('err.html', 'w').write(req.text) dv_response_dict = req.json() print('4) req to json') print( dv_response_dict) if dv_response_dict.get('status', False) in ('OK', 'success'): dv_response_dict.pop('status') print('4) send result') return self.get_result_msg(True, '', data_dict=dv_response_dict) elif dv_response_dict.has_key('message'): return self.get_result_msg(False, dv_response_dict['message']) return self.get_result_msg(False, 'The update failed.') except requests.exceptions.Timeout: return self.get_result_msg(False, 'This request timed out. (Time limit: %s seconds(s))' % self.timeout_seconds) #except: # return self.get_result_msg(False, 'Sorry! 
The request failed') return self.get_result_msg(False, 'The import failed for an unknown reason') @staticmethod def update_dataverse_with_metadata(worldmap_import_success_obj): if worldmap_import_success_obj is None: logger.error('worldmap_import_success_obj is None') return False params_for_dv = worldmap_import_success_obj.get_params_for_dv_update() mu = MetadataUpdater(settings.DATAVERSE_SERVER_URL) resp_dict = mu.send_info_to_dataverse(params_for_dv) print ('>>>>>>>>>',resp_dict) if resp_dict.get('success', False) is True: return True return False if __name__ == '__main__': #f2 = '../../scripts/worldmap_api/test_shps/poverty_1990_gfz.zip' from apps.worldmap_import.models import WorldMapImportSuccess if WorldMapImportSuccess.objects.count() > 0: success_obj = WorldMapImportSuccess.objects.all().order_by('-modified')[0] params = success_obj.get_params_for_dv_update() print('params to send: %s' % params) mu = MetadataUpdater(settings.DATAVERSE_SERVER_URL) print (mu.send_info_to_dataverse(params)) else: print('No WorldMapImportSuccess objects')
Python
0.000001
@@ -2895,32 +2895,91 @@ etadata_params)%0A + print ('update url: %25s' %25 self.api_update_url)%0A req @@ -3176,16 +3176,104 @@ == 200:%0A + %0A print ('request text: %25s' %25 req.text)%0A %0A
c62a658eb469e449372207f146f60375d7497f63
update dataset api
ismrmrdpy/dataset.py
ismrmrdpy/dataset.py
# Copyright (c) 2014-2015 Ghislain Antony Vaillant. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. from __future__ import absolute_import, division, print_function class Dataset(object): """ """ def __init__(self, *args, **kwargs): pass def open(self): pass def close(self): pass def read_header(self): pass def write_header(self, xmlstring): pass def append_acquisition(self, acq): pass def read_acquisition(self, index): pass def append_image(self, img): pass def read_image(self, index): pass def append_array(self, arr): pass def read_array(self, index): pass
Python
0.000001
@@ -1803,32 +1803,71 @@ x):%0A pass +%0A%0A def number_of_acquisitions(self): %0A %0A de @@ -1851,32 +1851,37 @@ (self):%0A +pass%0A %0A def append_ @@ -1957,32 +1957,65 @@ x):%0A pass +%0A%0A def number_of_images(self): %0A %0A de @@ -1999,32 +1999,37 @@ (self):%0A +pass%0A %0A def append_ @@ -2060,32 +2060,32 @@ pass%0A %0A - def read_arr @@ -2093,28 +2093,74 @@ y(self, index):%0A pass +%0A%0A def number_of_arrays(self):%0A pass
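This is a character-level diff, so it reads poorly; reconstructed by hand, its net effect is three counting stubs interleaved with the existing append/read pairs:

    def number_of_acquisitions(self):
        pass

    def number_of_images(self):
        pass

    def number_of_arrays(self):
        pass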
1bd6943acf40ea049091b5437b6ecec71c907d07
Set default logging level to error
tools/nodeset_compiler/nodeset_compiler.py
tools/nodeset_compiler/nodeset_compiler.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. ### ### Authors: ### - Chris Iatrou (ichrispa@core-vector.net) ### - Julius Pfrommer ### - Stefan Profanter (profanter@fortiss.org) ### ### This program was created for educational purposes and has been ### contributed to the open62541 project by the author. All licensing ### terms for this source is inherited by the terms and conditions ### specified for by the open62541 project (see the projects readme ### file for more information on the MPLv2 terms and restrictions). ### ### This program is not meant to be used in a production environment. The ### author is not liable for any complications arising due to the use of ### this program. ### import logging import argparse from nodeset import * from backend_open62541 import generateOpen62541Code parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('-e', '--existing', metavar="<existingNodeSetXML>", type=argparse.FileType('rb'), dest="existing", action='append', default=[], help='NodeSet XML files with nodes that are already present on the server.') parser.add_argument('-x', '--xml', metavar="<nodeSetXML>", type=argparse.FileType('rb'), action='append', dest="infiles", default=[], help='NodeSet XML files with nodes that shall be generated.') parser.add_argument('outputFile', metavar='<outputFile>', help='The path/basename for the <output file>.c and <output file>.h files to be generated. This will also be the function name used in the header and c-file.') parser.add_argument('--generate-ns0', action='store_true', dest="generate_ns0", help='Omit some consistency checks for bootstrapping namespace 0, create references to parents and type definitions manually') parser.add_argument('--internal-headers', action='store_true', dest="internal_headers", help='Include internal headers instead of amalgamated header') parser.add_argument('-b', '--blacklist', metavar="<blacklistFile>", type=argparse.FileType('r'), action='append', dest="blacklistFiles", default=[], help='Loads a list of NodeIDs stored in blacklistFile (one NodeID per line). Any of the nodeIds encountered in this file will be removed from the nodeset prior to compilation. Any references to these nodes will also be removed') parser.add_argument('-i', '--ignore', metavar="<ignoreFile>", type=argparse.FileType('r'), action='append', dest="ignoreFiles", default=[], help='Loads a list of NodeIDs stored in ignoreFile (one NodeID per line). Any of the nodeIds encountered in this file will be kept in the nodestore but not printed in the generated code') parser.add_argument('-t', '--types-array', metavar="<typesArray>", action='append', type=str, dest="typesArray", default=[], help='Types array for the given namespace. Can be used mutliple times to define (in the same order as the .xml files, first for --existing, then --xml) the type arrays') parser.add_argument('--max-string-length', type=int, dest="max_string_length", default=0, help='Maximum allowed length of a string literal. If longer, it will be set to an empty string') parser.add_argument('-v', '--verbose', action='count', help='Make the script more verbose. 
Can be applied up to 4 times') args = parser.parse_args() # Set up logging logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) verbosity = 0 if args.verbose: verbosity = int(args.verbose) if (verbosity == 1): logging.basicConfig(level=logging.ERROR) elif (verbosity == 2): logging.basicConfig(level=logging.WARNING) elif (verbosity == 3): logging.basicConfig(level=logging.INFO) elif (verbosity >= 4): logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.CRITICAL) # Create a new nodeset. The nodeset name is not significant. # Parse the XML files ns = NodeSet() nsCount = 0 def getTypesArray(nsIdx): if nsIdx < len(args.typesArray): return args.typesArray[nsIdx] else: return "UA_TYPES" for xmlfile in args.existing: logger.info("Preprocessing (existing) " + str(xmlfile.name)) ns.addNodeSet(xmlfile, True, typesArray=getTypesArray(nsCount)) nsCount +=1 for xmlfile in args.infiles: logger.info("Preprocessing " + str(xmlfile.name)) ns.addNodeSet(xmlfile, typesArray=getTypesArray(nsCount)) nsCount +=1 # # We need to notify the open62541 server of the namespaces used to be able to use i.e. ns=3 # namespaceArrayNames = preProc.getUsedNamespaceArrayNames() # for key in namespaceArrayNames: # ns.addNamespace(key, namespaceArrayNames[key]) # Remove blacklisted nodes from the nodeset # Doing this now ensures that unlinkable pointers will be cleanly removed # during sanitation. for blacklist in args.blacklistFiles: for line in blacklist.readlines(): line = line.replace(" ", "") id = line.replace("\n", "") if ns.getNodeByIDString(id) == None: logger.info("Can't blacklist node, namespace does currently not contain a node with id " + str(id)) else: ns.removeNodeById(line) blacklist.close() # Set the nodes from the ignore list to hidden. This removes them from dependency calculation # and from printing their generated code. # These nodes should be already pre-created on the server to avoid any errors during # creation. for ignoreFile in args.ignoreFiles: for line in ignoreFile.readlines(): line = line.replace(" ", "") id = line.replace("\n", "") ns.hide_node(NodeId(id)) #if not ns.hide_node(NodeId(id)): # logger.info("Can't ignore node, namespace does currently not contain a node with id " + str(id)) ignoreFile.close() # Remove nodes that are not printable or contain parsing errors, such as # unresolvable or no references or invalid NodeIDs ns.sanitize() # Parse Datatypes in order to find out what the XML keyed values actually # represent. # Ex. <rpm>123</rpm> is not encodable # only after parsing the datatypes, it is known that # rpm is encoded as a double ns.buildEncodingRules() # Allocate/Parse the data values. In order to do this, we must have run # buidEncodingRules. ns.allocateVariables() #printDependencyGraph(ns) # Create the C code with the open62541 backend of the compiler logger.info("Generating Code") generateOpen62541Code(ns, args.outputFile, args.generate_ns0, args.internal_headers, args.typesArray, args.max_string_length) logger.info("NodeSet generation code successfully printed")
Python
0.000006
@@ -4168,24 +4168,55 @@ on='count',%0A + default=1,%0A
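The diff above adds default=1 to the verbosity counter so a bare invocation lands on ERROR rather than CRITICAL. A minimal sketch of that count-to-level mapping, independent of the compiler (names here are illustrative, not the script's actual code):

import argparse
import logging

# -v may be repeated; default=1 makes ERROR the out-of-the-box level.
LEVELS = {1: logging.ERROR, 2: logging.WARNING,
          3: logging.INFO, 4: logging.DEBUG}

parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='count', default=1,
                    help='Repeat to increase verbosity (up to -vvvv)')
args = parser.parse_args()

# Counts above 4 are clamped to DEBUG.
logging.basicConfig(level=LEVELS[min(args.verbose, 4)])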
7f46a10bf1391b52b9b83171a1291a2a79bfba8a
Remove unused dependency simplejson.
tools/pdtools/pdtools/controller_client.py
tools/pdtools/pdtools/controller_client.py
import os from simplejson.scanner import JSONDecodeError from six.moves.urllib.parse import urlparse from .authenticated_client import AuthenticatedClient from .config import PdtoolsConfig from .devices.camera import Camera from .util import LoginGatherer LOCAL_DEFAULT_USERNAME = "paradrop" LOCAL_DEFAULT_PASSWORD = "" PARADROP_API_TOKEN = os.environ.get("PARADROP_API_TOKEN", None) PARADROP_CHUTE_NAME = os.environ.get("PARADROP_CHUTE_NAME", None) PDSERVER_URL = os.environ.get('PDSERVER_URL', 'https://paradrop.org') class ControllerClient(AuthenticatedClient): """ Client for interacting with a cloud controller. """ def __init__(self, host=PDSERVER_URL): super(ControllerClient, self).__init__("cloud", PDSERVER_URL) self.host = host self.base_url = host + "/api" #self.base_url = "http://{}/api".format(host) def claim_node(self, token): """ Claim ownership of a node using a claim token. """ url = self.base_url + "/routers/claim" data = { "claim_token": token } return self.request("POST", url, json=data) def create_chute(self, name, description, public=False): """ Create a new chute in the store. """ url = self.base_url + "/chutes" data = { "name": name, "description": description, "public": public } return self.request("POST", url, json=data) def create_node(self, name, orphaned=False, claim=None): """ Create a new node tracked by the controller. """ url = self.base_url + "/routers" data = { "name": name, "orphaned": orphaned } if claim is not None: data['claim'] = claim return self.request("POST", url, json=data) def create_user(self, name, email, password, password2): """ Create a new user account on the controller. """ url = self.base_url + "/users" data = { "name": name, "email": email, "password": password, "confirmPassword": password2 } return self.request("POST", url, json=data) def create_version(self, name, config): """ Create a new chute version. """ chute = self.find_chute(name) if chute is None: return None url = "{}/chutes/{}/versions".format(self.base_url, chute['_id']) data = { "chute_id": chute['_id'], "config": config } return self.request("POST", url, json=data) def delete_node(self, name): """ Delete a node tracked by the controller. """ node = self.find_node(name) if node is not None: url = "{}/routers/{}".format(self.base_url, node['_id']) return self.request("DELETE", url) else: return None def find_chute(self, name): """ Find a chute by name or id. """ # If this client object is ever used for multiple requests during its # lifetime, we could consider caching the group list locally for a # better response time. Then we need to add cache invalidation to all # of the methods that might affect the group list. chutes = self.list_chutes() for chute in chutes: if chute['_id'] == name or chute['name'] == name: return chute return None def find_group(self, name): """ Find a group by name or id. """ # If this client object is ever used for multiple requests during its # lifetime, we could consider caching the group list locally for a # better response time. Then we need to add cache invalidation to all # of the methods that might affect the group list. groups = self.list_groups() for group in groups: if group['_id'] == name or group['name'] == name: return group return None def find_node(self, name): """ Find a node by name or id. """ # If this client object is ever used for multiple requests during its # lifetime, we could consider caching the node list locally for a # better response time. 
Then we need to add cache invalidation to all # of the methods that might affect the node list. nodes = self.list_nodes() for node in nodes: if node['_id'] == name or node['name'] == name: return node return None def group_add_node(self, group_name, node_name): """ Add a node to a group. """ group = self.find_group(group_name) if group is None: raise Exception("Group was not found") node = self.find_node(node_name) if node is None: raise Exception("Node was not found") url = "{}/groups/{}/addRouter".format(self.base_url, group['_id']) data = { 'router_id': node['_id'] } return self.request("POST", url, json=data) def list_chutes(self): """ List chutes that the user owns or has access to. """ url = self.base_url + "/chutes" return self.request("GET", url) def list_groups(self): """ List groups that the user belongs to. """ url = self.base_url + "/groups" return self.request("GET", url) def list_nodes(self): """ List nodes that the user owns or has access to. """ url = self.base_url + "/routers" return self.request("GET", url) def list_versions(self, name): """ List nodes that the user owns or has access to. """ chute = self.find_chute(name) if chute is None: return [] url = "{}/chutes/{}/versions".format(self.base_url, chute['_id']) return self.request("GET", url) def save_node(self, node): """ Save changes to a node object. """ url = "{}/routers/{}".format(self.base_url, node['_id']) return self.request("PUT", url, json=node)
Python
0
@@ -7,55 +7,8 @@ os%0A -from simplejson.scanner import JSONDecodeError%0A from
b558941fce5933f2b3b145ee3bf9f7452086b8f3
Update hr_employee.py
l10n_br_hr/models/hr_employee.py
l10n_br_hr/models/hr_employee.py
# (c) 2014 Kmee - Rafael da Silva Lima <rafael.lima@kmee.com.br> # (c) 2014 Kmee - Matheus Felix <matheus.felix@kmee.com.br> # (c) 2016 KMEE Informática - Daniel Sadamo <daniel.sadamo@kmee.com.br> # License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html from datetime import datetime from erpbrasil.base.fiscal import cnpj_cpf, pis from odoo import models, fields, api, _ from odoo.tools import DEFAULT_SERVER_DATE_FORMAT from odoo.exceptions import ValidationError class HrEmployee(models.Model): _inherit = 'hr.employee' naturalidade = fields.Many2one( string='Naturalidade', comodel_name='res.city', ) def _default_country(self): return self.env['res.country'].search([('code', '=', 'BR')]) @api.constrains('dependent_ids') def _check_dependents(self): self._check_dob() self._check_dependent_type() def _check_dob(self): for dependent in self.dependent_ids: if datetime.strptime( dependent.dependent_dob, DEFAULT_SERVER_DATE_FORMAT ).date() > datetime.now().date(): raise ValidationError(_('Invalid birth date for dependent %s') % dependent.dependent_name) def _check_dependent_type(self): seen = set() restrictions = ( self.env.ref('l10n_br_hr.l10n_br_dependent_1'), self.env.ref('l10n_br_hr.l10n_br_dependent_9_1'), self.env.ref('l10n_br_hr.l10n_br_dependent_9_2') ) for dependent in self.dependent_ids: dep_type = dependent.dependent_type_id if dep_type not in seen and dep_type in restrictions: seen.add(dep_type) elif dep_type in seen and dep_type in restrictions: raise ValidationError( _('A dependent with the same level of relatedness' ' already exists for dependent %s') % dependent.dependent_name) @api.constrains('pis_pasep') def _validate_pis_pasep(self): for record in self: if record.pis_pasep and not pis.\ validar(record.pis_pasep): raise ValidationError(_('Invalid PIS/PASEP')) pis_pasep = fields.Char( string='PIS/PASEP', size=15) ctps = fields.Char( string='CTPS', help='CTPS number') ctps_series = fields.Char( string='CTPS series') ctps_date = fields.Date( string='CTPS emission date') ctps_uf_id = fields.Many2one( string='CTPS district', comodel_name='res.country.state') creservist = fields.Char( string='Military service status certificate') cresv_categ = fields.Selection( string='Military service status category', selection=[ ('1', 'First Category'), ('2', 'Second Category'), ('3', 'Third Category')], default='3') educational_attainment = fields.Many2one( string='Educational attainment', comodel_name='hr.educational.attainment', track_visibility='onchange', ) have_dependent = fields.Boolean( string='Has dependents', track_visibility='onchange') dependent_ids = fields.One2many( comodel_name='hr.employee.dependent', inverse_name='employee_id', string='Dependents') rg = fields.Char( string='RG', store=True, related='address_home_id.inscr_est', help='National ID number') cpf = fields.Char( string='CPF', store=True, related='address_home_id.cnpj_cpf') @api.onchange('cpf') def onchange_cpf(self): cpf = cnpj_cpf.formata(str(self.cpf)) if cpf: self.cpf = cpf @api.multi @api.constrains('cpf') def _check_cpf(self): for record in self: if record.cpf and not cnpj_cpf.validar(record.cpf): raise ValidationError(_("CPF Invalido!")) organ_exp = fields.Char( string='Dispatcher organ') rg_emission = fields.Date( string='Emission date') voter_title = fields.Char( string='Voter title') voter_zone = fields.Char( string='Voter zone') voter_section = fields.Char( string='Voter section') driver_license = fields.Char( string='Driver license number') driver_categ = fields.Char( string='Driver license category') 
father_name = fields.Char( string='Father name') mother_name = fields.Char( string='Mother name') expiration_date = fields.Date( string='Expiration date') ethnicity = fields.Many2one( string='Ethnicity', comodel_name='hr.ethnicity') blood_type = fields.Selection( string='Blood type', selection=[ ('a+', 'A+'), ('a-', 'A-'), ('b+', 'B+'), ('b-', 'B-'), ('o+', 'O+'), ('o-', 'O-'), ('ab+', 'AB+'), ('ab-', 'AB-'), ]) deficiency_id = fields.Many2one( string='Deficiency', comodel_name='hr.deficiency', track_visibility='onchange') deficiency_description = fields.Char( string='Deficiency description') identity_type_id = fields.Many2one( string='ID type', comodel_name='hr.identity.type') identity_validity = fields.Date( string='ID expiration date') identity_uf_id = fields.Many2one( string='ID expedition district', comodel_name='res.country.state') identity_city_id = fields.Many2one( string='ID expedition city', comodel_name='res.city', domain="[('state_id','=',identity_uf_id)]") civil_certificate_type_id = fields.Many2one( string='Civil certificate type', comodel_name='hr.civil.certificate.type') alternate_phone = fields.Char( string='Alternate phone') emergency_phone = fields.Char( string='Emergency phone') talk_to = fields.Char( string='Emergency contact name') alternate_email = fields.Char( string='Alternate email') chronic_disease_ids = fields.Many2many( string='Chronic Diseases', comodel_name='hr.chronic.disease') marital = fields.Selection( selection_add=[ ('common_law_marriage', 'Common law marriage'), ('separated', 'Separated')]) registration = fields.Char( string='Registration number') nationality_code = fields.Many2one( string='Nationality code', comodel_name='hr.nationality.code') nat_code = fields.Char( related='nationality_code.code') arrival_year = fields.Integer( string="Arrival year in Brazil") country_id = fields.Many2one( comodel_name='res.country', default=_default_country) tipo = fields.Selection( string="Tipo de Colaborador", selection=[ # S2200 ('funcionario', 'Funcionário'), # S2300 Sem vinculo ('autonomo', 'Autônomo'), ('terceirizado', 'Terceirizado'), ('cedido', 'Funcionário Cedido'), ], default='funcionario', required=True, )
Python
0.000019
@@ -532,17 +532,16 @@ loyee'%0A%0A -%0A natu
3effb540220f4ce1918d0210e882d926e268473f
Bump P4Runtime to v1.2.0
tools/build/bazel/p4lang_workspace.bzl
tools/build/bazel/p4lang_workspace.bzl
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") P4RUNTIME_VER = "1.0.0" P4RUNTIME_SHA = "667464bd369b40b58dc9552be2c84e190a160b6e77137b735bd86e5b81c6adc0" def generate_p4lang(): http_archive( name = "com_github_p4lang_p4runtime", urls = ["https://github.com/p4lang/p4runtime/archive/v%s.zip" % P4RUNTIME_VER], sha256 = P4RUNTIME_SHA, strip_prefix = "p4runtime-%s/proto" % P4RUNTIME_VER, build_file = "//tools/build/bazel:p4runtime_BUILD", )
Python
0.000001
@@ -82,17 +82,17 @@ ER = %221. -0 +2 .0%22%0AP4RU @@ -108,72 +108,72 @@ = %22 -667464bd369b40b58dc9552be2c84e190a160b6e77137b735bd86e5b81c6adc0 +0fce7e06c63e60a8cddfe56f3db3d341953560c054d4c09ffda0e84476124f5a %22%0A%0Ad
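Bumping a pinned http_archive like the one above means recomputing the sha256 of the new release artifact. A hypothetical helper for producing the new pin (the URL shape comes from the record; the function name is invented):

import hashlib
import urllib.request

def archive_sha256(url):
    # Download the release archive and hash it for the sha256 pin.
    with urllib.request.urlopen(url) as resp:
        return hashlib.sha256(resp.read()).hexdigest()

print(archive_sha256(
    'https://github.com/p4lang/p4runtime/archive/v1.2.0.zip'))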
64e902fae3117c246272cbde943d013da1345b7b
Fix RenameField alteration
gravity/migrations/0003_tiltbridge_mdns_id.py
gravity/migrations/0003_tiltbridge_mdns_id.py
# -*- coding: utf-8 -*- # Generated by Django 1.11.13 on 2019-03-18 23:46 from __future__ import unicode_literals from django.db import migrations, models import django.core.validators class Migration(migrations.Migration): dependencies = [ ('gravity', '0002_tilt_refactor'), ] operations = [ # Converting from AlterField to RemoveField/AddField because of issues with Django 2.0+ migration: # https://docs.djangoproject.com/en/3.0/releases/2.0/#foreign-key-constraints-are-now-enabled-on-sqlite migrations.RemoveField( model_name='tiltbridge', name='api_key', ), migrations.AddField( model_name='tiltbridge', name='mdns_id', field=models.CharField(help_text="mDNS ID used by the TiltBridge to identify itself both on your network and to Fermentrack. NOTE - Prefix only - do not include '.local'", max_length=64, primary_key=True, serialize=False, validators=[django.core.validators.RegexValidator(regex='^[a-zA-Z0-9]+$')]), ), migrations.RenameField( model_name='tiltbridge', old_name='api_key', new_name='mdns_id', ), migrations.AlterField( model_name='tiltbridge', name='mdns_id', field=models.CharField(default='tiltbridge', help_text="mDNS ID used by the TiltBridge to identify itself both on your network and to Fermentrack. NOTE - Prefix only - do not include '.local'", max_length=64, primary_key=True, serialize=False), preserve_default=False, ), ]
Python
0
@@ -1058,152 +1058,8 @@ ),%0A - migrations.RenameField(%0A model_name='tiltbridge',%0A old_name='api_key',%0A new_name='mdns_id',%0A ),%0A
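The removed RenameField referenced api_key, a column the same migration had already dropped, which is why the operation list was broken. The surviving pattern, RemoveField plus AddField, is the standard workaround for SQLite's foreign-key checks under Django 2.0+; a minimal sketch with invented model and field names:

from django.db import migrations, models

class Migration(migrations.Migration):
    dependencies = [('myapp', '0001_initial')]

    operations = [
        # Drop the old column outright instead of altering it in place.
        migrations.RemoveField(model_name='device', name='api_key'),
        migrations.AddField(
            model_name='device',
            name='mdns_id',
            field=models.CharField(max_length=64, primary_key=True,
                                   serialize=False, default='device'),
            preserve_default=False,
        ),
    ]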
5389663818e3bdf10556e0a8d7d4ceeb14d798d1
Exclude even more non-essential directories from the release tarball.
tools/export_tarball/export_tarball.py
tools/export_tarball/export_tarball.py
#!/usr/bin/python # Copyright (c) 2009 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ This tool creates a tarball with all the sources, but without .svn directories. It can also remove files which are not strictly required for build, so that the resulting tarball can be reasonably small (last time it was ~110 MB). Example usage: export_tarball.py /foo/bar The above will create file /foo/bar.tar.bz2. """ import optparse import os import sys import tarfile NONESSENTIAL_DIRS = ( 'chrome/common/extensions/docs', 'chrome/test/data', 'chrome/tools/test/reference_build', 'courgette/testdata', 'data', 'gears/binaries', 'native_client/src/trusted/service_runtime/testdata', 'native_client/tests', 'net/data/cache_tests', 'src/chrome/test/data', 'o3d/documentation', 'o3d/samples', 'o3d/tests', 'third_party/hunspell_dictionaries', 'third_party/lighttpd', 'third_party/scons', 'third_party/vc_80', 'third_party/WebKit/LayoutTests', 'v8/test', 'webkit/data/layout_tests', 'webkit/tools/test/reference_build', ) def GetSourceDirectory(): return os.path.realpath( os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src')) # Workaround lack of the exclude parameter in add method in python-2.4. # TODO(phajdan.jr): remove the workaround when it's not needed on the bot. class MyTarFile(tarfile.TarFile): def set_remove_nonessential_files(self, remove): self.__remove_nonessential_files = remove def add(self, name, arcname=None, recursive=True, exclude=None): head, tail = os.path.split(name) if tail in ('.svn', '.git'): return if self.__remove_nonessential_files: for nonessential_dir in NONESSENTIAL_DIRS: dir_path = os.path.join(GetSourceDirectory(), nonessential_dir) if name.startswith(dir_path): return tarfile.TarFile.add(self, name, arcname=arcname, recursive=recursive) def main(argv): parser = optparse.OptionParser() parser.add_option("--remove-nonessential-files", dest="remove_nonessential_files", action="store_true", default=False) options, args = parser.parse_args(argv) if len(args) != 1: print 'You must provide only one argument: output file name' print '(without .tar.bz2 extension).' return 1 if not os.path.exists(GetSourceDirectory()): print 'Cannot find the src directory.' return 1 output_fullname = args[0] + '.tar.bz2' output_basename = os.path.basename(args[0]) archive = MyTarFile.open(output_fullname, 'w:bz2') archive.set_remove_nonessential_files(options.remove_nonessential_files) try: archive.add(GetSourceDirectory(), arcname=output_basename) finally: archive.close() return 0 if __name__ == "__main__": sys.exit(main(sys.argv[1:]))
Python
0.000013
@@ -943,24 +943,68 @@ o3d/tests',%0A + 'third_party/angle/samples/gles2_book',%0A 'third_p @@ -1028,24 +1028,58 @@ tionaries',%0A + 'third_party/hunspell/tests',%0A 'third_p @@ -1140,13 +1140,833 @@ rty/ -vc_80 +sqlite/test',%0A 'third_party/vc_80',%0A 'third_party/xdg-utils/tests',%0A 'third_party/yasm/source/patched-yasm/modules/arch/x86/tests',%0A 'third_party/yasm/source/patched-yasm/modules/dbgfmts/dwarf2/tests',%0A 'third_party/yasm/source/patched-yasm/modules/objfmts/bin/tests',%0A 'third_party/yasm/source/patched-yasm/modules/objfmts/coff/tests',%0A 'third_party/yasm/source/patched-yasm/modules/objfmts/elf/tests',%0A 'third_party/yasm/source/patched-yasm/modules/objfmts/macho/tests',%0A 'third_party/yasm/source/patched-yasm/modules/objfmts/rdf/tests',%0A 'third_party/yasm/source/patched-yasm/modules/objfmts/win32/tests',%0A 'third_party/yasm/source/patched-yasm/modules/objfmts/win64/tests',%0A 'third_party/yasm/source/patched-yasm/modules/objfmts/xdf/tests',%0A 'third_party/WebKit/JavaScriptCore/tests ',%0A
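The script subclasses TarFile to filter during add(); on Python 2.7+/3.2+ the same pruning fits into the filter= callback without a subclass. A rough equivalent, with the directory list abbreviated:

import tarfile

NONESSENTIAL_DIRS = ('chrome/test/data', 'third_party/WebKit/LayoutTests')

def prune(tarinfo):
    # Returning None drops the member; for a directory, add() then skips
    # the whole subtree.
    parts = tarinfo.name.split('/')
    if '.svn' in parts or '.git' in parts:
        return None
    rel = '/'.join(parts[1:])  # path below the arcname prefix
    if any(rel.startswith(d) for d in NONESSENTIAL_DIRS):
        return None
    return tarinfo

with tarfile.open('chromium.tar.bz2', 'w:bz2') as archive:
    archive.add('src', arcname='chromium', filter=prune)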
86c7d73e312115da1dfea6f7b73e768de0d35519
Fix some typos
hypchat/jsonobject.py
hypchat/jsonobject.py
from __future__ import absolute_import, division import json import re _urls_to_objects = {} class Linker(object): """ Responsible for on-demand loading of JSON objects. """ url = None def __init__(self, url, parent=None, _requests=None): self.url = url self.__parent = parent self._requests = _requests or __import__('requests') @staticmethod def _obj_from_text(text, requests): """ Constructs objects (including our wrapper classes) from a JSON-formatted string """ def _object_hook(obj): if 'links' in obj: klass = JsonObject if 'self' in obj['links']: for p, c in _urls_to_objects.iteritems(): if p.match(obj['links']['self']): klass = c break rv = klass(obj) rv._requests = requests return rv else: return obj return json.JSONDecoder(object_hook=_object_hook).decode(text) def __call__(self, expand=None): """ Actually perform the request """ params = None if expand is not None: if isinstance(expand, basestring): params = {'expand': expand} else: params = {'expand': ','.join(expand)} rv = self._obj_from_text(self._requests.get(self.url, params=params).text, self._requests) rv._requests = self._requests if self.__parent is not None: rv.parent = self.__parent return rv def __repr__(self): return "<%s url=%r>" % (type(self).__name__, self.url) class JsonObject(dict): """ Nice wrapper around the JSON objects and their links. """ def __getattr__(self, name): if name in self.get('links', {}): return Linker(self['links'][name], parent=self, _requests=self._requests) elif name in self: return self[name] else: raise AttributeError("%r object has no attribute %r" % (type(self).__name__, name)) @property def url(self): return self['links']['self'] def save(self): return self._requests.put(self.url).json() def delete(self): return self._requests.delete(self.url).json() class Room(JsonObject): def message(self, *p, **kw): """ Redirects to the /notification URL. Will soon be reimplemented as a resource that posts a message to a room as a user. """ return self.notification(*p, **kw) def notification(self, message, color='yellow', notify=False, format='html'): """ Send a message to a room. """ requests.post(self.url+'/notification', data={ 'color': color, 'message': message, 'notify': notify, 'message_format': format, }) def topic(self, text): """ Set a room's topic. Useful for displaying statistics, important links, server status, you name it! """ requests.put(self.url+'/topic', data={ 'topic': text, }) def history(self, date='recent'): # TODO: If given an aware datetime, pass its timezone along raise NotImplementedError def invite(self, user, reason): requests.post(self.url+'/invite/%s' % user['id'], data={ 'reason': reason, }) _urls_to_objects[re.compile(r'https://api.hipchat.com/v2/room/[^/]+')] = Room class User(JsonObject): def message(self, message): raise NotImplementedError _urls_to_objects[re.compile(r'https://api.hipchat.com/v2/user/[^/]+')] = User class MemberCollection(JsonObject): def add(self, user): """ Adds a member to a private room. """ raise NotImplementedError def remove(self, user): """ Removes a member from a private room. """ _urls_to_objects[re.compile(r'https://api.hipchat.com/v2/room/[^/]+/member')] = MemberCollection
Python
0.999999
@@ -2273,32 +2273,38 @@ a room.%0A%09%09%22%22%22%0A%09%09 +self._ requests.post(se @@ -2330,24 +2330,24 @@ on', data=%7B%0A - %09%09%09'color': @@ -2571,16 +2571,22 @@ %09%09%22%22%22%0A%09%09 +self._ requests @@ -2801,16 +2801,22 @@ son):%0A%09%09 +self._ requests @@ -3337,21 +3337,49 @@ e room.%0A - %09%09%22%22%22 +%0A%09%09raise NotImplementedError %0A%0A_urls_
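The typos being fixed are calls to a bare requests name inside methods that should use the injected self._requests (the module import lives on the Linker/JsonObject instances). A stripped-down illustration of that injection pattern; the class and method here are invented:

class ApiObject(dict):
    def __init__(self, data, requests_module=None):
        super(ApiObject, self).__init__(data)
        # Inject the HTTP layer so tests can substitute a stub.
        self._requests = requests_module or __import__('requests')

    def set_topic(self, text):
        # Must go through the injected module, never a bare global.
        return self._requests.put(self['links']['self'] + '/topic',
                                  data={'topic': text})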
2957b4020a7c3eeeaab63d0f5088b47b9b53395c
Raise non-404 errors in generate-tempest-plugins-list.py
tools/generate-tempest-plugins-list.py
tools/generate-tempest-plugins-list.py
#! /usr/bin/env python # Copyright 2016 Hewlett Packard Enterprise Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This script is intended to be run as part of a periodic proposal bot # job in OpenStack infrastructure. # # In order to function correctly, the environment in which the # script runs must have # * network access to the review.opendev.org Gerrit API # working directory # * network access to https://opendev.org/openstack import json import re import sys try: # For Python 3.0 and later from urllib.error import HTTPError import urllib.request as urllib except ImportError: # Fall back to Python 2's urllib2 import urllib2 as urllib from urllib2 import HTTPError # List of projects having tempest plugin stale or unmaintained for a long time # (6 months or more) # TODO(masayukig): Some of these can be removed from BLACKLIST in the future # when the patches are merged. BLACKLIST = [ 'barbican-tempest-plugin', # https://review.opendev.org/#/c/634631/ 'cyborg-tempest-plugin', # https://review.opendev.org/659687 'intel-nfv-ci-tests', # https://review.opendev.org/#/c/634640/ 'networking-ansible', # https://review.opendev.org/#/c/634647/ 'networking-generic-switch', # https://review.opendev.org/#/c/634846/ 'networking-l2gw-tempest-plugin', # https://review.opendev.org/#/c/635093/ 'networking-midonet', # https://review.opendev.org/#/c/635096/ 'networking-plumgrid', # https://review.opendev.org/#/c/635096/ 'networking-spp', # https://review.opendev.org/#/c/635098/ 'neutron-dynamic-routing', # https://review.opendev.org/#/c/637718/ 'neutron-vpnaas', # https://review.opendev.org/#/c/637719/ 'nova-lxd', # https://review.opendev.org/#/c/638334/ 'valet', # https://review.opendev.org/#/c/638339/ ] url = 'https://review.opendev.org/projects/' # This is what a project looks like ''' "openstack-attic/akanda": { "id": "openstack-attic%2Fakanda", "state": "READ_ONLY" }, ''' # Rather than returning a 404 for a nonexistent file, cgit delivers a # 0-byte response to a GET request. It also does not provide a # Content-Length in a HEAD response, so the way we tell if a file exists # is to check the length of the entire GET response body. def has_tempest_plugin(proj): try: r = urllib.urlopen( "https://opendev.org/%s/raw/branch/" "master/setup.cfg" % proj) except HTTPError as err: if err.code == 404: return False p = re.compile(r'^tempest\.test_plugins', re.M) if p.findall(r.read().decode('utf-8')): return True else: False if len(sys.argv) > 1 and sys.argv[1] == 'blacklist': for black_plugin in BLACKLIST: print(black_plugin) # We just need BLACKLIST when we use this `blacklist` option. # So, this exits here. sys.exit() r = urllib.urlopen(url) # Gerrit prepends 4 garbage octets to the JSON, in order to counter # cross-site scripting attacks. Therefore we must discard it so the # json library won't choke. 
content = r.read().decode('utf-8')[4:] projects = sorted(json.loads(content)) # Retrieve projects having no deployment tool repo (such as deb, # puppet, ansible, etc.), infra repos, ui or spec namespace as those # namespaces do not contains tempest plugins. projects_list = [i for i in projects if not ( i.startswith('openstack-dev/') or i.startswith('openstack-infra/') or i.startswith('openstack/ansible-') or i.startswith('openstack/charm-') or i.startswith('openstack/cookbook-openstack-') or i.startswith('openstack/devstack-') or i.startswith('openstack/fuel-') or i.startswith('openstack/deb-') or i.startswith('openstack/puppet-') or i.startswith('openstack/openstack-ansible-') or i.startswith('x/deb-') or i.startswith('x/fuel-') or i.startswith('x/python-') or i.startswith('zuul/') or i.endswith('-ui') or i.endswith('-specs'))] found_plugins = list(filter(has_tempest_plugin, projects_list)) # We have tempest plugins not only in 'openstack/' namespace but also the # other name spaces such as 'airship/', 'x/', etc. # So, we print all of them here. for project in found_plugins: print(project)
Python
0.000007
@@ -3030,16 +3030,81 @@ n False%0A + # We should not ignore non 404 errors.%0A raise err%0A p =
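The shape of the fix: treat 404 as an answer (no setup.cfg, hence no plugin) and let every other HTTP failure propagate instead of silently reporting "no plugin". A hedged Python 3 sketch:

from urllib.error import HTTPError
from urllib.request import urlopen

def fetch_setup_cfg(project):
    try:
        return urlopen('https://opendev.org/%s/raw/branch/master/setup.cfg'
                       % project).read().decode('utf-8')
    except HTTPError as err:
        if err.code == 404:
            return None  # expected: the project simply has no setup.cfg
        raise            # 500s, rate limits, etc. must surface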
fc4aada050fd995ecf5375871fa1e6ed1884293f
fix hail-apiserver.py module path (#4850)
hail/python/hail-apiserver/hail-apiserver.py
hail/python/hail-apiserver/hail-apiserver.py
import hail as hl from hail.utils.java import Env, info import logging import flask hl.init() app = flask.Flask('hail-apiserver') @app.route('/execute', methods=['POST']) def execute(): code = flask.request.json info(f'execute: {code}') jir = Env.hail().expr.Parser.parse_value_ir(code, {}, {}) typ = hl.HailType._from_java(jir.typ()) value = Env.hail().expr.ir.Interpret.interpretPyIR(code, {}, {}) result = { 'type': str(typ), 'value': value } info(f'result: {result}') return flask.jsonify(result) app.run(threaded=False, host='0.0.0.0')
Python
0
@@ -279,16 +279,21 @@ ().expr. +ir.IR Parser.p
1d10582d622ce6867a85d9e4e8c279ab7e4ab5ab
Revert "Don't complain about \r when core.autocrlf is on in Git"
src/etc/tidy.py
src/etc/tidy.py
#!/usr/bin/python import sys, fileinput, subprocess err=0 cols=78 config_proc=subprocess.Popen([ "git", "config", "core.autocrlf" ], stdout=subprocess.PIPE) result=config_proc.communicate()[0] autocrlf=result.strip() == b"true" if result is not None else False def report_err(s): global err print("%s:%d: %s" % (fileinput.filename(), fileinput.filelineno(), s)) err=1 for line in fileinput.input(openhook=fileinput.hook_encoded("utf-8")): if line.find('\t') != -1 and fileinput.filename().find("Makefile") == -1: report_err("tab character") if not autocrlf and line.find('\r') != -1: report_err("CR character") line_len = len(line)-2 if autocrlf else len(line)-1 if line_len > cols: report_err("line longer than %d chars" % cols) sys.exit(err)
Python
0
@@ -37,20 +37,8 @@ nput -, subprocess %0A%0Aer @@ -54,208 +54,8 @@ 78%0A%0A -config_proc=subprocess.Popen(%5B %22git%22, %22config%22, %22core.autocrlf%22 %5D,%0A stdout=subprocess.PIPE)%0Aresult=config_proc.communicate()%5B0%5D%0Aautocrlf=result.strip() == b%22true%22 if result is not None else False%0A%0A def @@ -367,25 +367,8 @@ if -not autocrlf and line @@ -430,75 +430,22 @@ -line_len = len(line)-2 if autocrlf else len(line)-1%0A if line_len +if len(line)-1 %3E c
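For reference, the reverted detection probed git configuration through a subprocess; a standalone version of that probe (the function name is invented):

import subprocess

def git_autocrlf_enabled():
    # A missing key makes `git config` print nothing and exit nonzero,
    # which this treats as False.
    proc = subprocess.Popen(['git', 'config', 'core.autocrlf'],
                            stdout=subprocess.PIPE)
    out = proc.communicate()[0]
    return out.strip() == b'true'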
f5fa434c6e0b7ed68bf8969203c355459e319ac8
removed extraneous comma
src/data/data_pool.py
src/data/data_pool.py
# -*- coding: utf-8 -*- from __future__ import division import os, re from sentence import Sentence import logging import re from learn.partition import partition_data logging.basicConfig(filename='glm_parser.log', level=logging.DEBUG, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p') class DataPool(): """ Data object that holds all sentences (dependency trees) and provides interface for loading data from the disk and retrieving them using an index. Data are classified into sections when stored in the disk, but we do not preserve such structural information, and all sentences will be loaded and "flattened" to be held in a single list. The instance maintains a current_index variable, which is used to locate the last sentence object we have read. Calling get_next() method will increase this by 1, and calling has_next() will test this index against the total number. The value of the index is persistent during get_next() and has_next() calls, and will only be reset to initial value -1 when reset() is called (manually or during init). """ def __init__(self, section_regex='', data_path="./penn-wsj-deps/", fgen=None, config_path=None, textString=None, config_list=None): """ Initialize the Data set :param section_regex: the sections to be used. A regular expression that indicates which sections to be used e.g. (0[0-9])|(1[0-9])|(2[0-1])/.*tab :type section_regex: str :param data_path: the relative or absolute path to the 'penn-wsj-deps' folder (including "penn-wsj-deps") :type data_path: str :param config_path: the config file that describes the file format for the type of data :type config_path: str """ self.fgen = fgen self.reset_all() if textString is None: if section_regex is not '': self.data_path = data_path self.section_regex = section_regex self.config_path = config_path self.load() else: self.load_stringtext(textString,config_list) return def load_stringtext(self,textString,config_list): lines = textString.splitlines() column_list = {} for field in config_list: if not(field.isdigit()): column_list[field] = [] length = len(config_list) - 2 for line in lines: if line != '': entity = line.split() for i in range(length): if not(config_list[i].isdigit()): column_list[config_list[i]].append(str(entity[i])) else: if not(config_list[0].isdigit()) and column_list[config_list[0]] != []: sent = Sentence(column_list, config_list, self.fgen) self.data_list.append(sent) column_list = {} for field in config_list: if not (field.isdigit()): column_list[field] = [] def reset_all(self): """ Reset the index variables and the data list. Restores the instance to a state when no sentence has been read """ self.reset_index() self.data_list = [] return def reset_index(self): """ Reset the index variable to the very beginning of sentence list """ self.current_index = -1 def has_next_data(self): """ Returns True if there is still sentence not read. This call does not advence data pointer. Call to get_next_data() will do the job. :return: False if we have reaches the end of data_list True otherwise """ i = self.current_index + 1 if i >= 0 and i < len(self.data_list): return True else: return False def get_next_data(self): """ Return the next sentence object, which is previously read from disk files. 
This method does not perform index checking, so please make sure the internal index is valid by calling has_next_data(), or an exception will be raise (which would be definitely not what you want) """ if(self.has_next_data()): self.current_index += 1 # Logging how many entries we have supplied if self.current_index % 1000 == 0: logging.debug("Data finishing %.2f%% ..." % (100 * self.current_index/len(self.data_list), )) return self.data_list[self.current_index] raise IndexError("Run out of data while calling get_next_data()") def load(self): """ For each section in the initializer, iterate through all files under that section directory, and load the content of each individual file into the class instance. This method should be called after section regex has been initalized and before any get_data method is called. """ logging.debug("Loading data...") output_path = partition_data(self.data_path, self.section_regex, 1,) for dirName, subdirList, fileList in os.walk(output_path): for file_name in fileList: file_path = "%s/%s" % ( str(dirName), str(file_name) ) self.data_list += self.get_data_list(file_path) return def get_data_list(self, file_path): """ Form the DependencyTree list from the specified file. :param file_path: the path to the data file :type file_path: str :return: a list of DependencyTree in the specified file :rtype: list(Sentence) """ fconfig = open(self.config_path) field_name_list = [] for line in fconfig: field_name_list.append(line.strip()) fconfig.close() f = open(file_path) data_list = [] column_list = {} for field in field_name_list: if not(field.isdigit()): column_list[field] = [] length = len(field_name_list) - 2 for line in f: line = line[:-1] if line != '': entity = line.split() for i in range(length): if not(field_name_list[i].isdigit()): column_list[field_name_list[i]].append(entity[i]) else: # Prevent any non-mature (i.e. trivial) sentence structure if not(field_name_list[0].isdigit()) and column_list[field_name_list[0]] != []: # Add "ROOT" for word and pos here sent = Sentence(column_list, field_name_list, self.fgen) data_list.append(sent) column_list = {} for field in field_name_list: if not (field.isdigit()): column_list[field] = [] f.close() return data_list def get_sent_num(self): return len(self.data_list)
Python
0.999635
@@ -5385,17 +5385,16 @@ regex, 1 -, )%0A%09for d
9dcf5e0b30141641a0e182257b34720bcf07d730
Fix typo in S3_Bucket_With_Versioning_And_Lifecycle_Rules.py (#693)
examples/S3_Bucket_With_Versioning_And_Lifecycle_Rules.py
examples/S3_Bucket_With_Versioning_And_Lifecycle_Rules.py
# Converted from S3_Bucket.template located at: # http://aws.amazon.com/cloudformation/aws-cloudformation-templates/ from troposphere import Output, Ref, Template from troposphere.s3 import Bucket, PublicRead, VersioningConfiguration, \ LifecycleConfiguration, LifecycleRule, NoncurrentVersionTransition, \ LifecycleRuleTransition t = Template() t.add_description( "AWS CloudFormation Sample Template S3_Bucket: Sample template showing :" "How to create a publicly accessible S3 bucket. " "How to enable bucket object versions. " "How to archive and delete current objects. " "How to archive and delete non current (versioned) objects. " "**WARNING** This template creates an Amazon S3 Bucket. " "You will be billed for the AWS resources used if you create " "a stack from this template.") s3bucket = t.add_resource(Bucket( "S3Bucket", # Make public Read AccessControl=PublicRead, # Turn on Versioning to the whole S3 Bucket VersioningConfiguration=VersioningConfiguration( Status="Enabled", ), # Attach a LifeCycle Confiragtion LifecycleConfiguration=LifecycleConfiguration(Rules=[ # Add a rule to LifecycleRule( # Rule attributes Id="S3BucketRule001", Prefix="/only-this-sub-dir", Status="Enabled", # Applies to current objects ExpirationInDays=3650, Transitions=[ LifecycleRuleTransition( StorageClass="STANDARD_IA", TransitionInDays=60, ), ], # Applies to Non Current objects NoncurrentVersionExpirationInDays=365, NoncurrentVersionTransitions=[ NoncurrentVersionTransition( StorageClass="STANDARD_IA", TransitionInDays=30, ), NoncurrentVersionTransition( StorageClass="GLACIER", TransitionInDays=120, ), ], ), ]), )) t.add_output(Output( "BucketName", Value=Ref(s3bucket), Description="Name of S3 bucket to hold website content" )) print(t.to_json())
Python
0.998464
@@ -1094,19 +1094,20 @@ le Confi +gu ra -g tion%0A%0A
a378649f85f0bc55060ad0238e426f587bc2ff1a
Send location only when printing exception (Avoid leaking ID/UUID)
core/exceptions.py
core/exceptions.py
""" exceptions - Core exceptions """ class InvalidMembership(Exception): """ The membership provided is not valid """ pass class SourceNotFound(Exception): """ InstanceSource doesn't have an associated source. """ pass class RequestLimitExceeded(Exception): """ A limit was exceeded for the specific request """ pass class ProviderLimitExceeded(Exception): """ A limit was exceeded for the specific provider """ pass class ProviderNotActive(Exception): """ The provider that was requested is not active """ def __init__(self, provider, *args, **kwargs): self.message = "Cannot create driver on an inactive provider:%s" \ % (provider,) pass
Python
0
@@ -707,16 +707,17 @@ rovider: + %25s%22 %5C%0A @@ -748,16 +748,25 @@ provider +.location ,)%0A p
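The change swaps the interpolated value from the provider object to its location attribute so the rendered message cannot leak an ID/UUID. The same pattern in isolation, assuming a provider object that carries a non-sensitive location field:

class ProviderNotActive(Exception):
    """The provider that was requested is not active."""

    def __init__(self, provider, *args, **kwargs):
        # Expose only the location, not repr(provider) with its UUID.
        self.message = ('Cannot create driver on an inactive provider: %s'
                        % (provider.location,))
        super(ProviderNotActive, self).__init__(self.message, *args, **kwargs)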
1d5b3322f8f2f18640e640d7f015814df4274a89
Upgrade default go to 1.8.3. (#4799)
contrib/go/src/python/pants/contrib/go/subsystems/go_distribution.py
contrib/go/src/python/pants/contrib/go/subsystems/go_distribution.py
# coding=utf-8 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import os import subprocess from collections import OrderedDict, namedtuple from pants.base.workunit import WorkUnit, WorkUnitLabel from pants.binaries.binary_util import BinaryUtil from pants.fs.archive import TGZ from pants.subsystem.subsystem import Subsystem from pants.util.contextutil import temporary_dir from pants.util.memo import memoized_property class GoDistribution(object): """Represents a self-bootstrapping Go distribution.""" class Factory(Subsystem): options_scope = 'go-distribution' @classmethod def subsystem_dependencies(cls): return (BinaryUtil.Factory,) @classmethod def register_options(cls, register): register('--supportdir', advanced=True, default='bin/go', help='Find the go distributions under this dir. Used as part of the path to lookup ' 'the distribution with --binary-util-baseurls and --pants-bootstrapdir') register('--version', advanced=True, default='1.8', fingerprint=True, help='Go distribution version. Used as part of the path to lookup the distribution ' 'with --binary-util-baseurls and --pants-bootstrapdir') def create(self): # NB: create is an instance method to allow the user to choose global or scoped. # It's not unreasonable to imagine multiple go versions in play; for example: when # transitioning from the 1.x series to the 2.x series. binary_util = BinaryUtil.Factory.create() options = self.get_options() return GoDistribution(binary_util, options.supportdir, options.version) def __init__(self, binary_util, relpath, version): self._binary_util = binary_util self._relpath = relpath self._version = version @property def version(self): """Returns the version of the Go distribution. :returns: The Go distribution version number string. :rtype: string """ return self._version @memoized_property def goroot(self): """Returns the $GOROOT for this go distribution. :returns: The Go distribution $GOROOT. :rtype: string """ go_distribution = self._binary_util.select_binary(self._relpath, self.version, 'go.tar.gz') distribution_workdir = os.path.dirname(go_distribution) outdir = os.path.join(distribution_workdir, 'unpacked') if not os.path.exists(outdir): with temporary_dir(root_dir=distribution_workdir) as tmp_dist: TGZ.extract(go_distribution, tmp_dist) os.rename(tmp_dist, outdir) return os.path.join(outdir, 'go') def go_env(self, gopath=None): """Return an env dict that represents a proper Go environment mapping for this distribution.""" # Forcibly nullify the GOPATH if the command does not need one - this can prevent bad user # GOPATHs from erroring out commands; see: https://github.com/pantsbuild/pants/issues/2321. # NB: As of go 1.8, when GOPATH is unset (set to ''), it defaults to ~/go (assuming HOME is # set - and we can't unset that since it might legitimately be used by the subcommand); so we # set the GOPATH here to a valid value that nonetheless will fail to work if GOPATH is # actually used by the subcommand. 
no_gopath = os.devnull return OrderedDict(GOROOT=self.goroot, GOPATH=gopath or no_gopath) class GoCommand(namedtuple('GoCommand', ['cmdline', 'env'])): """Encapsulates a go command that can be executed.""" @classmethod def _create(cls, goroot, cmd, go_env, args=None): return cls([os.path.join(goroot, 'bin', 'go'), cmd] + (args or []), env=go_env) def spawn(self, env=None, **kwargs): """ :param dict env: A custom environment to launch the Go command in. If `None` the current environment is used. :param **kwargs: Keyword arguments to pass through to `subprocess.Popen`. :returns: A handle to the spawned go command subprocess. :rtype: :class:`subprocess.Popen` """ env = (env or os.environ).copy() env.update(self.env) return subprocess.Popen(self.cmdline, env=env, **kwargs) def check_output(self, env=None, **kwargs): """Returns the output of the executed Go command. :param dict env: A custom environment to launch the Go command in. If `None` the current environment is used. :param **kwargs: Keyword arguments to pass through to `subprocess.check_output`. :return str: Output of Go command. :raises subprocess.CalledProcessError: Raises if Go command fails. """ env = (env or os.environ).copy() env.update(self.env) return subprocess.check_output(self.cmdline, env=env, **kwargs) def __str__(self): return (' '.join('{}={}'.format(k, v) for k, v in self.env.items()) + ' ' + ' '.join(self.cmdline)) def create_go_cmd(self, cmd, gopath=None, args=None): """Creates a Go command that is optionally targeted to a Go workspace. :param string cmd: Go command to execute, e.g. 'test' for `go test` :param string gopath: An optional $GOPATH which points to a valid Go workspace from which to run the command. :param list args: A list of arguments and flags to pass to the Go command. :returns: A go command that can be executed later. :rtype: :class:`GoDistribution.GoCommand` """ return self.GoCommand._create(self.goroot, cmd, go_env=self.go_env(gopath=gopath), args=args) def execute_go_cmd(self, cmd, gopath=None, args=None, env=None, workunit_factory=None, workunit_name=None, workunit_labels=None, **kwargs): """Runs a Go command that is optionally targeted to a Go workspace. If a `workunit_factory` is supplied the command will run in a work unit context. :param string cmd: Go command to execute, e.g. 'test' for `go test` :param string gopath: An optional $GOPATH which points to a valid Go workspace from which to run the command. :param list args: An optional list of arguments and flags to pass to the Go command. :param dict env: A custom environment to launch the Go command in. If `None` the current environment is used. :param workunit_factory: An optional callable that can produce a `WorkUnit` context :param string workunit_name: An optional name for the work unit; defaults to the `cmd` :param list workunit_labels: An optional sequence of labels for the work unit. :param **kwargs: Keyword arguments to pass through to `subprocess.Popen`. :returns: A tuple of the exit code and the go command that was run. 
:rtype: (int, :class:`GoDistribution.GoCommand`) """ go_cmd = self.GoCommand._create(self.goroot, cmd, go_env=self.go_env(gopath=gopath), args=args) if workunit_factory is None: return go_cmd.spawn(**kwargs).wait() else: name = workunit_name or cmd labels = [WorkUnitLabel.TOOL] + (workunit_labels or []) with workunit_factory(name=name, labels=labels, cmd=str(go_cmd)) as workunit: process = go_cmd.spawn(env=env, stdout=workunit.output('stdout'), stderr=workunit.output('stderr'), **kwargs) returncode = process.wait() workunit.set_outcome(WorkUnit.SUCCESS if returncode == 0 else WorkUnit.FAILURE) return returncode, go_cmd
Python
0
@@ -1270,16 +1270,18 @@ ult='1.8 +.3 ', finge
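Beyond the version bump, the record's go_env/GoCommand machinery shows a reusable trick: copy the caller's environment and overlay only the variables under your control, pointing GOPATH at os.devnull when no workspace should leak in. A hedged standalone sketch (run_go is an invented helper):

import os
import subprocess

def run_go(cmd_args, goroot, gopath=None):
    env = os.environ.copy()
    # os.devnull as GOPATH makes any accidental workspace use fail loudly.
    env.update(GOROOT=goroot, GOPATH=gopath or os.devnull)
    go = os.path.join(goroot, 'bin', 'go')
    return subprocess.check_output([go] + cmd_args, env=env)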
93323426c22a08965544b19c818e53c8f2b29e8c
clean select_channel widget
ldk/gui/select_channel_widget.py
ldk/gui/select_channel_widget.py
# -*- coding: utf-8 -*- from pyqtgraph.Qt import QtGui, QtCore class SelectChannelWidget(QtGui.QWidget): def __init__(self, plot_widget): super(SelectChannelWidget, self).__init__() self.plot_widget = plot_widget self.layout = QtGui.QGridLayout() self.adc_checkbox = [] for i in range(2): self.adc_checkbox.append(QtGui.QCheckBox('ADC '+str(i+1), self)) self.adc_checkbox[i].setCheckState(QtCore.Qt.Checked) self.layout.addWidget(self.adc_checkbox[i],0,i,QtCore.Qt.AlignCenter) self.dac_checkbox = [] for i in range(2): self.dac_checkbox.append(QtGui.QCheckBox('DAC '+str(i+1), self)) self.dac_checkbox[i].setCheckState(QtCore.Qt.Unchecked) self.layout.addWidget(self.dac_checkbox[i],1,i,QtCore.Qt.AlignCenter) # Connections self.adc_checkbox[0].stateChanged.connect(lambda: self.show_adc(0)) self.adc_checkbox[1].stateChanged.connect(lambda: self.show_adc(1)) self.dac_checkbox[0].stateChanged.connect(lambda: self.show_dac(0)) self.dac_checkbox[1].stateChanged.connect(lambda: self.show_dac(1)) def show_adc(self, index): if self.adc_checkbox[index].isChecked(): self.plot_widget.show_adc[index] = True else: self.plot_widget.show_adc[index] = False self.plot_widget.dataItem[index].setVisible(self.plot_widget.show_adc[index]) self.plot_widget.enableAutoRange() def show_dac(self, index): if self.dac_checkbox[index].isChecked(): self.plot_widget.show_dac[index] = True else: self.plot_widget.show_dac[index] = False self.plot_widget.dataItem[2+index].setVisible(self.plot_widget.show_dac[index]) self.plot_widget.enableAutoRange() def uncheck_all(self): for i in range(2): self.adc_checkbox[i].setCheckState(QtCore.Qt.Unchecked) self.dac_checkbox[i].setCheckState(QtCore.Qt.Unchecked)
Python
0
@@ -340,661 +340,208 @@ -for i in range(2):%0A self.adc_checkbox.append(QtGui.QCheckBox('ADC '+str(i+1), self))%0A self.adc_checkbox%5Bi%5D.setCheckState(QtCore.Qt.Checked)%0A self.layout.addWidget(self.adc_checkbox%5Bi%5D,0,i,QtCore.Qt.AlignCenter)%0A %0A self.dac_checkbox = %5B%5D%0A for i in range(2):%0A self.dac_checkbox.append(QtGui.QCheckBox('DAC '+str(i+1), self))%0A self.dac_checkbox%5Bi%5D.setCheckState(QtCore.Qt.Unchecked)%0A self.layout.addWidget(self.dac_checkbox%5Bi%5D,1,i,QtCore.Qt.AlignCenter)%0A %0A # Connections%0A self.adc_checkbox%5B0%5D.stateChanged.connect(lambda: self.show_adc(0))%0A +self.add_checkbox(self.adc_checkbox, 0, 'ADC')%0A%0A self.dac_checkbox = %5B%5D%0A self.add_checkbox(self.dac_checkbox, 1, 'DAC')%0A %0A # Connections%0A for i in range(2):%0A @@ -554,33 +554,33 @@ lf.adc_checkbox%5B -1 +i %5D.stateChanged.c @@ -612,14 +612,16 @@ adc( -1 +i )) +%0A -%0A @@ -642,17 +642,17 @@ heckbox%5B -0 +i %5D.stateC @@ -692,28 +692,28 @@ dac( -0 +i ))%0A +%0A -self.dac +def add _che @@ -721,66 +721,73 @@ kbox -%5B1%5D.stateChanged.connect(lambda: self.show_dac(1)) +(self, checkbox, y_pos, text):%0A for i in range(2): %0A %0A @@ -786,163 +786,241 @@ -%0A -def show_adc(self, index):%0A if self.adc_checkbox%5Bindex%5D.isChecked(): %0A self.plot_widget.show_adc%5Bindex%5D = True%0A else:%0A + checkbox.append(QtGui.QCheckBox(text +' '+str(i+1), self))%0A checkbox%5Bi%5D.setCheckState(QtCore.Qt.Checked)%0A self.layout.addWidget(checkbox%5Bi%5D, y_pos, i, QtCore.Qt.AlignCenter)%0A%0A def show_adc(self, index):%0A @@ -1050,37 +1050,68 @@ ow_adc%5Bindex%5D = -False +self.adc_checkbox%5Bindex%5D.isChecked() %0A self.pl @@ -1276,171 +1276,79 @@ -if self.dac_checkbox%5Bindex%5D.isChecked(): %0A self.plot_widget.show_dac%5Bindex%5D = True%0A else:%0A self.plot_widget.show_dac%5Bindex%5D = False +self.plot_widget.show_dac%5Bindex%5D = self.dac_checkbox%5Bindex%5D.isChecked() %0A @@ -1658,28 +1658,29 @@ te(QtCore.Qt.Unchecked)%0A +%0A
0f4e977f18dc1e3b9bbe2f25c3c326ac769fecbd
order size to have thumbnail in first
insight/api/async.py
insight/api/async.py
# -*- coding: utf-8 -*- """Async API view""" from flask import abort, request from redis import StrictRedis import json from insight.api.config import INSIGHT_ENGINES try: import settings except ImportError: settings = None REDIS_QUEUE_KEY = getattr(settings, 'REDIS_QUEUE_KEY', 'insight') REDIS_HOST = getattr(settings, 'REDIS_HOST', 'localhost') REDIS_PORT = getattr(settings, 'REDIS_PORT', 6379) REDIS_DB = getattr(settings, 'REDIS_PORT', 0) redis = StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB) def view(engine): """Get job parameters and add it to a redis queue""" params = {'url': request.args.get('url', None), 'engine': engine, 'callback': request.args.get('callback', None)} # Get URL if params['url']: if params['url'].startswith('/'): params['url'] = '%s%s' % (request.host_url, url[1:]) else: abort(404) # Get Engine if engine not in INSIGHT_ENGINES: abort(400, '%s engine is not installed on this server' % engine) # Process sizes widths = [int(x) for x in request.args.getlist('width')] heights = [int(y) for y in request.args.getlist('height')] nb_width = len(widths) nb_height = len(heights) if nb_width == 0 and nb_height == 0: abort(400, u'You must set either width or height') if nb_width == 0: widths = heights nb_width = nb_height if nb_height == 0: heights = widths nb_height = nb_width if nb_width == nb_height: sizes = zip(widths, heights) elif nb_width == 1: if nb_height > 1: sizes = zip(widths*nb_height, heights) else: sizes = zip(widths, heights) elif nb_height == 1: if nb_width > 1: sizes = zip(widths, heights*nb_width) else: sizes = zip(widths, heights) else: abort(400, u'Number of widths and heights should be the same') # Max number of pages to compile try: params['max_previews'] = int(request.args.get('pages', 20)) except: params['max_previews'] = 20 params['sizes'] = sizes message = json.dumps(params) redis.rpush(REDIS_QUEUE_KEY, message) return "Job added to queue"
Python
0
@@ -2150,21 +2150,29 @@ zes'%5D = +sorted( sizes +) %0A %0A
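The whole fix is wrapping sizes in sorted(): tuples compare element-wise, so the smallest (width, height) pair, i.e. the thumbnail, is queued first. Illustration:

sizes = [(1024, 768), (120, 90), (640, 480)]
# Tuples sort element-wise, so the smallest width comes first.
assert sorted(sizes) == [(120, 90), (640, 480), (1024, 768)]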
68d54aec7a44aeae72f63155e0e0e11e6dd658a1
Add test for invalid option
tests/chainer_tests/links_tests/loss_tests/test_negative_sampling.py
tests/chainer_tests/links_tests/loss_tests/test_negative_sampling.py
import unittest import numpy import chainer from chainer import cuda from chainer.functions.loss import negative_sampling from chainer import gradient_check from chainer import links from chainer import testing from chainer.testing import attr from chainer.testing import condition @testing.parameterize(*testing.product({ 't': [[0, 2], [-1, 1, 2]], 'reduce': ['sum', 'no'], })) class TestNegativeSampling(unittest.TestCase): in_size = 3 sample_size = 2 def setUp(self): batch = len(self.t) x_shape = (batch, self.in_size) self.link = links.NegativeSampling( self.in_size, [10, 5, 2, 5, 2], self.sample_size) self.link.cleargrads() self.x = numpy.random.uniform(-1, 1, x_shape).astype(numpy.float32) self.t = numpy.array(self.t).astype(numpy.int32) if self.reduce == 'no': g_shape = self.t.shape elif self.reduce == 'sum': g_shape = () self.gy = numpy.random.uniform(-1, 1, g_shape).astype(numpy.float32) def check_forward(self, x_data, t_data): x = chainer.Variable(x_data) t = chainer.Variable(t_data) y = self.link(x, t, reduce=self.reduce) self.assertEqual(y.shape, self.gy.shape) W = cuda.to_cpu(self.link.W.data) samples = cuda.to_cpu(y.creator.samples) loss = numpy.empty((len(self.x),), numpy.float32) for i in range(len(self.x)): ix = self.x[i] it = self.t[i] if it == -1: loss[i] = 0 else: w = W[samples[i]] f = w.dot(ix) # first one is positive example f[0] *= -1 loss[i] = numpy.logaddexp(f, 0).sum() if self.reduce == 'sum': loss = loss.sum() testing.assert_allclose(y.data, loss) def test_forward_cpu(self): self.check_forward(self.x, self.t) @attr.gpu def test_forward_gpu(self): self.link.to_gpu() self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t)) def check_backward(self, x_data, t_data, w_data, sample, y_grad): t = chainer.Variable(t_data) # `__call__` method of `NegativeSampling` link cannot be tested with # `check_backward` because the link makes different samples on each # call. ns = negative_sampling.NegativeSamplingFunction( sample, self.link.sample_size, self.reduce) def f(x, w): return ns(x, t, w) gradient_check.check_backward( f, (x_data, w_data), y_grad, eps=1e-2, atol=1e-4, rtol=1e-4) def test_backward_cpu(self): self.check_backward( self.x, self.t, self.link.W.data, self.link.sampler.sample, self.gy) @attr.gpu def test_backward_gpu(self): self.link.to_gpu() self.check_backward( cuda.to_gpu(self.x), cuda.to_gpu(self.t), self.link.W.data, self.link.sampler.sample, cuda.to_gpu(self.gy)) @attr.gpu def test_to_cpu(self): self.link.to_gpu() self.assertTrue(self.link.sampler.use_gpu) self.link.to_cpu() self.assertFalse(self.link.sampler.use_gpu) testing.run_module(__name__, __file__)
Python
0.000004
@@ -3223,16 +3223,722 @@ _gpu)%0A%0A%0A +class TestNegativeSamplingInvalidReductionOption(unittest.TestCase):%0A%0A def setUp(self):%0A self.link = links.NegativeSampling(3, %5B10, 5, 2, 5, 2%5D, 3)%0A self.x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)%0A self.t = numpy.random.randint(0, 2, (2,)).astype(numpy.int32)%0A%0A def check_invalid_option(self, xp):%0A x = xp.asarray(self.x)%0A t = xp.asarray(self.t)%0A%0A with self.assertRaises(ValueError):%0A self.link(x, t, 'invalid_option')%0A%0A def test_invalid_option_cpu(self):%0A self.check_invalid_option(numpy)%0A%0A @attr.gpu%0A def test_invalid_option_gpu(self):%0A self.link.to_gpu()%0A self.check_invalid_option(cuda.cupy)%0A%0A%0A testing.
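The added test follows the usual shape for option validation: pass a bad value and assert the specific exception type. A generic sketch with placeholder names (not chainer's actual API):

import unittest

def loss(x, t, reduce='sum'):
    if reduce not in ('sum', 'no'):
        raise ValueError("only 'sum' and 'no' are valid for reduce, but "
                         "'%s' is given" % reduce)
    return 0.0

class TestInvalidOption(unittest.TestCase):
    def test_invalid_reduce(self):
        with self.assertRaises(ValueError):
            loss([1.0], [0], 'invalid_option')

if __name__ == '__main__':
    unittest.main()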
ea545f205fd495f996b90910857af6e87da14272
update adapter to use new scheme
courses/adapter.py
courses/adapter.py
from courses.models import Semester, Department, Course from ccxp.fetch import Browser def update_departments(browser=None): if browser is None: browser = Browser() new = update = 0 for department in Browser().get_departments(): if Department.objects.filter(abbr=department['abbr']).exists(): dbdep = Department.objects.get(abbr=department['abbr']) dbdep.name_zh = department['name_zh'] dbdep.name_en = department['name_en'] update += 1 else: Department.objects.create(**department) new += 1 print(new, 'departments created,', update, 'updated.') return browser def update_semester(browser=None, semester_code=None): if browser is None: browser = Browser() print(browser.get_captcha_url()) browser.set_captcha(input('Input captcha from above url: ')) if semester_code is not None: browser.set_semester(semester_code) browser_semester = browser.get_current_semester() print(browser_semester) departments = dict() courses = dict() for department in Department.objects.all(): cbd = browser.get_courses_by_department(department.abbr) departments[department.abbr] = [c['no'] for c in cbd] courses.update((c['no'], c) for c in cbd) print( 'Collecting courses from', format(department.abbr, '4'), '...', len(courses), end='\r') print() semester = Semester.objects.create(**browser_semester) try: for n, course in enumerate(courses.values()): semester.course_set.create(**course) print('Updating courses', '...', n, end='\r') print() for n, (department, course_nos) in enumerate(departments.items()): courses = semester.course_set.filter(no__in=course_nos) ThroughModel = Course.departments.through ThroughModel.objects.bulk_create( ThroughModel( department=Department.objects.get(abbr=department), course=course, ) for course in courses ) print('Updating department data', '...', n, end='\r') print() semester.ready = True semester.save() except: semester.delete() raise else: Semester.objects.filter( value=semester.value).exclude( pk=semester.pk).delete() return browser
Python
0
@@ -74,32 +74,227 @@ mport Browser%0A%0A%0A +def get_browser(browser=None):%0A if browser is None:%0A browser = Browser()%0A print(browser.get_captcha_url())%0A browser.set_captcha(input('Input captcha from above url: '))%0A%0A%0A def update_depar @@ -410,25 +410,23 @@ ment in -B +b rowser -() .get_dep @@ -847,35 +847,16 @@ ated.')%0A - return browser%0A %0A%0Adef up @@ -868,16 +868,17 @@ semester +s (browser @@ -886,28 +886,8 @@ None -, semester_code=None ):%0A @@ -945,112 +945,617 @@ - print(browser.get_captcha_url())%0A browser.set_captcha(input('Input captcha from above url: ') +new = update = 0%0A for semester in browser.get_semesters():%0A if Semester.objects.filter(value=semester%5B'value'%5D).exists():%0A dbsem = Semester.objects.get(value=semester.pop('value'))%0A for key, value in semester.items():%0A setattr(dbsem, key, value)%0A update += 1%0A else:%0A Semester.objects.create(**semester)%0A new += 1%0A print(new, 'semesters created,', update, 'updated.')%0A%0A%0Adef update_semester(browser=None, semester_code=None):%0A browser = get_browser(browser)%0A update_departments(browser)%0A update_semesters(browser )%0A @@ -1708,24 +1708,81 @@ r_semester)%0A + semester = Semester.objects.get(value=semester_code)%0A departme @@ -2223,36 +2223,42 @@ semester +_entry = -S +s emester. objects.crea @@ -2249,41 +2249,33 @@ ter. -objects.create(**browser_semester +semesterentry_set.create( )%0A @@ -2347,32 +2347,38 @@ semester +_entry .course_set.crea @@ -2569,16 +2569,22 @@ semester +_entry .course_ @@ -3018,16 +3018,22 @@ semester +_entry .ready = @@ -3054,16 +3054,22 @@ semester +_entry .save()%0A @@ -3096,16 +3096,22 @@ semester +_entry .delete( @@ -3140,33 +3140,33 @@ se:%0A -S +s emester. objects.filt @@ -3145,39 +3145,49 @@ semester. -objects +semesterentry_set .filter(%0A @@ -3191,21 +3191,24 @@ -value +semester =semeste @@ -3208,22 +3208,16 @@ semester -.value ).exclud @@ -3242,16 +3242,22 @@ semester +_entry .pk).del
044edd3d9e6d14b8394a881fdb6389923e73545a
Add back system_health_info to the base of lovelace (#43382)
homeassistant/components/lovelace/__init__.py
homeassistant/components/lovelace/__init__.py
"""Support for the Lovelace UI.""" import logging import voluptuous as vol from homeassistant.components import frontend from homeassistant.config import async_hass_config_yaml, async_process_component_config from homeassistant.const import CONF_FILENAME from homeassistant.core import callback from homeassistant.exceptions import HomeAssistantError from homeassistant.helpers import collection, config_validation as cv from homeassistant.helpers.service import async_register_admin_service from homeassistant.helpers.typing import ConfigType, HomeAssistantType, ServiceCallType from homeassistant.loader import async_get_integration from homeassistant.util import sanitize_path from . import dashboard, resources, websocket from .const import ( CONF_ICON, CONF_MODE, CONF_REQUIRE_ADMIN, CONF_RESOURCES, CONF_SHOW_IN_SIDEBAR, CONF_TITLE, CONF_URL_PATH, DASHBOARD_BASE_CREATE_FIELDS, DEFAULT_ICON, DOMAIN, MODE_STORAGE, MODE_YAML, RESOURCE_CREATE_FIELDS, RESOURCE_RELOAD_SERVICE_SCHEMA, RESOURCE_SCHEMA, RESOURCE_UPDATE_FIELDS, SERVICE_RELOAD_RESOURCES, STORAGE_DASHBOARD_CREATE_FIELDS, STORAGE_DASHBOARD_UPDATE_FIELDS, url_slug, ) _LOGGER = logging.getLogger(__name__) CONF_DASHBOARDS = "dashboards" YAML_DASHBOARD_SCHEMA = vol.Schema( { **DASHBOARD_BASE_CREATE_FIELDS, vol.Required(CONF_MODE): MODE_YAML, vol.Required(CONF_FILENAME): vol.All(cv.string, sanitize_path), } ) CONFIG_SCHEMA = vol.Schema( { vol.Optional(DOMAIN, default={}): vol.Schema( { vol.Optional(CONF_MODE, default=MODE_STORAGE): vol.All( vol.Lower, vol.In([MODE_YAML, MODE_STORAGE]) ), vol.Optional(CONF_DASHBOARDS): cv.schema_with_slug_keys( YAML_DASHBOARD_SCHEMA, slug_validator=url_slug, ), vol.Optional(CONF_RESOURCES): [RESOURCE_SCHEMA], } ) }, extra=vol.ALLOW_EXTRA, ) async def async_setup(hass: HomeAssistantType, config: ConfigType): """Set up the Lovelace commands.""" mode = config[DOMAIN][CONF_MODE] yaml_resources = config[DOMAIN].get(CONF_RESOURCES) frontend.async_register_built_in_panel(hass, DOMAIN, config={"mode": mode}) async def reload_resources_service_handler(service_call: ServiceCallType) -> None: """Reload yaml resources.""" try: conf = await async_hass_config_yaml(hass) except HomeAssistantError as err: _LOGGER.error(err) return integration = await async_get_integration(hass, DOMAIN) config = await async_process_component_config(hass, conf, integration) resource_collection = await create_yaml_resource_col( hass, config[DOMAIN].get(CONF_RESOURCES) ) hass.data[DOMAIN]["resources"] = resource_collection if mode == MODE_YAML: default_config = dashboard.LovelaceYAML(hass, None, None) resource_collection = await create_yaml_resource_col(hass, yaml_resources) async_register_admin_service( hass, DOMAIN, SERVICE_RELOAD_RESOURCES, reload_resources_service_handler, schema=RESOURCE_RELOAD_SERVICE_SCHEMA, ) else: default_config = dashboard.LovelaceStorage(hass, None) if yaml_resources is not None: _LOGGER.warning( "Lovelace is running in storage mode. 
Define resources via user interface" ) resource_collection = resources.ResourceStorageCollection(hass, default_config) collection.StorageCollectionWebsocket( resource_collection, "lovelace/resources", "resource", RESOURCE_CREATE_FIELDS, RESOURCE_UPDATE_FIELDS, ).async_setup(hass, create_list=False) hass.components.websocket_api.async_register_command( websocket.websocket_lovelace_config ) hass.components.websocket_api.async_register_command( websocket.websocket_lovelace_save_config ) hass.components.websocket_api.async_register_command( websocket.websocket_lovelace_delete_config ) hass.components.websocket_api.async_register_command( websocket.websocket_lovelace_resources ) hass.components.websocket_api.async_register_command( websocket.websocket_lovelace_dashboards ) hass.data[DOMAIN] = { # We store a dictionary mapping url_path: config. None is the default. "dashboards": {None: default_config}, "resources": resource_collection, "yaml_dashboards": config[DOMAIN].get(CONF_DASHBOARDS, {}), } if hass.config.safe_mode: return True async def storage_dashboard_changed(change_type, item_id, item): """Handle a storage dashboard change.""" url_path = item[CONF_URL_PATH] if change_type == collection.CHANGE_REMOVED: frontend.async_remove_panel(hass, url_path) await hass.data[DOMAIN]["dashboards"].pop(url_path).async_delete() return if change_type == collection.CHANGE_ADDED: existing = hass.data[DOMAIN]["dashboards"].get(url_path) if existing: _LOGGER.warning( "Cannot register panel at %s, it is already defined in %s", url_path, existing, ) return hass.data[DOMAIN]["dashboards"][url_path] = dashboard.LovelaceStorage( hass, item ) update = False else: hass.data[DOMAIN]["dashboards"][url_path].config = item update = True try: _register_panel(hass, url_path, MODE_STORAGE, item, update) except ValueError: _LOGGER.warning("Failed to %s panel %s from storage", change_type, url_path) # Process YAML dashboards for url_path, dashboard_conf in hass.data[DOMAIN]["yaml_dashboards"].items(): # For now always mode=yaml config = dashboard.LovelaceYAML(hass, url_path, dashboard_conf) hass.data[DOMAIN]["dashboards"][url_path] = config try: _register_panel(hass, url_path, MODE_YAML, dashboard_conf, False) except ValueError: _LOGGER.warning("Panel url path %s is not unique", url_path) # Process storage dashboards dashboards_collection = dashboard.DashboardsCollection(hass) dashboards_collection.async_add_listener(storage_dashboard_changed) await dashboards_collection.async_load() collection.StorageCollectionWebsocket( dashboards_collection, "lovelace/dashboards", "dashboard", STORAGE_DASHBOARD_CREATE_FIELDS, STORAGE_DASHBOARD_UPDATE_FIELDS, ).async_setup(hass, create_list=False) return True async def create_yaml_resource_col(hass, yaml_resources): """Create yaml resources collection.""" if yaml_resources is None: default_config = dashboard.LovelaceYAML(hass, None, None) try: ll_conf = await default_config.async_load(False) except HomeAssistantError: pass else: if CONF_RESOURCES in ll_conf: _LOGGER.warning( "Resources need to be specified in your configuration.yaml. 
Please see the docs" ) yaml_resources = ll_conf[CONF_RESOURCES] return resources.ResourceYAMLCollection(yaml_resources or []) @callback def _register_panel(hass, url_path, mode, config, update): """Register a panel.""" kwargs = { "frontend_url_path": url_path, "require_admin": config[CONF_REQUIRE_ADMIN], "config": {"mode": mode}, "update": update, } if config[CONF_SHOW_IN_SIDEBAR]: kwargs["sidebar_title"] = config[CONF_TITLE] kwargs["sidebar_icon"] = config.get(CONF_ICON, DEFAULT_ICON) frontend.async_register_built_in_panel(hass, DOMAIN, **kwargs)
Python
0
@@ -1207,16 +1207,70 @@ _slug,%0A) +%0Afrom .system_health import system_health_info # NOQA %0A%0A_LOGGE
1eb90901e936a93ab2f5353080df9bad8c5d44bd
Update class comment
htmresearch/regions/CoordinateSensorRegion.py
htmresearch/regions/CoordinateSensorRegion.py
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2016, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- import numpy from collections import deque from nupic.bindings.regions.PyRegion import PyRegion from nupic.encoders.coordinate import CoordinateEncoder class CoordinateSensorRegion(PyRegion): """ CoordinateSensorRegion is a simple sensor for sending coordinate data into networks. It accepts data using the command "addDataToQueue" or through the function addDataToQueue() which can be called directly from Python. Data is queued up in a FIFO and each call to compute pops the top element. Each data record consists of the coordinate in an N-dimensional integer coordinate space, a 0/1 reset flag, and an integer sequence ID. """ def __init__(self, activeBits=21, outputWidth=1000, radius=2, verbosity=0): self.verbosity = verbosity self.activeBits = activeBits self.outputWidth = outputWidth self.radius = radius self.queue = deque() self.encoder = CoordinateEncoder(n=self.outputWidth, w=self.activeBits, verbosity=self.verbosity) @classmethod def getSpec(cls): """Return base spec for this region. See base class method for more info.""" spec = { "description": CoordinateSensorRegion.__doc__, "singleNodeOnly": True, "inputs": {}, # input data is added to queue via "addDataToQueue" command "outputs": { "dataOut": { "description": "Encoded coordinate SDR.", "dataType": "uint", "count": 0, "regionLevel": True, "isDefaultOutput": True, }, "resetOut": { "description": "0/1 reset flag output.", "dataType": "uint", "count": 1, "regionLevel": True, "isDefaultOutput": False, }, "sequenceIdOut": { "description": "Sequence ID", "dataType": "uint", "count": 1, "regionLevel": True, "isDefaultOutput": False, }, }, "parameters": { "activeBits": { "description": "The number of bits that are set to encode a single " "coordinate value", "dataType": "uint", "accessMode": "ReadWrite", "count": 1, "defaultValue": 21 }, "outputWidth": { "description": "Size of output vector", "dataType": "uint", "accessMode": "ReadWrite", "count": 1, "defaultValue": 1000 }, "radius": { "description": "Radius around 'coordinate'", "dataType": "uint", "accessMode": "ReadWrite", "count": 1, "defaultValue": 2 }, "verbosity": { "description": "Verbosity level", "dataType": "uint", "accessMode": "ReadWrite", "count": 1 }, }, "commands": { "addDataToQueue": { "description": CoordinateSensorRegion.addDataToQueue.__doc__, }, "addResetToQueue": { "description": CoordinateSensorRegion.addResetToQueue.__doc__, } }, } return spec def compute(self, inputs, outputs): """ Get the next record from the queue and encode it. @param inputs This parameter is ignored. The data comes from the queue @param outputs See definition in the spec above. """ if len(self.queue) > 0: data = self.queue.pop() else: raise Exception("CoordinateSensor: No data to encode: queue is empty") outputs["resetOut"][0] = data["reset"] outputs["sequenceIdOut"][0] = data["sequenceId"] sdr = self.encoder.encode((numpy.array(data["coordinate"]), self.radius)) outputs["dataOut"][:] = sdr if self.verbosity > 1: print "CoordinateSensor outputs:" print "sequenceIdOut: ", outputs["sequenceIdOut"] print "resetOut: ", outputs["resetOut"] print "dataOut: ", outputs["dataOut"].nonzero()[0] def addDataToQueue(self, coordinate, reset, sequenceId): """ Add the given data item to the sensor's internal queue. Calls to compute will cause items in the queue to be dequeued in FIFO order. @param coordinate A list containing the N-dimensional integer coordinate space to be encoded. This list can be specified in two ways, as a python list of integers or as a string which can evaluate to a python list of integers. @param reset An int or string that is 0 or 1. resetOut will be set to this value when this item is computed. @param sequenceId An int or string with an integer ID associated with this token and its sequence (document). """ if type(coordinate) == type(""): coordinateList = eval(coordinate) elif type(coordinate) == type([]): coordinateList = coordinate else: raise Exception("CoordinateSensor.addDataToQueue: unknown type for " "coordinate") self.queue.appendleft({ "sequenceId": int(sequenceId), "reset": int(reset), "coordinate": coordinateList, }) def addResetToQueue(self, sequenceId): """ Add a reset signal to the sensor's internal queue. Calls to compute will cause items in the queue to be dequeued in FIFO order. @param sequenceId An int or string with an integer ID associated with this token and its sequence (document). """ self.queue.appendleft({ "sequenceId": int(sequenceId), "reset": 1, "coordinate": [], }) def getOutputElementCount(self, name): """Returns the width of dataOut.""" if name == "resetOut" or name == "sequenceIdOut": # Should never actually be called since output size is specified in spec return 1 elif name == "dataOut": return self.outputWidth else: raise Exception("Unknown output {}.".format(name)) def initialize(self, inputs, outputs): """ Initialize the Region - nothing to do here. """ pass
Python
0
@@ -1258,16 +1258,48 @@ networks + using NuPIC's CoordinateEncoder .%0A%0A It
7eed2f61039a87f4163aef949740e1629378bd35
That should be a string
src/subreddits.py
src/subreddits.py
import logging.handlers from datetime import datetime from datetime import timedelta import database import globals import reddit import strings import utility log = logging.getLogger("bot") def processSubreddits(): subredditsCount = 0 postsCount = 0 messagesSent = 0 foundPosts = [] for subreddits in database.getSubscribedSubreddits(): subPostsCount = 0 startTimestamp = datetime.utcnow() earliestDatetime = datetime.utcnow() subredditsStrings = [] for subreddit in subreddits: subredditsStrings.append(subreddit['subreddit']) subredditDatetime = datetime.strptime(subreddit['lastChecked'], "%Y-%m-%d %H:%M:%S") if earliestDatetime - subredditDatetime < timedelta(seconds=0): earliestDatetime = subredditDatetime subredditsCount += 1 subredditString = '+'.join(subredditsStrings) log.debug("Searching subreddit group: "+subredditString) submissions = [] hitEnd = True for submission in reddit.getSubredditSubmissions(subredditString): log.debug("Found submission "+str(submission.id)+" in subreddit "+submission.subreddit) submissionCreated = datetime.utcfromtimestamp(submission.created_utc) if submissionCreated < earliestDatetime: hitEnd = False break submissions.append({'id': submission.id, 'dateCreated': submissionCreated, 'author': str(submission.author).lower(), 'link': "https://www.reddit.com"+submission.permalink, 'submission': submission, 'subreddit': submission.subreddit}) if len(submissions) % 50 == 0: log.info("Posts searched: "+str(len(submissions))) if hitEnd and len(submissions): log.info("Messaging owner that that we might have missed a post in /r/"+subredditString) strList = strings.possibleMissedPostMessage(submissions[len(submissions) - 1]['dateCreated'], earliestDatetime, subredditString) strList.append("\n\n*****\n\n") strList.append(strings.footer) if not reddit.sendMessage(globals.OWNER_NAME, "Missed Post", ''.join(strList)): log.warning("Could not send message to owner that we might have missed a post") if len(submissions): for submission in submissions: postsCount += 1 subPostsCount += 1 foundPosts.append(submission['id']) passesSubFilter = utility.passesFilter(submission['submission'], database.getFilter(submission['subreddit'])) if database.isPrompt(submission['author'], submission['subreddit']) and passesSubFilter and \ not database.isThreadReplied(submission['id']): log.info("Posting a prompt for /u/"+submission['author']+" in /r/"+submission['subreddit']) subredditDefaultSubscribe = database.subredditDefaultSubscribe(submission['subreddit']) promptStrList = strings.promptPublicComment(submission['author'], submission['subreddit']) promptStrList.append("\n\n*****\n\n") promptStrList.append(strings.footer) resultCommentID = reddit.replySubmission(submission['id'], ''.join(promptStrList)) if resultCommentID is not None: database.addThread(submission['id'], resultCommentID, submission['author'], submission['subreddit'], "", datetime.utcnow(), 0, subredditDefaultSubscribe, True) for subscriber in database.getSubredditAuthorSubscriptions(submission['subreddit'], submission['author']): if submission['dateCreated'] >= datetime.strptime(subscriber['lastChecked'], "%Y-%m-%d %H:%M:%S"): if (subscriber['filter'] != "none" and utility.passesFilter(submission, subscriber['filter'])) or \ (subscriber['filter'] == "none" and passesSubFilter): messagesSent += 1 log.info("Messaging /u/%s that /u/%s has posted a new thread in /r/%s: %s", subscriber['subscriber'], submission['author'], submission['subreddit'], submission['id']) strList = strings.alertMessage(submission['author'], submission['subreddit'], submission['link'], subscriber['single']) strList.append("\n\n*****\n\n") strList.append(strings.footer) if reddit.sendMessage(subscriber['subscriber'], strings.messageSubject(subscriber['subscriber']), ''.join(strList)): database.checkRemoveSubscription(subscriber['ID'], subscriber['single'], submission['dateCreated'] + timedelta(0,1)) else: log.warning("Could not send message to /u/%s when sending update", subscriber['subscriber']) for subreddit in subreddits: database.checkSubreddit(subreddit['subreddit'], startTimestamp) #log.debug(str(subPostsCount)+" posts searched in: "+str(round(time.perf_counter() - subStartTime, 3))) return subredditsCount, postsCount, messagesSent, foundPosts
Python
1
@@ -1048,16 +1048,20 @@ eddit %22+ +str( submissi @@ -1073,16 +1073,17 @@ breddit) +) %0A%09%09%09subm @@ -1466,16 +1466,20 @@ eddit': +str( submissi @@ -1490,16 +1490,17 @@ ubreddit +) %7D)%0A%09%09%09if
0116f38160c03939306470127f0489c98aeee954
Update nanomsg build file
shipyard/shipyard/nanomsg/build.py
shipyard/shipyard/nanomsg/build.py
"""Build nanomsg from source.""" from foreman import define_parameter, define_rule, decorate_rule from shipyard import ( ensure_directory, git_clone, run_commands, install_packages, copy_libraries, ) (define_parameter('deps') .with_doc("""Build-time Debian packages.""") .with_type(list) .with_parse(lambda pkgs: pkgs.split(',')) .with_default([ 'build-essential', 'cmake', ]) ) (define_parameter('repo') .with_doc("""Location of source repo.""") .with_type(str) .with_default('https://github.com/nanomsg/nanomsg.git') ) (define_parameter('version') .with_doc("""Version to build.""") .with_type(str) .with_default('1.0.0') ) @decorate_rule('//base:build') def build(parameters): """Build nanomsg from source.""" install_packages(parameters['deps']) build_src = parameters['//base:build_src'] / 'nanomsg' git_clone(parameters['repo'], build_src, parameters['version']) build_dir = build_src / 'build' if not ensure_directory(build_dir): # Don't run `ctest .` at the moment. run_commands(path=build_dir, commands_str=''' cmake .. cmake --build . sudo make install ''') (define_rule('tapeout') .with_doc("""Copy build artifacts.""") .with_build( lambda ps: copy_libraries(ps, '/usr/local/lib', ['libnanomsg'])) .depend('build') .reverse_depend('//base:tapeout') )
Python
0
@@ -1177,20 +1177,66 @@ udo +c make -install +--build . --target install%0A sudo ldconfig %0A
fe3798cf932880b2eac14e86d2652d08fdcbd093
Make method static to make it easier to move later.
src/tdl/client.py
src/tdl/client.py
__author__ = 'tdpreece' __author__ = 'tdpreece' import logging import time import json from collections import OrderedDict import stomp logger = logging.getLogger('tdl.client') logger.addHandler(logging.NullHandler()) class Client(object): def __init__(self, hostname, port, username): self.hostname = hostname self.port = port def go_live_with(self, implementation_map): hosts = [(self.hostname, self.port)] try: conn = stomp.Connection(host_and_ports=hosts) conn.start() listener = MyListener(conn, implementation_map) conn.connect(wait=True) remote_broker = RemoteBroker(conn) remote_broker.subscribe(listener) time.sleep(1) conn.disconnect() except Exception as e: logger.exception('Problem communicating with the broker.') def trial_run_with(self, implementation_map): hosts = [(self.hostname, self.port)] conn = stomp.Connection(host_and_ports=hosts) conn.start() conn.connect(wait=True) listener = PeekListener(conn, implementation_map) remote_broker = RemoteBroker(conn) remote_broker.subscribe(listener) time.sleep(1) conn.disconnect() class Listener(stomp.ConnectionListener): def __init__(self, conn, implementation_map): self.conn = conn self.remote_broker = RemoteBroker(self.conn) self.implementation_map = implementation_map def on_message(self, headers, message): self.process_next_message_from(headers, message) @staticmethod def respond_to(implementation_map, message): decoded_message = json.loads(message) method = decoded_message['method'] params = decoded_message['params'] id = decoded_message['id'] implementation = implementation_map[method] try: result = implementation(params) except Exception as e: logger.info('The user implementation has thrown an exception: {}'.format(e.message)) result = None params_str = ", ".join([str(p) for p in params]) print('id = {id}, req = {method}({params}), resp = {result}'.format(id=id, method=method, params=params_str, result=result)) if result is not None: response = OrderedDict([ ('result', result), ('error', None), ('id', id), ]) return response class MyListener(Listener): def process_next_message_from(self, headers, message): response = self.respond_to(self.implementation_map, message) if response is not None: self.remote_broker.acknowledge(headers) self.remote_broker.publish(response) class PeekListener(Listener): def process_next_message_from(self, headers, message): self.respond_to(self.implementation_map, message) class RemoteBroker(object): def __init__(self, conn): self.conn = conn def acknowledge(self, headers): self.conn.ack(headers['message-id'], headers['subscription']) def publish(self, response): self.conn.send( body=json.dumps(response, separators=(',', ':')), destination='test.resp' ) def subscribe(self, listener): self.conn.set_listener('listener', listener) self.conn.subscribe(destination='test.req', id=1, ack='client-individual')
Python
0
@@ -1585,16 +1585,61 @@ ge_from( +self.implementation_map, self.remote_broker, headers, @@ -2638,32 +2638,50 @@ ener(Listener):%0A + @staticmethod%0A def process_ @@ -2690,36 +2690,65 @@ xt_message_from( -self +implementation_map, remote_broker , headers, messa @@ -2771,20 +2771,24 @@ ponse = -self +Listener .respond @@ -2783,37 +2783,32 @@ ener.respond_to( -self. implementation_m @@ -2857,37 +2857,32 @@ ne:%0A -self. remote_broker.ac @@ -2904,37 +2904,32 @@ rs)%0A -self. remote_broker.pu @@ -2968,32 +2968,50 @@ ener(Listener):%0A + @staticmethod%0A def process_ @@ -3028,20 +3028,49 @@ ge_from( -self +implementation_map, remote_broker , header @@ -3090,20 +3090,24 @@ -self +Listener .respond @@ -3110,21 +3110,16 @@ pond_to( -self. implemen
535b1ea375465713f7505016e40d3fcd7533341f
write dtm
lexos/models/similarity_model.py
lexos/models/similarity_model.py
import os import numpy as np import pandas as pd from os import makedirs from flask import request from typing import Optional from os.path import join as path_join from sklearn.metrics.pairwise import cosine_similarity from lexos.helpers import constants from lexos.models.base_model import BaseModel from lexos.helpers.error_messages import NON_NEGATIVE_INDEX_MESSAGE from lexos.models.matrix_model import MatrixModel from lexos.receivers.matrix_receiver import IdTempLabelMap from lexos.receivers.session_receiver import SessionReceiver from lexos.receivers.similarity_receiver import SimilarityOption, \ SimilarityReceiver class SimilarityModel(BaseModel): def __init__(self, test_dtm: Optional[pd.DataFrame] = None, test_option: Optional[SimilarityOption] = None, test_id_temp_label_map: Optional[IdTempLabelMap] = None): """This is the class to generate similarity. :param test_dtm: (fake parameter) the doc term matrix used for testing. :param test_option: (fake parameter) the similarity option used for testing. :param test_id_temp_label_map: (fake parameter) the id temp label map used for testing. """ super().__init__() self._test_dtm = test_dtm self._test_option = test_option self._test_id_temp_label_map = test_id_temp_label_map @property def _doc_term_matrix(self) -> pd.DataFrame: """:return: the document term matrix.""" return self._test_dtm if self._test_dtm is not None \ else MatrixModel().get_matrix() @property def _id_temp_label_map(self) -> IdTempLabelMap: """:return: a map takes an id to temp labels.""" return self._test_id_temp_label_map \ if self._test_id_temp_label_map is not None \ else MatrixModel().get_temp_label_id_map() @property def _similarity_option(self) -> SimilarityOption: """:return: the similarity option.""" return self._test_option if self._test_option is not None \ else SimilarityReceiver().options_from_front_end() def _similarity_maker(self) -> pd.DataFrame: """this function generate the result of cos-similarity between files :return: docs_score: a parallel list with `docs_name`, is an array of the cos-similarity distance :return: docs_name: a parallel list with `docs_score`, is an array of the name (temp labels) """ # precondition assert self._similarity_option.comp_file_id >= 0, \ NON_NEGATIVE_INDEX_MESSAGE # get cosine_similarity dist = 1 - cosine_similarity(self._doc_term_matrix.values) # get index of selected file in the DTM selected_index = np.where(self._doc_term_matrix.index == self._similarity_option.comp_file_id)[0][0] # get an array of compared file indexes other_indexes = np.where(self._doc_term_matrix.index != self._similarity_option.comp_file_id)[0] # construct an array of scores docs_score_array = np.asarray([dist[file_index, selected_index] for file_index in other_indexes]) # construct an array of names compared_file_labels = np.asarray( [self._id_temp_label_map[file_id] for file_id in self._doc_term_matrix.index.values if file_id != self._similarity_option.comp_file_id]) # sort and round the score array final_score_array = np.round(np.sort(docs_score_array), decimals=4) # sort the name array to correctly map the score array final_name_array = compared_file_labels[docs_score_array.argsort()] # pack the scores and names in data_frame score_name_data_frame = pd.DataFrame(final_score_array, index=final_name_array, columns=["Cosine similarity"]) return score_name_data_frame def get_similarity_score(self) -> str: """This function returns similarity scores as a string""" scores = np.concatenate(self._similarity_maker().values) scores_list = '***'.join(str(score) for score in scores) + '***' return scores_list def get_similarity_label(self) -> str: """This function returns similarity compared labels as a string""" labels = np.array(self._similarity_maker().index) labels_list = '***'.join(name for name in labels) + '***' return labels_list def _generate_sims_csv(self): delimiter = ',' selected_file_name = self._id_temp_label_map[self._similarity_option.comp_file_id] # get the path of the folder to save result folder_path = path_join(SessionReceiver().get_session_folder(), constants.RESULTS_FOLDER) if not os.path.isdir(folder_path): makedirs(folder_path) # get the saved file path out_file_path = path_join(folder_path, 'results.csv') # write the header to the file with open(out_file_path, 'w') as out_file: out_file.write("Similarity Rankings:" + '\n') out_file.write( "The rankings are determined by 'distance between documents' " "where small distances (near zero) represent documents that " "are 'similar' and unlike documents have distances closer to " "one.\n") out_file.write("Selected Comparison Document: " + delimiter + selected_file_name + '\n')
Python
0.999971
@@ -5816,9 +5816,180 @@ '%5Cn')%0A%0A + # append the pandas data frame to the file%0A with open(out_file_path, 'a') as f:%0A self._similarity_maker().to_csv(f)%0A%0A return out_file_path %0A
22f3b74fec790847c3e353aad84b51252637a90f
Revert "oe.path.relative: switch to a different appraoch"
lib/oe/path.py
lib/oe/path.py
def join(*paths): """Like os.path.join but doesn't treat absolute RHS specially""" from os import sep from os.path import normpath return normpath(sep.join(paths)) def relative(src, dest=None): """ Return a relative path from src to dest(default=cwd). >>> relative("/usr/bin", "/tmp/foo/bar") ../../tmp/foo/bar >>> relative("/usr/bin", "/usr/lib") ../lib >>> relative("/tmp", "/tmp/foo/bar") foo/bar """ if dest is None: dest = getcwd() if hasattr(os.path, "relpath"): return os.path.relpath(dest, src) else: from os import getcwd, sep from os.path import abspath, normpath srclist = abspath(src).split(sep) destlist = abspath(dest).split(sep) loc = [spath == dpath for spath, dpath in zip(srclist, destlist)].index(False) rellist = ([ ".." ] * (len(srclist) - loc)) + destlist[loc:] return sep.join(rellist) def format_display(path, metadata): """ Prepare a path for display to the user. """ rel = relative(metadata.getVar("TOPDIR", 1), path) if len(rel) > len(path): return path else: return rel
Python
0
@@ -88,60 +88,22 @@ -from os import sep%0A from os.path import normpath%0A +import os.path %0A @@ -110,16 +110,24 @@ return +os.path. normpath @@ -127,19 +127,19 @@ ormpath( -sep +%22/%22 .join(pa @@ -171,13 +171,8 @@ dest -=None ):%0A @@ -221,21 +221,8 @@ dest -(default=cwd) .%0A%0A @@ -411,47 +411,21 @@ i -f dest is None:%0A dest = getcwd() +mport os.path %0A%0A @@ -522,81 +522,60 @@ -from os import getcwd, sep%0A from os.path import abspath, normpath%0A +destlist = os.path.normpath(dest).split(os.path.sep) %0A @@ -585,27 +585,36 @@ srclist = -abs +os.path.norm path(src).sp @@ -617,29 +617,38 @@ ).split( +os.path. sep)%0A +%0A destlist @@ -643,130 +643,192 @@ -destlist = abspath(dest).split(sep)%0A loc = %5Bspath == dpath for spath, dpath in zip(srclist, destlist)%5D.index(False) +# Find common section of the path%0A common = os.path.commonprefix(%5Bdestlist, srclist%5D)%0A commonlen = len(common)%0A%0A # Climb back to the point where they differentiate %0A @@ -839,22 +839,23 @@ rel -list +path = -( %5B -%22..%22 +pardir %5D * @@ -875,15 +875,115 @@ ) - -loc)) +commonlen)%0A if commonlen %3C len(destlist):%0A # Add remaining portion%0A relpath + += des @@ -992,14 +992,21 @@ ist%5B -loc +commonlen :%5D%0A +%0A @@ -1028,20 +1028,20 @@ join(rel -list +path )%0A%0Adef f
0f004830bd220ad8da1d4b151897630431d2f195
tweak scoring functions, always
cryptools/crack.py
cryptools/crack.py
# -*- coding: utf-8 -*- import string from stringutils import convert, freq def brute_xor(cyphertext, st_freqs): """Bruteforce a given single-character XOR-encrypted cyphertext. Statistical information is used to choose which character is the most likely key. :param cyphertext: the cyphertext to crack :param st_freqs: a Counter of standard frequencies in the target language :return: ``(key, message, distance)`` """ # standard frequency counts st_keys = st_freqs.keys() st_len = len(st_keys) # store a map of each candidate and a simple frequency score topchoice = None lowdist = float('inf') key = None # bruteforce for each character for test in (string.letters + string.digits): dec = convert.xor(test, cyphertext) cand_freqs = freq.get_freqs(freq.char_count(dec.lower())) cand_keys = cand_freqs.keys() distance = 0.0 for c in cand_freqs: # use two classifiers, based on pos'n in std freq list & freq dist try: st_in = st_keys.index(c) except ValueError: st_in = st_len distance += abs(cand_keys.index(c) - st_in) distance += abs(st_freqs[c] - cand_freqs[c]) * 100 if lowdist > distance: lowdist = distance topchoice = dec key = test return key, topchoice, lowdist
Python
0
@@ -17,16 +17,28 @@ f-8 -*-%0A +import math%0A import s @@ -437,23 +437,20 @@ essage, -distanc +scor e)%60%60%0A @@ -914,23 +914,20 @@ -distanc +scor e = 0.0%0A @@ -973,72 +973,15 @@ # -use two classifiers, based on pos'n in std freq list & freq dist +scoring %0A @@ -1097,16 +1097,17 @@ st_len%0A +%0A @@ -1102,39 +1102,80 @@ en%0A%0A -distanc +# find better scoring functions%0A scor e += abs(cand_ke @@ -1207,23 +1207,20 @@ -distanc +scor e += abs @@ -1277,23 +1277,20 @@ wdist %3E -distanc +scor e:%0A @@ -1310,15 +1310,12 @@ t = -distanc +scor e%0A
d48099080cedc81e70f79cbf45514cd77c5329eb
fix recorder bug
uliweb/contrib/recorder/middle_recorder.py
uliweb/contrib/recorder/middle_recorder.py
from uliweb import Middleware from uliweb.utils.common import request_url class RecorderrMiddle(Middleware): ORDER = 600 def process_response(self, request, response): from uliweb import settings, functions, json_dumps import base64 #if not debug status it'll quit if not settings.get_var('GLOBAL/DEBUG'): return response S = functions.get_model('uliwebrecorderstatus') s = S.all().one() if s and s.status == 'E': return response if settings.get_var('ULIWEBRECORDER/response_text'): text = response.data else: text = '' #test if post_data need to convert base64 if not request.content_type: post_data_is_text = True else: post_data_is_text = self.test_text(request.content_type) if not post_data_is_text: post_data = base64.encodestring(request.data) else: post_data = json_dumps(request.POST.to_dict()) #test if response.data need to convert base64 response_data_is_text = self.test_text(response.content_type) if not response_data_is_text: response_data = base64.encodestring(text) else: response_data = text R = functions.get_model('uliwebrecorder') if request.user: user_id = request.user.id else: user_id = None recorder = R(method=request.method, url=request_url(request), post_data_is_text=post_data_is_text, post_data=post_data, user=user_id, response_data=response_data, response_data_is_text=response_data_is_text, status_code=response.status_code, ) recorder.save() return response def test_text(self, content_type): from uliweb.utils.common import match from uliweb import settings m = content_type.split(';', 1)[0] r = match(m, settings.get_var('ULIWEBRECORDER/text_content_types')) return r
Python
0.000001
@@ -488,13 +488,16 @@ if -s and +not s or s.s
f9cc0155ba076f73d63d2106373371044b5f144d
add testsrequired for skipModule
lib/runtest.py
lib/runtest.py
#!/usr/bin/env python # Copyright (C) 2015 Intel Corporation # # Released under the MIT license (see COPYING.MIT) # ./runtest.py -b build_data.json -a tag -f test.manifest import sys import os import time import unittest import inspect from functools import wraps BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__))) sys.path.append(os.path.join(BASEDIR, "oeqa")) sys.path.append(os.path.join(BASEDIR, "bitbake", "lib")) from optparse import OptionParser from oeqa.oetest import oeTest from oeqa.oetest import oeRuntimeTest from oeqa.oetest import runTests from oeqa.runexported import FakeTarget from oeqa.runexported import MyDataDict from oeqa.runexported import TestContext from oeqa.utils.sshcontrol import SSHControl from oeqa.utils.decorators import gettag try: import simplejson as json except ImportError: import json def setUp(self): pass oeRuntimeTest.setUp = setUp def wrap_runner(runner, *wargs, **wkwargs): @wraps(runner) def __wrapper(*args, **kwargs): # args and kwargs will overwrite the wargs and wkwargs _args = list(args) _args.extend(wargs[len(args):] if len(wargs) > len(args) else []) kw = wkwargs.copy() kw.update(kwargs) return runner(*_args, **kw) return __wrapper def main(): usage = "usage: %prog [options]" parser = OptionParser(usage=usage) parser.add_option("-t", "--target-ip", dest="ip", help="The IP address of the target machine. Use this to \ overwrite the value determined from TEST_TARGET_IP at build time") parser.add_option("-s", "--server-ip", dest="server_ip", help="The IP address of this machine. Use this to \ overwrite the value determined from TEST_SERVER_IP at build time.") parser.add_option("-d", "--deploy-dir", dest="deploy_dir", default=os.path.join(BASEDIR, "deploy"), help="Full path to the package feeds, that this \ the contents of what used to be DEPLOY_DIR on the build machine. \ If not specified it will use the value specified in the json if \ that directory actually exists or it will error out.") parser.add_option("-l", "--log-dir", dest="log_dir", help="This sets the path for TEST_LOG_DIR. If not specified \ the current dir is used. This is used for usually creating a \ ssh log file and a scp test file.") parser.add_option("-f", "--test-manifest", dest="tests_list", help="The test list file") parser.add_option("-b", "--build-data", dest="build_data", help="The build data file.") parser.add_option("-a", "--tag", dest="tag", help="The tags to filter test case") parser.add_option("-m", "--machine", dest="machine", help="""The target machine:quark intel-corei7-64 beaglebone""") parser.add_option("-n", "--nativearch", dest="nativearch", help="The native arch") parser.add_option("-x", "--xunit", dest="xunit", help="Output directory to put results in xUnit XML format") (options, args) = parser.parse_args() tc = TestContext() #inject testcase list tclist = [] if not options.tests_list: options.tests_list = os.path.join(os.path.dirname(__file__), "testplan", "iottest.manifest") for each_manifest in options.tests_list.split(): with open(each_manifest, "r") as f: map(lambda y:tclist.append(y) if y not in tclist else None, filter(lambda x: not x.startswith('#'), [n.strip() for n in f.readlines()]) ) tc.testslist = tclist print tc.testslist deployDir = os.path.abspath(options.deploy_dir) if not os.path.isdir(deployDir): raise Exception("The path to DEPLOY_DIR does not exists: %s" % deployDir) if options.machine: machine = options.machine else: parser.error("Please specify target machine by -m") if options.xunit: try: import xmlrunner except Exception: raise Exception( "xUnit output requested but unittest-xml-reporting not installed") unittest.TextTestRunner = wrap_runner(xmlrunner.XMLTestRunner, output=options.xunit) if options.build_data: build_data = options.build_data else: build_data = os.path.join(deployDir, "files", machine, "builddata.json") #get build data from file with open(build_data, "r") as f: loaded = json.load(f) #inject build datastore d = MyDataDict() if loaded.has_key("d"): for key in loaded["d"].keys(): d[key] = loaded["d"][key] d["DEPLOY_DIR"], d["MACHINE"] = deployDir, machine if options.log_dir: d["TEST_LOG_DIR"] = os.path.abspath(options.log_dir) else: d["TEST_LOG_DIR"] = os.path.abspath(os.path.dirname(__file__)) navarch = os.popen("uname -m").read().strip() d["BUILD_ARCH"] = "x86_64" if not navarch else navarch if options.nativearch: d["BUILD_ARCH"] = options.nativearch setattr(tc, "d", d) #inject build package manifest pkgs = [pname.strip() for pname in loaded["pkgmanifest"]] setattr(tc, "pkgmanifest", " ".join(pkgs)) #inject target information target = FakeTarget(d) target.ip = options.ip if options.ip else "192.168.7.2" target.server_ip = options.server_ip if options.server_ip else "192.168.7.1" setattr(tc, "target", target) #inject others for key in loaded.keys(): if key not in ["testslist", "d", "target", "pkgmanifest"]: setattr(tc, key, loaded[key]) target.exportStart() setattr(tc, "tagexp", options.tag) runTests(tc) return 0 if __name__ == "__main__": try: ret = main() except Exception: ret = 1 import traceback traceback.print_exc(5) sys.exit(ret)
Python
0
@@ -3699,16 +3699,92 @@ tslist%0A%0A + #add testsrequired for skipModule %0A tc.testsrequired = tc.testslist%0A%0A depl
c6a9fcfe817128d3e7b0f52625bcd2e6c1c92f76
fix #4491: auth1 test needs sapi for login (#4492)
tests/auth1_test.py
tests/auth1_test.py
# -*- coding: utf-8 -*- u"""Test sirepo.auth :copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved. :license: http://www.apache.org/licenses/LICENSE-2.0.html """ from __future__ import absolute_import, division, print_function import pytest from pykern import pkcollections from sirepo import srunit @srunit.wrap_in_request(sim_types='myapp', want_user=False) def test_login(): from pykern import pkunit, pkcompat from pykern.pkunit import pkeq, pkok, pkre, pkfail, pkexcept from sirepo import auth import flask import sirepo.auth.guest import sirepo.cookie import sirepo.http_request import sirepo.uri_router import sirepo.util r = sirepo.uri_router.call_api('authState') pkre('LoggedIn": false.*Registration": false', pkcompat.from_bytes(r.data)) auth.process_request() with pkunit.pkexcept('SRException.*routeName=login'): auth.logged_in_user() with pkexcept('SRException.*routeName=login'): auth.require_user() sirepo.cookie.set_sentinel() # copying examples for new user takes time try: r = auth.login(sirepo.auth.guest, sim_type='myapp') pkfail('expecting sirepo.util.Response') except sirepo.util.Response as e: r = e.sr_args.response pkre(r'LoggedIn":\s*true.*Registration":\s*false', pkcompat.from_bytes(r.data)) u = auth.logged_in_user() pkok(u, 'user should exist') # guests do not require completeRegistration auth.require_user()
Python
0
@@ -537,16 +537,38 @@ t flask%0A + import sirepo.api%0A impo @@ -1108,16 +1108,70 @@ try:%0A + # TODO(rorour): get sapi from current request%0A @@ -1220,16 +1220,40 @@ ='myapp' +, sapi=sirepo.api.Base() )%0A
5798b7e66b17aa24564d379619faa4d16651a821
fix up benchmark test to actually work. Hasn't been run in a while, obviously
tests/benchmarks.py
tests/benchmarks.py
# Brute Force: # 481 hits # 29637.96 usec/pass # # Memory-based Rtree Intersection: # 481 hits # 1216.70 usec/pass # \Disk-based Rtree Intersection: # 481 hits # 2617.95 usec/pass import random import timeit try: import pkg_resources pkg_resources.require('Rtree') except: pass from rtree import Rtree as _Rtree TEST_TIMES = 20 # a very basic Geometry class Point(object): def __init__(self, x, y): self.x = x self.y = y # Scatter points randomly in a 1x1 box # class Rtree(_Rtree): pickle_protocol = -1 bounds = (0, 0, 6000000, 6000000) count = 30000 points = [] insert_object = None insert_object = {'a': range(100), 'b': 10, 'c': object(), 'd': dict(x=1), 'e': Point(2, 3)} index = Rtree() disk_index = Rtree('test', overwrite=1) coordinates = [] for i in xrange(count): x = random.randrange(bounds[0], bounds[2]) + random.random() y = random.randrange(bounds[1], bounds[3]) + random.random() point = Point(x, y) points.append(point) index.add(i, (x, y), insert_object) disk_index.add(i, (x, y), insert_object) coordinates.append((i, (x, y, x, y), insert_object)) s =""" bulk = Rtree(coordinates[:2000]) """ t = timeit.Timer(stmt=s, setup='from __main__ import coordinates, Rtree, insert_object') print "\nStream load:" print "%.2f usec/pass" % (1000000 * t.timeit(number=TEST_TIMES)/TEST_TIMES) s =""" idx = Rtree() i = 0 for point in points[:2000]: idx.add(i, (point.x, point.y), insert_object) i+=1 """ t = timeit.Timer(stmt=s, setup='from __main__ import points, Rtree, insert_object') print "\nOne-at-a-time load:" print "%.2f usec/pass\n\n" % (1000000 * t.timeit(number=TEST_TIMES)/TEST_TIMES) bbox = (1240000, 1010000, 1400000, 1390000) print count, "points" print "Query box: ", bbox print "" # Brute force all points within a 0.1x0.1 box s = """ hits = [p for p in points if p.x >= bbox[0] and p.x <= bbox[2] and p.y >= bbox[1] and p.y <= bbox[3]] """ t = timeit.Timer(stmt=s, setup='from __main__ import points, bbox') print "\nBrute Force:" print len([p for p in points if p.x >= bbox[0] and p.x <= bbox[2] and p.y >= bbox[1] and p.y <= bbox[3]]), "hits" print "%.2f usec/pass" % (1000000 * t.timeit(number=TEST_TIMES)/TEST_TIMES) # 0.1x0.1 box using intersection if insert_object is None: s = """ hits = [points[id] for id in index.intersection(bbox)] """ else: s = """ hits = [p.object for p in index.intersection(bbox, objects=insert_object)] """ t = timeit.Timer(stmt=s, setup='from __main__ import points, index, bbox, insert_object') print "\nMemory-based Rtree Intersection:" print len([points[id] for id in index.intersection(bbox)]), "hits" print "%.2f usec/pass" % (1000000 * t.timeit(number=100)/100) # run same test on disk_index. s = s.replace("index.", "disk_index.") t = timeit.Timer(stmt=s, setup='from __main__ import points, disk_index, bbox, insert_object') print "\nDisk-based Rtree Intersection:" print len(disk_index.intersection(bbox)), "hits" print "%.2f usec/pass" % (1000000 * t.timeit(number=TEST_TIMES)/TEST_TIMES) if insert_object: s = """ hits = disk_index.intersection(bbox, objects="raw") """ t = timeit.Timer(stmt=s, setup='from __main__ import points, disk_index, bbox, insert_object') print "\nDisk-based Rtree Intersection without Item() wrapper (objects='raw'):" result = disk_index.intersection(bbox, objects="raw") print len(result), "raw hits" print "%.2f usec/pass" % (1000000 * t.timeit(number=TEST_TIMES)/TEST_TIMES) assert 'a' in result[0], result[0] import os try: os.remove('test.dat') os.remove('test.idx') except: pass
Python
0
@@ -2946,25 +2946,27 @@ ction:%22%0A -print len +hits = list (disk_in @@ -2988,16 +2988,32 @@ n(bbox)) +%0Aprint len(hits) , %22hits%22 @@ -3389,16 +3389,21 @@ esult = +list( disk_ind @@ -3434,24 +3434,25 @@ jects=%22raw%22) +) %0A print l
646db72eca34f6006d189f0a143d0c00388d1955
Update viehicle.py
sketches/ev_steering_1/viehicle.py
sketches/ev_steering_1/viehicle.py
class Viehicle(): def __init__(self, x, y): self.acceleration = PVector(0, 0) self.velocity = PVector(0, 0) self.location = PVector(x, y) self.r = 8.0 self.maxspeed = 5 self.maxforce = 0.1 self.d = 25 def update(self): self.velocity.add(self.acceleration) self.velocity.limit(self.maxspeed) self.location.add(self.velocity) self.acceleration.mult(0) def applyForce(self, force): self.acceleration.add(force) def seek(self, target): desired = PVector.sub(target, self.location) # Check Boundaries if self.location.x < self.d: desired = PVector(self.maxspeed, self.velocity.y) elif self.location.x > width - self.d: desired = PVector(-self.maxspeed, self.velocity.y) if self.location.y < self.d: desired = PVector(self.velocity.x, self.maxspeed) elif self.location.y > height - self.d: desired = PVector(self.velocity.x, -self.maxspeed) desired.normalize() desired.mult(self.maxspeed) steer = PVector.sub(desired, self.velocity) steer.limit(self.maxforce) self.applyForce(steer) def display(self): theta = self.velocity.heading() + PI/2 fill(color(98, 199, 119)) stroke(1) strokeWeight(1) with pushMatrix(): translate(self.location.x, self.location.y) rotate(theta) with beginShape(): vertex(0, -self.r*2) vertex(-self.r, self.r*2) vertex(self.r, self.r*2)
Python
0
@@ -449,28 +449,24 @@ mult(0)%0A - %0A def app
50af61832d781a5730c0b6d35a24e3ac4f1a5282
Always return nil if something went wrong
rsqueakvm/plugins/database_plugin.py
rsqueakvm/plugins/database_plugin.py
# -*- coding: utf-8 -*- from rsqueakvm.database import dbm from rsqueakvm.error import PrimitiveFailedError from rsqueakvm.plugins.plugin import Plugin from rsqueakvm.primitives.bytecodes import * from rsqueakvm.model.database import W_DBObject from sqpyte import interpreter from sqpyte.capi import CConfig DatabasePlugin = Plugin() @DatabasePlugin.expose_primitive(unwrap_spec=[object, str, bool]) def primitiveSQLConnect(interp, s_frame, w_rcvr, filename, sqpyte): if sqpyte: return dbm.connect(interp.space, interpreter.SQPyteDB, filename) return dbm.connect(interp.space, interpreter.SQLite3DB, filename) @DatabasePlugin.expose_primitive(clean_stack=False) def primitiveSQLExecute(interp, s_frame, argcount): if not 2 <= argcount <= 3: raise PrimitiveFailedError( 'wrong number of arguments: %s' % argcount) args = None if argcount == 3: args = interp.space.unwrap_array(s_frame.pop()) arg3_w = s_frame.pop() sql = interp.space.unwrap_string(arg3_w) arg2_w = s_frame.pop() db_handle = interp.space.unwrap_longlong(arg2_w) connection = dbm.get_connection(db_handle) cursor_handle = dbm.execute(connection, sql, args) return interp.space.wrap_int(cursor_handle) @DatabasePlugin.expose_primitive(unwrap_spec=[object, int]) def primitiveSQLNext(interp, s_frame, w_rcvr, cursor_handle): return dbm.cursor(cursor_handle).next() @DatabasePlugin.expose_primitive(unwrap_spec=[object, int]) def primitiveSQLColumnCount(interp, s_frame, w_rcvr, cursor_handle): return dbm.cursor(cursor_handle).column_count() @DatabasePlugin.expose_primitive(unwrap_spec=[object, int]) def primitiveSQLColumnNames(interp, s_frame, w_rcvr, cursor_handle): return dbm.cursor(cursor_handle).column_names() @DatabasePlugin.expose_primitive(unwrap_spec=[object, int, int]) def primitiveSQLColumnName(interp, s_frame, w_rcvr, cursor_handle, index): if index < 1: raise PrimitiveFailedError('Index must be >= 1') # Smalltalk counts from 1, rest of world from 0 return dbm.cursor(cursor_handle).column_name(index - 1) @DatabasePlugin.expose_primitive(unwrap_spec=[object, int]) def primitiveSQLClose(interp, s_frame, w_rcvr, db_handle): return dbm.close(interp.space, db_handle) @DatabasePlugin.expose_primitive(unwrap_spec=[object, int]) def primitiveSQLModeSwitch(interp, s_frame, w_rcvr, mode): if mode == 1: dbm.driver = interpreter.SQLite3DB elif mode == 2: dbm.driver = interpreter.SQPyteDB else: dbm.driver = None return interp.space.w_nil @DatabasePlugin.expose_primitive(unwrap_spec=[object, str]) def primitiveSetDBFile(interp, s_frame, w_rcvr, db_file_name): dbm.db_file_name = db_file_name return interp.space.w_nil @DatabasePlugin.expose_primitive(unwrap_spec=[object]) def primitiveCloseDBObject(interp, s_frame, w_rcvr): dbm.connection(interp.space).close() return interp.space.w_nil @DatabasePlugin.expose_primitive(unwrap_spec=[object]) def primitiveSQLAllInstances(interp, s_frame, w_class): class_name = w_class.classname(interp.space).split(' ')[0] handle = dbm.connection(interp.space) cursor_handle = dbm.execute(handle, 'SELECT * FROM %s;' % class_name) return interp.space.wrap_int(cursor_handle) @DatabasePlugin.expose_primitive(unwrap_spec=[object, int]) def primitiveSQLNextObject(interp, s_frame, w_rcvr, cursor_handle): query = dbm.cursor(cursor_handle).raw_next() if query.column_type(0) == CConfig.SQLITE_NULL: return interp.space.w_nil elif query.column_type(0) != CConfig.SQLITE_INTEGER: raise PrimitiveFailedError('First column not an integer') object_id = query.column_int64(0) num_cols = query.data_count() obj = W_DBObject(interp.space, w_rcvr, num_cols, object_id=object_id) return obj
Python
0.878975
@@ -3500,90 +3500,19 @@ uery -.column_type(0) == CConfig.SQLITE_NULL:%0A return interp.space.w_nil%0A elif + is None or que @@ -3569,64 +3569,32 @@ r -aise PrimitiveFailedError('First column not an integer') +eturn interp.space.w_nil %0A
6032ba08c0222bd0f10f045d242d48a2b4db92ff
change in the qc file structure
ehive/runnable/process/RunMutiQC.py
ehive/runnable/process/RunMutiQC.py
import os, subprocess,fnmatch from shutil import copy2 from ehive.runnable.IGFBaseProcess import IGFBaseProcess from igf_data.utils.fileutils import get_temp_dir,remove_dir class RunMutiQC(IGFBaseProcess): def param_defaults(self): params_dict=super(IGFBaseProcess,self).param_defaults() params_dict.update({ 'force_overwrite':True, 'multiqc_dir_label':'multiqc', 'multiqc_exe':'multiqc', 'multiqc_options':{'--zip-data-dir':''}, 'demultiplexing_stats_file':'Stats/Stats.json' }) return params_dict def run(self): try: seqrun_igf_id=self.param_required('seqrun_igf_id') demultiplexing_stats_file=self.param_required('demultiplexing_stats_file') qc_files_name=self.param('qc_files_name') multiqc_exe=self.param('multiqc_exe') multiqc_options=self.param('multiqc_options') multiqc_dir_label=self.param('multiqc_dir_label') force_overwrite=self.param('force_overwrite') base_results_dir=self.param_required('base_results_dir') project_name=self.param_required('project_name') seqrun_date=self.param_required('seqrun_date') flowcell_id=self.param_required('flowcell_id') tag=self.para,_required('tag') if tag not in ['known','undetermined']: raise ValueError('unknown status tag {0}'.format(tag)) # check valid status tags if qc_files_name not in ['qc_known','qc_undetermined']: raise ValueError('unknown status tag {0}'.format(tag)) # check valid qc files qc_files=self.param_required(qc_files_name) # get specific qc files fastq_dir=[f_dir for f_dir in qc_files.keys()][0] # consider only the first fastq dir fastqc_files=list() fastqscreen_files=list() for fastq_dir, qc_output in qc_files.items(): fastqc_files.extend([fqc_dir for fqc_dir in qc_output['fastqc'].keys()]) fastqscreen_files.extend([fsr_dir for fsr_dir in qc_output['fastqscreen'].keys()]) lane_index_info=os.path.basename(fastq_dir) # get lane and index info multiqc_result_dir=os.path.join(base_results_dir, \ project_name, \ seqrun_date, \ flowcell_id, \ lane_index_info,\ tag, \ multiqc_dir_label) # get multiqc final output path if os.path.exists(multiqc_result_dir) and force_overwrite: remove_dir(multiqc_result_dir) # remove existing output dir if force_overwrite is true if not os.path.exists(multiqc_result_dir): os.makedirs(multiqc_result_dir,mode=0o775) # create output dir if its not present temp_work_dir=get_temp_dir() # get a temp work dir multiqc_input_list=os.path.join(temp_work_dir,'multiqc_input_file.txt') # get name of multiqc input file demultiplexing_stats_file=oa.path.join(fastq_dir, demultiplexing_stats_file) with open(multiqc_input_list,'w') as multiqc_input_file: # writing multiqc input if not os.path.exists(demultiplexing_stats_file): raise IOError('demultiplexing stats file {0} not found'.\ format(demultiplexing_stats_file)) # check demultiplexing stats file multiqc_input_file.write('{}\n'.format(demultiplexing_stats_file)) # add demultiplexing stat to list for fastqc_file in fastqc_files: if not os.path.exists(fastqc_file): raise IOError('fasqc file {0} not found'.\ format(fastqc_file)) # check fastqc file multiqc_input_file.write('{}\n'.format(fastqc_file)) # add fastqc file to list for fastqscreen_file in fastqscreen_files: if not os.path.exists(fastqscreen_file): raise IOError('fastqscreen file {0} not found'.\ format(fastqscreen_file)) # check fastqscreen file multiqc_input_file.write('{}\n'.format(fastqscreen_file)) # add fastqscreen file to list multiqc_report_title='Project:{0},Sequencing_date:{1},Flowcell_lane:{2},status:{0}'.\ format(project_name, \ seqrun_date,\ lane_index_info,\ tag) # get multiqc report title and filename multiqc_param=self.format_tool_options(multiqc_options) # format multiqc params multiqc_cmd=[multiqc_exe, '--file-list',multiqc_input_list, '--outdir',temp_work_dir, '--title',multiqc_report_title, ] # multiqc base parameters multiqc_cmd.extend(multiqc_param) # add additional parameters subprocess.check_call(multiqc_cmd) # run multiqc multiqc_html=None multiqc_data=None for root, dirs,files in os.walk(top=temp_work_dir): for file in files: if fnmatch.fnmatch(file, '*.html'): copy2(os.path.join(root,file),multiqc_result_dir) multiqc_html=os.path.join(multiqc_result_dir,file) # get multiqc html path elif fnmatch.fnmatch(file, '*.zip'): copy2(os.path.join(root,file),multiqc_result_dir) multiqc_data=os.path.join(multiqc_result_dir,file) # get multiqc data path self.param('dataflow_params',{'multiqc_html':multiqc_html, \ 'multiqc_data':multiqc_data}) except Exception as e: message='seqrun: {2}, Error in {0}: {1}'.format(self.__class__.__name__, \ e, \ seqrun_igf_id) self.warning(message) self.post_message_to_slack(message,reaction='fail') # post msg to slack for failed jobs raise
Python
0.000001
@@ -1971,23 +1971,16 @@ fastqc'%5D -.keys() %5D)%0A @@ -2055,23 +2055,16 @@ screen'%5D -.keys() %5D)%0A
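The diff above only strips redundant `.keys()` calls: iterating a dict (or feeding it to a comprehension) already yields its keys. A minimal sketch of the equivalence, with hypothetical data:

qc_output = {'fastqc': {'/qc/lane1_fastqc': 'done'},
             'fastqscreen': {'/qc/lane1_screen': 'done'}}  # hypothetical data

with_keys = [f for f in qc_output['fastqc'].keys()]  # before the change
without_keys = [f for f in qc_output['fastqc']]      # after the change
assert with_keys == without_keys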
e2adcd5681a0afbed7be7c1451dfcdaf66d9790c
Indent the if for _img (derp)
swood/sample.py
swood/sample.py
from PIL import Image
import numpy as np
from . import complain
import pyfftw
import wave

pyfftw.interfaces.cache.enable()


class CalculatedFFT:
    def __init__(self, avgdata, spacing):
        self.avgdata = avgdata
        self.spacing = spacing


class Sample:
    def __init__(self, filename, binsize, volume=0.8, delete_raw_data=True):
        self.binsize = binsize
        if binsize < 2:
            raise complain.ComplainToUser("FFT bin size must be at least 2.")
        self.delete_raw = delete_raw_data  # delete raw data after FFT analysis

        self._maxfreq = None
        self._fft = None
        self._img = None

        self.wav = self.parse_wav(filename)
        self.volume = 256 ** 4 / (256 ** self.sampwidth) * volume

    def parse_wav(self, filename):
        try:
            with wave.open(filename, "rb") as wavfile:
                self.sampwidth = wavfile.getsampwidth()
                self.framerate = wavfile.getframerate()
                self.channels = wavfile.getnchannels()
                self.length = wavfile.getnframes()
                if self.sampwidth == 1:
                    self.size = np.int8
                elif self.sampwidth == 2:
                    self.size = np.int16
                elif self.sampwidth == 3 or self.sampwidth == 4:
                    self.size = np.int32
                else:
                    raise wave.Error
                wav = np.zeros((self.channels, self.length), dtype=self.size)
                for i in range(0, self.length):
                    frame = wavfile.readframes(1)
                    for chan in range(self.channels):
                        wav[chan][i] = int.from_bytes(
                            frame[self.sampwidth * chan:self.sampwidth * (chan + 1)],
                            byteorder="little", signed=True)
            return wav
        except IOError:
            raise complain.ComplainToUser("Error opening WAV file at path '{}'.".format(filename))
        except wave.Error:
            raise complain.ComplainToUser("This WAV type is not supported. Try opening the file in Audacity and exporting it as a standard WAV.")

    @property
    def fft(self):
        if not self._fft:
            if self.binsize % 2 != 0:
                print("Warning: Bin size must be a multiple of 2, correcting automatically")
                self.binsize += 1
            spacing = float(self.framerate) / self.binsize
            avgdata = np.zeros(self.binsize // 2, dtype=np.float64)
            for chan in range(self.channels):
                for i in range(0, self.wav.shape[1], self.binsize):
                    data = np.array(self.wav[chan][i:i + self.binsize], dtype=self.size)
                    if len(data) != self.binsize:
                        continue
                    fft = pyfftw.interfaces.numpy_fft.fft(data)
                    fft = np.abs(fft[:self.binsize // 2])
                    avgdata += fft
                    del data
                    del fft
            if max(avgdata) == 0:
                print("Warning: Bin size is too large to analyze sample; dividing by 2 and trying again")
                self.binsize = self.binsize // 2
                self._fft = self.fft
            else:
                if self.delete_raw:
                    del self.wav
                self._fft = CalculatedFFT(avgdata, spacing)
        return self._fft

    @property
    def img(self):
        if not self._img:
            self._img = Image.frombytes("I", (self.length, self.channels),
                                        (self.wav * self.volume).astype(np.int32).tobytes(),
                                        "raw", "I", 0, 1)
            # Pillow recommends those last args because of a bug in the raw parser
            # See http://pillow.readthedocs.io/en/3.2.x/reference/Image.html?highlight=%22raw%22#PIL.Image.frombuffer
        return self._img

    @property
    def maxfreq(self):
        if not self._maxfreq:
            self._maxfreq = (np.argmax(self.fft.avgdata[1:]) * self.fft.spacing) + (self.fft.spacing / 2)
        return self._maxfreq

    def __len__(self):
        return self.length
Python
0.000114
@@ -3419,32 +3419,36 @@ f._img:%0A + self._img = Imag @@ -3480,24 +3480,28 @@ + (self.length @@ -3518,16 +3518,20 @@ nnels),%0A + @@ -3615,16 +3615,20 @@ + %22raw%22, %22 @@ -3637,16 +3637,20 @@ , 0, 1)%0A + @@ -3720,16 +3720,20 @@ parser%0A +
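The commit above restores a body line that had escaped its `if` guard. A minimal sketch of the lazy-caching property pattern being fixed; `expensive_render` is a hypothetical stand-in for the `Image.frombytes(...)` call:

def expensive_render():
    return object()  # stand-in for the real rendering work

class Lazy:
    def __init__(self):
        self._img = None

    @property
    def img(self):
        if self._img is None:
            # The assignment must sit inside the `if`; left un-indented,
            # the original version did not even parse.
            self._img = expensive_render()
        return self._img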
b58c8b4f9d049207b7e7e0e4de7058959df90b70
Use sendgrid's Subject type when sending email. (#1033)
src/appengine/libs/mail.py
src/appengine/libs/mail.py
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for sending mail."""

from builtins import str

from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import From
from sendgrid.helpers.mail import HtmlContent
from sendgrid.helpers.mail import Mail
from sendgrid.helpers.mail import To

from config import db_config
from metrics import logs


def send(to_email, subject, html_content):
  """Send email."""
  sendgrid_api_key = db_config.get_value('sendgrid_api_key')
  if not sendgrid_api_key:
    logs.log_warn('Skipping email as SendGrid API key is not set in config.')
    return

  from_email = db_config.get_value('sendgrid_sender')
  if not from_email:
    logs.log_warn('Skipping email as SendGrid sender is not set in config.')
    return

  message = Mail(
      from_email=From(str(from_email)),
      to_emails=To(str(to_email)),
      subject=subject,
      html_content=HtmlContent(str(html_content)))
  try:
    sg = SendGridAPIClient(sendgrid_api_key)
    response = sg.send(message)
    logs.log(
        'Sent email to %s.' % to_email,
        status_code=response.status_code,
        body=response.body,
        headers=response.headers)
  except Exception:
    logs.log_error('Failed to send email to %s.' % to_email)
Python
0
@@ -788,16 +788,58 @@ rt Mail%0A +from sendgrid.helpers.mail import Subject%0A from sen @@ -1436,23 +1436,32 @@ subject= +Subject( subject +) ,%0A
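A minimal sketch of the resulting call, using sendgrid's typed helpers throughout (addresses and text are hypothetical):

from sendgrid.helpers.mail import From, HtmlContent, Mail, Subject, To

message = Mail(
    from_email=From('noreply@example.com'),
    to_emails=To('dev@example.com'),
    subject=Subject('Crash report'),             # the wrapper this commit adds
    html_content=HtmlContent('<b>details</b>'))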
425ae0042b050773e7c55f3cdc34ca3a68069238
use test app
sacrud/pyramid_ext/tests/__init__.py
sacrud/pyramid_ext/tests/__init__.py
# -*- coding: utf-8 -*-
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine, orm
import unittest

from sacrud.tests.test_models import User, Profile, PHOTO_PATH, Base
from sacrud.action import get_relations, delete_fileobj, read, update, delete
from sacrud.action import get_pk, index, create
from pyramid.testing import DummyRequest
from StringIO import StringIO
import glob
import os

from zope.sqlalchemy import ZopeTransactionExtension
import transaction
from pyramid import testing
from pyramid.config import Configurator
from webtest.app import TestApp
from pyramid.url import route_url


class MockCGIFieldStorage(object):
    pass


class SacrudTests(unittest.TestCase):

    def setUp(self, result=None):
        request = testing.DummyRequest()
        config = testing.setUp(request=request)
        config.registry.settings['sqlalchemy.url'] = "sqlite:///:memory:"
        config.include('sacrud.pyramid_ext')
        settings = config.registry.settings
        settings['sacrud_models'] = (User, Profile)
        config.scan()

        engine = create_engine('sqlite:///:memory:')
        DBSession = orm.scoped_session(
            orm.sessionmaker(extension=ZopeTransactionExtension()))
        DBSession.remove()
        DBSession.configure(bind=engine)
        session = DBSession
        self.session = session

        # To create tables, you typically do:
        # User.metadata.create_all(engine)
        User.metadata.create_all(engine)
        Profile.metadata.create_all(engine)

        self.app = config.make_wsgi_app()
        self.testapp = TestApp(self.app)

    def tearDown(self):
        def clear_files():
            for filename in glob.glob("%s/*.html" % (PHOTO_PATH, )):
                os.remove(os.path.join(PHOTO_PATH, filename))
        clear_files()
        self.session.remove()
        testing.tearDown()

    def add_user(self):
        user = User(u'Vasya', u'Pupkin', u"123")
        self.session.add(user)
        transaction.commit()
        user = self.session.query(User).get(1)
        return user

    def test_home_view(self):
        self.add_user()
        request = testing.DummyRequest()
        name = route_url('sa_home', request)
        response = self.testapp.get(name)
        self.failUnlessEqual(response.status, '200 OK')
        self.failUnlessEqual("Tables" in response, True)
        self.failUnlessEqual("user" in response, True)
        self.failUnlessEqual("profile" in response, True)

    def test_list_view(self):
        request = testing.DummyRequest()
        name = route_url('sa_list', request, table="user")
        response = self.app.get(name)
        self.failUnlessEqual(response.status, '200 OK')

    def test_add_view(self):
        pass

    def test_update_view(self):
        pass

    def test_delete_view(self):
        pass
Python
0.000001
@@ -2649,16 +2649,20 @@ = self. +test app.get(
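The fix swaps a raw WSGI app for its webtest wrapper; only `TestApp` instances expose request helpers like `.get()`. A minimal sketch with a toy WSGI app:

from webtest import TestApp

def simple_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello']

app = TestApp(simple_app)   # wrap the WSGI callable
response = app.get('/')     # issue a request without a running server
assert response.status == '200 OK'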
b393b432c4f24906e1919999402ed56bde49086e
Fix test case - found another trunk tunnel on layer 0.
integration-test/546-road-sort-keys-tunnel.py
integration-test/546-road-sort-keys-tunnel.py
# tunnels at level = 0
# https://www.openstreetmap.org/way/167952621
assert_has_feature(
    16, 10475, 25324, "roads",
    {"kind": "highway", "kind_detail": "motorway", "id": 167952621,
     "name": "Presidio Pkwy.", "is_tunnel": True, "sort_rank": 333})

# http://www.openstreetmap.org/way/259492762
assert_has_feature(
    16, 19267, 24634, "roads",
    {"kind": "major_road", "kind_detail": "trunk", "id": 259492762,
     "name": "Raymond Blvd.", "is_tunnel": True, "sort_rank": 331})

# http://www.openstreetmap.org/way/277441866
assert_has_feature(
    16, 17563, 25792, "roads",
    {"kind": "major_road", "kind_detail": "trunk", "id": 277441866,
     "name": "Gatlinburg Spur Road (north)", "is_tunnel": True,
     "sort_rank": 331})

# https://www.openstreetmap.org/way/117837633
assert_has_feature(
    16, 16808, 24434, "roads",
    {"kind": "major_road", "kind_detail": "primary", "id": 117837633,
     "name": "Dixie Hwy.", "is_tunnel": True, "sort_rank": 330})

# https://www.openstreetmap.org/way/57782075
assert_has_feature(
    16, 16812, 24391, "roads",
    {"kind": "major_road", "kind_detail": "secondary", "id": 57782075,
     "name": "S Halsted St.", "is_tunnel": True, "sort_rank": 329})

# https://www.openstreetmap.org/way/57708079
assert_has_feature(
    16, 16813, 24386, "roads",
    {"kind": "major_road", "kind_detail": "tertiary", "id": 57708079,
     "name": "W 74th St.", "is_tunnel": True, "sort_rank": 327})

# https://www.openstreetmap.org/way/56393654
assert_has_feature(
    16, 16808, 24362, "roads",
    {"kind": "minor_road", "kind_detail": "residential", "id": 56393654,
     "name": "S Paulina St.", "is_tunnel": True, "sort_rank": 310})

# https://www.openstreetmap.org/way/190835369
assert_has_feature(
    16, 16814, 24363, "roads",
    {"kind": "minor_road", "kind_detail": "service", "id": 190835369,
     "name": "S Wong Pkwy.", "is_tunnel": True, "sort_rank": 308})
Python
0
@@ -292,18 +292,18 @@ /2594927 -62 +89 %0Aassert_ @@ -331,16 +331,16 @@ 1926 -7 +6 , 2463 -4 +5 , %22r @@ -410,18 +410,18 @@ 2594927 -62 +89 ,%0A %22 @@ -432,20 +432,20 @@ %22: %22 -Raymond Blvd +McCarter Hwy .%22,
c3b92c1de1c8a2b9e0b3e585277186d5e453a06e
Copy the namespace of the root as well, otherwise it gets added to the string elements themselves and this gets messy and ugly
java/graveyard/support/scripts/copy-string.py
java/graveyard/support/scripts/copy-string.py
#!/usr/bin/env python

import os
import os.path
import sys

import lxml.etree

source_path = os.path.expanduser('~/workspace/git/android/packages/apps/Mms')
#source_path = os.path.expanduser('~/workspace/git/android/platform/packages/apps/Mms')
#source_path = os.path.expanduser('~/workspace/git/android/frameworks/base/core/res')
dest_path = os.path.expanduser('~/workspace/git/android-sms-merge/android_sms_merge')


def main():
    if len(sys.argv) < 2:
        sys.exit('Error: STRING is required')
    string_to_copy = sys.argv[1]

    source_res_path = os.path.join(source_path, 'res')
    dest_res_path = os.path.join(dest_path, 'res')

    # This allows lxml to output much nicer looking output
    parser = lxml.etree.XMLParser(remove_blank_text=True)

    for values_folder in os.listdir(source_res_path):
        source_values_path = os.path.join(source_res_path, values_folder)
        if (os.path.isdir(source_values_path)
                and values_folder.startswith('values')):
            source_strings_path = os.path.join(source_values_path, 'strings.xml')
            if (os.path.isfile(source_strings_path)):
                source_root = lxml.etree.parse(source_strings_path, parser)
                for source_element in source_root.iter('string'):
                    if source_element.get('name') == string_to_copy:
                        dest_values_path = os.path.join(dest_res_path, values_folder)

                        # Create the destination values folder if necessary
                        if not os.path.exists(dest_values_path):
                            os.mkdir(dest_values_path)

                        dest_strings_path = os.path.join(dest_values_path, 'strings.xml')
                        if not os.path.exists(dest_strings_path):
                            root = lxml.etree.Element('resources')
                            root.append(source_element)
                            dest_root = lxml.etree.ElementTree(root)
                        else:
                            dest_root = lxml.etree.parse(dest_strings_path, parser)

                            # Iterate over the elements in the destination file
                            it = dest_root.iter('string')
                            while True:
                                try:
                                    dest_element = it.next()

                                    # Don't insert duplicate elements
                                    if dest_element.attrib == source_element.attrib:
                                        break

                                    # Insert the new string alphabetically
                                    if string_to_copy < dest_element.get('name'):
                                        dest_element.addprevious(source_element)
                                        # Don't process any more destination elements
                                        break
                                except StopIteration:
                                    # If we made it this far, add it to the end
                                    dest_element.addnext(source_element)
                                    break

                        # Write the updated XML file
                        dest_root.write(
                            dest_strings_path,
                            encoding='utf-8',
                            pretty_print=True,
                            xml_declaration=True,
                        )


if __name__ == '__main__':
    main()
Python
0
@@ -1931,19 +1931,133 @@ ent( -'resources' +%0A source_root.getroot().tag,%0A nsmap=source_root.getroot().nsmap )%0A
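A minimal sketch of the lxml idiom this commit adopts — copying the source root's tag and namespace map so namespaces are declared once on the new root instead of on every appended child (the xmlns shown is a hypothetical example):

import lxml.etree

source_root = lxml.etree.fromstring(
    '<resources xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2"/>')

root = lxml.etree.Element(source_root.tag, nsmap=source_root.nsmap)
assert root.nsmap == source_root.nsmap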
eeb2276306942120481f2b3e5b41056cb9cdfb5d
Add citations to edges
indra/cx_assembler.py
indra/cx_assembler.py
import json
import itertools
from collections import OrderedDict

from indra.statements import *


class CxAssembler():
    # http://www.ndexbio.org/data-model/
    def __init__(self):
        self.statements = []
        self.existing_nodes = {}
        self.existing_edges = {}
        self.cx = {'nodes': [], 'edges': [],
                   'nodeAttributes': [], 'edgeAttributes': [],
                   'networkAttributes': []}
        self.id_counter = 0

    def add_statements(self, stmts):
        for stmt in stmts:
            self.statements.append(stmt)

    def make_model(self):
        for stmt in self.statements:
            if isinstance(stmt, Phosphorylation):
                self.add_phosphorylation(stmt)
            elif isinstance(stmt, Dephosphorylation):
                self.add_dephosphorylation(stmt)
            elif isinstance(stmt, ActivityActivity):
                self.add_activityactivity(stmt)
            elif isinstance(stmt, Complex):
                self.add_complex(stmt)
        network_name = 'indra_assembled'
        network_description = ''
        self.cx['networkAttributes'].append({'n': 'name', 'v': network_name})
        self.cx['networkAttributes'].append({'n': 'description',
                                             'v': network_description})

    def add_phosphorylation(self, stmt):
        if stmt.enz is None:
            return
        enz_id = self.add_node(stmt.enz)
        sub_id = self.add_node(stmt.sub)
        self.add_edge(enz_id, sub_id, 'Phosphorylation', stmt)

    def add_dephosphorylation(self, stmt):
        if stmt.enz is None:
            return
        enz_id = self.add_node(stmt.enz)
        sub_id = self.add_node(stmt.sub)
        self.add_edge(enz_id, sub_id, 'Dephosphorylation', stmt)

    def add_complex(self, stmt):
        for m1, m2 in itertools.combinations(stmt.members, 2):
            m1_id = self.add_node(m1)
            m2_id = self.add_node(m2)
            self.add_edge(m1_id, m2_id, 'Complex', stmt)

    def add_activityactivity(self, stmt):
        subj_id = self.add_node(stmt.subj)
        obj_id = self.add_node(stmt.obj)
        # TODO: take into account relation here
        self.add_edge(subj_id, obj_id, 'ActivityActivity', stmt)

    def add_node(self, agent):
        node_key = agent.name
        try:
            node_id = self.existing_nodes[node_key]
            return node_id
        except KeyError:
            pass
        node_id = self.id_counter
        self.existing_nodes[node_key] = node_id
        node = {'@id': node_id, 'n': agent.name}
        self.cx['nodes'].append(node)
        for db_name, db_ids in agent.db_refs.iteritems():
            node_attribute = {'po': node_id, 'n': db_name, 'v': db_ids}
            self.cx['nodeAttributes'].append(node_attribute)
        self.id_counter += 1
        return node_id

    def add_edge(self, source, target, interaction, stmt):
        edge_key = (source, target, interaction)
        try:
            edge_id = self.existing_edges[edge_key]
            return edge_id
        except KeyError:
            pass
        edge_id = self.id_counter
        self.existing_edges[edge_key] = edge_id
        edge = {'@id': edge_id, 's': source, 't': target, 'i': interaction}
        self.cx['edges'].append(edge)
        indra_stmt_str = '%s' % stmt
        edge_attribute = {'po': edge_id, 'n': 'INDRA statement',
                          'v': indra_stmt_str}
        self.cx['edgeAttributes'].append(edge_attribute)
        self.id_counter += 1
        return edge_id

    def print_cx(self):
        full_cx = OrderedDict()
        full_cx['numberVerification'] = [{'longNumber': 281474976710655}]
        full_cx['metaData'] = [{'idCounter': self.id_counter, 'name': 'nodes'},
                               {'idCounter': self.id_counter, 'name': 'edges'}]
        for k, v in self.cx.iteritems():
            full_cx[k] = v
        full_cx = [{k: v} for k, v in full_cx.iteritems()]
        json_str = json.dumps(full_cx, indent=2)
        return json_str

    def save_model(self, fname='model.cx'):
        with open(fname, 'wt') as fh:
            cx_str = self.print_cx()
            fh.write(cx_str)
Python
0
@@ -403,59 +403,222 @@ ' -networkAttributes': %5B%5D%7D%0A self.id_counter = 0 +citations': %5B%5D, 'edgeCitations': %5B%5D,%0A 'networkAttributes': %5B%5D%7D%0A self.id_counter = 0%0A%0A def _get_new_id(self):%0A ret = self.id_counter%0A self.id_counter += 1%0A return ret %0A%0A @@ -2616,34 +2616,37 @@ e_id = self. -id_counter +_get_new_id() %0A sel @@ -3040,37 +3040,8 @@ te)%0A - self.id_counter += 1%0A @@ -3321,34 +3321,37 @@ e_id = self. -id_counter +_get_new_id() %0A sel @@ -3549,16 +3549,138 @@ d(edge)%0A + self.add_edge_attributes(edge_id, stmt)%0A return edge_id%0A%0A def add_edge_attributes(self, edge_id, stmt):%0A @@ -3915,51 +3915,456 @@ -self.id_counter += 1%0A return edge_id +pmids = %5Be.pmid for e in stmt.evidence if e.pmid is not None%5D%0A for pmid in pmids:%0A citation_id = self._get_new_id()%0A citation = %7B'@id': citation_id,%0A 'dc:identifier': 'pmid:%25s' %25 pmid%7D%0A self.cx%5B'citations'%5D.append(citation)%0A edge_citation = %7B'citations': %5Bcitation_id%5D,%0A 'po': %5Bedge_id%5D%7D%0A self.cx%5B'edgeCitations'%5D.append(edge_citation) %0A%0A
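A minimal sketch of the two CX aspect entries the commit emits per cited edge; the ids and the pmid are hypothetical:

citation = {'@id': 42, 'dc:identifier': 'pmid:12345'}   # appended to cx['citations']
edge_citation = {'citations': [42], 'po': [7]}          # appended to cx['edgeCitations'], linking citation 42 to edge 7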
8944bbec97ffdb026bc1662028f8a8bd8633c23a
Remove child/parent aspect.
ingestors/ingestor.py
ingestors/ingestor.py
import sys
import traceback
import os.path
import logging
import importlib
import inspect
import hashlib

from datetime import datetime

try:
    import magic
except ImportError as error:
    logging.exception(error)


class States:
    """Available ingestor states."""

    #: Initiated, but no processing was done yet.
    NEW = u'new'
    #: Initiated and the processing was started, but not finished.
    STARTED = u'started'
    #: The ingestor processing ended.
    FINISHED = u'finished'
    #: All available states.
    ALL = [NEW, STARTED, FINISHED]


class Statuses:
    """Available ingestor statuses."""

    #: Indicates that during the processing no errors or failures occured.
    SUCCESS = u'success'
    #: Indicates occurance of errors during the processing.
    FAILURE = u'failure'
    #: Indicates a complete ingestor stop due to system issue.
    STOPPED = u'stopped'
    #: All available statuses.
    ALL = [SUCCESS, FAILURE, STOPPED]


class Result(dict):
    """Generic ingestor result class.

    Mainly a dict implementation with object like getters/setters.
    """

    __getattr__ = dict.__getitem__
    __setattr__ = dict.__setitem__

    def __init__(self, *args, **kwargs):
        """Generic ingestor result class constructor.

        Initializes some of the attributes:

        - ``mime_type``, guessed MIME type of the file
        - ``file_size``, the size of the file
        - ``checksum``, the SHA digest of the file
        - ``title``, the title of the document (optional)
        - ``authors``, a list of the document authors (if any)
        - ``content``, the document body, usually text
        - ``order``, the order or page (if available)
        """
        self.mime_type = None
        self.file_size = 0
        self.checksum = None
        self.title = None
        self.authors = []
        self.content = None
        self.order = 0
        super(Result, self).__init__(self, *args, **kwargs)

    def extract_file_info(self, fio, file_path, blocksize=65536):
        """Extracts and updates general file info from its data and path.

        :param fio: An instance of the file to process.
        :type fio: py:class:`io.FileIO`
        :param file_path: The file path.
        :type file_path: str
        :param blocksize: The blocksize to read chunks of data.
        :type blocksize: int
        """
        sha_hash = hashlib.sha1()

        for chunk in iter(lambda: fio.read(blocksize), b''):
            sha_hash.update(chunk)

        self.checksum = sha_hash.hexdigest()
        self.file_size = fio.tell()
        self.title = os.path.basename(file_path)
        fio.seek(0)


class Ingestor(object):
    """Generic ingestor class."""

    #: Result object factory class.
    RESULT_CLASS = Result
    #: List of MIME types it handles.
    MIME_TYPES = []
    #: Available states.
    STATES = States
    #: Available statuses.
    STATUSES = Statuses
    #: A list of exception types leading to a failure status.
    FAILURE_EXCEPTIONS = [
        TypeError,
        ValueError,
        ArithmeticError,
        AssertionError
    ]

    def __init__(self, fio, file_path, parent=None, mime_type=None):
        """Generic ingestor constructor class.

        :param fio: An instance of the file to process.
        :type fio: py:class:`io.FileIO`
        :param file_path: The file path.
        :type file_path: str
        :param parent: Indicates parent file if this is was part of a composed
            file. Examples: archives, email files, etc.
        :type parent: :py:class:`Ingestor`
        """
        self.fio = fio
        self.file_path = os.path.realpath(file_path)
        self.parent = parent
        self.children = []
        self.state = States.NEW
        self.status = Statuses.SUCCESS
        self.started_at = None
        self.ended_at = None
        self.logger = logging.getLogger(self.__module__)
        self.failure_exceptions = tuple(self.FAILURE_EXCEPTIONS)
        self.result = Result(mime_type=mime_type)

        # Do not extract file info unless it is a new file
        if mime_type:
            self.result.extract_file_info(self.fio, self.file_path)

    def configure(self):
        """Ingestor configuration endpoint.

        Initializes different aspects of the ingestor. Returns a dictionary
        with configuration values.

        A good example where to use it, is to overwrite the implementation
        and provide external calls to ``os.environ`` to fetch different
        variables or resolve system paths for executables.

        :rtype: dict
        """
        return {}

    def before(self):
        """Callback called before the processing starts."""
        pass

    def after(self):
        """Callback called after the processing starts."""
        pass

    def before_child(self):
        """Callback called before the processing of a child file starts."""
        pass

    def after_child(self):
        """Callback called after the processing of a child starts."""
        pass

    def exception_handler(self):
        """Ingestor error handler."""
        self.log_exception()

    def log_exception(self):
        """Extract and log the latest exception."""
        lines = traceback.format_exception(*sys.exc_info())
        self.logger.error('\n'.join(lines))

    def ingest(self, config):
        """The ingestor implementation. Should be overwritten.

        This method does not return anything.
        Use the ``result`` attribute to store any resulted data.

        :param dict config: A dictionary with settings.
        """
        raise NotImplementedError()

    def run(self):
        """Main execution loop of an ingestor."""
        self.state = States.STARTED
        self.before()
        self.started_at = datetime.utcnow()
        config = self.configure()

        try:
            self.ingest(config)
        except Exception as exception:
            self.exception_handler()

            if isinstance(exception, self.failure_exceptions):
                self.status = Statuses.FAILURE
            else:
                self.status = Statuses.STOPPED
        finally:
            self.ended_at = datetime.utcnow()
            self.state = States.FINISHED
            self.after()

    @classmethod
    def find_ingestors(cls, cache=[]):
        """Finds available ingestors and caches the results.

        :return: A list of classes.
        :rtype: list
        """
        if cache:
            return cache

        module = importlib.import_module(__package__)

        for attr_name in dir(module):
            attr = getattr(module, attr_name)
            if inspect.isclass(attr) and issubclass(cls, Ingestor):
                cache.append(attr)

        return cache

    @classmethod
    def match(cls, fio, blocksize=4096):
        """Runs file mime type detection to discover appropriate ingestor class.

        :param fio: File object to run detection.
        :type fio: :py:class:`io.FileIO`
        :return: Detected ingestor class and file mime type.
        :rtype: tuple
        """
        mime_type = magic.from_buffer(fio.read(blocksize), mime=True)
        fio.seek(0)

        for ingestor_class in cls.find_ingestors():
            if mime_type in ingestor_class.MIME_TYPES:
                return ingestor_class, mime_type

        logging.getLogger(__package__).error(
            'No ingestors matched mime type: {}'.format(mime_type))

        return None, mime_type
Python
0.000009
@@ -3141,21 +3141,8 @@ ath, - parent=None, mim @@ -3373,197 +3373,8 @@ str%0A - :param parent: Indicates parent file if this is was part of a composed%0A file. Examples: archives, email files, etc.%0A :type parent: :py:class:%60Ingestor%60%0A @@ -3461,64 +3461,8 @@ th)%0A - self.parent = parent%0A self.children = %5B%5D%0A @@ -4547,237 +4547,8 @@ ss%0A%0A - def before_child(self):%0A %22%22%22Callback called before the processing of a child file starts.%22%22%22%0A pass%0A%0A def after_child(self):%0A %22%22%22Callback called after the processing of a child starts.%22%22%22%0A pass%0A%0A
90c42beafe4dc5168224fd96cf7891695c7cf346
fix save default values
ini_tools/ini_file.py
ini_tools/ini_file.py
import os

from config_parser import WZConfigParser
from profile_loader import Profile, get_profiles_name_list
from generate_ini_header import get_header


class WZException(Exception):
    pass


class IniFile(dict):
    profiles = get_profiles_name_list()

    def get_profile_for_ini(self):
        name = os.path.basename(self.path)[:-4]
        if name in self.profiles:
            return Profile(name)
        # hack for research
        elif name[:-5] in self.profiles:
            return Profile(name[:-5])
        else:
            raise WZException("Can't find profile for %s" % self.path)

    def __init__(self, path, data_dict=None):
        self.path = path
        self.name = os.path.basename(path)[:-4]
        self.profile = self.get_profile_for_ini()

        if data_dict:
            self.update(data_dict)
        else:
            config = WZConfigParser()
            config.load(path)
            for section_name in config.sections():
                self[section_name] = dict(config.items(section_name))

    def save(self, filename=None):
        if filename is None:
            filename = self.path
        text_list = [get_header(self.profile)]
        for section_name, section_items in self.items():
            section_list = ['', '[%s]' % section_name]
            for item in sorted(section_items.items(),
                               key=lambda x: self.profile.field_order.index(x[0])):
                prepared_value = self.prepare_value(item)
                if prepared_value:
                    section_list.append(prepared_value)
            text_list.extend(section_list)

        with open(filename, 'w') as fd:
            fd.write('\n'.join(text_list))

    def prepare_value(self, item):
        key, val = item
        field = self.profile[key]
        if str(field.get('default')) == val:
            return None
        if field['type'] == 'pie':
            return "%s = %s" % (key, val.lower())
        return "%s = %s" % item

    #if __name__ == '__main__':
    #    ini_file = IniFile("G:/warzone2100/data/base/stats/propulsion.ini")
    #    with open('tmp.ini', 'w') as fd:
    #        ini_file.save(fd)

    @classmethod
    def from_dict(cls, data_dict, dest_file):
        return IniFile()
Python
0.000001
@@ -1797,19 +1797,24 @@ t')) == +str( val +) :%0A
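The fix normalizes both sides of the comparison to strings; otherwise an in-memory default such as the integer 100 can never equal a value still held as an int. A minimal sketch with a hypothetical field:

field = {'default': 100}  # hypothetical profile field
val = 100                 # value still held as an int

assert str(field.get('default')) == str(val)  # after the fix: '100' == '100'
assert str(field.get('default')) != val       # before: '100' == 100 is False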
8f898be3d642bb4690e19b7e91ba087fba68dac0
Fix bug that conditions are ignored other than last [ignore_properties add-on]
jumeaux/addons/judgement/ignore_properties.py
jumeaux/addons/judgement/ignore_properties.py
# -*- coding:utf-8 -*-

"""For example of config

judgement:
  - name: jumeaux.addons.judgement.ignore_properties
    config:
      ignores:
        - title: reason
          image: https://......png
          link: https://......
          conditions:
            - path: '/route'
              changed:
                - root['items'][0]
                - root['unit']
            - path: '/repositories'
              added:
                - root['items'][\d+]
              removed:
                - root['items']
"""

import logging
import re

from fn import _
from owlmixin import OwlMixin
from owlmixin.owlcollections import TList
from typing import Optional, List

from jumeaux.addons.judgement import JudgementExecutor
from jumeaux.models import JudgementAddOnPayload, DiffKeys

logger = logging.getLogger(__name__)


class Condition(OwlMixin):
    path: Optional[str]
    added: TList[str]
    removed: TList[str]
    changed: TList[str]

    def __init__(self, path: Optional[str]=None,
                 added: Optional[List[str]]=None,
                 removed: Optional[List[str]]=None,
                 changed: Optional[List[str]]=None):
        self.path = path
        self.added = TList(added) if added is not None else TList()
        self.removed = TList(removed) if removed is not None else TList()
        self.changed = TList(changed) if changed is not None else TList()


class Ignore(OwlMixin):
    title: Optional[str]
    conditions: TList[Condition]
    image: Optional[str]
    link: Optional[str]

    def __init__(self, title: str, conditions: TList[Condition],
                 image: Optional[str]=None, link: Optional[str]=None):
        self.title = title
        self.conditions = Condition.from_dicts(conditions)
        self.image = image
        self.link = link


class Config(OwlMixin):
    ignores: TList[Ignore]

    def __init__(self, ignores):
        self.ignores = Ignore.from_dicts(ignores)


class Executor(JudgementExecutor):
    config: Config

    def __init__(self, config: dict):
        self.config = Config.from_dict(config or {})

    def exec(self, payload: JudgementAddOnPayload):
        if payload.regard_as_same or payload.diff_keys is None:
            return payload

        def filter_diff_keys(diff_keys: DiffKeys, condition: Condition) -> DiffKeys:
            if condition.path and not re.search(condition.path, payload.path):
                return diff_keys

            return DiffKeys.from_dict({
                "added": payload.diff_keys.added.reject(
                    lambda dk: condition.added.any(lambda ig: re.search(ig, dk))
                ),
                "removed": payload.diff_keys.removed.reject(
                    lambda dk: condition.removed.any(lambda ig: re.search(ig, dk))
                ),
                "changed": payload.diff_keys.changed.reject(
                    lambda dk: condition.changed.any(lambda ig: re.search(ig, dk))
                )
            })

        filtered_diff_keys = self.config.ignores.flat_map(_.conditions).reduce(filter_diff_keys, payload.diff_keys)

        return JudgementAddOnPayload.from_dict({
            "path": payload.path,
            "qs": payload.qs,
            "headers": payload.headers,
            "res_one": payload.res_one,
            "res_other": payload.res_other,
            "diff_keys": payload.diff_keys.to_dict(),
            "regard_as_same": not (filtered_diff_keys.added or
                                   filtered_diff_keys.removed or
                                   filtered_diff_keys.changed)
        })
Python
0
@@ -331,18 +331,16 @@ ems'%5D%5B0%5D - %0A @@ -2417,32 +2417,24 @@ %22added%22: -payload. diff_keys.ad @@ -2437,32 +2437,32 @@ s.added.reject(%0A + @@ -2568,32 +2568,24 @@ %22removed%22: -payload. diff_keys.re @@ -2692,32 +2692,32 @@ ),%0A + @@ -2727,24 +2727,16 @@ anged%22: -payload. diff_key
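A minimal sketch of why the reduce callback must filter its accumulator rather than the original payload — otherwise only the last condition takes effect (plain sets stand in for the real DiffKeys):

from functools import reduce

conditions = [{'a'}, {'b'}]  # hypothetical ignore sets

def apply_condition(keys, ignored):
    return keys - ignored    # filter the accumulated result, not the input

assert reduce(apply_condition, conditions, {'a', 'b', 'c'}) == {'c'}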
59bdc15846158db9123a764f87cdb0dd1a959a22
remove print statements from unit test
test_qudt4dt.py
test_qudt4dt.py
__author__ = 'adam'

#import urllib
#import time
#from subprocess import Popen
#import shlex
#import os
import fusekiutils
import qudt4dt
import unittest


class TestQudt(unittest.TestCase):

    def setUp(self, result=None):
        self.barb = qudt4dt.Barbara("http://localhost:3030")

    def test_get_unit_class(self):
        self.assertEqual(self.barb.get_unit_class(u'http://qudt.org/vocab/unit#DegreeCelsius'),
                         [u'http://qudt.org/schema/qudt#TemperatureUnit'])

    def test_get_units_in_class(self):
        result = self.barb.get_units_in_class(u'http://qudt.org/schema/qudt#TemperatureUnit')
        units = [u'http://qudt.org/vocab/unit#DegreeFahrenheit',
                 u'http://qudt.org/vocab/unit#Kelvin',
                 u'http://qudt.org/vocab/unit#PlanckTemperature',
                 u'http://qudt.org/vocab/unit#DegreeCentigrade',
                 u'http://qudt.org/vocab/unit#DegreeCelsius',
                 u'http://qudt.org/vocab/unit#DegreeRankine']
        print result
        self.assertItemsEqual(result, units)

    def test_get_units_in_same_class(self):
        result = self.barb.get_units_in_same_class(u'http://qudt.org/vocab/unit#DegreeCelsius')
        units = [u'http://qudt.org/vocab/unit#DegreeFahrenheit',
                 u'http://qudt.org/vocab/unit#Kelvin',
                 u'http://qudt.org/vocab/unit#PlanckTemperature',
                 u'http://qudt.org/vocab/unit#DegreeCentigrade',
                 u'http://qudt.org/vocab/unit#DegreeCelsius',
                 u'http://qudt.org/vocab/unit#DegreeRankine']
        print result
        self.assertItemsEqual(result, units)

    def test_convert_value(self):
        convert_value = self.barb.convert_value
        degreeCelsius = u'http://qudt.org/vocab/unit#DegreeCelsius'
        degreeFahrenheit = u'http://qudt.org/vocab/unit#DegreeFahrenheit'
        inch = u'http://qudt.org/vocab/unit#Inc'
        temperatureUnit = u'http://qudt.org/schema/qudt#TemperatureUnit'
        self.assertAlmostEqual(convert_value(degreeCelsius, degreeFahrenheit, 100), 212.003333333)
        self.assertRaises(ValueError, convert_value, degreeFahrenheit, inch, 300)
        self.assertRaises(ValueError, convert_value, temperatureUnit, degreeFahrenheit, 300)


def main():
    try:
        print "launching fuseki..."
        fuseki = fusekiutils.LaunchFuseki()
        unittest.main()
        print ""
    finally:
        print "Terminating fuseki..."
        fuseki.terminate()


if __name__ == '__main__':
    main()
Python
0.000018
@@ -1008,37 +1008,16 @@ nkine'%5D%0A - print result%0A @@ -1581,29 +1581,8 @@ e'%5D%0A - print result%0A
3640cb895bb93d144a615d4b745af135016d67af
order imports
src/plone.server/plone/server/__init__.py
src/plone.server/plone/server/__init__.py
# -*- encoding: utf-8 -*-
# create logging
import logging
logger = logging.getLogger('plone.server')

from zope.i18nmessageid import MessageFactory  # noqa
_ = MessageFactory('plone')

# load the patch before anything else.
from plone.server import patch  # noqa

# load defined migrations
from plone.server.migrate import migrations  # noqa

from plone.server import interfaces
from plone.server import languages

import collections

app_settings = {
    "databases": [],
    "address": 8080,
    "static": [],
    "utilities": [],
    "root_user": {
        "password": ""
    },
    "auth_extractors": [
        "plone.server.auth.extractors.BearerAuthPolicy",
        "plone.server.auth.extractors.BasicAuthPolicy",
        "plone.server.auth.extractors.WSTokenAuthPolicy",
    ],
    "auth_user_identifiers": [],
    "auth_token_validators": [
        "plone.server.auth.validators.SaltedHashPasswordValidator",
        "plone.server.auth.validators.JWTValidator"
    ],
    "default_layers": [
        interfaces.IDefaultLayer
    ],
    "http_methods": {
        "PUT": interfaces.IPUT,
        "POST": interfaces.IPOST,
        "PATCH": interfaces.IPATCH,
        "DELETE": interfaces.IDELETE,
        "GET": interfaces.IGET,
        "OPTIONS": interfaces.IOPTIONS,
        "HEAD": interfaces.IHEAD,
        "CONNECT": interfaces.ICONNECT
    },
    "renderers": collections.OrderedDict({
        "application/json": interfaces.IRendererFormatJson,
        "text/html": interfaces.IRendererFormatHtml,
        "*/*": interfaces.IRendererFormatRaw
    }),
    "languages": {
        "en": languages.IEN,
        "en-us": languages.IENUS,
        "ca": languages.ICA
    },
    "default_permission": 'zope.Public',
    "available_addons": {},
    "api_definition": {},
    "cors": {
        "allow_origin": ["http://localhost:8080"],
        "allow_methods": ["GET", "POST", "DELETE", "HEAD", "PATCH", "OPTIONS"],
        "allow_headers": ["*"],
        "expose_headers": ["*"],
        "allow_credentials": True,
        "max_age": 3660
    },
    "jwt": {
        "secret": "foobar",
        "algorithm": "HS256"
    }
}

SCHEMA_CACHE = {}
PERMISSIONS_CACHE = {}
FACTORY_CACHE = {}
BEHAVIOR_CACHE = {}
Python
0.000002
@@ -22,166 +22,8 @@ -*- -%0A# create logging%0Aimport logging%0Alogger = logging.getLogger('plone.server')%0A%0Afrom zope.i18nmessageid import MessageFactory # noqa%0A_ = MessageFactory('plone') %0A%0A# @@ -98,16 +98,89 @@ # noqa%0A +%0Afrom plone.server import interfaces%0Afrom plone.server import languages%0A%0A # load d @@ -253,100 +253,178 @@ oqa%0A -%0A from -plone.server import interfaces%0Afrom plone.server import languages%0A%0A%0Aimport collections +zope.i18nmessageid import MessageFactory%0A%0Aimport collections%0Aimport logging%0A%0A# create logging%0Alogger = logging.getLogger('plone.server')%0A%0A_ = MessageFactory('plone') %0A%0A%0Aa
93473bed77b3cfdfea8e943d65f5661aff3fc8de
add show plot in example code
librosa/util/_nnls.py
librosa/util/_nnls.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Non-negative least squares'''

# The scipy library provides an nnls solver, but it does
# not generalize efficiently to matrix-valued problems.
# We therefore provide an alternate solver here.
#
# The vectorized solver uses the L-BFGS-B over blocks of
# data to efficiently solve the constrained least-squares problem.

import numpy as np
import scipy.optimize

from .utils import MAX_MEM_BLOCK

__all__ = ['nnls']


def _nnls_obj(x, shape, A, B):
    '''Compute the objective and gradient for NNLS'''

    # Scipy's lbfgs flattens all arrays, so we first reshape
    # the iterate x
    x = x.reshape(shape)

    # Compute the difference matrix
    diff = np.dot(A, x) - B

    # Compute the objective value
    value = 0.5 * np.sum(diff**2)

    # And the gradient
    grad = np.dot(A.T, diff)

    # Flatten the gradient
    return value, grad.flatten()


def _nnls_lbfgs_block(A, B, x_init=None, **kwargs):
    '''Solve the constrained problem over a single block

    Parameters
    ----------
    A : np.ndarray [shape=(m, d)]
        The basis matrix

    B : np.ndarray [shape=(m, N)]
        The regression targets

    x_init : np.ndarray [shape=(d, N)]
        An initial guess

    kwargs
        Additional keyword arguments to `scipy.optimize.fmin_l_bfgs_b`

    Returns
    -------
    x : np.ndarray [shape=(d, N)]
        Non-negative matrix such that Ax ~= B
    '''

    # If we don't have an initial point, start at the projected
    # least squares solution
    if x_init is None:
        x_init = np.linalg.lstsq(A, B, rcond=None)[0]
        np.clip(x_init, 0, None, out=x_init)

    # Adapt the hessian approximation to the dimension of the problem
    kwargs.setdefault('m', A.shape[1])

    # Construct non-negative bounds
    bounds = [(0, None)] * x_init.size
    shape = x_init.shape

    # optimize
    x, obj_value, diagnostics = scipy.optimize.fmin_l_bfgs_b(
        _nnls_obj, x_init, args=(shape, A, B), bounds=bounds, **kwargs)

    # reshape the solution
    return x.reshape(shape)


def nnls(A, B, **kwargs):
    '''Non-negative least squares.

    Given two matrices A and B, find a non-negative matrix X
    that minimizes the sum squared error:

        err(X) = sum_i,j ((AX)[i,j] - B[i, j])^2

    Parameters
    ----------
    A : np.ndarray [shape=(m, n)]
        The basis matrix

    B : np.ndarray [shape=(m, N)]
        The target matrix.

    kwargs
        Additional keyword arguments to `scipy.optimize.fmin_l_bfgs_b`

    Returns
    -------
    X : np.ndarray [shape=(n, N), non-negative]
        A minimizing solution to |AX - B|^2

    See Also
    --------
    scipy.optimize.nnls
    scipy.optimize.fmin_l_bfgs_b

    Examples
    --------
    Approximate a magnitude spectrum from its mel spectrogram

    >>> y, sr = librosa.load(librosa.util.example_audio_file(),
    ...                      offset=30, duration=10)
    >>> S = np.abs(librosa.stft(y, n_fft=2048))
    >>> M = librosa.feature.melspectrogram(S=S, sr=sr, power=1)
    >>> mel_basis = librosa.filters.mel(sr, n_fft=2048, n_mels=M.shape[0])
    >>> S_recover = librosa.util.nnls(mel_basis, M)

    Plot the results

    >>> import matplotlib.pyplot as plt
    >>> plt.figure()
    >>> plt.subplot(3, 1, 1)
    >>> librosa.display.specshow(librosa.amplitude_to_db(S, ref=np.max),
    ...                          y_axis='log')
    >>> plt.colorbar()
    >>> plt.title('Original spectrogram (1025 bins)')
    >>> plt.subplot(3, 1, 2)
    >>> librosa.display.specshow(librosa.amplitude_to_db(M, ref=np.max),
    ...                          y_axis='mel')
    >>> plt.title('Mel spectrogram (128 bins)')
    >>> plt.colorbar()
    >>> plt.subplot(3, 1, 3)
    >>> librosa.display.specshow(librosa.amplitude_to_db(S_recover, ref=np.max),
    ...                          y_axis='log')
    >>> plt.colorbar()
    >>> plt.title('Reconstructed spectrogram (1025 bins)')
    >>> plt.tight_layout()
    '''

    # If B is a single vector, punt up to the scipy method
    if B.ndim == 1:
        return scipy.optimize.nnls(A, B)[0]

    n_columns = int(MAX_MEM_BLOCK // (A.shape[-1] * A.itemsize))

    # Process in blocks:
    if B.shape[-1] <= n_columns:
        return _nnls_lbfgs_block(A, B, **kwargs).astype(A.dtype)

    x = np.linalg.lstsq(A, B, rcond=None)[0].astype(A.dtype)
    np.clip(x, 0, None, out=x)
    x_init = x

    for bl_s in range(0, x.shape[-1], n_columns):
        bl_t = min(bl_s + n_columns, B.shape[-1])
        x[:, bl_s:bl_t] = _nnls_lbfgs_block(A, B[:, bl_s:bl_t],
                                            x_init=x_init[:, bl_s:bl_t],
                                            **kwargs)
    return x
Python
0
@@ -4064,16 +4064,35 @@ ayout()%0A + %3E%3E%3E plt.show()%0A '''%0A
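Docstring plots only render on a call to `plt.show()`, which is what this commit appends to the example. A minimal standalone sketch (the Agg backend keeps it headless):

import matplotlib
matplotlib.use('Agg')  # headless backend for this sketch
import matplotlib.pyplot as plt

plt.figure()
plt.plot([0, 1], [0, 1])
plt.tight_layout()
plt.show()  # no-op under Agg; displays the figure in interactive sessions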
d6b509d7c625016a6ae5514a8b4d0e3dd6d6a9f4
Set up global tempdir manager in BaseCommand
src/pip/_internal/cli/base_command.py
src/pip/_internal/cli/base_command.py
"""Base Command class, and related routines""" from __future__ import absolute_import, print_function import logging import logging.config import optparse import os import platform import sys import traceback from pip._internal.cli import cmdoptions from pip._internal.cli.command_context import CommandContextMixIn from pip._internal.cli.parser import ( ConfigOptionParser, UpdatingDefaultsHelpFormatter, ) from pip._internal.cli.status_codes import ( ERROR, PREVIOUS_BUILD_DIR_ERROR, SUCCESS, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND, ) from pip._internal.exceptions import ( BadCommand, CommandError, InstallationError, PreviousBuildDirError, UninstallationError, ) from pip._internal.utils.deprecation import deprecated from pip._internal.utils.logging import BrokenStdoutLoggingError, setup_logging from pip._internal.utils.misc import get_prog from pip._internal.utils.typing import MYPY_CHECK_RUNNING from pip._internal.utils.virtualenv import running_under_virtualenv if MYPY_CHECK_RUNNING: from typing import List, Tuple, Any from optparse import Values __all__ = ['Command'] logger = logging.getLogger(__name__) class Command(CommandContextMixIn): usage = None # type: str ignore_require_venv = False # type: bool def __init__(self, name, summary, isolated=False): # type: (str, str, bool) -> None super(Command, self).__init__() parser_kw = { 'usage': self.usage, 'prog': '%s %s' % (get_prog(), name), 'formatter': UpdatingDefaultsHelpFormatter(), 'add_help_option': False, 'name': name, 'description': self.__doc__, 'isolated': isolated, } self.name = name self.summary = summary self.parser = ConfigOptionParser(**parser_kw) # Commands should add options to this option group optgroup_name = '%s Options' % self.name.capitalize() self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name) # Add the general options gen_opts = cmdoptions.make_option_group( cmdoptions.general_group, self.parser, ) self.parser.add_option_group(gen_opts) def handle_pip_version_check(self, options): # type: (Values) -> None """ This is a no-op so that commands by default do not do the pip version check. """ # Make sure we do the pip version check if the index_group options # are present. assert not hasattr(options, 'no_index') def run(self, options, args): # type: (Values, List[Any]) -> Any raise NotImplementedError def parse_args(self, args): # type: (List[str]) -> Tuple[Any, Any] # factored out for testability return self.parser.parse_args(args) def main(self, args): # type: (List[str]) -> int try: with self.main_context(): return self._main(args) finally: logging.shutdown() def _main(self, args): # type: (List[str]) -> int options, args = self.parse_args(args) # Set verbosity so that it can be used elsewhere. self.verbosity = options.verbose - options.quiet level_number = setup_logging( verbosity=self.verbosity, no_color=options.no_color, user_log_file=options.log, ) if sys.version_info[:2] == (2, 7): message = ( "A future version of pip will drop support for Python 2.7. " "More details about Python 2 support in pip, can be found at " "https://pip.pypa.io/en/latest/development/release-process/#python-2-support" # noqa ) if platform.python_implementation() == "CPython": message = ( "Python 2.7 reached the end of its life on January " "1st, 2020. Please upgrade your Python as Python 2.7 " "is no longer maintained. 
" ) + message deprecated(message, replacement=None, gone_in=None) if options.skip_requirements_regex: deprecated( "--skip-requirements-regex is unsupported and will be removed", replacement=( "manage requirements/constraints files explicitly, " "possibly generating them from metadata" ), gone_in="20.1", issue=7297, ) # TODO: Try to get these passing down from the command? # without resorting to os.environ to hold these. # This also affects isolated builds and it should. if options.no_input: os.environ['PIP_NO_INPUT'] = '1' if options.exists_action: os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action) if options.require_venv and not self.ignore_require_venv: # If a venv is required check if it can really be found if not running_under_virtualenv(): logger.critical( 'Could not find an activated virtualenv (required).' ) sys.exit(VIRTUALENV_NOT_FOUND) try: status = self.run(options, args) # FIXME: all commands should return an exit status # and when it is done, isinstance is not needed anymore if isinstance(status, int): return status except PreviousBuildDirError as exc: logger.critical(str(exc)) logger.debug('Exception information:', exc_info=True) return PREVIOUS_BUILD_DIR_ERROR except (InstallationError, UninstallationError, BadCommand) as exc: logger.critical(str(exc)) logger.debug('Exception information:', exc_info=True) return ERROR except CommandError as exc: logger.critical('%s', exc) logger.debug('Exception information:', exc_info=True) return ERROR except BrokenStdoutLoggingError: # Bypass our logger and write any remaining messages to stderr # because stdout no longer works. print('ERROR: Pipe to stdout was broken', file=sys.stderr) if level_number <= logging.DEBUG: traceback.print_exc(file=sys.stderr) return ERROR except KeyboardInterrupt: logger.critical('Operation cancelled by user') logger.debug('Exception information:', exc_info=True) return ERROR except BaseException: logger.critical('Exception:', exc_info=True) return UNKNOWN_ERROR finally: self.handle_pip_version_check(options) return SUCCESS
Python
0
@@ -889,16 +889,80 @@ et_prog%0A +from pip._internal.utils.temp_dir import global_tempdir_manager%0A from pip @@ -3186,32 +3186,226 @@ st%5Bstr%5D) -%3E int%0A + # Intentionally set as early as possible so globally-managed temporary%0A # directories are available to the rest of the code.%0A self.enter_context(global_tempdir_manager())%0A%0A options,
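A minimal sketch of the `enter_context` mechanism the commit relies on: contexts registered on a `contextlib.ExitStack` stay open until the stack closes, so the temp-dir manager outlives `_main`. `global_tempdir_manager` below is a hypothetical stand-in for pip's helper:

from contextlib import ExitStack, contextmanager

@contextmanager
def global_tempdir_manager():  # stand-in for pip's helper
    try:
        yield
    finally:
        print('temp dirs cleaned up')

stack = ExitStack()
stack.enter_context(global_tempdir_manager())  # what self.enter_context does
stack.close()  # cleanup runs here, after the command has finished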