Dataset columns, listed as name: dtype (range); each record below repeats these fields in this order (empty fields are omitted):
commit: stringlengths (40 to 40)
subject: stringlengths (1 to 3.25k)
old_file: stringlengths (4 to 311)
new_file: stringlengths (4 to 311)
old_contents: stringlengths (0 to 26.3k)
lang: stringclasses (3 values)
proba: float64 (0 to 1)
diff: stringlengths (0 to 7.82k)
3c9d45ad67b1a1c274cc5ee7a78d174595445733
Update websocket
websocket_data_collector.py
websocket_data_collector.py
#!venv/bin/python
'''
websocket_data_collector.py

This script uses websockets to transmit data collected by the
NeuroPy module to a remote server.
'''

import NeuroPy.NeuroPy as NP
import socketIO_client
import json
import click
from threading import Lock

CLIENT_ID = "CLIENT1"

# declare this globally
socketIO = None
lock = None


def on_connect():
    print("connected")


def on_disconnect():
    print("disconnected")


def on_callback_response(*args):
    print("On callback response: ", args)


# generic callback function for neuropy
# which sends the data collected over socketio
def generic_callback(variable_name, variable_val):
    # generate the dictionary to send to the remote server
    # as specified in the doc
    return_dict = {}
    return_dict["client_id"] = CLIENT_ID

    # for now, do nothing when setting rawData
    if variable_name == "rawData":
        return

    return_dict["data"] = [{"type": variable_name, "value": variable_val}]
    lock.acquire()
    socketIO.emit("data", return_dict, on_callback_response)
    lock.release()


def start_data_collection(serial_port, num_seconds=-1):
    headset_obj = NP.NeuroPy(serial_port, 9600, log=False)
    headset_obj.setCallBack("attention", generic_callback)
    headset_obj.setCallBack("meditation", generic_callback)
    headset_obj.setCallBack("rawValue", generic_callback)
    headset_obj.setCallBack("delta", generic_callback)
    headset_obj.setCallBack("theta", generic_callback)
    headset_obj.setCallBack("lowAlpha", generic_callback)
    headset_obj.setCallBack("highAlpha", generic_callback)
    headset_obj.setCallBack("lowBeta", generic_callback)
    headset_obj.setCallBack("highBeta", generic_callback)
    headset_obj.setCallBack("lowGamma", generic_callback)
    headset_obj.setCallBack("midGamma", generic_callback)
    headset_obj.setCallBack("poorSignal", generic_callback)
    headset_obj.setCallBack("blinkStrength", generic_callback)
    headset_obj.start()


@click.command()
@click.argument('host')
@click.argument('port')
@click.option('--serial_port', default="/dev/tty.MindWaveMobile-SerialPo",
              help="Serial port of bluetooth headset")
@click.option('--time', default=-1, help="Number of seconds to collect data")
def main(host, port, serial_port, time):
    lock = Lock()
    socketIO = socketIO_client.SocketIO(host, port)
    print("Got here")
    #socketIO.on("connect", on_connect)
    #socketIO.on("disconnected", on_disconnect)
    #start_data_collection(serial_port, time)
    for i in range(10):
        socketIO.emit("data", {"test": i})
        socketIO.wait(seconds=1)


if __name__ == "__main__":
    main()
Python
0.000001
@@ -724,118 +724,8 @@ doc%0A - return_dict = %7B%7D%0A return_dict%5B%22client_id%22%5D = CLIENT_ID%0A%0A # for now, do nothing when setting rawData%0A @@ -752,12 +752,13 @@ %22raw -Data +Value %22:%0A @@ -775,49 +775,86 @@ urn%0A +%0A -%0A return_dict%5B%22data%22%5D = %5B%7B%22type%22: +global filename%0A print(%22writing%22)%0A filename.write(%22%7B%7D %7B%7D%5Cn%22.format( vari @@ -867,17 +867,8 @@ ame, - %22value%22: var @@ -880,108 +880,9 @@ _val -%7D%5D%0A lock.acquire()%0A socketIO.emit(%22data%22, return_dict, on_callback_response)%0A lock.release( +) )%0A%0Ad @@ -1778,16 +1778,101 @@ .start() +%0A if num_seconds != -1:%0A time.sleep(num_seconds)%0A headset_obj.stop() %0A%0A@click @@ -1899,20 +1899,23 @@ gument(' -host +runfile ')%0A@clic @@ -1926,20 +1926,24 @@ gument(' -port +clientid ')%0A@clic @@ -2085,18 +2085,17 @@ default= --1 +5 , help=%22 @@ -2139,26 +2139,33 @@ ef main( -host, port +runfile, clientid , serial @@ -2186,189 +2186,87 @@ +g lo -ck = Lock()%0A socketIO = socketIO_client.SocketIO(host, port)%0A print(%22Got here%22)%0A #socketIO.on(%22connect%22, on_connect)%0A #socketIO.on(%22disconnected%22, on_disconnect +bal filename%0A filename = open(%22%7B%7D:%7B%7D%22.format(runfile,clientid), %22w%22 )%0A -# star @@ -2305,108 +2305,8 @@ ime) -%0A for i in range(10):%0A socketIO.emit(%22data%22, %7B%22test%22: i%7D)%0A socketIO.wait(seconds=1)%0A %0A%0Ai
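For readability, the percent-escaped diff above decodes to roughly the following post-commit code for the three regions it touches. Line breaks, indentation and the elided parts marked with `...` are reconstructed by hand, so treat this as a sketch rather than a byte-exact copy of the committed file:

def generic_callback(variable_name, variable_val):
    # generate the dictionary to send to the remote server
    # as specified in the doc
    if variable_name == "rawValue":
        return

    # post-commit: log to a local file instead of emitting over socketio
    global filename
    print("writing")
    filename.write("{} {}\n".format(variable_name, variable_val))


def start_data_collection(serial_port, num_seconds=-1):
    ...
    headset_obj.start()
    if num_seconds != -1:
        time.sleep(num_seconds)
        headset_obj.stop()


@click.command()
@click.argument('runfile')
@click.argument('clientid')
@click.option('--time', default=5, help="Number of seconds to collect data")
def main(runfile, clientid, serial_port, time):
    global filename
    filename = open("{}:{}".format(runfile, clientid), "w")
    start_data_collection(serial_port, time)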
531c81d5654783da9443a2392fe878344ff07b3c
Update feed2db.py
newsman/bin/text_based_feeds/feed2db.py
newsman/bin/text_based_feeds/feed2db.py
#!/usr/bin/env python
#-*- coding: utf-8 -*-

"""
feed2db works to turn text-based feed list into database
"""

# @author chengdujin
# @contact chengdujin@gmail.com
# @created Jul. 30, 2013

import sys
reload(sys)
sys.setdefaultencoding('UTF-8')
sys.path.append('../..')

from config.settings import Collection
from config.settings import db

# CONSTANTS
from config.settings import FEED_REGISTRAR
FILE_PREFIX = '/home/work/newsman/newsman/bin/text_based_feeds/feed_lists/'
#FILE_PREFIX = '/home/ubuntu/newsman/newsman/bin/text_based_feeds/feed_lists/'
#FILE_PREFIX = '/home/jinyuan/Downloads/newsman/newsman/bin/text_based_feeds/feed_lists/'


def _parse_task(line):
    """
    read *_feeds_list.txt
    """
    line = line.strip()
    if line:
        task = line.strip().split('*|*')
        # task[1] refers to categories
        if len(task) == 5:
            return task[0].strip(), task[1].strip(), task[2].strip(), task[3].strip(), task[4].strip(), None
        elif len(task) == 6:
            return task[0].strip(), task[1].strip(), task[2].strip(), task[3].strip(), task[4].strip(), task[5].strip()
        else:
            return None
    else:
        return None


def _convert(language='en', country=None):
    """
    turn text-based feed infor into database items
    Note.
    1. categories: [(), ()]
    """
    # read in file content
    feeds_list = open('%s%s_%s_feeds_list' % (FILE_PREFIX, language, country), 'r')
    lines = feeds_list.readlines()
    feeds_list.close()

    # open datbase
    db_feeds = Collection(db, FEED_REGISTRAR)

    for line in lines:
        if line.strip():
            language, category, transcoder, feed_link, feed_title, labels = _parse_task(line)
            if feed_link:
                category = '%s::%s' % (country, category)
                # break labels
                if labels:
                    labels = ['%s::%s' % (category, label.strip()) for label in labels.split(',')]
                print feed_title, labels

                existing_item = db_feeds.find_one({'link':feed_link})
                if not existing_item:
                    db_feeds.save({'language': language, 'countries':[country], 'feed_link': feed_link, 'categories': [category], 'labels':labels, 'feed_title': feed_title, 'latest_update': None, 'updated_times': 0, 'transcoder': transcoder})
                else:
                    new_item = existing_item
                    new_item['language'] = language
                    new_item['categories'] = list(set(new_item['categories'].extend([category])))
                    new_item['labels'] = list(set(new_item['labels'].extend(labels)))
                    new_item['countries'] = list(set(new_item['countries'].append(country)))
                    new_item['transcoder'] = transcoder
                    new_item['feed_title'] = feed_title
                    db_feeds.update({'_id': item['_id']}, new_item)
            else:
                continue
        else:
            continue


if __name__ == "__main__":
    if len(sys.argv) > 1:
        _convert(sys.argv[1], sys.argv[2])
    else:
        print 'Please indicate a language and country'
Python
0
@@ -391,16 +391,17 @@ GISTRAR%0A +# FILE_PRE @@ -539,33 +539,32 @@ ds/feed_lists/'%0A -# FILE_PREFIX = '/ @@ -1553,16 +1553,57 @@ GISTRAR) +%0A db_id_list = open('db_id_list', 'w') %0A%0A fo @@ -2008,29 +2008,20 @@ nt feed_ -title, labels +link %0A%0A @@ -2066,16 +2066,21 @@ d_one(%7B' +feed_ link':fe @@ -2127,16 +2127,46 @@ g_item:%0A + print '+'%0A @@ -2414,32 +2414,62 @@ else:%0A + print '*'%0A @@ -2595,35 +2595,40 @@ es'%5D = list(set( -new +existing _item%5B'categorie @@ -2698,27 +2698,32 @@ = list(set( -new +existing _item%5B'label @@ -2796,19 +2796,24 @@ ist(set( -new +existing _item%5B'c @@ -2997,16 +2997,25 @@ %7B'_id': +existing_ item%5B'_i @@ -3030,16 +3030,87 @@ w_item)%0A + db_id_list.write(str(existing_item%5B'_id'%5D) + '%5Cn')%0A @@ -3179,16 +3179,39 @@ ontinue%0A + db_id_list.close()%0A %0A%0Aif __n
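Decoded by hand from the diff, the commit switches to the Ubuntu FILE_PREFIX, matches on 'feed_link' instead of the undefined 'link' key, logs saved (+) and updated (*) rows, reads the existing item consistently, fixes the undefined `item` reference, and records updated ids. The elided parts marked with `...` and the indentation are assumptions:

#FILE_PREFIX = '/home/work/newsman/newsman/bin/text_based_feeds/feed_lists/'
FILE_PREFIX = '/home/ubuntu/newsman/newsman/bin/text_based_feeds/feed_lists/'
...
    db_feeds = Collection(db, FEED_REGISTRAR)
    db_id_list = open('db_id_list', 'w')
    ...
                print feed_link

                existing_item = db_feeds.find_one({'feed_link':feed_link})
                if not existing_item:
                    print '+'
                    db_feeds.save({...})  # save payload unchanged from the old file
                else:
                    print '*'
                    new_item = existing_item
                    new_item['categories'] = list(set(existing_item['categories'].extend([category])))
                    new_item['labels'] = list(set(existing_item['labels'].extend(labels)))
                    new_item['countries'] = list(set(existing_item['countries'].append(country)))
                    ...
                    db_feeds.update({'_id': existing_item['_id']}, new_item)
                    db_id_list.write(str(existing_item['_id']) + '\n')
    db_id_list.close()

Note that even after the rename, `extend` and `append` return None, so `list(set(...))` on those results would still raise TypeError at runtime; the commit only changes which variable is read, not that behaviour.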
0cc7fbea3952485e8274c8df1b223fc791181035
Complete migrate from django to toilets script
ona_migration_script/migrate_toilets.py
ona_migration_script/migrate_toilets.py
Python
0.000001
@@ -0,0 +1,2418 @@ +import argparse%0A%0Afrom ona import OnaApiClient%0A%0A%0Adef generate_location(lat, lon):%0A return ' '.join(%5Bstr(lat), str(lon)%5D)%0A%0ACONVERSIONS = %7B%0A 'code': 'toilet_code', 'section': 'toilet_section',%0A 'cluster': 'toilet_cluster'%7D%0AADDITIONS = %7B%0A 'toilet_location': (generate_location, %5B'lat', 'lon'%5D)%0A%7D%0ADEFAULTS = %7B%0A 'toilet_state': 'no_issue', 'toilet_issue': '', 'toilet_issue_date': ''%7D%0A%0Aparser = argparse.ArgumentParser(description='Migrate submissions')%0Aparser.add_argument(%0A 'url', type=str,%0A help='The full URL to get the JSON toilet information from')%0Aparser.add_argument(%0A 'to_id', type=str,%0A help=%22The id (number) of the form to migrate submissions to%22)%0Aparser.add_argument(%0A 'username', type=str, help='The Ona username used to log in')%0Aparser.add_argument(%0A 'password', type=str, help='The Ona password used to log in')%0Aargs = parser.parse_args()%0A%0Aclient = OnaApiClient(args.username, args.password)%0A%0A%0Adef get_toilet_info_from_django():%0A url = args.url%0A headers = %7B%0A %22Content-type%22: %22application/json; charset=utf-8%22%0A %7D%0A r = client.session.request(%0A 'GET', url, headers=headers)%0A r.raise_for_status()%0A return r.json()%0A%0A%0Adef get_fields_from_form(formid):%0A form = client.get_form_information(formid)%0A fields = %5B%5D%0A for child in form.get('children'):%0A fields.append(child.get('name'))%0A return fields%0A%0Atoilet_data = get_toilet_info_from_django()%0Ato_fields = get_fields_from_form(args.to_id)%0A%0Afor toilet in toilet_data:%0A new_toilet = toilet.copy()%0A # Add fields%0A for field, (function, arguments) in ADDITIONS.iteritems():%0A arguments = %5Btoilet%5Barg%5D for arg in arguments%5D%0A new_toilet%5Bfield%5D = function(*arguments)%0A # Migrate fields%0A for field in toilet:%0A if field in CONVERSIONS:%0A new_toilet%5BCONVERSIONS%5Bfield%5D%5D = toilet%5Bfield%5D%0A # Remove deleted fields%0A if field not in to_fields:%0A del new_toilet%5Bfield%5D%0A # Add missing fields%0A for field in to_fields:%0A if field not in new_toilet:%0A new_toilet%5Bfield%5D = DEFAULTS.get(field, None)%0A # Post submission to new form%0A form_id_string = client.get_form(args.to_id)%5B'id_string'%5D%0A try:%0A client.submission(%7B%0A %22id%22: form_id_string,%0A %22submission%22: new_toilet,%0A %7D)%0A except:%0A print %22Error sending form %25s. Submission: %22 %25 form_id_string%0A print new_toilet%0A
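old_contents is empty here because the commit creates the file, so the whole script is readable from the encoded diff alone. Decoded, with line structure and indentation reconstructed by hand, it is approximately:

import argparse

from ona import OnaApiClient


def generate_location(lat, lon):
    return ' '.join([str(lat), str(lon)])

CONVERSIONS = {
    'code': 'toilet_code', 'section': 'toilet_section',
    'cluster': 'toilet_cluster'}
ADDITIONS = {
    'toilet_location': (generate_location, ['lat', 'lon'])
}
DEFAULTS = {
    'toilet_state': 'no_issue', 'toilet_issue': '', 'toilet_issue_date': ''}

parser = argparse.ArgumentParser(description='Migrate submissions')
parser.add_argument(
    'url', type=str,
    help='The full URL to get the JSON toilet information from')
parser.add_argument(
    'to_id', type=str,
    help="The id (number) of the form to migrate submissions to")
parser.add_argument(
    'username', type=str, help='The Ona username used to log in')
parser.add_argument(
    'password', type=str, help='The Ona password used to log in')
args = parser.parse_args()

client = OnaApiClient(args.username, args.password)


def get_toilet_info_from_django():
    url = args.url
    headers = {
        "Content-type": "application/json; charset=utf-8"
    }
    r = client.session.request(
        'GET', url, headers=headers)
    r.raise_for_status()
    return r.json()


def get_fields_from_form(formid):
    form = client.get_form_information(formid)
    fields = []
    for child in form.get('children'):
        fields.append(child.get('name'))
    return fields

toilet_data = get_toilet_info_from_django()
to_fields = get_fields_from_form(args.to_id)

for toilet in toilet_data:
    new_toilet = toilet.copy()
    # Add fields
    for field, (function, arguments) in ADDITIONS.iteritems():
        arguments = [toilet[arg] for arg in arguments]
        new_toilet[field] = function(*arguments)
    # Migrate fields
    for field in toilet:
        if field in CONVERSIONS:
            new_toilet[CONVERSIONS[field]] = toilet[field]
        # Remove deleted fields
        if field not in to_fields:
            del new_toilet[field]
    # Add missing fields
    for field in to_fields:
        if field not in new_toilet:
            new_toilet[field] = DEFAULTS.get(field, None)
    # Post submission to new form
    form_id_string = client.get_form(args.to_id)['id_string']
    try:
        client.submission({
            "id": form_id_string,
            "submission": new_toilet,
        })
    except:
        print "Error sending form %s. Submission: " % form_id_string
        print new_toilet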
2fc39a00cc1e720499b33637ff91220b0107d064
Fix test
lib/freetypy/tests/test_face.py
lib/freetypy/tests/test_face.py
# -*- coding: utf-8 -*-

# Copyright (c) 2015, Michael Droettboom All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:

# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.

# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

# The views and conclusions contained in the software and
# documentation are those of the authors and should not be interpreted
# as representing official policies, either expressed or implied, of
# the FreeBSD Project.

from __future__ import print_function, unicode_literals, absolute_import

import freetypy as ft

from .util import *


def _test_face(face):
    expected_flags = (
        ft.FACE_FLAG.GLYPH_NAMES |
        ft.FACE_FLAG.HINTER |
        ft.FACE_FLAG.HORIZONTAL |
        ft.FACE_FLAG.KERNING |
        ft.FACE_FLAG.SCALABLE |
        ft.FACE_FLAG.SFNT)

    assert face.face_flags & expected_flags == expected_flags
    assert face.num_faces == 1
    assert face.face_index == 0

    try:
        face.num_faces = 2
    except AttributeError:
        pass
    else:
        assert False, "Shouldn't be able to write to freetypy attributes"

    assert face.style_flags == 0
    assert face.num_glyphs == 268
    assert face.family_name == "Bitstream Vera Sans"
    assert isinstance(face.family_name, type(""))
    assert face.style_name == "Roman"
    assert isinstance(face.style_name, type(""))
    assert face.available_sizes == []
    assert face.units_per_em == 2048
    assert face.ascender == 1901
    assert face.descender == -483
    assert face.height == 2384
    assert face.max_advance_width == 2748
    assert face.max_advance_height == 2384
    assert face.underline_position == -284
    assert face.underline_thickness == 143
    assert repr(face) == "<freetypy.Face 'Bitstream Vera Sans'>"


def test_face():
    face = ft.Face(vera_path())
    _test_face(face)


def test_face_from_file_like():
    import io

    buf = io.BytesIO()
    with open(vera_path(), 'rb') as fd:
        buf.write(fd.read())
    buf.seek(0)

    face = ft.Face(buf)
    _test_face(face)


def test_face_set_transform():
    face = ft.Face(vera_path())
    face.set_transform([[2, 0], [0, 2]], [20, 20])
    face.set_char_size(12, 12, 300, 300)
    glyph = face.load_char(65, ft.LOAD.RENDER)

    assert glyph.advance == (4352, 0)
    assert glyph.advance.x == 4352
    assert glyph.advance.y == 0

    x, y = glyph.advance
    assert x == 4352
    assert y == 0


def test_kerning():
    face = ft.Face(vera_path())
    face.set_char_size(24, 24, 300, 300)

    assert face.face_flags & ft.FACE_FLAG.KERNING

    A = face.get_char_index(ord('A'))
    V = face.get_char_index(ord('V'))

    assert face.get_kerning(A, V, ft.KERNING.UNSCALED) == (-2.046875, 0.0)
    assert face.get_kerning(A, V, ft.KERNING.DEFAULT) == (-6, 0)


def test_get_glyph_name():
    face = ft.Face(vera_path())
    names = (".notdef .null nonmarkingreturn space exclam quotedbl numbersign "
             "dollar percent ampersand quotesingle parenleft").split()
    for i, name in enumerate(names):
        assert face.get_glyph_name(i) == name


def test_char_iter():
    face = ft.Face(vera_path())
    chars = list(face.get_chars())
    assert len(chars) == 256
    assert chars[-1] == (64258, 193)
Python
0.000004
@@ -3371,22 +3371,9 @@ 65, -ft.LOAD.RENDER +0 )%0A%0A
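The fix decodes to a single change inside test_face_set_transform, dropping the render flag from the load call:

    glyph = face.load_char(65, 0)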
b2e6d1ad8c20f72a75796639b9b5036cadb45e7b
Change back to serializing an explicit sweep order for the scripter.
JSONHelpers.py
JSONHelpers.py
import json, sys
from traits.api import HasTraits
import instruments
from Sweeps import Sweep, SweepLibrary
from MeasFilters import MeasFilterLibrary
from QGL.Channels import PhysicalChannel, LogicalChannel, PhysicalQuadratureChannel
from types import FunctionType


class LibraryEncoder(json.JSONEncoder):
    """
    Helper for QLab to encode all the classes we use.
    """
    def default(self, obj):
        #For the pulse functions in channels just return the name
        if isinstance(obj, FunctionType):
            return obj.__name__
        elif isinstance(obj, HasTraits):
            jsonDict = obj.__getstate__()

            #For channels' linked AWG or generator just return the name
            if isinstance(obj, PhysicalChannel):
                awg = jsonDict.pop('AWG')
                jsonDict['AWG'] = awg.name
                source = jsonDict.pop('generator', None)
                if source:
                    jsonDict['generator'] = source.name

            if isinstance(obj, LogicalChannel):
                physChan = jsonDict.pop('physChan')
                jsonDict['physChan'] = physChan.name

            if isinstance(obj, PhysicalQuadratureChannel):
                gateChan = jsonDict.pop('gateChan')
                jsonDict['gateChan'] = gateChan.name

            #Inject the class name for decoding
            jsonDict['__class__'] = obj.__class__.__name__
            jsonDict['__module__'] = obj.__class__.__module__

            #Strip out __traits_version__
            if '__traits_version__' in jsonDict:
                del jsonDict['__traits_version__']

            return jsonDict
        else:
            return super(LibraryEncoder, self).default(obj)


class LibraryDecoder(json.JSONDecoder):

    def __init__(self, **kwargs):
        super(LibraryDecoder, self).__init__(object_hook=self.dict_to_obj, **kwargs)

    def dict_to_obj(self, jsonDict):
        if '__class__' in jsonDict:
            #Pop the class and module
            className = jsonDict.pop('__class__')
            moduleName = jsonDict.pop('__module__')
            __import__(moduleName)

            #Re-encode the strings as ascii (this should go away in Python 3)
            jsonDict = {k.encode('ascii'):v for k,v in jsonDict.items()}

            #For points sweeps pop the stop
            if moduleName == 'Sweeps':
                jsonDict.pop('stop', None)

            inst = getattr(sys.modules[moduleName], className)(**jsonDict)
            return inst
        else:
            return jsonDict


class ChannelDecoder(json.JSONDecoder):

    def __init__(self, **kwargs):
        super(ChannelDecoder, self).__init__(object_hook=self.dict_to_obj, **kwargs)

    def dict_to_obj(self, jsonDict):
        import QGL.PulseShapes
        from Libraries import instrumentLib
        if '__class__' in jsonDict:
            #Pop the class and module
            className = jsonDict.pop('__class__')
            moduleName = jsonDict.pop('__module__')
            __import__(moduleName)

            #Re-encode the strings as ascii (this should go away in Python 3)
            jsonDict = {k.encode('ascii'):v for k,v in jsonDict.items()}

            #Instantiate the instruments associated with channels
            awg = jsonDict.pop('AWG', None)
            if awg:
                jsonDict['AWG'] = instrumentLib[awg]
            generator = jsonDict.pop('generator', None)
            if generator:
                jsonDict['generator'] = instrumentLib[generator]

            inst = getattr(sys.modules[moduleName], className)(**jsonDict)
            return inst
        else:
            #Re-encode the strings as ascii (this should go away in Python 3)
            jsonDict = {k.encode('ascii'):v for k,v in jsonDict.items()}
            shapeFun = jsonDict.pop('shapeFun',None)
            if shapeFun:
                jsonDict['shapeFun'] = getattr(QGL.PulseShapes, shapeFun)
            return jsonDict


class ScripterEncoder(json.JSONEncoder):
    """
    Helper for QLab to encode all the classes for the matlab experiment script.
    """
    def __init__(self, CWMode=False, **kwargs):
        super(ScripterEncoder, self).__init__(**kwargs)
        self.CWMode = CWMode

    def default(self, obj):
        if isinstance(obj, HasTraits):
            #For the instrument library pull out enabled instruments from the dictionary
            if isinstance(obj, instruments.InstrumentManager.InstrumentLibrary):
                jsonDict = {name:instr for name,instr in obj.instrDict.items() if instr.enabled}
            #For the measurment library just pull-out enabled measurements from the filter dictionary
            elif isinstance(obj, MeasFilterLibrary):
                jsonDict = {name:filt for name,filt in obj.filterDict.items() if filt.enabled}
            #For the sweep library we return a list of sweeps in order
            elif isinstance(obj, SweepLibrary):
                return [obj.sweepDict[k] for k in obj.sweepOrder]
            #For the scope we nest the averager, vertical, horizontal settings
            elif isinstance(obj, instruments.Digitizers.AlazarATS9870):
                jsonDict = obj.get_scripter_dict()
            #For instruments we need to add the Matlab deviceDriver name
            elif isinstance(obj, instruments.Instrument.Instrument):
                jsonDict = obj.__getstate__()
                jsonDict['deviceName'] = obj.__class__.__name__
                #If it is an AWG convert channel list into dictionary
                channels = jsonDict.pop('channels', None)
                if channels:
                    for ct,chan in enumerate(channels):
                        jsonDict['chan_{}'.format(ct+1)] = chan
                #If in CWMode, add the run method to AWGs
                if self.CWMode:
                    if isinstance(obj, instruments.AWGs.AWG):
                        jsonDict['run'] = '{}'
            #Inject the sweep type for sweeps
            elif isinstance(obj, Sweep):
                jsonDict = obj.__getstate__()
                jsonDict['type'] = obj.__class__.__name__
            else:
                jsonDict = obj.__getstate__()

            #Strip out __traits_version__
            jsonDict.pop('__traits_version__', None)

            return jsonDict
        else:
            return super(QLabEncoder, self).default(obj)
Python
0
@@ -4179,41 +4179,117 @@ %09%09%09%09 -return %5Bobj.sweepDict%5Bk%5D for k in +jsonDict = %7Bname:sweep for name,sweep in obj.sweepDict.items() if sweep.enabled%7D%0A%09%09%09%09jsonDict%5B'sweepOrder'%5D = obj @@ -4299,17 +4299,16 @@ eepOrder -%5D %0A%09%09%09#For
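Decoded, the diff replaces the SweepLibrary branch of ScripterEncoder.default so that the enabled sweeps are serialized as a dict alongside an explicit sweep order, instead of an ordered list; the indentation below is reconstructed to match the surrounding method:

            #For the sweep library we return a list of sweeps in order
            elif isinstance(obj, SweepLibrary):
                jsonDict = {name:sweep for name,sweep in obj.sweepDict.items() if sweep.enabled}
                jsonDict['sweepOrder'] = obj.sweepOrder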
183154c9a20d876eeeaf6703a9b3d618ad92cb60
remove assert in favor an if/else
openstackclient/common/parseractions.py
openstackclient/common/parseractions.py
#   Copyright 2013 OpenStack Foundation
#
#   Licensed under the Apache License, Version 2.0 (the "License"); you may
#   not use this file except in compliance with the License. You may obtain
#   a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#   License for the specific language governing permissions and limitations
#   under the License.
#

"""argparse Custom Actions"""

import argparse

from openstackclient.i18n import _


class KeyValueAction(argparse.Action):
    """A custom action to parse arguments as key=value pairs

    Ensures that ``dest`` is a dict
    """

    def __call__(self, parser, namespace, values, option_string=None):
        # Make sure we have an empty dict rather than None
        if getattr(namespace, self.dest, None) is None:
            setattr(namespace, self.dest, {})

        # Add value if an assignment else remove it
        if '=' in values:
            getattr(namespace, self.dest, {}).update([values.split('=', 1)])
        else:
            getattr(namespace, self.dest, {}).pop(values, None)


class MultiKeyValueAction(argparse.Action):
    """A custom action to parse arguments as key1=value1,key2=value2 pairs

    Ensure that ``dest`` is a list. The list will finally contain multiple
    dicts, with key=value pairs in them.

    NOTE: The arguments string should be a comma separated key-value pairs.
    And comma(',') and equal('=') may not be used in the key or value.
    """

    def __init__(self, option_strings, dest, nargs=None, required_keys=None, optional_keys=None, **kwargs):
        """Initialize the action object, and parse customized options

        Required keys and optional keys can be specified when initializing
        the action to enable the key validation. If none of them specified,
        the key validation will be skipped.

        :param required_keys: a list of required keys
        :param optional_keys: a list of optional keys
        """
        if nargs:
            raise ValueError("Parameter 'nargs' is not allowed, but got %s" % nargs)

        super(MultiKeyValueAction, self).__init__(option_strings, dest, **kwargs)

        # required_keys: A list of keys that is required. None by default.
        if required_keys and not isinstance(required_keys, list):
            raise TypeError("'required_keys' must be a list")
        self.required_keys = set(required_keys or [])

        # optional_keys: A list of keys that is optional. None by default.
        if optional_keys and not isinstance(optional_keys, list):
            raise TypeError("'optional_keys' must be a list")
        self.optional_keys = set(optional_keys or [])

    def __call__(self, parser, namespace, values, metavar=None):
        # Make sure we have an empty list rather than None
        if getattr(namespace, self.dest, None) is None:
            setattr(namespace, self.dest, [])

        params = {}
        for kv in values.split(','):
            # Add value if an assignment else raise ArgumentTypeError
            if '=' in kv:
                params.update([kv.split('=', 1)])
            else:
                msg = ("Expected key=value pairs separated by comma, "
                       "but got: %s" % (str(kv)))
                raise argparse.ArgumentTypeError(msg)

        # Check key validation
        valid_keys = self.required_keys | self.optional_keys
        if valid_keys:
            invalid_keys = [k for k in params if k not in valid_keys]
            if invalid_keys:
                msg = _("Invalid keys %(invalid_keys)s specified.\n"
                        "Valid keys are: %(valid_keys)s.")
                raise argparse.ArgumentTypeError(
                    msg % {'invalid_keys': ', '.join(invalid_keys),
                           'valid_keys': ', '.join(valid_keys)}
                )

        if self.required_keys:
            missing_keys = [k for k in self.required_keys if k not in params]
            if missing_keys:
                msg = _("Missing required keys %(missing_keys)s.\n"
                        "Required keys are: %(required_keys)s.")
                raise argparse.ArgumentTypeError(
                    msg % {'missing_keys': ', '.join(missing_keys),
                           'required_keys': ', '.join(self.required_keys)}
                )

        # Update the dest dict
        getattr(namespace, self.dest, []).append(params)


class RangeAction(argparse.Action):
    """A custom action to parse a single value or a range of values

    Parses single integer values or a range of integer values delimited
    by a colon and returns a tuple of integers:
    '4' sets ``dest`` to (4, 4)
    '6:9' sets ``dest`` to (6, 9)
    """

    def __call__(self, parser, namespace, values, option_string=None):
        range = values.split(':')
        if len(range) == 0:
            # Nothing passed, return a zero default
            setattr(namespace, self.dest, (0, 0))
        elif len(range) == 1:
            # Only a single value is present
            setattr(namespace, self.dest, (int(range[0]), int(range[0])))
        elif len(range) == 2:
            # Range of two values
            if int(range[0]) <= int(range[1]):
                setattr(namespace, self.dest, (int(range[0]), int(range[1])))
            else:
                msg = "Invalid range, %s is not less than %s" % \
                    (range[0], range[1])
                raise argparse.ArgumentError(self, msg)
        else:
            # Too many values
            msg = "Invalid range, too many values"
            raise argparse.ArgumentError(self, msg)


class NonNegativeAction(argparse.Action):
    """A custom action to check whether the value is non-negative or not

    Ensures the value is >= 0.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        try:
            assert(int(values) >= 0)
            setattr(namespace, self.dest, values)
        except Exception:
            msg = "%s expected a non-negative integer" % (str(option_string))
            raise argparse.ArgumentTypeError(msg)
Python
0.000001
@@ -6169,32 +6169,11 @@ -try:%0A assert( +if int( @@ -6184,17 +6184,17 @@ es) %3E= 0 -) +: %0A @@ -6249,23 +6249,11 @@ e -xcept Exception +lse :%0A
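Decoded, the commit replaces the try/assert/except in NonNegativeAction.__call__ with a plain conditional, matching the subject line; the post-commit method reads:

    def __call__(self, parser, namespace, values, option_string=None):
        if int(values) >= 0:
            setattr(namespace, self.dest, values)
        else:
            msg = "%s expected a non-negative integer" % (str(option_string))
            raise argparse.ArgumentTypeError(msg)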
d03a947723a182cbda604a1ceaa0a45595726cb7
add type of work in trello
Worklog-trello.py
Worklog-trello.py
import requests
import json, csv
from datetime import datetime
import time, calendar
from pytz import timezone
import ConfigParser, os, sys
import logging

#logging.basicConfig(level=logging.INFO)
#logger = logging.getLogger( __name__ )
logger = logging.getLogger()
console = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
console.setFormatter(formatter)
logger.addHandler(console)
logger.setLevel(logging.INFO)

config = ConfigParser.ConfigParser()
config.read('trello.ini')
key = config.get('TRELLO', 'key')
token = config.get('TRELLO', 'token')
thelist = config.get('TRELLO', 'list')
startdatefield = config.get('CUSTOM', 'startdate')
durationfield = config.get('CUSTOM', 'duration')
tempoidfield = config.get('CUSTOM', 'tempoid')
daysfield = config.get('CUSTOM', 'period')


def getCardsbyMember(member):
    url = "https://api.trello.com/1/members/%s" % (member)
    querystring = {"fields":"username,fullName", "cards":"open","card_fields":"name,idList", "key":key, "token":token}
    headers = {
        'cache-control': "no-cache",
    }
    response = requests.request("GET", url, headers=headers, params=querystring)
    data = json.loads(response.text)
    return data


def checkMembers(cardid):
    url = "https://api.trello.com/1/cards/%s/members" % (cardid)
    querystring = {"key":key, "token":token}
    headers = {
        'cache-control': "no-cache",
    }
    response = requests.request("GET", url, headers=headers, params=querystring)
    data = response.text
    logger.debug("checkMembers: " + data)
    j = json.loads(data)
    return len(j)


def archiveCard(cardid):
    url = "https://api.trello.com/1/cards/%s/closed" % (cardid)
    querystring = {"value":"true", "key":key, "token":token}
    headers = {
        'cache-control': "no-cache",
    }
    response = requests.request("PUT", url, headers=headers, params=querystring)


def getTempoData(cardid):
    url = "https://api.trello.com/1/cards/%s/pluginData" % (cardid)
    querystring = {"key":key, "token":token}
    headers = {
        'cache-control': "no-cache",
    }
    response = requests.request("GET", url, headers=headers, params=querystring)
    data = response.text
    j_data = data[1:len(data)-1]
    if len(j_data) > 0:
        return json.loads(j_data)
    else:
        return "NULL"


def writeToCSV(tempoid,startdate,duration,description,membername):
    data = [tempoid, startdate, duration, description]
    f = open(membername+'-'+str(filedate)+'.csv','a')
    w = csv.writer(f)
    w.writerow(data)
    f.close()


# http://stackoverflow.com/questions/466345/converting-string-into-datetime
def getWorklogDate(period,date):
    # remove the timezone from the string which come from trello
    date=date[0:len(date)-6]
    struct_time = datetime.strptime(date, "%Y-%m-%dT%H:%M:%S")
    #struct_time = datetime.strptime("2017-02-10T12:00:00", "%Y-%m-%dT%H:%M:%S")
    struct_time.strftime('%w')
    datetime_obj_hk = timezone('Asia/Hong_Kong').localize(struct_time)
    n = int(period)
    date_arr = []
    while n >= 1:
        date_arr.append(datetime_obj_hk.strftime("%Y-%m-%dT%H:%M:%S"))
        weekofday=int(datetime_obj_hk.strftime('%w'))+1
        # unix time 86400 per day
        unix_datetime = calendar.timegm(datetime_obj_hk.utctimetuple())
        # 0 = sunday
        if weekofday == 7:
            unix_datetime = unix_datetime + 172800
        # 6 = Sat
        elif weekofday == 6:
            unix_datetime = unix_datetime + 259200
        else:
            unix_datetime = unix_datetime + 86400
        datetime_obj_hk = timezone('Asia/Hong_Kong').localize(datetime.fromtimestamp(unix_datetime))
        n=n-1;
    return date_arr


def getCompleteCard(cards):
    completedcard = []
    logger.info("Found %s cards for %s" % (len(cards),membername))
    for card in cards:
        # find the card if it save in ##Completed
        if card["idList"] == thelist:
            cardname = card["name"]
            # get the custom field
            tempo_d = getTempoData(card["id"])
            # if there are TEMPO data
            if tempo_d != "NULL":
                try:
                    d = json.loads(tempo_d["value"])
                    startdate = d["fields"][startdatefield]
                    duration = int(d["fields"][durationfield])*60*60
                    tempoid = d["fields"][tempoidfield]
                    try:
                        period = d["fields"][daysfield]
                    except KeyError:
                        period = 1
                        pass
                    date_arr = getWorklogDate(period,startdate)
                    for d in date_arr:
                        myDict = {"tempoid": tempoid, "startdate": d, "duration": duration, "cardname":cardname}
                        completedcard.append(myDict)
                    print card["id"]
                    if checkMembers(card["id"]) == 1:
                        archiveCard(card["id"])
                    else:
                        logger.info("can't archive %s" % (cardname))
                    logger.info("added %s into csv" % (cardname))
                except KeyError:
                    logger.error("Skip %s because error" % (cardname))
                    logger.debug(sys.exc_info())
                    pass
                except:
                    logger.error("Unexpected error: " + str(sys.exc_info()[0]) + " " + cardname)
                    logger.debug(sys.exc_info())
            else:
                logger.info("No TEMPO data in %s" % (cardname))
    logger.info("%s have %s cards in completed list" % (membername,len(completedcard)))
    return completedcard


filedate = datetime.today().strftime("%Y-%m-%d")
member = sys.argv[1] if len(sys.argv) > 1 else "539010c187655c291d15ef13"
membername = sys.argv[2] if len(sys.argv) > 1 else "andywong"

d=getCardsbyMember(member)
cards=getCompleteCard(cards=d["cards"])
for card in cards:
    writeToCSV(card["tempoid"],card["startdate"],card["duration"],card["cardname"],membername)
Python
0.000001
@@ -823,16 +823,69 @@ period') +%0Atypeofworkfield = config.get('CUSTOM', 'typeofwork') %0A%0Adef ge
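Decoded, the diff adds a single config lookup right after the 'period' field, matching the subject "add type of work in trello":

daysfield = config.get('CUSTOM', 'period')
typeofworkfield = config.get('CUSTOM', 'typeofwork')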
99783e21eb63c7d44d1bb1d993b940825a0cdc66
Improve Visual proximity of comment.
account_hmrc_esl_declaration/wizard/account_vat_esl.py
account_hmrc_esl_declaration/wizard/account_vat_esl.py
# -*- coding: utf-8 -*-

##############################################################################
#
# ECSL Export for HMRC
# Copyright (C) 2015 OpusVL (<http://opusvl.com/>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import csv
import re
from operator import methodcaller
try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO

from openerp import models, fields, api

from ..maybe import odoo_maybe


_INDICATOR_MAP = {
    # Mapping from transaction_indicator_type to the code used in the CSV file
    'b2b_goods': '0',
    'triangular': '2',
    'b2b_services': '3',
}


class AccountVatESLWizard(models.TransientModel):
    # Based on odoo/addons/account/wizard/account_vat.py from upstream Odoo.
    # Code used and modified under AGPL v3.
    _name = 'account.vat.esl'
    _description = 'EC Sales Declaration'
    _inherit = 'account.common.report'

    period_from = fields.Many2one(string='Period', required=True)
    # We only care about one period
    based_on = fields.Selection(
        # Looking at how account.vat.declaration uses this, I think this field may be completely
        # redundant. It's in the model, but hidden to the user in the UI. So perhaps only
        # 'invoices' makes sense here.
        default='invoices',
        required=True,
        readonly=True,
        selection=[
            ('invoices', 'Invoices'),
            ('payments', 'Payments'),
        ],
        string='Based on',
    )
    chart_tax_id = fields.Many2one(
        comodel_name='account.tax.code',
        string='Chart of Tax',
        required=True,
        default=methodcaller('_default_chart_of_taxes'),
    )

    def _default_chart_of_taxes(self):
        taxes = self.env['account.tax.code'].search(
            [
                ('company_id', '=', self.env.user.company_id.id),
                ('name', '=ilike', '%Total value of EC sales, ex VAT%'),
            ],
            limit=1,
        )
        return taxes and taxes.id or False

    @api.multi
    def create_esl(self):
        """This should be triggered by the form.
        """
        self.ensure_one()
        return self.env['report'].get_action(self, 'account_hmrc_esl_declaration.esl_csv')

    def declaration_year(self):
        """Return year of declaration in YYYY format."""
        # NOTE This assumes period name is in MM/YYYY format
        return self.period_from.name.split('/')[1]

    def declaration_month(self):
        """Return month of declaration in MM format."""
        # NOTE This assumes period name is in MM/YYYY format
        return self.period_from.name.split('/')[0]

    @api.multi
    def esl_csv_records(self):
        """Return the CSV records in HMRC-compatible format as a list of rows.
        """
        self.ensure_one()
        company = self.chart_tax_id.company_id
        title_record = ['HMRC_CAT_ESL_BULK_SUBMISSION_FILE']
        header_record = [
            odoo_maybe(company.vat, strip_leading_letters),
            company.subsidiary_identifier,
            self.declaration_year(),
            self.declaration_month(),
            'GBP',
            company.name[:35],  # NOTE truncating might not be sufficient
            '0',  # "the indicator field (this will always be '0')"
        ]
        return [
            title_record,
            header_record
        ] + self._detail_records()

    @api.multi
    def _detail_records(self):
        self.ensure_one()
        return [
            # TODO
        ]

    @api.multi
    def esl_csv_data(self):
        """Return the CSV data as a string.
        """
        data = StringIO()
        csv.writer(data).writerows(self.esl_csv_records())
        return data.getvalue()


def strip_leading_letters(instr):
    """Strip the leading letters off a string.

    >>> strip_leading_letters('GB12345678')
    '12345678'
    """
    return re.sub(r'^[A-Z]+', r'', instr, count=1)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
Python
0
@@ -3905,27 +3905,16 @@ '0', - # %22t
8ef9e3254fa3ad7b63a48db0b5b3b57c1bcc6c57
Initialize the amount variable
addons/account/wizard/wizard_statement_from_invoice.py
addons/account/wizard/wizard_statement_from_invoice.py
##############################################################################
#
# Copyright (c) 2008 Camptocamp SA All Rights Reserved. (JGG)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################

import wizard
import pooler
from tools.misc import UpdateableStr
import time

FORM = UpdateableStr()

FIELDS = {
    'lines': {'string': 'Invoices', 'type': 'many2many', 'relation': 'account.move.line'},
}

START_FIELD = {
    'date': {'string': 'Date payment', 'type': 'date','required':True, 'default': lambda *a: time.strftime('%Y-%m-%d')},
    'journal_id': {'string': 'Journal', 'type': 'many2many', 'relation': 'account.journal', 'domain': '[("type","in",["sale","purchase","cash"])]', 'help': 'This field allow you to choose the accounting journals you want for filtering the invoices. If you left this field empty, it will search on all sale, purchase and cash journals.'},
}

START_FORM = '''<?xml version="1.0"?>
<form string="Import invoices in statement">
    <label string="Choose Journal and Payment Date" colspan="4"/>
    <field name="date"/>
    <field name="journal_id" colspan="4"/>
</form>'''


def _search_invoices(obj, cr, uid, data, context):
    pool = pooler.get_pool(cr.dbname)
    line_obj = pool.get('account.move.line')
    statement_obj = pool.get('account.bank.statement')
    journal_obj = pool.get('account.journal')

    statement = statement_obj.browse(cr, uid, data['id'], context=context)

    journal_ids = data['form']['journal_id'][0][2]
    if journal_ids == []:
        journal_ids = journal_obj.search(cr, uid, [('type', 'in', ('sale','cash','purchase'))], context=context)

    line_ids = line_obj.search(cr, uid, [
        ('reconcile_id', '=', False),
        ('journal_id', 'in', journal_ids),
        ('account_id.reconcile', '=', True)],
        #order='date DESC, id DESC', #doesn't work
        context=context)

    FORM.string = '''<?xml version="1.0"?>
<form string="Import entries from customer invoice">
    <field name="lines" colspan="4" height="300" width="800" nolabel="1"
        domain="[('id', 'in', [%s])]"/>
</form>''' % (','.join([str(x) for x in line_ids]))

    return {}


def _populate_statement(obj, cursor, user, data, context):
    line_ids = data['form']['lines'][0][2]
    line_date=data['form']['date']
    if not line_ids:
        return {}

    pool = pooler.get_pool(cursor.dbname)
    line_obj = pool.get('account.move.line')
    statement_obj = pool.get('account.bank.statement')
    statement_line_obj = pool.get('account.bank.statement.line')
    currency_obj = pool.get('res.currency')
    statement_reconcile_obj = pool.get('account.bank.statement.reconcile')
    statement = statement_obj.browse(cursor, user, data['id'], context=context)

    # for each selected move lines
    for line in line_obj.browse(cursor, user, line_ids, context=context):
        ctx = context.copy()
        # take the date for computation of currency => use payment date
        # if line.date_maturity:
        #     ctx['date'] = line.date_maturity
        # else:
        ctx['date'] = line_date

        if line.amount_currency:
            amount = currency_obj.compute(cursor, user, line.currency_id.id, statement.currency.id, line.amount_currency, context=ctx)
        else:
            if line.debit > 0:
                amount=line.debit
            elif line.credit > 0:
                amount=-line.credit

        reconcile_id = statement_reconcile_obj.create(cursor, user, {
            'line_ids': [(6, 0, [line.id])]
            }, context=context)

        if line.journal_id.type == 'sale':
            type = 'customer'
        elif line.journal_id.type == 'purchase':
            type = 'supplier'
        else:
            type = 'general'

        statement_line_obj.create(cursor, user, {
            'name': line.name or '?',
            'amount': amount,
            'type': type,
            'partner_id': line.partner_id.id,
            'account_id': line.account_id.id,
            'statement_id': statement.id,
            'ref': line.ref,
            'reconcile_id': reconcile_id,
            'date':line_date, #time.strftime('%Y-%m-%d'), #line.date_maturity or,
            }, context=context)
    return {}


class PopulateStatementFromInv(wizard.interface):
    """
    Populate the current statement with selected invoices
    """
    states = {
        'init': {
            'actions': [],
            'result': {
                'type': 'form',
                'arch': START_FORM,
                'fields':START_FIELD,
                'state': [
                    ('end', '_Cancel'),
                    ('go', '_Go', '', True),
                ]
            },
        },
        'go': {
            'actions': [_search_invoices],
            'result': {
                'type': 'form',
                'arch': FORM,
                'fields': FIELDS,
                'state': [
                    ('end', '_Cancel','', True),
                    ('finish', 'O_k','', True)
                ]
            },
        },
        'finish': {
            'actions': [],
            'result': {
                'type': 'action',
                'action': _populate_statement,
                'state': 'end'
            },
        },
    }

PopulateStatementFromInv('populate_statement_from_inv')
Python
0.000027
@@ -4182,24 +4182,45 @@ = line_date%0A + amount = 0.0%0A if l
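Decoded, the one-hunk diff initializes amount before the currency branch, so the later statement-line create can no longer see an unbound variable when neither debit nor credit is positive:

        ctx['date'] = line_date
        amount = 0.0

        if line.amount_currency:
            ...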
c09e02f83369bf0290df2260f4409787ed8519cf
Use a plain TestCase for the two suites that alter INSTALLED_APPS
analytical/tests/test_tag_chartbeat.py
analytical/tests/test_tag_chartbeat.py
""" Tests for the Chartbeat template tags and filters. """ import re from django.conf import settings from django.contrib.sites.models import Site from django.http import HttpRequest from django.template import Context from analytical.templatetags.chartbeat import ChartbeatTopNode, \ ChartbeatBottomNode from analytical.tests.utils import TagTestCase, override_settings from analytical.utils import AnalyticalException @override_settings(INSTALLED_APPS=[a for a in settings.INSTALLED_APPS if a != 'django.contrib.sites'], CHARTBEAT_USER_ID="12345") class ChartbeatTagTestCaseNoSites(TagTestCase): def test_rendering_setup_no_site(self): r = ChartbeatBottomNode().render(Context()) self.assertTrue('var _sf_async_config={"uid": "12345"};' in r, r) @override_settings(INSTALLED_APPS=settings.INSTALLED_APPS + ["django.contrib.sites"], CHARTBEAT_USER_ID="12345") class ChartbeatTagTestCaseWithSites(TagTestCase): def test_rendering_setup_site(self): from django.core.management import call_command from django.db.models import loading loading.cache.loaded = False call_command("syncdb", verbosity=0) site = Site.objects.create(domain="test.com", name="test") with override_settings(SITE_ID=site.id): r = ChartbeatBottomNode().render(Context()) self.assertTrue(re.search( 'var _sf_async_config={.*"uid": "12345".*};', r), r) self.assertTrue(re.search( 'var _sf_async_config={.*"domain": "test.com".*};', r), r) @override_settings(CHARTBEAT_AUTO_DOMAIN=False) def test_auto_domain_false(self): """ Even if 'django.contrib.sites' is in INSTALLED_APPS, if CHARTBEAT_AUTO_DOMAIN is False, ensure there is no 'domain' in _sf_async_config. """ r = ChartbeatBottomNode().render(Context()) self.assertTrue('var _sf_async_config={"uid": "12345"};' in r, r) class ChartbeatTagTestCase(TagTestCase): """ Tests for the ``chartbeat`` template tag. """ def setUp(self): super(ChartbeatTagTestCase, self).setUp() self.settings_manager.set(CHARTBEAT_USER_ID='12345') def test_top_tag(self): r = self.render_tag('chartbeat', 'chartbeat_top', {'chartbeat_domain': "test.com"}) self.assertTrue('var _sf_startpt=(new Date()).getTime()' in r, r) def test_bottom_tag(self): r = self.render_tag('chartbeat', 'chartbeat_bottom', {'chartbeat_domain': "test.com"}) self.assertTrue(re.search( 'var _sf_async_config={.*"uid": "12345".*};', r), r) self.assertTrue(re.search( 'var _sf_async_config={.*"domain": "test.com".*};', r), r) def test_top_node(self): r = ChartbeatTopNode().render( Context({'chartbeat_domain': "test.com"})) self.assertTrue('var _sf_startpt=(new Date()).getTime()' in r, r) def test_bottom_node(self): r = ChartbeatBottomNode().render( Context({'chartbeat_domain': "test.com"})) self.assertTrue(re.search( 'var _sf_async_config={.*"uid": "12345".*};', r), r) self.assertTrue(re.search( 'var _sf_async_config={.*"domain": "test.com".*};', r), r) def test_no_user_id(self): self.settings_manager.delete('CHARTBEAT_USER_ID') self.assertRaises(AnalyticalException, ChartbeatBottomNode) def test_wrong_user_id(self): self.settings_manager.set(CHARTBEAT_USER_ID='123abc') self.assertRaises(AnalyticalException, ChartbeatBottomNode) def test_render_internal_ip(self): self.settings_manager.set(ANALYTICAL_INTERNAL_IPS=['1.1.1.1']) req = HttpRequest() req.META['REMOTE_ADDR'] = '1.1.1.1' context = Context({'request': req}) r = ChartbeatBottomNode().render(context) self.assertTrue(r.startswith( '<!-- Chartbeat disabled on internal IP address'), r) self.assertTrue(r.endswith('-->'), r)
Python
0
@@ -213,16 +213,49 @@ Context +%0Afrom django.test import TestCase %0A%0Afrom a @@ -637,27 +637,24 @@ CaseNoSites( -Tag TestCase):%0A @@ -991,19 +991,16 @@ thSites( -Tag TestCase @@ -1014,33 +1014,13 @@ def -test_rendering_setup_site +setUp (sel @@ -1206,16 +1206,57 @@ ity=0)%0A%0A + def test_rendering_setup_site(self):%0A
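Decoded, the diff imports Django's plain TestCase, rebases the two INSTALLED_APPS-altering suites on it, and moves the syncdb bootstrapping into setUp. The second suite then reads roughly as below; the elided assertions marked with `...` are unchanged from the old file:

from django.test import TestCase
...
class ChartbeatTagTestCaseNoSites(TestCase):
    ...

class ChartbeatTagTestCaseWithSites(TestCase):
    def setUp(self):
        from django.core.management import call_command
        from django.db.models import loading
        loading.cache.loaded = False
        call_command("syncdb", verbosity=0)

    def test_rendering_setup_site(self):
        site = Site.objects.create(domain="test.com", name="test")
        ...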
a7827ecf5e480c228c881180e63633712e3dbc3c
Modify ARDUINO_SEARCH_PATHS to include default ubuntu package location
site_scons/find_avrdude.py
site_scons/find_avrdude.py
import sys
import os
from itertools import chain

from path import path


home_dir = path('~').expand()
ARDUINO_SEARCH_PATHS = [home_dir, ]

if os.name == 'nt':
    from win32com.shell import shell, shellcon
    mydocs = shell.SHGetFolderPath(0, shellcon.CSIDL_PERSONAL, 0, 0)
    AVRDUDE_NAME = 'avrdude.exe'
    ARDUINO_SEARCH_PATHS += [path(mydocs),
                             path('%SYSTEMDRIVE%/').expand(),
                             path('%PROGRAMFILES%').expand(), ]
else:
    AVRDUDE_NAME = 'avrdude'
    ARDUINO_SEARCH_PATHS += [home_dir / path('local/opt'), ]


def get_arduino_paths():
    fs = []
    for p in ARDUINO_SEARCH_PATHS:
        fs += get_avrdude_list(p)
    if not fs:
        print >> sys.stderr, '''\
ERROR: arduino install directory not found!
Searched: %s''' % '\n '.join(ARDUINO_SEARCH_PATHS)
        sys.exit(1)
    fs.sort(key=lambda x: -x.ctime)
    avrdude = fs[0]
    p = avrdude.parent
    while p and p.name != 'hardware':
        p = p.parent
    if not p:
        print >> sys.stderr, '''Arduino install path not found.'''
        sys.exit(1)
    arduino_path = p.parent
    avrdude_conf = list(arduino_path.walkfiles('avrdude.conf'))
    if not avrdude_conf:
        print >> sys.stderr, '''avrdude configuration (avrdude.conf) path not found.'''
        sys.exit(1)
    else:
        avrdude_conf = avrdude_conf[0]
    return arduino_path, avrdude, avrdude_conf


def get_avrdude_list(p):
    return list(set(chain(*[d.walkfiles(AVRDUDE_NAME)
                            for d in p.dirs('arduino*')])))


if __name__ == '__main__':
    arduino_path, avrdude, avrdude_conf = get_arduino_paths()
    print 'found arduino path:', arduino_path
    print 'using newest avrdude:', avrdude
    print 'using avrdude config:', avrdude_conf
Python
0
@@ -537,38 +537,27 @@ += %5B -home_dir / path('local/opt'), +path(%22/usr/share/%22) %5D%0A%0A%0A
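Decoded, the non-Windows branch now points at the default Ubuntu package location instead of the user's local opt directory:

else:
    AVRDUDE_NAME = 'avrdude'
    ARDUINO_SEARCH_PATHS += [path("/usr/share/")]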
03c237551aa08cb70fd397cc348e75531cdabd0e
fix schemas for password views
src/eduid_webapp/security/schemas.py
src/eduid_webapp/security/schemas.py
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 NORDUnet A/S
# All rights reserved.
#
#   Redistribution and use in source and binary forms, with or
#   without modification, are permitted provided that the following
#   conditions are met:
#
#     1. Redistributions of source code must retain the above copyright
#        notice, this list of conditions and the following disclaimer.
#     2. Redistributions in binary form must reproduce the above
#        copyright notice, this list of conditions and the following
#        disclaimer in the documentation and/or other materials provided
#        with the distribution.
#     3. Neither the name of the NORDUnet nor the names of its
#        contributors may be used to endorse or promote products derived
#        from this software without specific prior written permission.
#
#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
#   FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#   COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#   INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
#   BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#   LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#   CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
#   LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
#   ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#   POSSIBILITY OF SUCH DAMAGE.
#

from marshmallow import fields

from eduid_common.api.schemas.base import EduidSchema


class CredentialSchema(EduidSchema):
    credential_type = fields.String(required=True)
    created_ts = fields.String(required=True)
    success_ts = fields.String(required=True)


class CredentialList(EduidSchema):
    credentials = fields.Nested(CredentialSchema, many=True)
    csrf_token = fields.String(required=True)


class SecurityResponseSchema(EduidSchema):
    payload = fields.Nested(CredentialList, only=('credentials', 'csrf_token'))


class CsrfSchema(EduidSchema):
    csrf_token = fields.String(required=True)


class SecurityPasswordSchema(EduidSchema):
    password = fields.String(required=True)
    new_password = fields.String(required=True)
    repeat_password = fields.String(required=True)
    csrf_token = fields.String(required=True)
Python
0.000002
@@ -1681,16 +1681,36 @@ e import + FluxStandardAction, EduidSc @@ -2067,35 +2067,42 @@ ponseSchema( -EduidSchema +FluxStandardAction ):%0A paylo @@ -2172,16 +2172,71 @@ oken'))%0A + csrf_token = fields.String(attribute='csrf_token')%0A %0A%0Aclass
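Decoded, the fix pulls in FluxStandardAction and rebases the response schema on it, adding a top-level csrf_token; the elision marked with `...` stands for the unchanged classes in between:

from eduid_common.api.schemas.base import FluxStandardAction, EduidSchema
...
class SecurityResponseSchema(FluxStandardAction):
    payload = fields.Nested(CredentialList, only=('credentials', 'csrf_token'))
    csrf_token = fields.String(attribute='csrf_token')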
daa92b15852b3572d7ef03392b061184dbbc76c1
fix to use the right cert in AuthServer provisioning
Code/scripts/provisionAuthServer.py
Code/scripts/provisionAuthServer.py
#!/usr/bin/env python

from __future__ import print_function
from subprocess import check_call
import os
import argparse

parser = argparse.ArgumentParser(description="Generates the keys for authServer.exe")
parser.add_argument("--scriptPath", required="true",
                    help="The path to the directory that contains the scripts used by provisionAuthServer.py")
args = parser.parse_args()

check_call([os.path.join(args.scriptPath, "createPrincipal.py"), "5", "AuthServer"])
check_call(["./cryptUtility.exe", "-EncapsulateMessage", "AuthServerPublicKey.xml",
            "authServer/signingKeyMetaData", "AuthServerPrivateKey.xml", "authServer/signingKey"])
check_call(["cp", "AuthServerPublicKey.xml", "authServer/signingCert"])
Python
0
@@ -513,25 +513,25 @@ eMessage%22, %22 -A +a uthServerPub @@ -523,37 +523,29 @@ %22authServer -PublicKey.xml +/cert %22, %22authServ
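Decoded, the fix makes the EncapsulateMessage call use the provisioned cert path rather than the raw public key file:

check_call(["./cryptUtility.exe", "-EncapsulateMessage", "authServer/cert",
            "authServer/signingKeyMetaData", "AuthServerPrivateKey.xml", "authServer/signingKey"])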
93c34ad24f4dc675f1f8d212a6b1a7e53daf381b
change pinout values
RP/static_funcs.py
RP/static_funcs.py
#-------------------------------------------------------------------------------
# Name:        Static funcs
# Purpose:
#
# Author:      I070890
#
# Created:     18/01/2015
# Copyright:   (c) I070890 2015
# Licence:     <your licence>
#-------------------------------------------------------------------------------
import time, os, subprocess, multiprocessing
import RPi.GPIO as GPIO

# A couple of variables
# ---------------------
debug = False  # debug mode for console output
#debug = True  # debug mode for console output
first_iter = True


def init_func():
    # Wait for 2 seconds to allow the ultrasonics to settle (probably not needed)
    # ---------------------------------------------------------------------------
    print "Waiting for 2 seconds....."
    time.sleep(2)

    # Go
    # --
    print "Running....\nStart Beep process...."
    GPIO.setmode(GPIO.BCM)


def distance(GPIO_ECHO,GPIO_TRIG):
    debug_print ("GPIO_TRIG = " + str(GPIO_TRIG) + ",GPIO_ECHO = " + str(GPIO_ECHO))
    # Set GPIO Channels
    # -----------------
    GPIO.setup(GPIO_ECHO, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
    GPIO.setup(GPIO_TRIG, GPIO.OUT)
    GPIO.output(GPIO_TRIG, False)

    # A couple of variables
    # ---------------------
    EXIT = 0  # Infinite loop
    decpulsetrigger = 0.0001  # Trigger duration
    inttimeout = 1000  # Number of loop iterations before timeout called
    min_dist = 100

    # Never ending loop
    # -----------------
    while EXIT < 10:
        # Trigger high for 0.0001s then low
        GPIO.output(GPIO_TRIG, True)
        time.sleep(decpulsetrigger)
        GPIO.output(GPIO_TRIG, False)

        # Wait for echo to go high (or timeout)
        i_countdown = inttimeout
        while (GPIO.input(GPIO_ECHO) == 0 and i_countdown > 0):
            i_countdown -= 1

        # If echo is high than the i_countdown not zero
        if i_countdown > 0:
            # Start timer and init timeout countdown
            echostart = time.time()
            i_countdown = inttimeout

            # Wait for echo to go low (or timeout)
            while (GPIO.input(GPIO_ECHO) == 1 and i_countdown > 0):
                i_countdown -= 1

            # Stop timer
            echoend = time.time()

            # Echo duration
            echoduration = echoend - echostart

        # Display distance
        if i_countdown > 0:
            i_distance = (echoduration*1000000)/58
            debug_print("Distance = " + str(i_distance) + "cm")
            min_dist = min(min_dist,i_distance)
        else:
            debug_print("Distance - timeout")

        # Wait at least .01s before re trig (or in this case .1s)
        time.sleep(.1)
        EXIT +=1

    return min_dist


def debug_print(line,must_print = False):
    if debug or must_print:
        print line


def beep_func(printOutput = True, GPIO_ECHO_INPUT = None ):
    while True:
        if GPIO_ECHO_INPUT != None:
            GPIO_ECHO_BEEP = [0]
            GPIO_TRIG_BEEP = [1]
        else:
            GPIO_ECHO_BEEP = 22
            GPIO_TRIG_BEEP = 27
        calc_dist = distance(GPIO_ECHO_BEEP,GPIO_TRIG_BEEP)
        print "BEEP dist is: " + str(calc_dist)
        if calc_dist < 60:
            cmd = "(speaker-test -t sine -f " + str(75*calc_dist) + " -l 1 -p 1024 -P 4 > /dev/null)& pid=$!; sleep 0.25s; kill -9 $pid"
            print cmd
            os.system(cmd)
        time.sleep(0.1)
Python
0.000002
@@ -3118,17 +3118,17 @@ BEEP = 2 -2 +6 %0A @@ -3440,17 +3440,24 @@ +debug_ print - +( cmd +) %0A
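Decoded, the two hunks move the default echo pin from 22 to 26 and route the command echo through debug_print:

        else:
            GPIO_ECHO_BEEP = 26
            GPIO_TRIG_BEEP = 27
        ...
        if calc_dist < 60:
            cmd = "(speaker-test -t sine -f " + str(75*calc_dist) + " -l 1 -p 1024 -P 4 > /dev/null)& pid=$!; sleep 0.25s; kill -9 $pid"
            debug_print(cmd)
            os.system(cmd)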
fe4c62acd52a4060eebf4284c15c465970ea8932
remove duplicate enum key (#7173)
sdk/cognitiveservices/azure-cognitiveservices-language-luis/azure/cognitiveservices/language/luis/authoring/models/_luis_authoring_client_enums.py
sdk/cognitiveservices/azure-cognitiveservices-language-luis/azure/cognitiveservices/language/luis/authoring/models/_luis_authoring_client_enums.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from enum import Enum


class TrainingStatus(str, Enum):

    needs_training = "NeedsTraining"
    in_progress = "InProgress"
    trained = "Trained"


class OperationStatusType(str, Enum):

    failed = "Failed"
    failed = "FAILED"
    success = "Success"
Python
0.000003
@@ -684,30 +684,8 @@ ed%22%0A - failed = %22FAILED%22%0A
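Decoded, the commit simply drops the duplicate enum key, leaving:

class OperationStatusType(str, Enum):

    failed = "Failed"
    success = "Success"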
b56882c5d3a145b571deb10f901f35c071c5c34d
Update release.py to blacklist thumbs.db and .ds_store as opposed to whitelisting known file types.
Release/release.py
Release/release.py
VERSION = '0.2.0'

import shutil
import os
import io
import sys

def copyDirectory(source, target):
	source = source.replace('/', os.sep)
	target = target.replace('/', os.sep)
	os.makedirs(target)
	for file in os.listdir(source):
		fullpath = os.path.join(source, file)
		fulltargetpath = os.path.join(target, file)
		if os.path.isdir(fullpath):
			copyDirectory(fullpath, fulltargetpath)
		elif file.endswith('.txt') or file.endswith('.cry'):
			# The intent of this is to avoid os generated files like thumbs.db
			# Tweak if new file types are added.
			shutil.copyfile(fullpath, fulltargetpath)

def readFile(path):
	c = open(path.replace('/', os.sep), 'rt')
	text = c.read()
	c.close()
	return text

def writeFile(path, content, lineEnding):
	content = content.replace("\r\n", "\n").replace("\r", "\n").replace("\n", lineEnding)
	ucontent = unicode(content, 'utf-8')
	with io.open(path.replace('/', os.sep), 'w', newline=lineEnding) as f:
		f.write(ucontent)

def runCommand(cmd):
	c = os.popen(cmd)
	output = c.read()
	c.close()
	return output

def main(args):
	librariesForRelease = [
		'Audio',
		'Core',
		'Easing',
		'FileIO',
		'FileIOCommon',
		'Game',
		'Gamepad',
		'Graphics2D',
		'GraphicsText',
		'Http',
		'ImageEncoder',
		'ImageResources',
		'ImageWebResources',
		'Json',
		'Math',
		'Random',
		'Resources',
		'UserData',
		'Web',
		'Xml',
	]

	if len(args) != 1:
		print("usage: python release.py windows|mono")
		return

	platform = args[0]
	if not platform in ('windows', 'mono'):
		print ("Invalid platform: " + platform)
		return

	copyToDir = 'crayon-' + VERSION + '-' + platform

	if os.path.exists(copyToDir):
		shutil.rmtree(copyToDir)
	os.makedirs(copyToDir)

	if platform == 'mono':
		print runCommand('xbuild /p:Configuration=Release ../Compiler/CrayonOSX.sln')
	else:
		print runCommand(' '.join([
			r'C:\Windows\Microsoft.NET\Framework\v4.0.30319\MSBuild.exe',
			'/p:Configuration=Release',
			r'..\Compiler\CrayonWindows.sln']))

	shutil.copyfile('../Compiler/bin/Release/Crayon.exe', copyToDir + '/crayon.exe')
	shutil.copyfile('../Compiler/bin/Release/Interpreter.dll', copyToDir + '/Interpreter.dll')
	shutil.copyfile('../Compiler/bin/Release/Resources.dll', copyToDir + '/Resources.dll')
	shutil.copyfile('../Compiler/bin/Release/LICENSE.txt', copyToDir + '/LICENSE.txt')
	shutil.copyfile('../README.md', copyToDir + '/README.md')

	if platform == 'windows':
		setupFile = readFile("setup-windows.txt")
		writeFile(copyToDir + '/Setup Instructions.txt', setupFile, '\r\n')
	if platform == 'mono':
		setupFile = readFile("setup-mono.md")
		writeFile(copyToDir + '/Setup Instructions.txt', setupFile, '\n')

	for lib in librariesForRelease:
		sourcePath = '../Libraries/' + lib
		targetPath = copyToDir + '/libs/' + lib
		copyDirectory(sourcePath, targetPath)

	print("Release directory created: " + copyToDir)

main(sys.argv[1:])
Python
0
@@ -414,112 +414,34 @@
 ile. -endswith('.txt') or file.endswith('.cry'):%0D%0A%09%09%09# The intent of this is to avoid os generated files like +lower() in ('.ds_store', ' thum
@@ -449,49 +449,29 @@
 s.db +'): %0D%0A%09%09%09 -# Tweak if new file types are added. +pass%0D%0A%09%09else: %0D%0A%09%09
ba7fbebe4285de482028a5b88cc939b910bbcc6c
Remove some duplicated code
pombola/core/management/commands/core_find_stale_elasticsearch_documents.py
pombola/core/management/commands/core_find_stale_elasticsearch_documents.py
import sys

from django.core.management.base import BaseCommand
from haystack import connections as haystack_connections
from haystack.exceptions import NotHandled
from haystack.query import SearchQuerySet
from haystack.utils.app_loading import (
    haystack_get_models, haystack_load_apps
)


def get_all_indexed_models():
    backends = haystack_connections.connections_info.keys()
    available_models = {}
    for backend_key in backends:
        unified_index = haystack_connections[backend_key].get_unified_index()
        for app in haystack_load_apps():
            for model in haystack_get_models(app):
                try:
                    unified_index.get_index(model)
                except NotHandled:
                    continue
                model_name = model.__module__ + '.' + model.__name__
                available_models[model_name] = {
                    'backend_key': backend_key,
                    'app': app,
                    'model': model,
                }
    return available_models


def get_models_to_check(model_names, available_models):
    models_to_check = []
    if model_names:
        missing_models = False
        for model_name in model_names:
            if model_name in available_models:
                models_to_check.append(model_name)
            else:
                missing_models = True
                print "There was no model {0} with a search index".format(model_name)
        if missing_models:
            print "Some models were not found; they must be one of:"
            for model in sorted(available_models.keys()):
                print "  ", model
            sys.exit(1)
    else:
        models_to_check = sorted(available_models.keys())
    return models_to_check


class Command(BaseCommand):

    args = 'MODEL ...'
    help = 'Get all search results for the given models'

    def handle(self, *args, **options):
        available_models = get_all_indexed_models()
        models_to_check = get_models_to_check(args, available_models)
        # Now we know which models to check, do that:
        for model_name in models_to_check:
            model_details = available_models[model_name]
            backend_key = model_details['backend_key']
            model = model_details['model']
            backend = haystack_connections[backend_key].get_backend()
            unified_index = haystack_connections[backend_key].get_unified_index()
            index = unified_index.get_index(model)
            qs = index.build_queryset()
            print "Checking {0} ({1} objects in the database)".format(
                model_name, qs.count()
            )
            # Get all the primary keys from the database:
            pks_in_database = set(
                unicode(pk) for pk in qs.values_list('pk', flat=True)
            )
            # Then go through every search result for that
            # model, and check that the primary key is one
            # that's in the database:
            for search_result in SearchQuerySet(using=backend.connection_alias).models(model):
                if search_result.pk not in pks_in_database:
                    print "  stale search entry for primary key", search_result.pk
Python
0.003916
@@ -441,16 +441,114 @@
 ckends:%0A + connection = haystack_connections%5Bbackend_key%5D%0A backend = connection.get_backend()%0A
@@ -617,16 +617,16 @@
 index()%0A -
@@ -741,32 +741,40 @@
 + index = unified_index.g
@@ -1019,16 +1019,56 @@
 nd_key,%0A + 'backend': backend,%0A
@@ -1127,16 +1127,52 @@
 model,%0A + 'index': index,%0A
@@ -2399,322 +2399,35 @@
 -backend_key = model_details%5B'backend_key'%5D%0A model = model_details%5B'model'%5D%0A%0A backend = haystack_connections%5Bbackend_key%5D.get_backend()%0A unified_index = haystack_connections%5Bbackend_key%5D.get_unified_index()%0A%0A index = unified_index.get_index(model)%0A%0A qs = index +qs = model_details%5B'index'%5D .bui
@@ -2949,21 +2949,59 @@
 Set( -using= +%0A using=model_details%5B' backend +'%5D .con
@@ -3013,16 +3013,29 @@
 on_alias +%0A ).models
@@ -3040,16 +3040,33 @@
 ls(model +_details%5B'model'%5D ):%0A
7a72aa3ae46cf945ee47ecb3a84c482f2deddb7a
Add possibility of not remembering past - useful for when not using BPTT.
pybrain/structure/networks/recurrent.py
pybrain/structure/networks/recurrent.py
# -*- coding: utf-8 -*-

"""Module that contains the RecurrentNetwork class."""


__author__ = 'Justin Bayer, bayer.justin@googlemail.com'


from pybrain.structure.networks.network import Network
from pybrain.structure.connections.shared import SharedConnection


class RecurrentNetworkComponent(object):

    sequential = True

    def __init__(self, name=None, *args, **kwargs):
        self.recurrentConns = []
        self.maxoffset = 0

    def __str__(self):
        s = super(RecurrentNetworkComponent, self).__str__()
        s += " Recurrent Connections:\n %s" % (
            sorted(self.recurrentConns, key=lambda c: c.name))
        return s

    def _containerIterator(self):
        for c in super(RecurrentNetworkComponent, self)._containerIterator():
            yield c
        for c in self.recurrentConns:
            if c.paramdim and not isinstance(c, SharedConnection):
                yield c

    def addRecurrentConnection(self, c):
        """Add a connection to the network and mark it as a recurrent one."""
        if isinstance(c, SharedConnection):
            if c.mother not in self.motherconnections:
                self.motherconnections.append(c.mother)
                c.mother.owner = self
        elif c.paramdim > 0:
            c.owner = self
        self.recurrentConns.append(c)
        self.sorted = False

    def activate(self, inpt):
        """Do one transformation of an input and return the result."""
        self.inputbuffer[self.offset] = inpt
        self.forward()
        return self.outputbuffer[self.offset - 1].copy()

    def backActivate(self, outerr):
        """Do one transformation of an output error outerr backward and
        return the error on the input."""
        self.outputerror[self.offset - 1] = outerr
        self.backward()
        return self.inputerror[self.offset].copy()

    def forward(self):
        """Produce the output from the input."""
        if not (self.offset + 1 < self.inputbuffer.shape[0]):
            self._growBuffers()
        super(RecurrentNetworkComponent, self).forward()
        self.offset += 1
        self.maxoffset = max(self.offset, self.maxoffset)

    def backward(self):
        """Produce the input error from the output error."""
        self.offset -= 1
        super(RecurrentNetworkComponent, self).backward()

    def _isLastTimestep(self):
        return self.offset == self.maxoffset

    def _forwardImplementation(self, inbuf, outbuf):
        assert self.sorted, ".sortModules() has not been called"
        index = 0
        offset = self.offset
        for m in self.inmodules:
            m.inputbuffer[offset] = inbuf[index:index + m.indim]
            index += m.indim

        if offset > 0:
            for c in self.recurrentConns:
                c.forward(offset - 1, offset)

        for m in self.modulesSorted:
            m.forward()
            for c in self.connections[m]:
                c.forward(offset, offset)

        index = 0
        for m in self.outmodules:
            outbuf[index:index + m.outdim] = m.outputbuffer[offset]
            index += m.outdim

    def _backwardImplementation(self, outerr, inerr, outbuf, inbuf):
        assert self.sorted, ".sortModules() has not been called"
        index = 0
        offset = self.offset
        for m in self.outmodules:
            m.outputerror[offset] = outerr[index:index + m.outdim]
            index += m.outdim

        if not self._isLastTimestep():
            for c in self.recurrentConns:
                c.backward(offset, offset + 1)

        for m in reversed(self.modulesSorted):
            for c in self.connections[m]:
                c.backward(offset, offset)
            m.offset = offset
            m.backward()

        index = 0
        for m in self.inmodules:
            inerr[index:index + m.indim] = m.inputerror[offset]
            index += m.indim

    def sortModules(self):
        self.recurrentConns.sort(key=lambda x: x.name)
        super(RecurrentNetworkComponent, self).sortModules()


class RecurrentNetwork(RecurrentNetworkComponent, Network):
    """Class that implements networks which can work with sequential data.

    Until .reset() is called, the network keeps track of all previous inputs
    and thus allows the use of recurrent connections and layers that look
    back in time."""

    bufferlist = Network.bufferlist

    def __init__(self, *args, **kwargs):
        Network.__init__(self, *args, **kwargs)
        RecurrentNetworkComponent.__init__(self, *args, **kwargs)
Python
0
@@ -345,16 +345,24 @@
 __(self, + forget, name=No
@@ -442,16 +442,109 @@
 fset = 0 +%0A if forget:%0A self.increment = 0%0A else:%0A self.increment = 1 %0A%0A de
@@ -1672,17 +1672,30 @@
 ffset - -1 +self.increment %5D.copy()
@@ -1885,17 +1885,30 @@
 ffset - -1 +self.increment %5D = oute
@@ -2088,17 +2088,30 @@
 ffset + -1 +self.increment %3C self.
@@ -2245,17 +2245,30 @@
 fset += -1 +self.increment %0A
@@ -2427,17 +2427,30 @@
 fset -= -1 +self.increment %0A
@@ -2972,17 +2972,30 @@
 ffset - -1 +self.increment , offset
@@ -3732,17 +3732,30 @@
 ffset + -1 +self.increment )%0A%0A
@@ -4530,16 +4530,46 @@
 time +, unless forget is set to True .%22%22%22%0A%0A
@@ -4617,32 +4617,46 @@
 f __init__(self, + forget=False, *args, **kwargs
@@ -4746,32 +4746,40 @@
 t.__init__(self, + forget, *args, **kwargs
a64cb6ecb6a0d563a10db2a32614553a08ca7fed
add first hover info for bokeh plotter
xyzpy/plot/plotter_bokeh.py
xyzpy/plot/plotter_bokeh.py
import functools

from ..manage import auto_xyz_ds
from .core import _prepare_data_and_styles, _process_plot_range


@functools.lru_cache(1)
def _init_bokeh_nb():
    """Cache this so it doesn't happen over and over again. """
    from bokeh.plotting import output_notebook
    output_notebook()


def bshow(figs, nb=True, **kwargs):
    from bokeh.plotting import show
    if nb:
        _init_bokeh_nb()
        show(figs)
    else:
        show(figs)


def blineplot(ds, y_coo, x_coo, z_coo=None,
              # Figure options
              figsize=(6, 5),          # absolute figure size
              title=None,
              # Line coloring options
              colors=None,
              colormap="xyz",
              colormap_log=False,
              colormap_reverse=False,
              # Legend options
              legend=None,
              legend_loc=0,            # legend location
              ztitle=None,             # legend title
              zlabels=None,            # legend labels
              legend_ncol=1,           # number of columns in the legend
              legend_bbox=None,        # Where to anchor the legend to
              # x-axis options
              xtitle=None,
              xtitle_pad=10,           # distance between label and axes line
              xlims=None,              # plotting range on x axis
              xticks=None,             # where to place x ticks
              xticklabels_hide=False,  # hide labels but not actual ticks
              xlog=False,              # logarithmic x scale
              # y-axis options
              ytitle=None,
              ytitle_pad=10,           # distance between label and axes line
              ylims=None,              # plotting range on y-axis
              yticks=None,             # where to place y ticks
              yticklabels_hide=False,  # hide labels but not actual ticks
              ylog=False,              # logarithmic y scale
              # Shapes
              markers=None,            # use markers for each plotted point
              line_styles=None,        # iterable of line-styles, e.g. '--'
              line_widths=None,        # iterable of line-widths
              zorders=None,            # draw order
              # Misc options
              padding=None,            # plot range padding (as fraction)
              vlines=None,             # iterable of vertical lines to plot
              hlines=None,             # iterable of horizontal lines to plot
              gridlines=True,
              font=('Source Sans Pro', 'PT Sans', 'Liberation Sans', 'Arial'),
              fontsize_title=20,
              fontsize_ticks=16,
              fontsize_xtitle=20,
              fontsize_ytitle=20,
              fontsize_ztitle=20,
              fontsize_zlabels=18,
              return_fig=False,
              **kwargs):
    """Interactively plot a dataset using bokeh.
    """
    from bokeh.plotting import figure
    from bokeh.models import Span

    # Prepare data and labels etc ------------------------------------------- #

    xlims, ylims = _process_plot_range(xlims, ylims, ds, x_coo, y_coo, padding)

    z_vals, cols, zlabels, gen_xy = _prepare_data_and_styles(
        ds=ds, y_coo=y_coo, x_coo=x_coo, z_coo=z_coo, zlabels=zlabels,
        colors=colors, colormap=colormap, colormap_log=colormap_log,
        colormap_reverse=colormap_reverse, engine='BOKEH')

    # Make figure and custom lines etc -------------------------------------- #

    p = figure(width=int(figsize[0] * 100), height=int(figsize[1] * 100),
               x_axis_label=x_coo, y_axis_label=y_coo,
               x_axis_type=('log' if xlog else 'linear'),
               y_axis_type=('log' if ylog else 'linear'),
               toolbar_location="above", toolbar_sticky=False,
               active_scroll="wheel_zoom",
               x_range=xlims, y_range=ylims,
               webgl=False)

    if hlines:
        for hl in hlines:
            p.add_layout(Span(location=hl, dimension='width', level='glyph',
                              line_color=(127, 127, 127), line_dash='dashed',
                              line_width=1))
    if vlines:
        for vl in vlines:
            p.add_layout(Span(location=vl, dimension='height', level='glyph',
                              line_color=(127, 127, 127), line_dash='dashed',
                              line_width=1))

    # Plot lines and markers on figure -------------------------------------- #

    for x, y in gen_xy():
        col = next(cols)
        zlabel = next(zlabels)
        p.line(x, y, legend=zlabel, color=col, line_width=1.3)
        if markers:
            p.circle(x, y, legend=zlabel, color=col)

    if return_fig:
        return p
    bshow(p, **kwargs)


def xyz_blineplot(x, y_z, **blineplot_opts):
    """ Take some x-coordinates and an array, convert them to a Dataset
    treating as multiple lines, then send to ilineplot. """
    ds = auto_xyz_ds(x, y_z)
    return blineplot(ds, 'y', 'x', 'z', **blineplot_opts)
Python
0
@@ -2919,32 +2919,134 @@
 bokeh.%0A %22%22%22%0A + # TODO: toggle legend%0A # TODO: automatic plot range, and padding%0A # TODO: hover z_coo info%0A%0A from bokeh.p
@@ -3100,16 +3100,27 @@
 ort Span +, HoverTool %0A%0A #
@@ -4882,16 +4882,182 @@
 olor=col +, name=zlabel)%0A%0A p.add_tools(HoverTool(%0A tooltips=%5B%0A (%22(%22 + x_coo + %22, %22 + y_coo + %22)%22, %22($x, $y)%22),%0A (z_coo, %22@%22 + z_coo),%0A %5D) )%0A%0A i
93c489f9024386602c6674f4d31b1c7cdd1b3339
Fix runtime error.
src/cid/utils/fileUtils.py
src/cid/utils/fileUtils.py
# -*- encoding: utf-8 -*-
"""
@authors: Sebastián Ortiz V. neoecos@gmail.com

@license: GNU AFFERO GENERAL PUBLIC LICENSE

Caliope Server is the web server of Caliope's Framework
Copyright (C) 2013 Infometrika

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
#system, and standard library
import os
import json
import re
import mimetypes
import gzip
import StringIO

#flask
from flask import request, current_app

#werkezug
from werkzeug.datastructures import Headers
from werkzeug.wsgi import wrap_file
from werkzeug.exceptions import NotFound

from cid.utils.jsOptimizer import jsOptimizer


def loadJSONFromFile(filename, root_path=None):
    if filename is not None:
        if not os.path.isabs(filename):
            filename = os.path.join(root_path, filename)
    if not os.path.isfile(filename):
        print "Error : JSON file " + filename + " not found"
        raise NotFound("JSON file " + filename + " not found")
    try:
        json_data = re.sub("(?:/\\*(?:[^*]|(?:\\*+[^*/]))*\\*+/)", '',
                           open(filename).read(), re.MULTILINE)
        json_data = json.loads(json_data)
    except IOError:
        json_data = {}
        print "Error: can\'t find file or read data"
    except ValueError:
        json_data = {}
        print "Error, is not a JSON" + filename
    else:
        return json_data


def loadJSONFromFileNoPath(filename):
    if not os.path.isfile(filename):
        print "Error : JSON file " + filename + " not found"
        raise NotFound("JSON file " + filename + " not found")
    try:
        json_data = re.sub("(?:/\\*(?:[^*]|(?:\\*+[^*/]))*\\*+/)", '',
                           open(filename).read(), re.MULTILINE)
        json_data = json.loads(json_data)
    except IOError:
        json_data = {}
        print "Error: can\'t find file or read data"
    except ValueError:
        json_data = {}
        print "Error, is not a JSON" + filename
    else:
        return json_data


def send_from_memory(filename):
    """
    :param filename: Name of the file to be loaded.
    """
    if not os.path.isfile(filename):
        raise NotFound()
    #if filename is not None:
        #if not os.path.isabs(filename):
            #filename = os.path.join(current_app.root_path, filename)
    mimetype = mimetypes.guess_type(filename)[0]
    if mimetype is None:
        mimetype = 'application/octet-stream'
    if current_app.config['cache_enabled']:
        data = jsOptimizer().get_file(os.path.abspath(filename), current_app.storekv)
    else:
        data = None
    if data:
        headers = Headers()
        headers['Content-Encoding'] = 'gzip'
        headers['Content-Length'] = len(data)
        headers['Cache-Control'] = "max-age=172800, public, must-revalidate"
        rv = current_app.response_class(data, mimetype=mimetype, headers=headers,
                                        direct_passthrough=True)
    else:
        file = open(filename, 'rb')
        data = wrap_file(request.environ, file)
        headers = Headers()
        rv = current_app.response_class(data, mimetype=mimetype, headers=headers,
                                        direct_passthrough=False)
    return rv


#From
#https://github.com/elasticsales/Flask-gzip/blob/master/flask_gzip.py
class Gzip(object):
    def __init__(self, compress_level=6, minimum_size=500):
        self.compress_level = compress_level
        self.minimum_size = minimum_size

    def after_request(self, response):
        accept_encoding = request.headers.get('Accept-Encoding', '')

        if 'gzip' not in accept_encoding.lower():
            return response
        if response.direct_passthrough:
            return response
        if (200 > response.status_code >= 300) or len(
                response.data) < self.minimum_size or 'Content-Encoding' in response.headers:
            return response
        gzip_buffer = StringIO.StringIO()
        gzip_file = gzip.GzipFile(mode='wb', compresslevel=self.compress_level,
                                  fileobj=gzip_buffer)
        gzip_file.write(response.data)
        gzip_file.close()
        response.data = gzip_buffer.getvalue()
        response.headers['Content-Encoding'] = 'gzip'
        response.headers['Content-Length'] = len(response.data)
        response.headers['Cache-Control'] = "max-age=172800, public, must-revalidate"
        return response
Python
0.000011
@@ -1258,20 +1258,18 @@
 ot_path= -None +'' ):%0A i
@@ -1307,34 +1307,21 @@
 if -not os.path.isabs(filename +len(root_path ):%0A
0bdc25cab0deeca81cedace37f135bef40f6eb09
use shutdownMicroprocess() instead; fix hilighted by tests.
Sketches/PT/dns.py
Sketches/PT/dns.py
#!/usr/bin/env python
# (C) 2006 British Broadcasting Corporation and Kamaelia Contributors(1)
#     All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
#     http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
#     not this notice.
# (2) Reproduced in the COPYING file, and at:
#     http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: kamaelia-list-owner@lists.sourceforge.net
# to discuss alternative licensing.
# -------------------------------------------------------------------------
"""\
========================
Non-blocking DNS lookups
========================

This component will process DNS requests, using the blocking syscall
gethostbyname(). It will take hostnames recieved on "inbox" and puts a
tuple of (hostname, ip) in "outbox". In the event of a failure, the
specific message will be placed on "signal" in the form
(hostname, error code).


Example Usage
-------------

Type hostnames, and they will be resolved and printed out.

pipeline(
    ConsoleReader(">>> ", ""),
    GetHostByName(),
    ConsoleEchoer(),
).run()

How does it work?
-----------------

The gethostbyname() syscall is a blocking one, and its use unmodified in
a kamaelia system can be a problem. This threadedcomponent processes
requests and can block without problems.

Note that although all requests are processed sequentially, this may not
always be the case, and should not be relied on, hence returning the
hostname along with the IP address.
"""

from Axon.ThreadedComponent import threadedcomponent
from Axon.Ipc import producerFinished, shutdown
import socket


class GetHostByName(threadedcomponent):
    def __init__(self, oneShot = False):
        self.oneShot = oneShot
        super(GetHostByName, self).__init__()

    def doLookup(self, data):
        try:
            hostname = socket.gethostbyname(data)
        except socket.gaierror, e:
            self.send((data, e[1]), "signal")
        else:
            self.send((data, hostname), "outbox")

    def main(self):
        if self.oneShot:
            self.doLookup(self, oneShot)
            self.send(producerFinished(self), "signal")
            return
        while True:
            while self.dataReady("inbox"):
                returnval = self.doLookup(self.recv("inbox"))
                if returnval != None:
                    self.send(returnval, "outbox")
            while self.dataReady("control"):
                msg = self.recv("control")
                if isinstance(msg, producerFinished) or isinstance(msg, shutdown):
                    self.send(producerFinished(self), "signal")
                    return
            self.pause()


__kamaelia_components__ = ( GetHostByName, )


if __name__ == "__main__":
    from Kamaelia.Chassis.Pipeline import Pipeline
    from Kamaelia.Util.Console import ConsoleReader, ConsoleEchoer
    Pipeline(ConsoleReader(">>> ", ""),GetHostByName(),ConsoleEchoer()).run()
Python
0
@@ -15,16 +15,17 @@
 python%0A +# %0A# (C) 2
@@ -30,9 +30,9 @@
 200 -6 +7 Bri
@@ -1396,16 +1396,17 @@
 out -. +:: %0A%0A -p +P ipel
@@ -1508,16 +1508,17 @@
 .run()%0A%0A +%0A How does
@@ -1912,12 +1912,165 @@
 s.%0A%0A +If this component recieves producerFinished or shutdown on the %22signal%22 inbox, %0Ait will emit a producerFinished on the %22control%22 outbox, and shut down.%0A %22%22%22%0A +%0A from
@@ -2165,16 +2165,28 @@
 shutdown +Microprocess %0Aimport
@@ -2639,26 +2639,25 @@
 oLookup(self -, +. oneShot)%0A
@@ -3110,16 +3110,28 @@
 shutdown +Microprocess ):%0A
5f1f1145d4f01f4b30e8782d284feb44781c21ad
Use sorted on the set to parametrize tests so that pytest-xdist works
tests/cupyx_tests/scipy_tests/special_tests/test_ufunc_dispatch.py
tests/cupyx_tests/scipy_tests/special_tests/test_ufunc_dispatch.py
import numpy

import cupy
import scipy.special
import cupyx.scipy.special
from cupy import testing

import pytest


scipy_ufuncs = {
    f for f in scipy.special.__all__
    if isinstance(getattr(scipy.special, f), numpy.ufunc)
}
cupyx_scipy_ufuncs = {
    f for f in dir(cupyx.scipy.special)
    if isinstance(getattr(cupyx.scipy.special, f), cupy.ufunc)
}


@testing.gpu
@testing.with_requires("scipy")
@pytest.mark.parametrize("ufunc", cupyx_scipy_ufuncs & scipy_ufuncs)
class TestUfunc:
    @testing.numpy_cupy_allclose(atol=1e-5)
    def test_dispatch(self, xp, ufunc):
        ufunc = getattr(scipy.special, ufunc)
        # some ufunc (like sph_harm) do not work with float inputs
        # therefore we retrieve the types from the ufunc itself
        types = ufunc.types[0]
        args = [
            cupy.testing.shaped_random((5,), xp, dtype=types[i])
            for i in range(ufunc.nargs - 1)
        ]
        res = ufunc(*args)
        assert type(res) == xp.ndarray
        return res
Python
0
@@ -358,17 +358,16 @@
 unc)%0A%7D%0A%0A -%0A @testing
@@ -437,16 +437,23 @@
 ufunc%22, +sorted( cupyx_sc
@@ -478,16 +478,17 @@
 _ufuncs) +) %0Aclass T
b8bc5256eb4deae2047f703849d5ad04d24f3b3a
Add another logging change
spacelaunchnow/settings.py
spacelaunchnow/settings.py
""" Django settings for spacelaunchnow project. Generated by 'django-admin startproject' using Django 1.11.1. For more information on this file, see https://docs.djangoproject.com/en/1.11/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.11/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) from spacelaunchnow import config BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) STATIC_ROOT = os.path.normpath(os.path.join(BASE_DIR, 'staticfiles')) SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = config.keys['DJANGO_SECRET_KEY'] # SECURITY WARNING: don't run with debug turned on in production! DEBUG = False ALLOWED_HOSTS = ['localhost', '.calebjones.me', '159.203.85.8', '.spacelaunchnow.me', '127.0.0.1'] REST_FRAMEWORK = { 'PAGE_SIZE': 5, 'DEFAULT_MODEL_SERIALIZER_CLASS': 'drf_toolbox.serializers.ModelSerializer', } LOGIN_REDIRECT_URL = '/' LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'standard': { }, }, 'handlers': { 'django_default': { 'class': 'logging.FileHandler', 'filename': 'log/django.log', 'formatter': 'standard' }, 'console': { 'class': 'logging.StreamHandler', 'formatter': 'standard' }, 'digest': { 'class': 'logging.FileHandler', 'filename': 'log/bot/daily_digest.log', 'formatter': 'standard' }, 'notifications': { 'class': 'logging.FileHandler', 'filename': 'log/bot/notification.log', 'formatter': 'standard' } }, 'loggers': { 'django': { 'handlers': ['django_default', 'console'], 'propagate': True, }, 'bot.digest': { 'handlers': ['django_default', 'digest'], 'level': 'DEBUG', 'propagate': True, }, 'bot.notifications': { 'handlers': ['django_default', 'notifications'], 'level': 'DEBUG', 'propagate': True, } }, } # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'rest_framework', 'api.apps.ApiConfig', 'rest_framework_docs', 'bot', 'djcelery', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'spacelaunchnow.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [BASE_DIR + '/templates/'], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'spacelaunchnow.wsgi.application' # Database # https://docs.djangoproject.com/en/1.11/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 
'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.11/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = False # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.11/howto/static-files/ STATIC_URL = '/staticfiles/' # CELERY STUFF BROKER_URL = "amqp://spacelaunchnow:spacelaunchnow@localhost:5672/vhost_spacelaunchnow" CELERY_ACCEPT_CONTENT = ['json'] CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend' CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler' CELERY_TASK_SERIALIZER = 'json' CELERY_RESULT_SERIALIZER = 'json' CELERY_TIMEZONE = 'UTC'
Python
0
@@ -1320,24 +1320,170 @@
 tandard': %7B%0A + 'format': '%25(asctime)s %5B%25(levelname)s%5D - %5B%25(name)s: %25(module)s %25(lineno)s%5D - %25(message)s',%0A 'datefmt': '%25m-%25d-%25Y %25H:%25M:%25S'%0A %7D,%0A
72c0c74936b7a7c1c9df572adb41f33283c74d57
don't need to autoescape the output of urlize
localtv/templatetags/filters.py
localtv/templatetags/filters.py
# Copyright 2009 - Participatory Culture Foundation
#
# This file is part of Miro Community.
#
# Miro Community is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Miro Community is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Miro Community. If not, see <http://www.gnu.org/licenses/>.

import datetime
import re

from BeautifulSoup import BeautifulSoup, Comment, Tag
from django.template import Library
from django.utils.html import urlize
from django.utils.safestring import mark_safe

register = Library()

def simpletimesince(value, arg=None):
    """Formats a date as the time since that date (i.e. "4 days, 6 hours")."""
    from django.utils.timesince import timesince
    if not value:
        return u''
    try:
        if arg:
            return timesince(value, arg)
        return timesince(value, datetime.datetime.utcnow()).split(', ')[0]
    except (ValueError, TypeError):
        return u''

def sanitize(value, extra_filters=None):
    """
    Sanitize the given HTML.

    Based on code from:
    * http://www.djangosnippets.org/snippets/1655/
    * http://www.djangosnippets.org/snippets/205/
    """
    if value is None:
        return u''
    if '<' not in value: # no HTML
        return urlize(mark_safe(value), nofollow=True,
                      autoescape=True)
    # convert plain-text links into HTML
    js_regex = re.compile(r'[\s]*(&#x.{1,7})?'.join(list('javascript')),
                          re.IGNORECASE)
    allowed_tags = ('p i strong em b u a h1 h2 h3 h4 h5 h6 pre br img ul '
                    'ol li span').split()
    allowed_attributes = 'href src style'.split()

    whitelist = False
    extra_tags = ()
    extra_attributes = ()
    if isinstance(extra_filters, basestring):
        if '|' in extra_filters:
            parts = extra_filters.split('|')
        else:
            parts = [extra_filters.split()]
        if parts[0] == 'whitelist':
            whitelist = True
            parts = parts[1:]
        extra_tags = parts[0].split()
        if len(parts) > 1:
            extra_attributes = parts[1].split()
    elif extra_filters:
        extra_tags = extra_filters

    if whitelist:
        allowed_tags, allowed_attributes = extra_tags, extra_attributes
    else:
        allowed_tags = set(allowed_tags) | set(extra_tags)
        allowed_attributes = set(allowed_attributes) | set(extra_attributes)

    soup = BeautifulSoup(value)
    for comment in soup.findAll(
        text=lambda text: isinstance(text, Comment)):
        # remove comments
        comment.extract()

    for tag in soup.findAll(True):
        if tag.name not in allowed_tags:
            tag.hidden = True
        else:
            tag.attrs = [(attr, js_regex.sub('', val))
                         for attr, val in tag.attrs
                         if attr in allowed_attributes]

    return mark_safe(soup.renderContents().decode('utf8'))

def wmode_transparent(value):
    soup = BeautifulSoup(value)
    param_tag = Tag(soup, 'param', [
            ('name', 'wmode'),
            ('value', 'transparent')])

    for html_object in soup.findAll('object'):
        html_object.insert(0, param_tag)

    for flash_embed in soup.findAll('embed',
                                    type="application/x-shockwave-flash"):
        flash_embed['wmode'] = 'transparent'

    return mark_safe(soup.prettify())

register.filter(simpletimesince)
register.filter(sanitize)
register.filter(wmode_transparent)
Python
0.99954
@@ -1745,48 +1745,8 @@
 True -,%0A autoescape=True ) #
146e794ca65311ade1a9d81045b5709c8634fd62
add input visibility processstore
magpie/adapter/magpieprocess.py
magpie/adapter/magpieprocess.py
""" Store adapters to read data from magpie. """ from six.moves.urllib.parse import urlparse import logging import requests import json LOGGER = logging.getLogger("TWITCHER") from magpie.definitions.twitcher_definitions import * from magpie.definitions.pyramid_definitions import ConfigurationError, HTTPOk, HTTPCreated, HTTPNotFound # import 'process' elements separately than 'twitcher_definitions' because not defined in master from twitcher.config import get_twitcher_configuration, TWITCHER_CONFIGURATION_EMS from twitcher.exceptions import ProcessNotFound from twitcher.store import processstore_defaultfactory from twitcher.store.base import ProcessStore from twitcher.visibility import VISIBILITY_PUBLIC, VISIBILITY_PRIVATE class MagpieProcessStore(ProcessStore): """ Registry for OWS processes. Uses default process store for most operations. Uses magpie to update process access and visibility. """ def __init__(self, registry): try: # add 'http' scheme to url if omitted from config since further 'requests' calls fail without it # mostly for testing when only 'localhost' is specified # otherwise twitcher config should explicitly define it in MAGPIE_URL url_parsed = urlparse(registry.settings.get('magpie.url').strip('/')) if url_parsed.scheme in ['http', 'https']: self.magpie_url = url_parsed.geturl() else: self.magpie_url = 'http://{}'.format(url_parsed.geturl()) LOGGER.warn("Missing scheme from MagpieServiceStore url, new value: '{}'".format(self.magpie_url)) self.twitcher_config = get_twitcher_configuration(registry.settings) except AttributeError: #If magpie.url does not exist, calling strip fct over None will raise this issue raise ConfigurationError('magpie.url config cannot be found') def save_process(self, process, overwrite=True, request=None): """Delegate execution to default twitcher process store.""" return processstore_defaultfactory(request.registry).save_process(process, overwrite, request) def delete_process(self, process_id, request=None): """Delegate execution to default twitcher process store.""" return processstore_defaultfactory(request.registry).delete_process(process_id, request) def list_processes(self, request=None): """Delegate execution to default twitcher process store.""" return processstore_defaultfactory(request.registry).list_processes(request) def fetch_by_id(self, process_id, request=None): """Delegate execution to default twitcher process store.""" return processstore_defaultfactory(request.registry).fetch_by_id(process_id, request) def _get_process_resource_id(self, process_id, request): resp = requests.get('{host}/groups/users/resources'.format(host=self.magpie_url), cookies=request.cookies) if resp.status_code != HTTPOk.code: raise resp.raise_for_status() try: ems_resources = resp.json()['resources']['api']['ems']['resources'] ems_processes = None for res_id in ems_resources: if ems_resources[res_id]['resource_name'] == 'processes': ems_processes = ems_resources[res_id]['children'] break if not ems_processes: raise ProcessNotFound("Could not find processes resource endpoint for visibility retrieval.") for process_res_id in ems_processes: if ems_processes[process_res_id]['resource_name'] == process_id: return ems_processes[process_res_id]['resource_id'] except KeyError: raise ProcessNotFound('Could not find process `{}` resource for visibility retrieval.'.format(process_id)) return None def get_visibility(self, process_id, request=None): """ Get visibility of a process. 
If twitcher is not in EMS mode, simply delegate execution to default twitcher process store. If twitcher is in EMS mode, return the magpie visibility status according to user permissions. """ if self.twitcher_config != TWITCHER_CONFIGURATION_EMS: return processstore_defaultfactory(request.registry).get_visibility(process_id, request) process_res_id = self._get_process_resource_id(process_id, request) return VISIBILITY_PUBLIC if process_res_id is not None else VISIBILITY_PRIVATE def set_visibility(self, process_id, visibility, request=None): """ Set visibility of a process. Delegate change of process visibility to default twitcher process store. If twitcher is in EMS mode, also modify magpie permissions of corresponding process access point. """ # write visibility to store to remain consistent in processes structures even if using magpie permissions processstore_defaultfactory(request.registry).set_visibility(process_id, request) if self.twitcher_config == TWITCHER_CONFIGURATION_EMS: process_res_id = self._get_process_resource_id(process_id, request) if not process_res_id: raise ProcessNotFound('Could not find process `{}` resource to change visibility.'.format(process_id)) if visibility == VISIBILITY_PRIVATE: path = '{host}/groups/users/resources/{id}/permissions/{perm}' \ .format(host=self.magpie_url, id=process_res_id, perm='read') reps = requests.delete(path, cookies=request.cookies) if reps.status_code not in (HTTPOk.code, HTTPNotFound.code): raise reps.raise_for_status() elif visibility == VISIBILITY_PUBLIC: path = '{host}/groups/users/resources/{id}/permissions'.format(host=self.magpie_url, id=process_res_id) reps = requests.post(path, cookies=request.cookies, data={u'permission_name': u'read'}) if reps.status_code not in (HTTPOk.code, HTTPCreated.code): raise reps.raise_for_status()
Python
0.000001
@@ -5052,32 +5052,44 @@
 lity(process_id, + visibility, request)%0A%0A
fabd8e5a1fbb8dd083b05b053320b090fedad119
Fix cryptostate to no longer assign multiple states at once (issue #620)
mailpile/plugins/cryptostate.py
mailpile/plugins/cryptostate.py
from gettext import gettext as _

from mailpile.plugins import PluginManager


_plugins = PluginManager(builtin=__file__)


##[ Keywords ]################################################################

def text_kw_extractor(index, msg, ctype, text):
    kw = set()
    if ('-----BEGIN PGP' in text and '\n-----END PGP' in text):
        kw.add('pgp:has')
        kw.add('crypto:has')
    return kw


def meta_kw_extractor(index, msg_mid, msg, msg_size, msg_ts):
    kw, enc, sig = set(), set(), set()
    for part in msg.walk():
        enc.add('mp_%s-%s' % ('enc', part.encryption_info['status']))
        sig.add('mp_%s-%s' % ('sig', part.signature_info['status']))

        # This is generic
        if (part.encryption_info.get('status') != 'none'
                or part.signature_info.get('status') != 'none'):
            kw.add('crypto:has')

        # This is OpenPGP-specific
        if (part.encryption_info.get('protocol') == 'openpgp'
                or part.signature_info.get('protocol') == 'openpgp'):
            kw.add('pgp:has')

    # FIXME: Other encryption protocols?

    for tname in (enc | sig):
        tag = index.config.get_tags(slug=tname)
        if tag:
            kw.add('%s:in' % tag[0]._key)

    return list(kw)

_plugins.register_text_kw_extractor('crypto_tkwe', text_kw_extractor)
_plugins.register_meta_kw_extractor('crypto_mkwe', meta_kw_extractor)


##[ Search helpers ]##########################################################

def search(config, idx, term, hits):
    #
    # FIXME: Translate things like pgp:signed into a search for all the
    # tags that have signatures (good or bad).
    #
    return []

_plugins.register_search_term('crypto', search)
_plugins.register_search_term('pgp', search)
Python
0
@@ -70,16 +70,80 @@
 Manager%0A +from mailpile.crypto.state import EncryptionInfo, SignatureInfo%0A %0A%0A_plugi
@@ -568,68 +568,67 @@
 -for part in msg.walk():%0A enc.add('mp_%25s-%25s' %25 ('enc', +def crypto_eval(part):%0A # This is generic%0A if par
@@ -640,33 +640,37 @@
 ryption_info -%5B +.get( 'status' %5D))%0A
@@ -657,19 +657,28 @@
 'status' -%5D)) +) != 'none': %0A
@@ -678,19 +678,23 @@
 -sig + enc .add('mp
@@ -709,19 +709,19 @@
 %25 (' -sig +enc ', part. sign
@@ -712,33 +712,34 @@
 'enc', part. -signature +encryption _info%5B'statu
@@ -744,25 +744,24 @@
 tus'%5D))%0A -%0A # This i
@@ -752,33 +752,40 @@
 -# This is generic + kw.add('crypto:has') %0A
@@ -788,32 +788,30 @@
 if -( part. -encryption +signature _info.ge
@@ -831,16 +831,17 @@
 = 'none' +: %0A
@@ -837,38 +837,60 @@
 e':%0A - or +sig.add('mp_%25s-%25s' %25 ('sig', part.signature_
@@ -889,37 +889,33 @@
 gnature_info -.get( +%5B 'status' ) != 'none')
@@ -906,21 +906,11 @@
 tus' -) != 'none'): +%5D)) %0A
@@ -1184,16 +1184,758 @@
 ocols?%0A%0A + def choose_one(fmt, statuses, ordering):%0A for o in ordering:%0A status = (fmt %25 o)%0A if status in statuses:%0A return set(%5Bstatus%5D)%0A return set(list(statuses)%5B:1%5D)%0A%0A # Evaluate all the message parts%0A crypto_eval(msg)%0A for part in msg.walk():%0A crypto_eval(part)%0A%0A # OK, we should have exactly encryption state...%0A if len(enc) %3C 1:%0A enc.add('mp_enc-none')%0A elif len(enc) %3E 1:%0A enc = choose_one('mp_enc-%25s', enc, EncryptionInfo.STATUSES)%0A%0A # ... and exactly one signature state.%0A if len(sig) %3C 1:%0A sig.add('mp_sig-none')%0A elif len(sig) %3E 1:%0A sig = choose_one('mp_sig-%25s', sig, SignatureInfo.STATUSES)%0A%0A # Emit tags for our states%0A for
@@ -2062,17 +2062,16 @@
 %5D._key)%0A -%0A retu
dfda6dad01050d1198779d1a33838f79adfc2198
Fix KeyError for FilePath
bro-otx.py
bro-otx.py
#!/usr/bin/env python

import requests
import sys

from ConfigParser import ConfigParser
from datetime import datetime, timedelta

# The URL is hard coded. I'm comfortable doing this since it's unlikely that
# the URL will change without resulting in an API change that will require
# changes to this script.
_URL = 'http://otx.alienvault.com/api/v1/pulses/subscribed'

# Bro Intel file header format
_HEADER = "#fields\tindicator\tindicator_type\tmeta.source\tmeta.url\tmeta.do_notice\n"

# Mapping of OTXv2 Indicator types to Bro Intel types, additionally,
# identifies unsupported intel types to prevent errors in Bro.
_MAP = {"IPv4":"Intel::ADDR",
        "IPv6":"Intel::ADDR",
        "domain":"Intel::DOMAIN",
        "hostname":"Intel::DOMAIN",
        "email":"Intel::EMAIL",
        "URL":"Intel::URL",
        "URI":"Intel::URL",
        "FileHash-MD5":"Intel::FILE_HASH",
        "FileHash-SHA1":"Intel::FILE_HASH",
        "FileHash-SHA256":"Intel::FILE_HASH",
        "CVE":"Unsupported",
        "Mutex":"Unsupported",
        "CIDR":"Unsupported"}

def _get(key, mtime, limit=20, next_request=''):
    '''
    Retrieves a result set from the OTXv2 API using the restrictions of mtime
    as a date restriction.
    '''
    headers = {'X-OTX-API-KEY': key}
    params = {'limit': limit, 'modified_since': mtime}
    if next_request == '':
        r = requests.get(_URL, headers=headers, params=params)
    else:
        r = requests.get(next_request, headers=headers)
    # Depending on the response code, return the valid response.
    if r.status_code == 200:
        return r.json()
    if r.status_code == 403:
        print("An invalid API key was specified.")
        sys.exit(1)
    if r.status_code == 400:
        print("An invalid request was made.")
        sys.exit(1)

def iter_pulses(key, mtime, limit=20):
    '''
    Creates an iterator that steps through Pulses since mtime using key.
    '''
    # Populate an initial result set, after this the API will generate the next
    # request in the loop for every iteration.
    initial_results = _get(key, mtime, limit)
    for result in initial_results['results']:
        yield result
    next_request = initial_results['next']
    while next_request:
        json_data = _get(key, mtime, next_request=next_request)
        for result in json_data['results']:
            yield result
        next_request = json_data['next']

def map_indicator_type(indicator_type):
    '''
    Maps an OTXv2 indicator type to a Bro Intel Framework type.
    '''
    return _MAP[indicator_type]

def main():
    '''Retrieve intel from OTXv2 API.'''
    config = ConfigParser()
    config.read('bro-otx.conf')

    key = config.get('otx', 'api_key')
    days = int(config.get('otx', 'days_of_history'))
    outfile = config.get('otx', 'outfile')
    do_notice = config.get('otx', 'do_notice')

    mtime = (datetime.now() - timedelta(days=days)).isoformat()

    with open(outfile, 'wb') as f:
        f.write(_HEADER)
        for pulse in iter_pulses(key, mtime):
            for indicator in pulse[u'indicators']:
                bro_type = map_indicator_type(indicator[u'type'])
                if bro_type == 'Unsupported':
                    continue
                try:
                    url = pulse[u'references'][0]
                except IndexError:
                    url = 'https://otx.alienvault.com'
                fields = [indicator[u'indicator'],
                          bro_type,
                          pulse[u'author_name'],
                          url,
                          do_notice + '\n']
                f.write('\t'.join(fields))

if __name__ == '__main__':
    main()
Python
0.000001
@@ -627,15 +627,21 @@
 = %7B +%0A %22IPv4%22: + %22Int
@@ -647,31 +647,24 @@
 tel::ADDR%22,%0A - %22IPv6%22:%22
@@ -662,16 +662,17 @@
 %22IPv6%22: + %22Intel::
@@ -678,23 +678,16 @@
 :ADDR%22,%0A - %22dom
@@ -691,16 +691,17 @@
 domain%22: + %22Intel::
@@ -705,31 +705,24 @@
 l::DOMAIN%22,%0A - %22hostnam
@@ -724,16 +724,17 @@
 stname%22: + %22Intel::
@@ -742,23 +742,16 @@
 OMAIN%22,%0A - %22ema
@@ -754,16 +754,17 @@
 %22email%22: + %22Intel::
@@ -767,31 +767,24 @@
 el::EMAIL%22,%0A - %22URL%22:%22I
@@ -781,16 +781,17 @@
 %22URL%22: + %22Intel::
@@ -792,31 +792,24 @@
 ntel::URL%22,%0A - %22URI%22:%22I
@@ -806,16 +806,17 @@
 %22URI%22: + %22Intel::
@@ -821,23 +821,16 @@
 ::URL%22,%0A - %22Fil
@@ -840,16 +840,17 @@
 sh-MD5%22: + %22Intel::
@@ -853,39 +853,32 @@
 el::FILE_HASH%22,%0A - %22FileHash-SH
@@ -881,16 +881,17 @@
 h-SHA1%22: + %22Intel::
@@ -898,31 +898,24 @@
 FILE_HASH%22,%0A - %22FileHas
@@ -924,16 +924,17 @@
 SHA256%22: + %22Intel::
@@ -949,105 +949,8 @@
 H%22,%0A - %22CVE%22:%22Unsupported%22,%0A %22Mutex%22:%22Unsupported%22,%0A %22CIDR%22:%22Unsupported%22 %7D%0A%0Ad
@@ -2430,17 +2430,21 @@
 urn _MAP -%5B +.get( indicato
@@ -2449,17 +2449,17 @@
 tor_type -%5D +) %0A%0Adef ma
@@ -3069,24 +3069,15 @@
 ype -== 'Unsupported' +is None :%0A
518a0c3f5b4c3162f42691c395aa03616b5d8925
Add longer timeouts
browser.py
browser.py
# Copyright (C) Adam Piper, 2012
# See COPYING for licence details (GNU AGPLv3)

import pycurl
import StringIO
from lxml.html import fromstring
from urllib import urlencode
from datetime import datetime

class Browser(object):
    @classmethod
    def check_curl(cls, item):
        return item in pycurl.version_info()[8]

    def __init__(self):
        self._curl = pycurl.Curl() # note: this is an "easy" connection
        self._curl.setopt(pycurl.FOLLOWLOCATION, 1) # follow location headers
        self._curl.setopt(pycurl.AUTOREFERER, 1)
        self._curl.setopt(pycurl.MAXREDIRS, 20)
        self._curl.setopt(pycurl.ENCODING, "gzip")
        self._buf = StringIO.StringIO()
        self._curl.setopt(pycurl.WRITEFUNCTION, self._buf.write) # callback for content buffer
        self._curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.24 (KHTML, like Gecko) Ubuntu/10.10 Chromium/11.0.696.65 Chrome/11.0.696.65 Safari/534.24")
        self._curl.setopt(pycurl.COOKIEFILE, "") # use cookies
        self.reset()

    def reset(self):
        self._tree = None
        self._form = None
        self._curl.setopt(pycurl.HTTPGET, 1)
        self._form_data = {}
        self._roundtrip = None

    roundtrip = property(lambda self: self._roundtrip)

    def go(self, url):
        self._buf.truncate(0)
        self._curl.setopt(pycurl.URL, url)

        # execute
        try:
            before = datetime.now()
            self._curl.perform()
        except pycurl.error, e:
            code, message = e
            if code == 60: # SSL cert error; retry
                before = datetime.now()
                self._curl.perform()
            else:
                raise e

        self.reset()
        self._roundtrip = datetime.now() - before
        return self._curl.getinfo(pycurl.RESPONSE_CODE)

    def save(self, filename):
        with open(filename, 'w') as fp:
            fp.write(self.src)

    def parse(self):
        if self._tree is not None:
            return
        self._tree = fromstring(self.src)
        self._tree.make_links_absolute(self._curl.getinfo(pycurl.EFFECTIVE_URL))

    # form selection/submission

    def select_form(self, idx):
        self.parse()
        try:
            self._form = self._tree.forms[idx]
        except TypeError:
            # perhaps we've been given a name/id
            self._form = self._tree.forms[filter(lambda f: idx in (f['name'], f['id']),
                                                 self.forms)[0]['__number']]

    def __setitem__(self, *args, **kwargs):
        self._form_data.__setitem__(*args, **kwargs)

    def set_data(self, **kwargs):
        self._form_data.update(kwargs)

    def get_form_fields(self):
        return dict(self._form.form_values())

    def submit(self, submit_button = None):
        data = self.get_form_fields()
        submits = self.submits
        assert len(submits) <= 1 or submit_button is not None, "Implicit submit is not possible; an explicit choice must be passed: %s" % submits
        if len(submits) > 0:
            try:
                submit = submits[0 if submit_button is None else submit_button]
            except TypeError:
                # perhaps we've been given a name/id
                submit = submits[filter(lambda b: submit_button in b.values(),
                                        submits)[0]['__number']]
            data[submit['name']] = submit['value'] if 'value' in submit else ''
        if self._form_data:
            data.update(self._form_data)
        data = urlencode(data)
        if self._form.method.upper() == 'POST':
            self._curl.setopt(pycurl.POST, 1)
            self._curl.setopt(pycurl.POSTFIELDS, data)
            return self.go(self._form.action)

        sep = '?' if self._form.action.find('?') == -1 else '&'
        return self.go("%(current)s%(sep)s%(data)s" % {'current': self._form.action,
                                                       'sep': sep,
                                                       'data': data})

    def post(self, url, data):
        data = urlencode(data)
        self._curl.setopt(pycurl.POST, 1)
        self._curl.setopt(pycurl.POSTFIELDS, data)
        return self.go(url)

    # helpers

    @property
    def src(self):
        return self._buf.getvalue()

    @property
    def url(self):
        return self._curl.getinfo(pycurl.EFFECTIVE_URL)

    @property
    def title(self):
        self.parse()
        try:
            return self._tree.xpath("/html/head/title/text()")[0].strip()
        except IndexError:
            return None

    @property
    def forms(self):
        self.parse()
        forms = []
        for i, form in enumerate(self._tree.forms):
            items = {'__number': i}
            for name, value in form.items():
                if name in ('name', 'id', 'class'):
                    items[name] = value
            forms.append(items)
        return forms

    @property
    def submits(self):
        assert self._form is not None, "A form must be selected: %s" % self.forms
        submit_lst = self._form.xpath("//input[@type='submit']")
        assert len(submit_lst) > 0, "The selected form must contain a submit button"
        submits = []
        for i, submit in enumerate(submit_lst):
            items = {'__number': i}
            for name, value in submit.items():
                if name in ('name', 'value'):
                    items[name] = value
            submits.append(items)
        return submits

    def xpath(self, *argv, **kwargs):
        self.parse()
        return self._tree.xpath(*argv, **kwargs)

    def set_follow(self, switch):
        self._curl.setopt(pycurl.FOLLOWLOCATION, 1 if switch else 0)

    def set_debug(self, switch):
        def debug(typ, msg):
            indicators = {pycurl.INFOTYPE_TEXT: '%',
                          pycurl.INFOTYPE_HEADER_IN: '<',
                          pycurl.INFOTYPE_HEADER_OUT: '>',
                          pycurl.INFOTYPE_DATA_OUT: '>>'}
            if typ in indicators.keys():
                print "%(ind)s %(msg)s" % {'ind': indicators[typ],
                                           'msg': msg.strip()}
        self._curl.setopt(pycurl.VERBOSE, 1 if switch else 0)
        self._curl.setopt(pycurl.DEBUGFUNCTION, debug)
Python
0.000074
@@ -1020,16 +1020,114 @@
 cookies%0A + self._curl.setopt(pycurl.CONNECTTIMEOUT, 2)%0A self._curl.setopt(pycurl.TIMEOUT, 4);%0A
080bd6bc18baff7fa89307e486c72cfe906a9f25
save analysis output to `analysis_$METHOD_$N.xls` instead of `analysis.xls`
cea/analysis/sensitivity/sensitivity_demand_analyze.py
cea/analysis/sensitivity/sensitivity_demand_analyze.py
""" Analyze the results in the samples folder and write them out to an Excel file. This script assumes: - all the results have been added to `--samples-folder` in the format `result.%i.csv`, with `%i` replaced by the index into the samples array. - each result file has the same list of columns (the `--output-parameters` for the simulations were the same) - the `analyze_sensitivity` function is called with the same method and arguments as the sampling routine. """ import os import numpy as np import pickle import pandas as pd from SALib.analyze import sobol from SALib.analyze import morris def analyze_sensitivity(samples_path, method, parameters): """Run the analysis for each output parameter""" with open(os.path.join(args.samples_folder, 'problem.pickle'), 'r') as f: problem = pickle.load(f) samples = np.load(os.path.join(samples_path, 'samples.npy')) simulations = read_results(samples_path, len(samples)) buildings_num = simulations[0].shape[0] writer = pd.ExcelWriter(os.path.join(samples_path, 'analysis.xls')) output_parameters = list(simulations[0].columns[1:]) for parameter in output_parameters: results_1 = [] results_2 = [] results_3 = [] for building in range(buildings_num): simulations_parameter = np.array([x.loc[building, parameter] for x in simulations]) if method == 'sobol': VAR1, VAR2, VAR3 = 'S1', 'ST', 'ST_conf' sobol_result = sobol.analyze(problem, simulations_parameter, calc_second_order=parameters['calc_second_order']) results_1.append(sobol_result['S1']) results_2.append(sobol_result['ST']) results_3.append(sobol_result['ST_conf']) elif method == 'morris': VAR1, VAR2, VAR3 = 'mu_star', 'sigma', 'mu_star_conf' morris_result = morris.analyze(problem, samples, simulations_parameter, grid_jump=parameters['grid_jump'], num_levels=parameters['num_levels']) results_1.append(morris_result['mu_star']) results_2.append(morris_result['sigma']) results_3.append(morris_result['mu_star_conf']) else: raise ValueError('Invalid sampler method: %s' %s) pd.DataFrame(results_1, columns=problem['names']).to_excel(writer, parameter + VAR1) pd.DataFrame(results_2, columns=problem['names']).to_excel(writer, parameter + VAR2) pd.DataFrame(results_3, columns=problem['names']).to_excel(writer, parameter + VAR3) writer.save() def read_results(samples_folder, samples_count): """Read each `results.%i.csv` file into a DataFrame and return them as a list.""" results = [] for i in range(samples_count): result_file = os.path.join(samples_folder, 'result.%i.csv' % i) df = pd.read_csv(result_file) results.append(df) return results if __name__ == '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument('-S', '--samples-folder', default='.', help='folder to place the output files (samples.npy, problem.pickle) in') parser.add_argument('-m', '--method', help='Method to use valid values: "morris" (default), "sobol"', default='morris') parser.add_argument('--calc-second-order', help='(sobol) calc_second_order parameter', type=bool, default=False) parser.add_argument('--grid-jump', help='(morris) grid_jump parameter', type=int, default=2) parser.add_argument('--num-levels', help='(morris) num_levels parameter', type=int, default=4) args = parser.parse_args() sampler_params = {} if args.method == 'morris': sampler_params['grid_jump'] = args.grid_jump sampler_params['num_levels'] = args.num_levels elif args.method == 'sobol': sampler_params['calc_second_order'] = args.calc_second_order analyze_sensitivity(samples_path=args.samples_folder, method=args.method, parameters=sampler_params)
Python
0.000004
@@ -881,24 +881,57 @@
 les.npy'))%0A%0A + samples_count = len(samples)%0A simulati
@@ -963,28 +963,29 @@
 s_path, -len( samples -) +_count )%0A bu
@@ -1090,13 +1090,45 @@
 ysis -.xls' +_%25s_%25i.xls' %25 (method, samples_count) ))%0A%0A
a80336a2d305d4c7bfafb1448a9a6d090cc1751b
add Cell.get_volume()
c4/cell.py
c4/cell.py
from math import radians, sin, cos, acos, sqrt, pi

class Cell(object):
    def __init__(self, parameters):
        if parameters is None:
            self.cell = None
            return
        assert len(parameters) == 6
        self.a, self.b, self.c = parameters[:3]
        self.alpha, self.beta, self.gamma = parameters[3:]
        self.cell = parameters

    def __str__(self):
        return str(self.cell)

    # The orthogonalization matrix we use is described in ITfC B p.262:
    # "An alternative mode of orthogonalization, used by the Protein
    # Data Bank and most programs, is to align the a1 axis of the unit
    # cell with the Cartesian X1 axis, and to align the a*3 aixs with the
    # Cartesian X3 axis."
    def get_orth_matrix(self):
        a, b, c = self.a, self.b, self.c
        alpha = radians(self.alpha)
        beta = radians(self.beta)
        gamma = radians(self.gamma)
        alpha_star = acos((cos(gamma)*cos(beta) - cos(alpha))
                          / (sin(beta)*sin(gamma)))
        return Mat3(a, b*cos(gamma), c*cos(beta),
                    0, b*sin(gamma), -c*sin(beta)*cos(alpha_star),
                    0, 0, c*sin(beta)*sin(alpha_star))

    def get_frac_matrix(self):
        return self.get_orth_matrix().inverse()

    def max_shift_in_mapping(self, other):
        trans = self.get_frac_matrix().dot(other.get_orth_matrix())
        return (trans - Mat3.identity()).euclidean_norm()

    # This affects only primitive orthorhombic (P 2x 2x 2x).
    # Convert the "reference" (symmetry-based) settings to the "standard"
    # (cell-based) settings. See the SETTING keyword in POINTLESS.
    def to_standard(self):
        if self.alpha == self.beta == self.gamma == 90 and (
                self.a > self.b or self.b > self.c):
            return Cell(sorted(self.cell[:3]) + self.cell[3:])
        return self


class Mat3(object):
    "Matrix 3x3"
    def __init__(self, *args):
        if len(args) == 1:
            self.m = tuple(args[0])
        else:
            self.m = args
        assert len(self.m) == 9

    def __getitem__(self, index):
        return self.m[index]

    def __str__(self):
        return "[%g %g %g; %g %g %g; %g %g %g]" % self.m

    def __repr__(self):
        return "Mat3" + str(self.m)

    def __add__(self, other):
        assert isinstance(other, Mat3)
        return Mat3(a+b for a,b in zip(self.m, other.m))

    def __sub__(self, other):
        assert isinstance(other, Mat3)
        return Mat3(a-b for a,b in zip(self.m, other.m))

    # scalar must be float
    def __mul__(self, scalar):
        assert isinstance(scalar, float)
        return Mat3(a*scalar for a in self.m)

    @staticmethod
    def identity():
        return Mat3(1, 0, 0,
                    0, 1, 0,
                    0, 0, 1)

    def transpose(self):
        m = self.m
        return Mat3(m[0], m[3], m[6],
                    m[1], m[4], m[7],
                    m[2], m[5], m[8])

    def dot(self, other):
        a = self.m
        b = other.m
        return Mat3(sum(a[3*row+i] * b[3*i+col] for i in range(3))
                    for row in range(3) for col in range(3))

    def det(self):
        m = self.m
        return (m[0] * (m[4] * m[8] - m[5] * m[7])
                - m[1] * (m[3] * m[8] - m[5] * m[6])
                + m[2] * (m[3] * m[7] - m[4] * m[6]))

    def trace(self):
        m = self.m
        return m[0] + m[4] + m[8]

    def inverse(self):
        d = self.det()
        if d == 0:
            raise ValueError("Matrix is not invertible")
        m = self.m
        return Mat3(( m[4] * m[8] - m[5] * m[7]) / d,
                    (-m[1] * m[8] + m[2] * m[7]) / d,
                    ( m[1] * m[5] - m[2] * m[4]) / d,
                    (-m[3] * m[8] + m[5] * m[6]) / d,
                    ( m[0] * m[8] - m[2] * m[6]) / d,
                    (-m[0] * m[5] + m[2] * m[3]) / d,
                    ( m[3] * m[7] - m[4] * m[6]) / d,
                    (-m[0] * m[7] + m[1] * m[6]) / d,
                    ( m[0] * m[4] - m[1] * m[3]) / d)

    def induced_1norm(self): # aka 1-norm
        m = self.m
        return max(abs(m[0]) + abs(m[3]) + abs(m[6]),
                   abs(m[1]) + abs(m[4]) + abs(m[7]),
                   abs(m[2]) + abs(m[5]) + abs(m[8]))

    def euclidean_norm(self): # aka induced 2-norm
        A = self.dot(self.transpose())
        # now get the largest eigenvalue of A (A is symmetric)
        # http://en.wikipedia.org/wiki/Eigenvalue_algorithm#3.C3.973_matrices
        p1 = A[1]**2 + A[2]**2 + A[5]**2
        if p1 == 0:
            return max(A[0], A[4], A[8])
        q = A.trace() / 3.
        p2 = (A[0] - q)**2 + (A[4] - q)**2 + (A[8] - q)**2 + 2 * p1
        p = sqrt(p2 / 6.)
        B = (A - Mat3.identity() * q) * (1. / p)
        r = B.det() / 2.
        if r <= -1:
            phi = pi / 3.
        elif r >= 1:
            phi = 0
        else:
            phi = acos(r) / 3.
        eig1 = q + 2 * p * cos(phi)
        return sqrt(eig1)
Python
0.000001
@@ -356,16 +356,295 @@ meters%0A%0A + def get_volume(self):%0A ca = cos(radians(self.alpha))%0A cb = cos(radians(self.beta))%0A cg = cos(radians(self.gamma))%0A return self.a * self.b * self.c * sqrt((1 - ca*ca - cb*cb - cg*cg) +%0A 2 * ca*cb*cg)%0A%0A def
d5c30bdae34450b4052f83f773ef993e89fc8bef
Prepare 1.1 release
c4ddev.pyp
c4ddev.pyp
# Copyright (C) 2014-2016 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

__author__ = 'Niklas Rosenstein <rosensteinniklas@gmail.com>'
__version__ = '1.0'

import os
import sys
import c4d

_added_paths = []

def add_path(path, module=sys):
  if not os.path.isabs(path):
    path = os.path.join(os.path.dirname(__file__), path)
  if path not in module.path:
    module.path.append(path)
    _added_paths.append((module, path))

# The third party modules in this plugin should be available globally.
add_path('lib/py-shroud')
add_path('lib/requests')

import shroud
add_path('lib', module=shroud)
add_path('lib/py-localimport', module=shroud)

def load_extensions():
  extensions = []
  ext_dir = os.path.join(os.path.dirname(__file__), 'ext')
  for file in os.listdir(ext_dir):
    if file.endswith('.py'):
      extensions.append(shroud.require(os.path.join(ext_dir, file)))
  return extensions

extensions = load_extensions()

def PluginMessage(msg_type, data):
  if msg_type == c4d.C4DPL_RELOADPYTHONPLUGINS:
    for mod, path in _added_paths:
      try:
        mod.path.remove(path)
      except ValueError:
        pass
  for extension in extensions:
    if hasattr(extension, 'PluginMessage'):
      extension.PluginMessage(msg_type, data)
  return True
Python
0
@@ -1175,17 +1175,17 @@ __ = '1. -0 +1 '%0A%0Aimpor
8c0e1a976e6341d565140725d51562cc9021f90e
add hostname to all messages
cc/reqs.py
cc/reqs.py
import time

from cc.json import Struct, Field
from cc.message import CCMessage

__all__ = ['LogMessage', 'InfofileMessage', 'JobRequestMessage',
           'JobConfigReplyMessage', 'TaskRegisterMessage', 'TaskSendMessage']

class BaseMessage(Struct):
    req = Field(str)

    def send_to(self, sock):
        cmsg = CCMessage(jmsg = self)
        sock.send_multipart(cmsg.zmsg)

class LogMessage(BaseMessage):
    "log.*"
    level = Field(str)
    service_type = Field(str)
    job_name = Field(str)
    msg = Field(str)
    time = Field(float)
    pid = Field(int)
    line = Field(int)
    function = Field(str)

class InfofileMessage(BaseMessage):
    "pub.infofile"
    mtime = Field(float)
    filename = Field(str)
    body = Field(str)

class JobConfigRequestMessage(BaseMessage):
    "job.config"
    job_name = Field(str)

class JobConfigReplyMessage(BaseMessage):
    "job.config"
    job_name = Field(str)
    config = Field(dict)

class TaskRegisterMessage(BaseMessage):
    "req.task.register"
    host = Field(str)

class TaskSendMessage(BaseMessage):
    "req.task.send"
    host = Field(str)

def parse_json(js):
    return Struct.from_json(js)
Python
0.000001
@@ -71,16 +71,47 @@ CMessage +%0Afrom socket import gethostname %0A%0A__all_ @@ -284,16 +284,67 @@ eld(str) +%0A hostname = Field(str, default = gethostname()) %0A%0A de
89d9328696a01e70428fccfa890d847e91f5f5c4
Fix copy-paste bug
certifi.py
certifi.py
import platform

if platform.system() == "Windows":
    import wincertstore
    import atexit
    import ssl
    certfile = wincertstore.CertFile()
    certfile.addstore("CA")
    certfile.addstore("ROOT")
    atexit.register(certfile.close)  # cleanup and remove files on shutdown

    def where():
        return certfile

else:
    import ssl

    def where():
        return ssl.ssl.get_default_verify_paths().openssl_cafile
Python
0.000002
@@ -368,20 +368,16 @@ return -ssl. ssl.get_ @@ -409,12 +409,13 @@ enssl_cafile +%0A
07b6f19e48ad09b99368a5ddd968615f9111c797
Update Condition to str type
src/pipelines/alphafold_inference_pipeline.py
src/pipelines/alphafold_inference_pipeline.py
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Universal Alphafold Inference Pipeline."""

from google_cloud_pipeline_components.v1.custom_job import create_custom_training_job_from_component
from kfp.v2 import dsl

from src import config
from src.components import configure_run as ConfigureRunOp
from src.components import data_pipeline
from src.components import predict as PredictOp
from src.components import relax as RelaxOp

DataPipelineOp = create_custom_training_job_from_component(
    data_pipeline,
    display_name='Data Pipeline',
    machine_type=config.DATA_PIPELINE_MACHINE_TYPE,
    nfs_mounts=[dict(
        server=config.NFS_SERVER,
        path=config.NFS_PATH,
        mountPoint=config.NFS_MOUNT_POINT)],
    network=config.NETWORK
)


@dsl.pipeline(
    name='alphafold-inference-pipeline',
    description='AlphaFold inference using original data pipeline.'
)
def alphafold_inference_pipeline(
    sequence_path: str,
    project: str,
    region: str,
    max_template_date: str,
    model_preset: str = 'monomer',
    use_small_bfd: bool = True,
    num_multimer_predictions_per_model: int = 5,
    is_run_relax: bool = True
):
    """Universal Alphafold Inference Pipeline."""

    run_config = ConfigureRunOp(
        sequence_path=sequence_path,
        model_preset=model_preset,
        num_multimer_predictions_per_model=num_multimer_predictions_per_model,
    ).set_display_name('Configure Pipeline Run')

    model_parameters = dsl.importer(
        artifact_uri=config.MODEL_PARAMS_GCS_LOCATION,
        artifact_class=dsl.Artifact,
        reimport=True
    ).set_display_name('Model parameters')

    reference_databases = dsl.importer(
        artifact_uri=config.NFS_MOUNT_POINT,
        artifact_class=dsl.Dataset,
        reimport=False,
        metadata={
            'uniref90': config.UNIREF90_PATH,
            'mgnify': config.MGNIFY_PATH,
            'bfd': config.BFD_PATH,
            'small_bfd': config.SMALL_BFD_PATH,
            'uniclust30': config.UNICLUST30_PATH,
            'pdb70': config.PDB70_PATH,
            'pdb_mmcif': config.PDB_MMCIF_PATH,
            'pdb_obsolete': config.PDB_OBSOLETE_PATH,
            'pdb_seqres': config.PDB_SEQRES_PATH,
            'uniprot': config.UNIPROT_PATH,
        }
    ).set_display_name('Reference databases')

    data_pipeline = DataPipelineOp(
        project=project,
        location=region,
        ref_databases=reference_databases.output,
        sequence=run_config.outputs['sequence'],
        max_template_date=max_template_date,
        run_multimer_system=run_config.outputs['run_multimer_system'],
        use_small_bfd=use_small_bfd,
    ).set_display_name('Prepare Features')

    with dsl.ParallelFor(run_config.outputs['model_runners']) as model_runner:
        model_predict = PredictOp(
            model_features=data_pipeline.outputs['features'],
            model_params=model_parameters.output,
            model_name=model_runner.model_name,
            prediction_index=model_runner.prediction_index,
            run_multimer_system=run_config.outputs['run_multimer_system'],
            num_ensemble=run_config.outputs['num_ensemble'],
            random_seed=model_runner.random_seed
        ).set_display_name('Predict')
        model_predict.set_cpu_limit(config.CPU_LIMIT)
        model_predict.set_memory_limit(config.MEMORY_LIMIT)
        model_predict.set_gpu_limit(config.GPU_LIMIT)
        model_predict.add_node_selector_constraint(
            config.GKE_ACCELERATOR_KEY,
            config.GPU_TYPE)
        model_predict.set_env_variable(
            'TF_FORCE_UNIFIED_MEMORY',
            config.TF_FORCE_UNIFIED_MEMORY)
        model_predict.set_env_variable(
            'XLA_PYTHON_CLIENT_MEM_FRACTION',
            config.XLA_PYTHON_CLIENT_MEM_FRACTION)

        with dsl.Condition(is_run_relax == True):
            relax_protein = RelaxOp(
                unrelaxed_protein=model_predict.outputs['unrelaxed_protein'],
                use_gpu=True,
            ).set_display_name('Relax protein')
            relax_protein.set_cpu_limit(config.RELAX_CPU_LIMIT)
            relax_protein.set_memory_limit(config.RELAX_MEMORY_LIMIT)
            relax_protein.set_gpu_limit(config.RELAX_GPU_LIMIT)
            relax_protein.add_node_selector_constraint(
                config.GKE_ACCELERATOR_KEY,
                config.RELAX_GPU_TYPE)
            relax_protein.set_env_variable(
                'TF_FORCE_UNIFIED_MEMORY',
                config.TF_FORCE_UNIFIED_MEMORY)
            relax_protein.set_env_variable(
                'XLA_PYTHON_CLIENT_MEM_FRACTION',
                config.XLA_PYTHON_CLIENT_MEM_FRACTION)
Python
0
@@ -1668,27 +1668,29 @@ _relax: -bool = True +str = 'relax' %0A):%0A %22%22 @@ -4200,20 +4200,23 @@ elax == -True +'relax' ):%0A
dffaf115f569624b5a484e391cbb8e205dbb1a93
Make date conversion more resilient (#245)
src/pipelines/epidemiology/us_dc_authority.py
src/pipelines/epidemiology/us_dc_authority.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from io import BytesIO
from pathlib import Path
from typing import Any, Dict, List

from bs4 import BeautifulSoup
from pandas import DataFrame

from lib.cast import safe_float_cast
from lib.io import read_file
from lib.net import download_snapshot, download
from lib.pipeline import DataSource
from lib.utils import pivot_table_date_columns, table_rename


def _preprocess_age_sex(data: DataFrame) -> DataFrame:
    data = data.iloc[1:].set_index(data.columns[0])
    return pivot_table_date_columns(data, value_name="total_deceased").reset_index()


def _parse_sex(data: DataFrame) -> DataFrame:
    data = _preprocess_age_sex(data)
    data = data.rename(columns={"index": "sex"})
    data = data[data.sex != "All"]
    data.sex = data.sex.apply({"Male": "male", "Female": "female"}.get)
    return data


def _parse_age(data: DataFrame) -> DataFrame:
    data = _preprocess_age_sex(data)
    data = data.rename(columns={"index": "age"})
    data = data[data.age != "All"]
    data.age = data.age.str.replace("<", "0-")
    data.age = data.age.str.replace("+", "-")
    return data


def _parse_summary(data: DataFrame) -> DataFrame:
    data = data[data.columns[1:]]
    data.columns = ["statistic"] + list(data.columns[1:])
    data = data.dropna(subset=data.columns[1:], how="all")
    data = pivot_table_date_columns(data.set_index("statistic"), value_name="statistic")
    data = data.reset_index().dropna(subset=["date"])
    data.statistic = data.statistic.apply(safe_float_cast).astype(float)
    data = data.pivot_table(index="date", columns=["index"], values="statistic")
    data = data.reset_index()

    data = table_rename(
        data,
        {
            "date": "date",
            "Total Positives": "total_confirmed",
            "Number of Deaths": "total_deceased",
            "Total Overall Tested": "total_tested",
            "Cleared From Isolation": "total_recovered",
            "Total COVID-19 Patients in DC Hospitals": "total_hospitalized",
            "Total COVID-19 Patients in ICU": "total_intensive_care",
        },
        drop=True,
    )

    return data


_sheet_processors = {
    "Overal Stats": _parse_summary,
    "Lives Lost by Age": _parse_age,
    "Lives Lost by Sex": _parse_sex,
}


class DistrictColumbiaDataSource(DataSource):
    def fetch(
        self, output_folder: Path, cache: Dict[str, str], fetch_opts: List[Dict[str, Any]]
    ) -> List[str]:
        # The link to the spreadsheet changes daily, so we parse the HTML to find the link every
        # time and download the latest version
        buffer = BytesIO()
        src_opts = fetch_opts[0]
        download(src_opts["url"], buffer)

        page = BeautifulSoup(buffer.getvalue().decode("utf8"), "lxml")
        for link in page.findAll("a"):
            if "href" in link.attrs and link.attrs.get("href").endswith("xlsx"):
                href = link.attrs.get("href")
                if href.startswith("/"):
                    href = "https://" + src_opts["url"].split("//")[1].split("/")[0] + href
                return [download_snapshot(href, output_folder, **src_opts.get("opts"))]

        raise RuntimeError("No link to XLSX file found in page")

    def parse(self, sources: Dict[str, str], aux: Dict[str, DataFrame], **parse_opts) -> DataFrame:
        data = read_file(sources[0], sheet_name=parse_opts.get("sheet_name"))

        # Process the individual sheet
        data = _sheet_processors[parse_opts.get("sheet_name")](data)

        # Fix up the date format
        data.date = data.date.apply(lambda x: x.date().isoformat())

        # Add a key to all the records (state-level only)
        data["key"] = "US_DC"

        return data
Python
0.00013
@@ -861,16 +861,56 @@ aSource%0A +from lib.time import datetime_isoformat%0A from lib @@ -4126,25 +4126,31 @@ data -. +%5B%22 date +%22%5D = data -. +%5B%22 date +%22%5D .app @@ -4166,17 +4166,17 @@ x: -x. date -(). +time_ isof @@ -4181,16 +4181,38 @@ oformat( +x, %22%25Y-%25m-%25d %25H:%25M:%25S%22 ))%0A%0A
97e6153551708ae888b3bb2999e33a62df92061d
Set LogFile.parsed to False at end of LogFile.clean().
src/cclib/parser/logfileparser.py
src/cclib/parser/logfileparser.py
""" cclib (http://cclib.sf.net) is (c) 2006, the cclib development team and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html). """ __revision__ = "$Revision$" import logging, sys import Numeric import utils class Logfile(object): """Abstract class for logfile objects. Subclasses: ADF, GAMESS, GAMESSUK, Gaussian, Jaguar Attributes: aonames -- atomic orbital names (list) aooverlaps -- atomic orbital overlap matrix (array[2]) atomcoords -- atom coordinates (array[3], angstroms) atomnos -- atomic numbers (array[1]) coreelectrons -- number of core electrons in an atom's pseudopotential (array[1]) etenergies -- energy of electronic transitions (array[1], 1/cm) etoscs -- oscillator strength of electronic transition (array[1]) etrotats -- rotatory strength of electronic transitions (array[1], ??) etsecs -- singly-excited configurations comprising each electronic transition (??) etsyms -- symmetry of electronic transition (list) geotargets -- targets for convergence of the geometry (array[1]) geovalues -- current values for convergence of the geometry (array[1], same units as geotargets) homos -- molecular orbital index of HOMO(s) (array[1]) mocoeffs -- molecular orbital coefficients (array[3]) moenergies -- orbital energies (array[2], eV) mosyms -- orbital symmetries (list[2]) mpenergies -- molecule electronic energy after Moller-Plesset correcetion (array[1-4], eV) natom -- number of atoms (integer) nbasis -- number of basis functions (integer) nmo -- number of linearly-independent basis functions (integer) scfenergies -- the electronic energy of the molecule (array[1], eV) scftargets -- targets for convergence of the SCF (array[1]) scfvalues -- current values for convergence of the SCF (array[2], same units as scftargets) vibfreqs -- vibrational frequencies (array, 1/cm) vibirs -- IR intensity (array, km/m) vibramans -- Raman intensity (array, A^4/Da) vibsyms -- symmetry of vibrations (list) (1) The term 'array' currently refers to a Numeric array (2) The number of dimensions of an array is given in square brackets (3) Python indexes arrays/lists starting at zero. So if homos==[10], then the 11th molecular orbital is the HOMO """ def __init__(self,filename,progress=None, loglevel=logging.INFO,logname="Log"): """Initialise the Logfile object. Typically called by subclasses in their own __init__ methods. 
""" self.filename = filename self.progress = progress self.parsed = False self.loglevel = loglevel self.logname = logname self.table = utils.PeriodicTable() self.attrlist = ['aonames', 'aooverlaps', 'atomcoords', 'atomnos', 'coreelectrons', 'etenergies', 'etoscs', 'etrotats', 'etsecs', 'etsyms', 'fonames', 'fooverlaps', 'geotargets', 'geovalues', 'homos', 'mocoeffs', 'moenergies', 'mosyms', 'mpenergies', 'natom', 'nbasis', 'nmo', 'scfenergies', 'scftargets', 'scfvalues', 'vibfreqs', 'vibirs', 'vibramans', 'vibsyms'] self._toarray = ['atomcoords', 'etenergies', 'etoscs', 'geotargets', 'geovalues', 'mpenergies', 'scfenergies', 'scftargets', 'vibdisps', 'vibfreqs', 'vibirs', 'vibramans'] self._tolistofarrays = ['moenergies', 'scfvalues'] # Set up the logger self.logger = logging.getLogger('%s %s' % (self.logname,self.filename)) self.logger.setLevel(self.loglevel) handler = logging.StreamHandler(sys.stdout) handler.setFormatter(logging.Formatter("[%(name)s %(levelname)s] %(message)s")) self.logger.addHandler(handler) def parse(self, fupdate=0.05, cupdate=0.02): """Parse the logfile, using the assumed extract method of the child.""" if hasattr(self, "extract"): # This method does the actual parsing of text self.extract(fupdate=fupdate, cupdate=cupdate) # Make sure selected attributes are arrays for attr in self._toarray: if hasattr(self, attr): if type(getattr(self, attr)) is not Numeric.arraytype: setattr(self, attr, Numeric.array(getattr(self, attr), 'f')) # Make sure selected attrbutes are lsits of arrays for attr in self._tolistofarrays: if hasattr(self, attr): if not Numeric.alltrue([type(x) is Numeric.arraytype for x in getattr(self, attr)]): setattr(self, attr, [Numeric.array(x, 'f') for x in getattr(self, attr)]) else: self.logger.info("Method parse() was called from generaic LogFile class.") def clean(self): """Delete all of the parsed attributes.""" for attr in self.attrlist: if hasattr(self, attr): delattr(self, attr) def normalisesym(self,symlabel): """Standardise the symmetry labels between parsers. This method should be overwritten by individual parsers, and should contain appropriate doctests. If is not overwritten, this is detected as an error by unit tests. """ return "ERROR: This should be overwritten by this subclass" def float(self,number): """Convert a string to a float avoiding the problem with Ds. >>> t = Logfile("dummyfile") >>> t.float("123.2323E+02") 12323.23 >>> t.float("123.2323D+02") 12323.23 """ number = number.replace("D","E") return float(number) if __name__=="__main__": import doctest,logfileparser doctest.testmod(logfileparser,verbose=False)
Python
0.999772
@@ -5327,16 +5327,45 @@ , attr)%0D +%0A self.parsed = False%0D %0A%0D%0A d
57d9d4fe1b46d9dd45629dc5fc461c0b8c51c5ec
Fix music helper
src/helpers/musicHelper.py
src/helpers/musicHelper.py
#import pygame
import os
import sys
import pyglet

sys.path.append(os.path.dirname(__file__) + "/../audios/letters")

pyglet.options['audio'] = ('openal', 'pulse', 'silent')
player = pyglet.media.Player()

#pygame.mixer.init()

def play_file(file_path):
    pass
    #pygame.mixer.music.load(file_path)
    # playAudioLoaded()

def play_word(word):
    #CHANNEL.stop()

    # pygame.mixer.music.load(os.path.dirname(__file__) + "/../audios/letters/a.mp3")
    # pygame.mixer.music.play()
    # pygame.mixer.music.queue(os.path.dirname(__file__) + "/../audios/letters/e.mp3")

    # pygame.mixer.music.stop()
    first = True
    for letter in word:
        path = str(os.path.dirname(__file__) + "/../audios/letters/" + letter.lower() + ".mp3")
        src = pyglet.media.load(path, streaming=False)
        player.queue(src)
        # if first:
        #     first = False
        #     pygame.mixer.music.load(os.path.dirname(__file__) + "/../audios/letters/" + letter.lower() + ".mp3")
            #pygame.mixer.music.play()
        # else:
        #     pygame.mixer.music.queue(os.path.dirname(__file__) + "/../audios/letters/" + letter.lower() + ".mp3")
        #_play_letter(letter)
    # pygame.mixer.music.play()

    player.play()

def _play_letter(letter):
    pass
    #pygame.mixer.music.load("audios/letters/" + letter.lower() + ".mp3")
    #pygame.mixer.music.play()
    #while pygame.mixer.music.get_busy() == True:
    #	continue

#def playAudioLoaded():
Python
0.000003
@@ -1,9 +1,8 @@ -# import p @@ -31,22 +31,8 @@ sys -%0Aimport pyglet %0A%0Asy @@ -103,1133 +103,98 @@ %0Apyg -let.options%5B'audio'%5D = ('openal', 'pulse', 'silent')%0Aplayer = pyglet.media.Player()%0A%0A#pygame.mixer.init()%0A%0Adef play_file(file_path):%0A pass%0A #pygame.mixer.music.load(file_path)%0A # playAudioLoaded()%0A%0Adef play_word(word):%0A #CHANNEL.stop()%0A%0A # pygame.mixer.music.load(os.path.dirname(__file__) + %22/../audios/letters/a.mp3%22)%0A # pygame.mixer.music.play()%0A # pygame.mixer.music.queue(os.path.dirname(__file__) + %22/../audios/letters/e.mp3%22)%0A%0A # pygame.mixer.music.stop()%0A first = True%0A for letter in word:%0A path = str(os.path.dirname(__file__) + %22/../audios/letters/%22 + letter.lower() + %22.mp3%22)%0A src = pyglet.media.load(path, streaming=False)%0A player.queue(src)%0A # if first:%0A # first = False%0A # pygame.mixer.music.load(os.path.dirname(__file__) + %22/../audios/letters/%22 + letter.lower() + %22.mp3%22)%0A #pygame.mixer.music.play()%0A # else:%0A # pygame.mixer.music.queue(os.path.dirname(__file__) + %22/../audios/letters/%22 + letter.lower() + %22.mp3%22)%0A #_play_letter(letter)%0A # pygame.mixer.music.play()%0A %0A player.play( +ame.mixer.init()%0A%0Adef play_word(word):%0A for letter in word:%0A _play_letter(letter )%0A%0Ad @@ -226,18 +226,8 @@ -pass%0A # pyga @@ -295,17 +295,16 @@ 3%22)%0A -# pygame.m @@ -325,17 +325,16 @@ y()%0A -# while py @@ -374,17 +374,16 @@ ue:%0A -# %09continu @@ -389,29 +389,5 @@ ue%0A%0A -#def playAudioLoaded():%0A %09
38d96e4ddbe44af8f028dfb29eca17dc8ecd478d
test case for clean module
src/html2latex/__init__.py
src/html2latex/__init__.py
from .html2latex import html2latex
html2latex

try:
    import pkg_resources
    pkg_resources.declare_namespace(__name__)
except ImportError:
    import pkgutil
    __path__ = pkgutil.extend_path(__path__, __name__)
Python
0
@@ -40,17 +40,16 @@ l2latex%0A -%0A try:%0A
46f15a00d2324da4b9f12c9168ddda8dddb1b607
use notebook-style for plot_logistic_path.py (#22536)
examples/linear_model/plot_logistic_path.py
examples/linear_model/plot_logistic_path.py
""" ============================================== Regularization path of L1- Logistic Regression ============================================== Train l1-penalized logistic regression models on a binary classification problem derived from the Iris dataset. The models are ordered from strongest regularized to least regularized. The 4 coefficients of the models are collected and plotted as a "regularization path": on the left-hand side of the figure (strong regularizers), all the coefficients are exactly 0. When regularization gets progressively looser, coefficients can get non-zero values one after the other. Here we choose the liblinear solver because it can efficiently optimize for the Logistic Regression loss with a non-smooth, sparsity inducing l1 penalty. Also note that we set a low value for the tolerance to make sure that the model has converged before collecting the coefficients. We also use warm_start=True which means that the coefficients of the models are reused to initialize the next model fit to speed-up the computation of the full-path. """ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # License: BSD 3 clause from time import time import numpy as np import matplotlib.pyplot as plt from sklearn import linear_model from sklearn import datasets from sklearn.svm import l1_min_c iris = datasets.load_iris() X = iris.data y = iris.target X = X[y != 2] y = y[y != 2] X /= X.max() # Normalize X to speed-up convergence # ############################################################################# # Demo path functions cs = l1_min_c(X, y, loss="log") * np.logspace(0, 7, 16) print("Computing regularization path ...") start = time() clf = linear_model.LogisticRegression( penalty="l1", solver="liblinear", tol=1e-6, max_iter=int(1e6), warm_start=True, intercept_scaling=10000.0, ) coefs_ = [] for c in cs: clf.set_params(C=c) clf.fit(X, y) coefs_.append(clf.coef_.ravel().copy()) print("This took %0.3fs" % (time() - start)) coefs_ = np.array(coefs_) plt.plot(np.log10(cs), coefs_, marker="o") ymin, ymax = plt.ylim() plt.xlabel("log(C)") plt.ylabel("Coefficients") plt.title("Logistic Regression Path") plt.axis("tight") plt.show()
Python
0
@@ -1159,537 +1159,418 @@ se%0A%0A -from time import time%0Aimport numpy as np%0Aimport matplotlib.pyplot as plt%0A%0Afrom sklearn import linear_model%0Afrom sklearn import datasets%0Afrom sklearn.svm import l1_min_c%0A%0Airis = datasets.load_iris()%0AX = iris.data%0Ay = iris.target%0A%0AX = X%5By != 2%5D%0Ay = y%5By != 2%5D%0A%0AX /= X.max() # Normalize X to speed-up convergence%0A%0A# #############################################################################%0A# Demo path functions%0A%0Acs = l1_min_c(X, y, loss=%22log%22) * np.logspace(0, 7, 16)%0A%0A%0Aprint(%22Computing regularization path ...%22)%0Astart = time() +# %25%25%0A# Load data%0A# ---------%0A%0Afrom sklearn import datasets%0A%0Airis = datasets.load_iris()%0AX = iris.data%0Ay = iris.target%0A%0AX = X%5By != 2%5D%0Ay = y%5By != 2%5D%0A%0AX /= X.max() # Normalize X to speed-up convergence%0A%0A# %25%25%0A# Compute regularization path%0A# ---------------------------%0A%0Aimport numpy as np%0A%0Afrom sklearn import linear_model%0Afrom sklearn.svm import l1_min_c%0A%0Acs = l1_min_c(X, y, loss=%22log%22) * np.logspace(0, 7, 16)%0A %0Aclf @@ -1853,79 +1853,128 @@ ())%0A -print(%22This took %250.3fs%22 %25 (time() - start))%0A%0Acoefs_ = np.array(coefs_) +%0Acoefs_ = np.array(coefs_)%0A%0A# %25%25%0A# Plot regularization path%0A# ------------------------%0A%0Aimport matplotlib.pyplot as plt%0A %0Aplt
d1a69904ba1d8072988aeb330157dbff20d0c5de
Remove unneeded self.client.
cred/test/util.py
cred/test/util.py
import os
import tempfile
import json
from functools import wraps

import flask.ext.testing
import flask.ext.sqlalchemy

import cred.database
from cred.app import app, api
from cred.routes import create_api_resources


# Constants used throughout the test suites
DEVICE = 'Thermostat'
LOCATION = 'Living Room'
EVENTS = ['Temperature']
SUBSCRIBE = {
    'Light': {'location': 'Living Room'},
    'Alarm': {}
}

app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
cred.database.db = flask.ext.sqlalchemy.SQLAlchemy(app)
create_api_resources(api)


def assertEqual(test_object, assertables):
    """Convenience method for asserting multiple items."""
    for value, expected_value in assertables.items():
        test_object.assertEqual(value, expected_value)


def authenticate(permission, alt_dev=None):
    """Decorator for authenticating a client with permissions."""
    def authenticate_decorator(fun):
        @wraps(fun)
        def wrapped(self, *args, **kwargs):
            self.authenticate_with_server(permission, alternate_device=alt_dev)
            fun(self, *args, **kwargs)
        return wrapped
    return authenticate_decorator


class BaseTestCase(flask.ext.testing.TestCase):
    SQLALCHEMY_DATABASE_URI = "sqlite://"
    TESTING = True

    def create_app(self):
        self.client = app.test_client()
        return app

    def setUp(self):
        """Create a SQLite database for quick testing."""
        cred.database.init_db(cred.database.db)
        self.session_key = None

    def tearDown(self):
        """Close the database file and unlink it."""
        cred.database.db.session.remove()
        cred.database.db.drop_all()

    def authenticate_with_server(self, permission, alternate_device=None):
        """Authenticate with the server."""
        from cred.models.apikey import APIKey as APIKeyModel
        from cred.resources.apikeys import generate_apikey
        device = DEVICE
        if alternate_device is not None:
            device = alternate_device
        apikey = APIKeyModel(generate_apikey(), permission)
        cred.database.db.session.add(apikey)
        cred.database.db.session.commit()
        req = json.dumps({
            'apikey': apikey.apikey,
            'device': device,
            'location': LOCATION,
            'subscribe': SUBSCRIBE
        })
        response = self.client.post(
            '/auth',
            data=req,
            content_type='application/json'
        )
        resp = json.loads(response.data.decode('utf-8'))
        self.session_key = resp['sessionKey']
        self.client_id = resp['id']
        return response
Python
0
@@ -1279,48 +1279,8 @@ f):%0A - self.client = app.test_client()%0A
587628f506ae58f1434d84a83fce51dc6f4d795e
Add functionality for fetching all Poloniex tickers
cryex/poloniex.py
cryex/poloniex.py
import time
import hmac
import hashlib
from decimal import Decimal
from datetime import datetime

try:
    from urllib.parse import urlencode
except ImportError:
    from urllib import urlencode

import requests

from cryex import core
from cryex.model import Ask, Bid, Trade
from cryex.coins.poloniex import POLONIEX_REPAIRS


class Poloniex(core.Client):
    """ Implementation for exchange: Poloniex """

    PUBLIC = 'https://poloniex.com/public'
    PRIVATE = 'https://poloniex.com/tradingApi'
    REPAIRS = POLONIEX_REPAIRS

    def __init__(self, key=None, secret=None):
        super(Poloniex, self).__init__()
        self.public = self.Public()
        self.private = self.Private(key, secret)

    ##################################################

    class Public(core.Public):
        """ Public API """

        def ticker(self, pair):
            """ Return ticker """

            new_pair = Poloniex.repair(pair)
            args = urlencode({'command': 'returnTicker'})
            url = Poloniex.PUBLIC + '?' + args
            data = requests.get(url).json()[new_pair]

            return {
                'exchange': 'poloniex',
                'last': Decimal(data['last']),
                'pair': pair,
                'volume24h': Decimal(data['quoteVolume']),
                'high24h': Decimal(data['high24hr']),
                'low24h': Decimal(data['low24hr']),
            }

        def depth(self, pair, depth=1000):
            """ Return order book """

            new_pair = Poloniex.repair(pair)
            args = urlencode({
                'command': 'returnOrderBook',
                'currencyPair': new_pair,
                'depth': depth
            })
            url = Poloniex.PUBLIC + '?' + args
            data = requests.get(url).json()

            asks = [Ask(i[0], i[1]) for i in data['asks']]
            bids = [Bid(i[0], i[1]) for i in data['bids']]
            return (asks, bids)

        def trades(self, pair, start=None, end=None):
            """ Return public trade history

            Arguments:
                pair - cryptocurrency pair
                (optional) start - start timestamp
                end - end timestamp
            """
            new_pair = Poloniex.repair(pair)
            args = urlencode({
                'command': 'returnTradeHistory',
                'currencyPair': new_pair,
            })
            url = Poloniex.PUBLIC + '?' + args
            data = requests.get(url).json()

            def new_trade(i):
                """ Parse UTC Date and create a new Trade object """
                dateformat = '%Y-%m-%d %H:%M:%S'
                rate = Decimal(i['rate'])
                amount = Decimal(i['amount'])
                date = datetime.strptime(i['date'], dateformat)
                return Trade(i['globalTradeID'], rate, amount, i['type'], date)
            # ---
            return [new_trade(i) for i in data]

        def currencies(self):
            return [c for c in Poloniex.REPAIRS.keys() if '_' not in c]

    ##################################################

    class Private(core.Private):
        """ Private API """

        def __init__(self, key, secret):
            self.key = key
            self.secret = secret

        # ############## HELPERS ###################################

        def sign(self, data):
            encoded = urlencode(data).encode('utf-8')
            return hmac.new(
                self.secret.encode('utf-8'),
                encoded,
                hashlib.sha512).hexdigest()

        def post(self, params):
            """ Make a post request to Poloniex """
            # Add nonce
            params['nonce'] = int(time.time() * 1000)

            # Sign params and create headers
            sig = self.sign(params)
            headers = {'Key': self.key, 'Sign': sig}

            # Fetch and return json
            res = requests.post(Poloniex.PRIVATE, params, headers=headers)
            return res.json()

        # ############## COMMANDS ###################################

        def balances(self, symbol=None):
            """ Return account balances.

            Arguments:
                (optional) symbol - return balance for that symbol (currency)
            """
            response = self.post({'command': 'returnBalances'})
            return response[Poloniex.REPAIRS[symbol]] if symbol else response

        def buy(self, pair, price, amount):
            """ Launch a new buy order

            Arguments:
                pair - currency pair, e.g. eth_btc
                price - the price to pay
                amount - the amount to trade
            """
            new_pair = Poloniex.REPAIRS[pair]
            return self.post({
                'command': 'buy',
                'currencyPair': new_pair,
                'rate': price,
                'amount': amount
            })

        def sell(self, pair, price, amount):
            """ Launch a new sell order

            Arguments:
                pair - currency pair, e.g. eth_btc
                price - the price to pay
                amount - the amount to trade
            """
            new_pair = Poloniex.REPAIRS[pair]
            return self.post({
                'command': 'sell',
                'currencyPair': new_pair,
                'rate': price,
                'amount': amount
            })

        def stop(self, order_id):
            """ Cancel order """
            return self.post({
                'command': 'cancelOrder',
                'orderNumber': order_id,
            })
Python
0
@@ -826,16 +826,17 @@ def +_ ticker(s @@ -848,250 +848,16 @@ pair -):%0A %22%22%22 Return ticker %22%22%22%0A%0A new_pair = Poloniex.repair(pair)%0A args = urlencode(%7B'command': 'returnTicker'%7D)%0A url = Poloniex.PUBLIC + '?' + args%0A data = requests.get(url).json()%5Bnew_pair%5D%0A +, data): %0A @@ -1148,32 +1148,32 @@ ta%5B'low24hr'%5D),%0A - %7D%0A%0A @@ -1171,16 +1171,718 @@ %7D%0A%0A + def tickers(self):%0A %22%22%22 Return all Poloniex's live tickers with non-normalized%0A pair names %22%22%22%0A args = urlencode(%7B'command': 'returnTicker'%7D)%0A url = Poloniex.PUBLIC + '?' + args%0A return %5Bself._ticker(pair.lower(), data)%0A for pair, data in requests.get(url).json().items()%5D%0A%0A def ticker(self, pair):%0A %22%22%22 Return ticker with normalized pair name %22%22%22%0A%0A new_pair = Poloniex.repair(pair)%0A args = urlencode(%7B'command': 'returnTicker'%7D)%0A url = Poloniex.PUBLIC + '?' + args%0A data = requests.get(url).json()%5Bnew_pair%5D%0A return self._ticker(new_pair, data)%0A%0A
97ef6b35aad6f6d459673aff0a760f8159e39a70
Remove 'setcachsize' from zoneinfo __all__ list
dateutil/zoneinfo/__init__.py
dateutil/zoneinfo/__init__.py
# -*- coding: utf-8 -*-
import logging
import os
import warnings
import tempfile
import shutil

from subprocess import check_call
from tarfile import TarFile
from pkgutil import get_data
from io import BytesIO
from contextlib import closing

from dateutil.tz import tzfile

__all__ = ["setcachesize", "gettz", "rebuild"]

_ZONEFILENAME = "dateutil-zoneinfo.tar.gz"

# python2.6 compatability. Note that TarFile.__exit__ != TarFile.close, but
# it's close enough for python2.6
_tar_open = TarFile.open
if not hasattr(TarFile, '__exit__'):
    def _tar_open(*args, **kwargs):
        return closing(TarFile.open(*args, **kwargs))


class tzfile(tzfile):
    def __reduce__(self):
        return (gettz, (self._filename,))


def getzoneinfofile_stream():
    try:
        return BytesIO(get_data(__name__, _ZONEFILENAME))
    except IOError as e:  # TODO  switch to FileNotFoundError?
        warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror))
        return None


class ZoneInfoFile(object):
    def __init__(self, zonefile_stream=None):
        if zonefile_stream is not None:
            with _tar_open(fileobj=zonefile_stream, mode='r') as tf:
                # dict comprehension does not work on python2.6
                # TODO: get back to the nicer syntax when we ditch python2.6
                # self.zones = {zf.name: tzfile(tf.extractfile(zf),
                #               filename = zf.name)
                #               for zf in tf.getmembers() if zf.isfile()}
                self.zones = dict((zf.name, tzfile(tf.extractfile(zf),
                                                   filename=zf.name))
                                  for zf in tf.getmembers() if zf.isfile())
                # deal with links: They'll point to their parent object. Less
                # waste of memory
                # links = {zl.name: self.zones[zl.linkname]
                #          for zl in tf.getmembers() if zl.islnk() or zl.issym()}
                links = dict((zl.name, self.zones[zl.linkname])
                             for zl in tf.getmembers()
                             if zl.islnk() or zl.issym())
                self.zones.update(links)
        else:
            self.zones = dict()


# The current API has gettz as a module function, although in fact it taps into
# a stateful class. So as a workaround for now, without changing the API, we
# will create a new "global" class instance the first time a user requests a
# timezone. Ugly, but adheres to the api.
#
# TODO: deprecate this.
_CLASS_ZONE_INSTANCE = list()


def gettz(name):
    if len(_CLASS_ZONE_INSTANCE) == 0:
        _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
    return _CLASS_ZONE_INSTANCE[0].zones.get(name)


def rebuild(filename, tag=None, format="gz", zonegroups=[]):
    """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar*

    filename is the timezone tarball from ftp.iana.org/tz.

    """
    tmpdir = tempfile.mkdtemp()
    zonedir = os.path.join(tmpdir, "zoneinfo")
    moduledir = os.path.dirname(__file__)
    try:
        with _tar_open(filename) as tf:
            for name in zonegroups:
                tf.extract(name, tmpdir)
            filepaths = [os.path.join(tmpdir, n) for n in zonegroups]
            try:
                check_call(["zic", "-d", zonedir] + filepaths)
            except OSError as e:
                if e.errno == 2:
                    logging.error(
                        "Could not find zic. Perhaps you need to install "
                        "libc-bin or some other package that provides it, "
                        "or it's not in your PATH?")
                raise
        target = os.path.join(moduledir, _ZONEFILENAME)
        with _tar_open(target, "w:%s" % format) as tf:
            for entry in os.listdir(zonedir):
                entrypath = os.path.join(zonedir, entry)
                tf.add(entrypath, entry)
    finally:
        shutil.rmtree(tmpdir)
Python
0
@@ -281,24 +281,8 @@ = %5B -%22setcachesize%22, %22get
633ec6de1e7eb665e99decacb57e4c6056e25b41
add the company_name in defualt address format
openerp/addons/base/res/res_country.py
openerp/addons/base/res/res_country.py
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from osv import fields, osv


class Country(osv.osv):
    _name = 'res.country'
    _description = 'Country'
    _columns = {
        'name': fields.char('Country Name', size=64,
            help='The full name of the country.', required=True, translate=True),
        'code': fields.char('Country Code', size=2,
            help='The ISO country code in two chars.\n'
                 'You can use this field for quick search.', required=True),
        'address_format': fields.text('Address Format', help="""You can state here the usual format to use for the \
addresses belonging to this country.\n\nYou can use the python-style string patern with all the field of the address \
(for example, use '%(street)s' to display the field 'street') plus
\n%(state_name)s: the name of the state
\n%(state_code)s: the code of the state
\n%(country_name)s: the name of the country
\n%(country_code)s: the code of the country"""),
    }
    _sql_constraints = [
        ('name_uniq', 'unique (name)',
            'The name of the country must be unique !'),
        ('code_uniq', 'unique (code)',
            'The code of the country must be unique !')
    ]
    _defaults = {
        'address_format': "%(street)s\n%(street2)s\n%(city)s,%(state_code)s %(zip)s\n%(country_name)s",
    }

    def name_search(self, cr, user, name='', args=None, operator='ilike',
                    context=None, limit=100):
        if not args:
            args=[]
        if not context:
            context={}
        ids = False
        if len(name) == 2:
            ids = self.search(cr, user, [('code', 'ilike', name)] + args,
                              limit=limit, context=context)
        if not ids:
            ids = self.search(cr, user, [('name', operator, name)] + args,
                              limit=limit, context=context)
        return self.name_get(cr, user, ids, context)
    _order='name'

    def create(self, cursor, user, vals, context=None):
        if 'code' in vals:
            vals['code'] = vals['code'].upper()
        return super(Country, self).create(cursor, user, vals,
                                           context=context)

    def write(self, cursor, user, ids, vals, context=None):
        if 'code' in vals:
            vals['code'] = vals['code'].upper()
        return super(Country, self).write(cursor, user, ids, vals,
                                          context=context)

Country()


class CountryState(osv.osv):
    _description="Country state"
    _name = 'res.country.state'
    _columns = {
        'country_id': fields.many2one('res.country', 'Country', required=True),
        'name': fields.char('State Name', size=64, required=True),
        'code': fields.char('State Code', size=3,
            help='The state code in three chars.\n', required=True),
    }

    def name_search(self, cr, user, name='', args=None, operator='ilike',
                    context=None, limit=100):
        if not args:
            args = []
        if not context:
            context = {}
        ids = self.search(cr, user, [('code', 'ilike', name)] + args,
                          limit=limit, context=context)
        if not ids:
            ids = self.search(cr, user, [('name', operator, name)] + args,
                              limit=limit, context=context)
        return self.name_get(cr, user, ids, context)
    _order = 'code'

CountryState()

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
Python
0.000031
@@ -2221,16 +2221,34 @@ at': %22%25( +company_name)s%5Cn%25( street)s
1c6e593cf8971e7814ffa126b85d092bff8eb89e
Update runTests.py
iterators/runTests.py
iterators/runTests.py
from subprocess import Popen, PIPE import itertools ITERATORS_NUM = [1, 2] # Compare against 0 UPDATERS_NUM = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31] #[1, 2, 3, 4, 5, 6, 7] DURATION = [2, 4] PERCENTAGES = [(25, 25, 50), (20, 10, 70), (50, 50, 0)] KEY_RANGE = [4096] INIT_SIZE = [1024] runs = 1 # Maybe sanitize inputs here # Write configurations used to a file configfile = open("config.txt", 'w') configfile.write(' '.join(map(str, ITERATORS_NUM)) + '\n') configfile.write(' '.join(map(str, UPDATERS_NUM)) + '\n') configfile.write(' '.join(map(str, DURATION)) + '\n') #configfile.write(' '.join(map(str, PERCENTAGES)) + '\n') perc_string = "" for perc in PERCENTAGES: perc_string += "(%d,%d,%d) " % (perc[0], perc[1], perc[2]) perc_string = perc_string.strip() + '\n' configfile.write(perc_string) configfile.write(' '.join(map(str, KEY_RANGE)) + '\n') configfile.write(' '.join(map(str, INIT_SIZE)) + '\n') configfile.close() # Open file, write header outputfile = open("output.txt", 'w') outputfile.write("IT\tUP\tTIME\tCFG\tKEYR\tINIT\tHASH\tUBST\n") verbose = open("output_verbose.txt", 'w') verbose.write("IT\tUP\tTIME\tCFG\tKEYR\tINIT\tRUN\tHASH\tUBST\n") PARAMETER_COMBINATIONS = [ITERATORS_NUM, UPDATERS_NUM, DURATION, PERCENTAGES, KEY_RANGE, INIT_SIZE] # Iterate through all combinations def makeargs(param, alg, i): args = ["java", "-cp", ("%s:%s/lib/java-getopt-1.0.13.jar") % (alg, alg), "IteratorTest"] args += ["-i", str(i)] args += ["-u", str(param[1])] args += ["-d", str(param[2])] args += ["-I", str(param[3][0])] args += ["-R", str(param[3][1])] args += ["-M", str(param[4])] args += ["-s", str(param[5])] return args # for keeping track of progress count = 0 total = runs * len([j for j in itertools.product(*PARAMETER_COMBINATIONS)]) def to_str(data): if type(data) != type(()): return str(data) else: return_str = "(" return_str += (','.join(map(str, data))).strip(',') + ')' return return_str # main loop for param in itertools.product(*PARAMETER_COMBINATIONS): accum_hash = 0 accum_ubst = 0 for r in xrange(runs): # Compare each run against identical run with no iterators # hash table pTest0h = Popen(makeargs(param, "hash", 0), stdout=PIPE) result0h = int(pTest0h.communicate()[0].strip()) pTest1h = Popen(makeargs(param, "hash", param[0]), stdout=PIPE) result1h = int(pTest1h.communicate()[0].strip()) # unbalanced binary search tree pTest0b = Popen(makeargs(param, "ubst", 0), stdout=PIPE) result0b = int(pTest0b.communicate()[0].strip()) pTest1b = Popen(makeargs(param, "ubst", param[0]), stdout=PIPE) result1b = int(pTest1b.communicate()[0].strip()) # calculate/write verbose output line = reduce(lambda x, y: x + y, map(lambda x: to_str(x) + '\t', param + (r+1,))) line += str(float(result1h)/result0h) + '\t' line += str(float(result1b)/result0b) + '\n' verbose.write(line) # accumulate to calculate an average over runs accum_hash += float(result1h) / result0h accum_ubst += float(result1b) / result0b count += 1 print "%d of %d done" % (count, total) # write averages line = reduce(lambda x, y: x + y, map(lambda x: to_str(x) + '\t', param)) line += str(accum_hash/runs) + '\t' line += str(accum_ubst/runs) + '\n' outputfile.write(line) outputfile.close() verbose.close()
Python
0.000001
@@ -2045,16 +2045,30 @@ bst = 0%0A +%09# accum_list%0A %09for r i @@ -2083,16 +2083,16 @@ (runs):%0A - %09%09# Comp @@ -2643,24 +2643,56 @@ %5B0%5D.strip()) +%0A%09%09%0A%09%09#Add linked list code here %0A%0A%09%09# calcul @@ -2894,16 +2894,57 @@ ) + '%5Cn' + # change this to a tab%0A%09%09# add line here %0A%09%09verbo @@ -3093,16 +3093,31 @@ result0b +%0A%09%09# accum_list %0A%0A%09%09coun @@ -3295,16 +3295,16 @@ + '%5Ct'%0A - %09line += @@ -3331,16 +3331,55 @@ ) + '%5Cn' + # changethis to a tab%0A%09# add line here %0A%09output
064802e0354cd9d27a7ea0d1c69a45baf0587c63
add pool example
redisext/pool.py
redisext/pool.py
''' Pool ^^^^ .. autoclass:: Pool :members: SortedSet ^^^^^^^^^ .. autoclass:: SortedSet :members: ''' from __future__ import absolute_import import redisext.models.abc class Pool(redisext.models.abc.Model): def pop(self): item = self.connect_to_master().spop(self.key) return self.decode(item) def push(self, item): item = self.encode(item) return bool(self.connect_to_master().sadd(self.key, item)) class SortedSet(redisext.models.abc.Model): def add(self, element, score): element = self.encode(element) return bool(self.connect_to_master().zadd(self.key, score, element)) def length(self, start_score, end_score): return int(self.connect_to_slave().zcount(self.key, start_score, end_score)) def members(self): elements = self.connect_to_slave().zrevrange(self.key, 0, -1) if not elements: return elements return [self.decode(e) for e in elements] def contains(self, element): element = self.encode(element) return self.connect_to_slave().zscore(self.key, element) is not None def truncate(self, size): return int(self.connect_to_master().zremrangebyrank(self.key, 0, -1 * size - 1)) def clean(self): return bool(self.connect_to_master().delete(self.key))
Python
0
@@ -2,20 +2,20 @@ ''%0APool%0A -%5E%5E%5E%5E +---- %0A%0A.. aut @@ -46,313 +46,1034 @@ s:%0A%0A -SortedSet%0A%5E%5E%5E%5E%5E%5E%5E%5E%5E%0A%0A.. autoclass:: SortedSet%0A :members:%0A%0A'''%0Afrom __future__ import absolute_import%0A%0Aimport redisext.models.abc%0A%0A%0Aclass Pool(redisext.models.abc.Model):%0A def pop(self):%0A item = self.connect_to_master().spop(self.key)%0A return self.decode(item)%0A%0A def push(self, item): +The simpliest example of pool usage is token pool::%0A%0A class TokenPool(Connection, redisext.pool.Pool):%0A SERIALIZER = redisext.serializer.String%0A%0Aand this pool could be used like::%0A%0A %3E%3E%3E facebook = TokenPool('facebook')%0A %3E%3E%3E facebook.push('fb1')%0A True%0A %3E%3E%3E facebook.push('fb1')%0A False%0A %3E%3E%3E facebook.push('fb2')%0A True%0A %3E%3E%3E facebook.pop()%0A u'fb1'%0A %3E%3E%3E facebook.pop()%0A u'fb2'%0A %3E%3E%3E facebook.pop()%0A %3E%3E%3E%0A%0ASortedSet%0A---------%0A%0AFor your spectial needs check :class:%60redisext.pool.SortedSet%60.%0A%0A'''%0Afrom __future__ import absolute_import%0A%0Aimport redisext.models.abc%0A%0A%0Aclass Pool(redisext.models.abc.Model):%0A def pop(self):%0A '''%0A Pop item from pool.%0A%0A :returns: obviously item%0A :rtype: how knows(serializer knows)%0A '''%0A item = self.connect_to_master().spop(self.key)%0A return self.decode(item)%0A%0A def push(self, item):%0A '''%0A Place item into pool.%0A%0A :param item: whatever you need to place into pool%0A :rtype: bool%0A ''' %0A
60e4cef4be2b13c79ed664b911efb30c705ac664
replace sys.version_info with PY2 from robot.utils
remove varkw, only use kwargs
src/robot/running/arguments/argumentparser.py
src/robot/running/arguments/argumentparser.py
# Copyright 2008-2015 Nokia Networks
# Copyright 2016-     Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import inspect

if sys.platform.startswith('java'):
    from java.lang import Class
    from java.util import List, Map

from robot.errors import DataError
from robot.variables import is_dict_var, is_list_var, is_scalar_var

from .argumentspec import ArgumentSpec


class _ArgumentParser(object):

    def __init__(self, type='Keyword'):
        self._type = type

    def parse(self, source, name=None):
        return ArgumentSpec(name, self._type, *self._get_arg_spec(source))

    def _get_arg_spec(self, source):
        raise NotImplementedError


class PythonArgumentParser(_ArgumentParser):

    def _get_arg_spec(self, handler):
        if sys.version_info[0] == 2:
            args, varargs, kwargs, defaults = inspect.getargspec(handler)
            varkw = kwargs
        else:
            args, varargs, varkw, defaults, _, _, _ = inspect.getfullargspec(handler)
        if inspect.ismethod(handler) or handler.__name__ == '__init__':
            args = args[1:]  # drop 'self'
        defaults = list(defaults) if defaults else []
        return args, defaults, varargs, varkw


class JavaArgumentParser(_ArgumentParser):

    def _get_arg_spec(self, signatures):
        if not signatures:
            return self._no_signatures_arg_spec()
        elif len(signatures) == 1:
            return self._single_signature_arg_spec(signatures[0])
        else:
            return self._multi_signature_arg_spec(signatures)

    def _no_signatures_arg_spec(self):
        # Happens when a class has no public constructors
        return self._format_arg_spec()

    def _single_signature_arg_spec(self, signature):
        varargs, kwargs = self._get_varargs_and_kwargs_support(signature.args)
        positional = len(signature.args) - int(varargs) - int(kwargs)
        return self._format_arg_spec(positional, varargs=varargs, kwargs=kwargs)

    def _get_varargs_and_kwargs_support(self, args):
        if not args:
            return False, False
        if self._is_varargs_type(args[-1]):
            return True, False
        if not self._is_kwargs_type(args[-1]):
            return False, False
        if len(args) > 1 and self._is_varargs_type(args[-2]):
            return True, True
        return False, True

    def _is_varargs_type(self, arg):
        return arg is List or isinstance(arg, Class) and arg.isArray()

    def _is_kwargs_type(self, arg):
        return arg is Map

    def _multi_signature_arg_spec(self, signatures):
        mina = maxa = len(signatures[0].args)
        for sig in signatures[1:]:
            argc = len(sig.args)
            mina = min(argc, mina)
            maxa = max(argc, maxa)
        return self._format_arg_spec(maxa, maxa-mina)

    def _format_arg_spec(self, positional=0, defaults=0, varargs=False,
                         kwargs=False):
        positional = ['arg%d' % (i+1) for i in range(positional)]
        defaults = [''] * defaults
        varargs = '*varargs' if varargs else None
        kwargs = '**kwargs' if kwargs else None
        supports_named = False
        return positional, defaults, varargs, kwargs, supports_named


class _ArgumentSpecParser(_ArgumentParser):

    def parse(self, argspec, name=None):
        result = ArgumentSpec(name, self._type)
        for arg in argspec:
            if result.kwargs:
                self._raise_invalid_spec('Only last argument can be kwargs.')
            if self._is_kwargs(arg):
                self._add_kwargs(arg, result)
                continue
            if result.varargs:
                self._raise_invalid_spec('Positional argument after varargs.')
            if self._is_varargs(arg):
                self._add_varargs(arg, result)
                continue
            if '=' in arg:
                self._add_arg_with_default(arg, result)
                continue
            if result.defaults:
                self._raise_invalid_spec('Non-default argument after default '
                                         'arguments.')
            self._add_arg(arg, result)
        return result

    def _raise_invalid_spec(self, error):
        raise DataError('Invalid argument specification: %s' % error)

    def _is_kwargs(self, arg):
        raise NotImplementedError

    def _add_kwargs(self, kwargs, result):
        result.kwargs = self._format_kwargs(kwargs)

    def _format_kwargs(self, kwargs):
        raise NotImplementedError

    def _is_varargs(self, arg):
        raise NotImplementedError

    def _add_varargs(self, varargs, result):
        result.varargs = self._format_varargs(varargs)

    def _format_varargs(self, varargs):
        raise NotImplementedError

    def _add_arg_with_default(self, arg, result):
        arg, default = arg.split('=', 1)
        self._add_arg(arg, result)
        result.defaults.append(default)

    def _add_arg(self, arg, result):
        result.positional.append(self._format_arg(arg))

    def _format_arg(self, arg):
        return arg


class DynamicArgumentParser(_ArgumentSpecParser):

    def _is_kwargs(self, arg):
        return arg.startswith('**')

    def _format_kwargs(self, kwargs):
        return kwargs[2:]

    def _is_varargs(self, arg):
        return arg.startswith('*') and not self._is_kwargs(arg)

    def _format_varargs(self, varargs):
        return varargs[1:]


class UserKeywordArgumentParser(_ArgumentSpecParser):

    def _is_kwargs(self, arg):
        return is_dict_var(arg)

    def _is_varargs(self, arg):
        return is_list_var(arg)

    def _format_kwargs(self, kwargs):
        return kwargs[2:-1]

    def _format_varargs(self, varargs):
        return varargs[2:-1]

    def _format_arg(self, arg):
        if not is_scalar_var(arg):
            self._raise_invalid_spec("Invalid argument syntax '%s'." % arg)
        return arg[2:-1]
Python
0
@@ -871,17 +871,16 @@ lar_var%0A -%0A from .ar @@ -910,16 +910,44 @@ entSpec%0A +from robot.utils import PY2%0A %0A%0Aclass @@ -1327,31 +1327,10 @@ if -sys.version_info%5B0%5D == +PY 2:%0A @@ -1406,35 +1406,8 @@ er)%0A - varkw = kwargs%0A @@ -1443,21 +1443,22 @@ arargs, -varkw +kwargs , defaul @@ -1716,13 +1716,14 @@ gs, -varkw +kwargs %0A%0A%0Ac
8c4ed26ef577e43e5828c14b960acc3fbb71b583
Edit test_schemas.py
src/encoded/tests/test_schemas.py
src/encoded/tests/test_schemas.py
import pytest from pkg_resources import resource_listdir from snovault.schema_utils import load_schema import re pytestmark = [pytest.mark.working, pytest.mark.schema] SCHEMA_FILES = [ f for f in resource_listdir('encoded', 'schemas') if f.endswith('.json') ] @pytest.fixture(scope='module') def master_mixins(): mixins = load_schema('encoded:schemas/mixins.json') mixin_keys = [ 'schema_version', 'uuid', 'accession', 'aliases', 'status', 'submitted', 'modified', 'references', 'attribution', 'notes', 'documents', 'attachment', 'attachments', 'dbxrefs', 'library', 'sop_mapping' ] for key in mixin_keys: assert(mixins[key]) def camel_case(name): return ''.join(x for x in name.title() if not x == '_') def pluralize(name): name = name.replace('_', '-') # deal with a few special cases explicitly specials = ['experiment', 'file', 'individual', 'treatment', 'quality-metric', 'summary-statistic', 'workflow-run', 'microscope-setting'] for sp in specials: if name.startswith(sp) and re.search('-(set|flag)', name) is None: return name.replace(sp, sp + 's') elif name.startswith(sp) and re.search('setting', name): return name.replace(sp, sp + 's') # otherwise just add 's' return name + 's' @pytest.mark.parametrize('schema', SCHEMA_FILES) def test_load_schema(schema, master_mixins, registry): from snovault import TYPES from snovault import COLLECTIONS abstract = [ 'microscope_setting.json', 'experiment.json', 'file.json', 'individual.json', 'quality_metric.json', 'treatment.json', 'workflow_run.json' ] loaded_schema = load_schema('encoded:schemas/%s' % schema) assert(loaded_schema) typename = schema.replace('.json', '') collection_names = [camel_case(typename), pluralize(typename)] # check the mixin properties for each schema if not schema == ('mixins.json'): verify_mixins(loaded_schema, master_mixins) if schema not in ['namespaces.json', 'mixins.json']: # check that schema.id is same as /profiles/schema idtag = loaded_schema['id'] idtag = idtag.replace('/profiles/', '') # special case for access_key.json if schema == 'access_key.json': idtag = idtag.replace('_admin', '') assert schema == idtag # check for pluralized and camel cased in collection_names val = None for name in collection_names: assert name in registry[COLLECTIONS] if val is not None: assert registry[COLLECTIONS][name] == val else: val = registry[COLLECTIONS][name] if schema not in abstract: # check schema w/o json extension is in registry[TYPES] assert typename in registry[TYPES].by_item_type assert typename in registry[COLLECTIONS] assert registry[COLLECTIONS][typename] == val shared_properties = [ 'uuid', 'schema_version', 'aliases', 'lab', 'award', 'date_created', 'submitted_by', 'status' ] no_alias_or_attribution = ['user.json', 'award.json', 'lab.json', 'organism.json', 'ontology.json', 'ontology_term.json', 'sysinfo.json', 'page.json', 'badge.json'] for prop in shared_properties: if schema == 'experiment.json': # currently experiment is abstract and has no mixin properties continue if schema == 'access_key.json' and prop not in ['uuid', 'schema_version']: continue if schema in no_alias_or_attribution and prop in ['aliases', 'lab', 'award']: continue verify_property(loaded_schema, prop) def verify_property(loaded_schema, property): assert(loaded_schema['properties'][property]) def verify_mixins(loaded_schema, master_mixins): ''' test to ensure that we didn't accidently overwrite mixins somehow ''' for mixin in loaded_schema.get('mixinProperties', []): # get the mixin name from {'$ref':'mixins.json#/schema_version'} mixin_file_name, 
mixin_name = mixin['$ref'].split('/') if mixin_file_name != "mixins.json": # skip any mixins not in main mixins.json continue mixin_schema = master_mixins[mixin_name] # each field in the mixin should be present in the parent schema with same properties for mixin_field_name, mixin_field in mixin_schema.items(): schema_field = loaded_schema['properties'][mixin_field_name] for key in mixin_field.keys(): assert mixin_field[key] == schema_field[key] def test_linkTo_saves_uuid(root, submitter, lab): item = root['users'][submitter['uuid']] assert item.properties['submits_for'] == [lab['uuid']] def test_mixinProperties(): from snovault.schema_utils import load_schema schema = load_schema('encoded:schemas/access_key.json') assert schema['properties']['uuid']['type'] == 'string' def test_dependencies(testapp): collection_url = '/testing-dependencies/' testapp.post_json(collection_url, {'dep1': 'dep1', 'dep2': 'dep2'}, status=201) testapp.post_json(collection_url, {'dep1': 'dep1'}, status=422) testapp.post_json(collection_url, {'dep2': 'dep2'}, status=422) testapp.post_json(collection_url, {'dep1': 'dep1', 'dep2': 'disallowed'}, status=422) def test_changelogs(testapp, registry): from snovault import TYPES for typeinfo in registry[TYPES].by_item_type.values(): changelog = typeinfo.schema.get('changelog') if changelog is not None: res = testapp.get(changelog) assert res.status_int == 200, changelog assert res.content_type == 'text/markdown' def test_schemas_etag(testapp): etag = testapp.get('/profiles/', status=200).etag assert etag testapp.get('/profiles/', headers={'If-None-Match': etag}, status=304)
Python
0.000003
@@ -3667,16 +3667,39 @@ e.json', + 'static_section.json', 'badge.
9cdf08450d5c6d8fb979d3e076b11bdaf7d794ed
fix for issue 84
activedata_etl/update_push_to_es.py
activedata_etl/update_push_to_es.py
# encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import division
from __future__ import unicode_literals

import logging

from boto import ec2 as boto_ec2
from fabric.api import settings as fabric_settings
from fabric.context_managers import cd
from fabric.operations import run, put, sudo
from fabric.state import env
from mo_collections import UniqueIndex
from mo_dots import unwrap, wrap
from mo_dots.objects import datawrap, DataObject
from mo_files import File
from mo_logs import Log, startup, constants
from pyLibrary.aws import aws_retry


@aws_retry
def _get_managed_spot_requests(ec2_conn, name):
    output = wrap([datawrap(r) for r in ec2_conn.get_all_spot_instance_requests() if not r.tags.get("Name") or r.tags.get("Name").startswith(name)])
    return output


@aws_retry
def _get_managed_instances(ec2_conn, name):
    requests = UniqueIndex(["instance_id"], data=_get_managed_spot_requests(ec2_conn, name).filter(lambda r: r.instance_id != None))
    reservations = ec2_conn.get_all_instances()

    output = []
    for res in reservations:
        for instance in res.instances:
            if instance.tags.get('Name', '').startswith(name) and instance._state.name == "running":
                instance.request = requests[instance.id]
                output.append(datawrap(instance))
    return wrap(output)


def _config_fabric(connect, instance):
    if not instance.ip_address:
        Log.error("Expecting an ip address for {{instance_id}}", instance_id=instance.id)

    for k, v in connect.items():
        env[k] = v
    env.host_string = instance.ip_address
    env.abort_exception = Log.error


def _disable_oom_on_es():
    with fabric_settings(warn_only=True):
        sudo("supervisorctl start es")
    with cd("/home/ec2-user"):
        run("mkdir -p temp")
    with cd("/home/ec2-user/temp"):
        processes = sudo("ps -eo pid,command | grep java")
        candidates = [
            line
            for line in processes.split("\n")
            if line.find("/usr/java/default/bin/java -Xms") != -1 and line.find("org.elasticsearch.bootstrap.Elasticsearch") != -1
        ]
        if not candidates:
            Log.error("Expecting to find some hint of Elasticsearch running")
        elif len(candidates) > 1:
            Log.error("Fond more than one Elasticsearch running, not sure what to do")

        pid = candidates[0].split(" ")[0].strip()
        run("echo -16 > oom_adj")
        sudo("sudo cp oom_adj /proc/" + pid + "/oom_adj")


def _refresh_indexer():
    with cd("/usr/local/elasticsearch"):
        sudo("rm -f java*.hprof")
    _disable_oom_on_es()
    with cd("/home/ec2-user/ActiveData-ETL/"):
        result = run("git pull origin push-to-es6")
        if "Already up-to-date." in result:
            Log.note("No change required")
        else:
            # RESTART ANYWAY, SO WE USE LATEST INDEX
            run("~/pypy/bin/pypy -m pip install -r requirements.txt")
            with fabric_settings(warn_only=True):
                sudo("supervisorctl stop push_to_es:*")
            sudo("supervisorctl start push_to_es:00")


def _start_supervisor():
    put("~/code/SpotManager/examples/config/es_supervisor.conf", "/etc/supervisord.conf", use_sudo=True)
    # START DAEMON (OR THROW ERROR IF RUNNING ALREADY)
    with fabric_settings(warn_only=True):
        sudo("supervisord -c /etc/supervisord.conf")
    sudo("supervisorctl reread")
    sudo("supervisorctl update")


def _run_remote(command, name):
    File("./results/temp/" + name + ".sh").write("nohup " + command + " >& /dev/null < /dev/null &\nsleep 20")
    put("./results/temp/" + name + ".sh", "" + name + ".sh")
    run("chmod u+x " + name + ".sh")
    run("./" + name + ".sh")


def main():
    try:
        settings = startup.read_settings()
        constants.set(settings.constants)
        Log.start(settings.debug)

        logging.getLogger('paramiko.transport').addHandler(LogTranslate())

        aws_args = dict(
            region_name=settings.aws.region,
            aws_access_key_id=unwrap(settings.aws.aws_access_key_id),
            aws_secret_access_key=unwrap(settings.aws.aws_secret_access_key)
        )
        ec2_conn = boto_ec2.connect_to_region(**aws_args)

        instances = _get_managed_instances(ec2_conn, settings.name)

        for i in instances:
            try:
                Log.note("Reset {{instance_id}} ({{name}}) at {{ip}}", instance_id=i.id, name=i.tags["Name"], ip=i.ip_address)
                _config_fabric(settings.fabric, i)
                _refresh_indexer()
            except Exception as e:
                Log.warning(
                    "could not refresh {{instance_id}} ({{name}}) at {{ip}}",
                    instance_id=i.id,
                    name=i.tags["Name"],
                    ip=i.ip_address,
                    cause=e
                )
    except Exception as e:
        Log.error("Problem with etl", e)
    finally:
        Log.stop()


class LogTranslate(object):
    def __init__(self, level=0):
        self.level=level

    def emit(self, record):
        Log.note("{{record}}", record=record)

    def flush(self):
        pass

    def handle(self, record):
        Log.note("{{record|json}}", record=DataObject(record))


if __name__ == "__main__":
    main()
Python
0
@@ -2223,26 +2223,16 @@ if -line.find( %22/usr/ja @@ -2260,30 +2260,21 @@ Xms%22 -) != -1 and + in line -.find( + and %22org @@ -2316,15 +2316,16 @@ rch%22 -) != -1 + in line %0A @@ -2586,16 +2586,24 @@ ates%5B0%5D. +strip(). split(%22
e5ed6ef0c201d9a29c5934e3687abec7e13ae551
update models to use a hashids for naming files
api/models.py
api/models.py
""" This file represents the models for the api app. """ from django.db import models class DateMixin(models.Model): """A model mixin for date creation.""" created = models.DateField(auto_now_add=True) class File(DateMixin): """This class represents the file model.""" name = models.CharField(max_length=100, unique=True) file = models.FileField(allow_files=True) def __str__(self): """Return a string representation of the model instance.""" return "{}".format(self.name)
Python
0
@@ -79,16 +79,70 @@ models%0A +from .utils import get_file_upload_path, generate_uid%0A %0A%0Aclass @@ -338,20 +338,23 @@ %22%22%0A%0A -name +file_id = model @@ -369,41 +369,50 @@ eld( -max_length=100, unique=True +default=generate_uid, max_length=50 )%0A +_ file @@ -435,24 +435,38 @@ eld( -allow_files=True +upload_to=get_file_upload_path )%0A%0A
c82d704944d210b2db39ba1dd44bcb2c82708edc
allow filtering by domain
corehq/blobs/management/commands/run_blob_migration.py
corehq/blobs/management/commands/run_blob_migration.py
import logging
import os
import sys
from datetime import datetime, timedelta

from django.core.management import BaseCommand, CommandError

from corehq.blobs.migrate import MIGRATIONS
from corehq.blobs.util import set_max_connections
from corehq.util.decorators import change_log_level
from corehq.util.teeout import tee_output

DEFAULT_WORKER_POOL_SIZE = 10
DEFAULT_BOTOCORE_MAX_POOL_CONNECTIONS = 10

USAGE = """Usage: ./manage.py run_blob_migration [options] <slug>

Slugs:

{}
""".format('\n'.join(sorted(MIGRATIONS)))


class Command(BaseCommand):
    """
    Example: ./manage.py run_blob_migration [options] saved_exports
    """
    help = USAGE

    def add_arguments(self, parser):
        def add_argument(*args, **kw):
            name = args[-1].lstrip("-").replace("-", "_")
            self.option_names.add(name)
            parser.add_argument(*args, **kw)

        self.option_names = set()
        add_argument(
            'slug',
            choices=sorted(MIGRATIONS),
            help="Migration slug: {}".format(', '.join(sorted(MIGRATIONS))),
        )
        add_argument(
            '--log-dir',
            help="Migration log directory.",
        )
        add_argument(
            '--reset',
            action="store_true",
            default=False,
            help="Discard any existing migration state.",
        )
        add_argument(
            '--chunk-size',
            type=int,
            default=100,
            help="Maximum number of records to read from couch at once.",
        )
        add_argument(
            '--num-workers',
            type=int,
            default=DEFAULT_WORKER_POOL_SIZE,
            help=(
                "Worker pool size for parallel processing. This option is "
                "ignored by migration types that do not support it."
            ),
        )
        add_argument(
            '--date-range',
            help=(
                "Creation date range of blobs to be migrated specified as one "
                "or two dates in YYYYMMDD format. If only one date is "
                "specified, it will be used as the end date, leaving the "
                "start date unbounded. Some migrations may not support this"
                "parameter. Example value: 20180109-20190109"
            ),
        )
        add_argument(
            '--process_day_by_day',
            action='store_true',
            default=False,
            help=(
                "Run migration for each day in the given date-range separately "
                "to allow cancelling and resuming on any day. "
                "Only applicable with date-range option"
            ),
        )

    @change_log_level('boto3', logging.WARNING)
    @change_log_level('botocore', logging.WARNING)
    def handle(self, slug, log_dir=None, **options):
        try:
            migrator = MIGRATIONS[slug]()
        except KeyError:
            raise CommandError(USAGE)

        # drop options not added by this command
        for name in list(options):
            if name not in self.option_names:
                options.pop(name)

        if not migrator.has_worker_pool:
            num_workers = options.pop("num_workers")
            if num_workers != DEFAULT_WORKER_POOL_SIZE:
                print("--num-workers={} ignored because this migration "
                      "does not use a worker pool".format(num_workers))
        elif options["num_workers"] > DEFAULT_BOTOCORE_MAX_POOL_CONNECTIONS:
            set_max_connections(options["num_workers"])

        if "date_range" in options:
            rng = options["date_range"]
            if rng is None:
                options.pop("date_range")
            else:
                if "-" not in rng:
                    rng = (None, get_date(rng))
                else:
                    rng = rng.split("-")
                    if len(rng) != 2:
                        raise CommandError("bad date range: {}".format(rng))
                    rng = tuple(get_date(v) for v in rng)
                # date_range is a tuple containing two date values
                # a value of None means that side of the range is unbounded
                options["date_range"] = rng

        if log_dir is None:
            summary_file = log_file = None
        else:
            now = datetime.utcnow().strftime("%Y%m%dT%H%M%SZ")
            summary_file = os.path.join(log_dir,
                "{}-blob-migration-{}-summary.txt".format(slug, now))
            log_file = os.path.join(log_dir,
                "{}-blob-migration-{}.txt".format(slug, now))
            assert not os.path.exists(summary_file), summary_file
            assert not os.path.exists(log_file), log_file

        def _migrate():
            with tee_output(summary_file):
                try:
                    total, skips = migrator.migrate(log_file, **options)
                    if skips:
                        sys.exit(skips)
                except KeyboardInterrupt:
                    print("stopped by operator")
                    if options.get('date_range'):
                        print("while processing date range {}".format(options['date_range']))
                    sys.exit(1)

        process_day_by_day = options.pop('process_day_by_day')
        if 'date_range' in options and process_day_by_day:
            start, end = options.pop('date_range')
            num_days = (end - start).days
            for day in range(num_days + 1):
                date = start + timedelta(days=day)
                options['date_range'] = (date, date)
                print("Migrating for date {} ".format(date))
                _migrate()
                print("Finished migration for date {} ".format(date))
        else:
            _migrate()


def get_date(value):
    if not value:
        return None
    try:
        return datetime.strptime(value, "%Y%m%d").date()
    except ValueError:
        raise CommandError("bad date value: {}".format(value))
Python
0
@@ -2287,32 +2287,108 @@ ),%0A )%0A + add_argument('--domain', help=%22Limit migration to a single domain%22)%0A add_argu
9e82515ca1eeb6376947ae653ee375146c95016c
Fix EE API interface
dcos_test_utils/enterprise.py
dcos_test_utils/enterprise.py
import logging
import os

from dcos_test_utils import dcos_api_session, helpers, iam

log = logging.getLogger(__name__)


class MesosNodeClientMixin:
    """ This Mixin allows any request to be made against a master or agent
    mesos HTTP port by providing the keyword 'mesos_node'. Thus, the user
    does not have to specify the master/agent port or which arbitrary host
    in the cluster meeting that role
    """
    def api_request(self, method, path_extension, *, scheme=None, host=None,
                    query=None, fragment=None, port=None, mesos_node=None, **kwargs):
        if mesos_node is not None:
            assert port is None, 'Usage error: mesos_node keyword will set port'
            assert host is None, 'Usage error: mesos_node keyword will set host'
            if mesos_node == 'master':
                port = 5050
                host = self.masters[0]
            elif mesos_node == 'agent':
                port = 5051
                host = self.slaves[0]
            else:
                raise AssertionError('Mesos node type not recognized: {}'.format(mesos_node))
        return super().api_request(method, path_extension, scheme=scheme, host=host,
                                   query=query, fragment=fragment, port=port, **kwargs)


class EnterpriseUser(dcos_api_session.DcosUser):
    def __init__(self, uid: str, password: str):
        self.uid = uid
        self.password = password
        super().__init__(self.auth_json)

    @property
    def auth_json(self):
        return {'uid': self.uid, 'password': self.password}


class EnterpriseApiSession(MesosNodeClientMixin, dcos_api_session.DcosApiSession):
    @property
    def iam(self):
        return iam.Iam(self.default_url.copy(path='acs/api/v1'), session=self.copy().session)

    @property
    def secrets(self):
        new = self.copy()
        new.default_url = self.default_url.copy(path='secrets/v1')
        return new

    @property
    def ca(self):
        new = self.copy()
        new.default_url = self.default_url.copy(path='ca/api/v2')
        return new

    @staticmethod
    def get_args_from_env():
        assert 'DCOS_LOGIN_UNAME' in os.environ, 'DCOS_LOGIN_UNAME must be set to login!'
        assert 'DCOS_LOGIN_PW' in os.environ, 'DCOS_LOGIN_PW must be set!'
        uid = os.environ['DCOS_LOGIN_UNAME']
        password = os.environ['DCOS_LOGIN_PW']
        args = super(dcos_api_session.DcosApiSession).get_args_from_env()
        args['auth_user'] = EnterpriseUser(uid, password)
        return args

    def set_ca_cert(self):
        log.info('Attempt to get CA bundle via CA HTTP API')
        r = self.post('ca/api/v2/info', json={'profile': ''}, verify=False)
        r.raise_for_status()
        crt = r.json()['result']['certificate']
        self.session.verify = helpers.session_tempfile(crt.encode())

    def set_initial_resrouce_ids(self):
        self.initial_resource_ids = []
        r = self.iam.get('/acls')
        for o in r.json()['array']:
            self.initial_resource_ids.append(o['rid'])
Python
0.000143
@@ -2392,22 +2392,16 @@ args = -super( dcos_api @@ -2423,17 +2423,16 @@ iSession -) .get_arg @@ -2861,11 +2861,11 @@ _res -r ou +r ce_i
6e2515f4db3b6b9913e252cd52237574002637f2
Add missing user_id in revoke_certs_by_user_and_project()
nova/cert/manager.py
nova/cert/manager.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Cert manager manages x509 certificates.

**Related Flags**

:cert_topic: What :mod:`rpc` topic to listen to (default: `cert`).
:cert_manager: The module name of a class derived from
               :class:`manager.Manager` (default:
               :class:`nova.cert.manager.Manager`).
"""

import base64

from nova import crypto
from nova import flags
from nova import manager
from nova.openstack.common import log as logging

LOG = logging.getLogger(__name__)

FLAGS = flags.FLAGS


class CertManager(manager.Manager):
    RPC_API_VERSION = '1.0'

    def init_host(self):
        crypto.ensure_ca_filesystem()

    def revoke_certs_by_user(self, context, user_id):
        """Revoke all user certs."""
        return crypto.revoke_certs_by_user(user_id)

    def revoke_certs_by_project(self, context, project_id):
        """Revoke all project certs."""
        return crypto.revoke_certs_by_project(project_id)

    def revoke_certs_by_user_and_project(self, context, user_id, project_id):
        """Revoke certs for user in project."""
        return crypto.revoke_certs_by_user_and_project(project_id)

    def generate_x509_cert(self, context, user_id, project_id):
        """Generate and sign a cert for user in project"""
        return crypto.generate_x509_cert(user_id, project_id)

    def fetch_ca(self, context, project_id):
        """Get root ca for a project"""
        return crypto.fetch_ca(project_id)

    def fetch_crl(self, context, project_id):
        """Get crl for a project"""
        return crypto.fetch_crl(project_id)

    def decrypt_text(self, context, project_id, text):
        """Decrypt base64 encoded text using the projects private key."""
        return crypto.decrypt_text(project_id, base64.b64decode(text))
Python
0.999994
@@ -1740,32 +1740,41 @@ ser_and_project( +user_id, project_id)%0A%0A
b71f45c0f4a53b11b03ce78a7ee8f67951b2d406
Remove default argument from mutable array
paasta_tools/monkrelaycluster_tools.py
paasta_tools/monkrelaycluster_tools.py
# Copyright 2015-2019 Yelp Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from typing import Mapping
from typing import Optional

import service_configuration_lib

from paasta_tools.kubernetes_tools import sanitised_cr_name
from paasta_tools.long_running_service_tools import LongRunningServiceConfig
from paasta_tools.long_running_service_tools import LongRunningServiceConfigDict
from paasta_tools.utils import BranchDictV2
from paasta_tools.utils import deep_merge_dictionaries
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import load_service_instance_config
from paasta_tools.utils import load_v2_deployments_json


class MonkRelayClusterDeploymentConfigDict(LongRunningServiceConfigDict, total=False):
    replicas: int


class MonkRelayClusterDeploymentConfig(LongRunningServiceConfig):
    config_dict: MonkRelayClusterDeploymentConfigDict

    config_filename_prefix = "monkrelaycluster"

    def __init__(
        self,
        service: str,
        cluster: str,
        instance: str,
        config_dict: MonkRelayClusterDeploymentConfigDict,
        branch_dict: Optional[BranchDictV2],
        soa_dir: str = DEFAULT_SOA_DIR,
    ) -> None:
        super().__init__(
            cluster=cluster,
            instance=instance,
            service=service,
            soa_dir=soa_dir,
            config_dict=config_dict,
            branch_dict=branch_dict,
        )

    def get_instances(self, with_limit: bool = True) -> int:
        return self.config_dict.get("replicas", 1)

    def validate(
        self,
        params: List[str] = [
            "cpus",
            "security",
            "dependencies_reference",
            "deploy_group",
        ],
    ) -> List[str]:
        # Use InstanceConfig to validate shared config keys like cpus and mem
        # TODO: add mem back to this list once we fix PAASTA-15582 and
        # move to using the same units as flink/marathon etc.
        error_msgs = super().validate(params=params)

        if error_msgs:
            name = self.get_instance()
            return [f"{name}: {msg}" for msg in error_msgs]
        else:
            return []


def load_monkrelaycluster_instance_config(
    service: str,
    instance: str,
    cluster: str,
    load_deployments: bool = True,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> MonkRelayClusterDeploymentConfig:
    """Read a service instance's configuration for MonkRelayCluster.

    If a branch isn't specified for a config, the 'branch' key defaults to
    paasta-${cluster}.${instance}.

    :param service: The service name
    :param instance: The instance of the service to retrieve
    :param cluster: The cluster to read the configuration for
    :param load_deployments: A boolean indicating if the corresponding
                             deployments.json for this service should also be loaded
    :param soa_dir: The SOA configuration directory to read from
    :returns: A dictionary of whatever was in the config for the service instance"""
    general_config = service_configuration_lib.read_service_configuration(
        service, soa_dir=soa_dir
    )
    instance_config = load_service_instance_config(
        service, instance, "monkrelaycluster", cluster, soa_dir=soa_dir
    )
    general_config = deep_merge_dictionaries(
        overrides=instance_config, defaults=general_config
    )

    branch_dict: Optional[BranchDictV2] = None
    if load_deployments:
        deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
        temp_instance_config = MonkRelayClusterDeploymentConfig(
            service=service,
            cluster=cluster,
            instance=instance,
            config_dict=general_config,
            branch_dict=None,
            soa_dir=soa_dir,
        )
        branch = temp_instance_config.get_branch()
        deploy_group = temp_instance_config.get_deploy_group()
        branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)

    return MonkRelayClusterDeploymentConfig(
        service=service,
        cluster=cluster,
        instance=instance,
        config_dict=general_config,
        branch_dict=branch_dict,
        soa_dir=soa_dir,
    )


# TODO: read this from CRD in service configs
def cr_id(service: str, instance: str) -> Mapping[str, str]:
    return dict(
        group="yelp.com",
        version="v1alpha1",
        namespace="paasta-monkrelayclusters",
        plural="monkrelays",
        name=sanitised_cr_name(service, instance),
    )
Python
0.000002
@@ -2064,178 +2064,32 @@ ate( -%0A self,%0A params: List%5Bstr%5D = %5B%0A %22cpus%22,%0A %22security%22,%0A %22dependencies_reference%22,%0A %22deploy_group%22,%0A %5D,%0A +self, params: List%5Bstr%5D, ) -%3E @@ -2311,16 +2311,206 @@ on etc.%0A + if params is None:%0A params = %5B%0A %22cpus%22,%0A %22security%22,%0A %22dependencies_reference%22,%0A %22deploy_group%22,%0A %5D%0A
b45ce22e0d688e5c2a9a56f5eb87744cea87a263
Fix scimath.power for negative integer input.
numpy/lib/scimath.py
numpy/lib/scimath.py
""" Wrapper functions to more user-friendly calling of certain math functions whose output data-type is different than the input data-type in certain domains of the input. """ __all__ = ['sqrt', 'log', 'log2', 'logn','log10', 'power', 'arccos', 'arcsin', 'arctanh'] import numpy.core.numeric as nx import numpy.core.numerictypes as nt from numpy.core.numeric import asarray, any from numpy.lib.type_check import isreal #__all__.extend([key for key in dir(nx.umath) # if key[0] != '_' and key not in __all__]) _ln2 = nx.log(2.0) def _tocomplex(arr): if isinstance(arr.dtype, (nt.single, nt.byte, nt.short, nt.ubyte, nt.ushort)): return arr.astype(nt.csingle) else: return arr.astype(nt.cdouble) def _fix_real_lt_zero(x): x = asarray(x) if any(isreal(x) & (x<0)): x = _tocomplex(x) return x def _fix_real_abs_gt_1(x): x = asarray(x) if any(isreal(x) & (abs(x)>1)): x = _tocomplex(x) return x def sqrt(x): x = _fix_real_lt_zero(x) return nx.sqrt(x) def log(x): x = _fix_real_lt_zero(x) return nx.log(x) def log10(x): x = _fix_real_lt_zero(x) return nx.log10(x) def logn(n, x): """ Take log base n of x. """ x = _fix_real_lt_zero(x) n = _fix_real_lt_zero(n) return nx.log(x)/nx.log(n) def log2(x): """ Take log base 2 of x. """ x = _fix_real_lt_zero(x) return nx.log(x)/_ln2 def power(x, p): x = _fix_real_lt_zero(x) return nx.power(x, p) def arccos(x): x = _fix_real_abs_gt_1(x) return nx.arccos(x) def arcsin(x): x = _fix_real_abs_gt_1(x) return nx.arcsin(x) def arctanh(x): x = _fix_real_abs_gt_1(x) return nx.arctanh(x)
Python
0.999999
@@ -886,24 +886,135 @@ return x%0A%0A +def _fix_int_lt_zero(x):%0A x = asarray(x)%0A if any(isreal(x) & (x %3C 0)):%0A x = x * 1.0%0A return x%0A%0A def _fix_rea @@ -1607,32 +1607,60 @@ real_lt_zero(x)%0A + p = _fix_int_lt_zero(p)%0A return nx.po
3c299bf2682a9b8d5be2c9c8f308720182935d12
Add missing username to log statement
accounts/tasks.py
accounts/tasks.py
import logging

from celery import task
from django.db import IntegrityError
from django.utils.text import slugify

import games.models
from accounts.models import User
from emails.messages import send_daily_mod_mail
from games.util.steam import create_game

LOGGER = logging.getLogger()


@task
def sync_steam_library(user_id):
    user = User.objects.get(pk=user_id)
    steamid = user.steamid
    library = games.models.GameLibrary.objects.get(user=user)
    steam_games = games.util.steam.steam_sync(steamid)
    if not steam_games:
        LOGGER.info("Steam user %s has no steam games")
        return
    for game in steam_games:
        LOGGER.info("Adding %s to %s's library", game['name'], user.username)
        if not game['img_icon_url']:
            LOGGER.info("Game %s has no icon", game['name'])
            continue
        try:
            steam_game = games.models.Game.objects.get(steamid=game['appid'])
        except games.models.Game.MultipleObjectsReturned:
            LOGGER.error("Multiple games with appid '%s'", game['appid'])
            continue
        except games.models.Game.DoesNotExist:
            LOGGER.info("No game with steam id %s", game['appid'])
            try:
                steam_game = games.models.Game.objects.get(
                    slug=slugify(game['name'])[:50]
                )
                if not steam_game.steamid:
                    steam_game.steamid = game['appid']
                    steam_game.save()
            except games.models.Game.DoesNotExist:
                steam_game = create_game(game)
                LOGGER.info("Creating game %s", steam_game.slug)
        try:
            library.games.add(steam_game)
        except IntegrityError:
            # Game somehow already added.
            pass


@task
def daily_mod_mail():
    send_daily_mod_mail()
Python
0.000006
@@ -584,16 +584,31 @@ m games%22 +, user.username )%0A
3b6d5fd80eb4d95679b969e8809b154d6254de8d
Replace get_user_profile_by_email with get_user.
zerver/management/commands/bankrupt_users.py
zerver/management/commands/bankrupt_users.py
from __future__ import absolute_import
from __future__ import print_function
from typing import Any

from argparse import ArgumentParser
from django.core.management.base import BaseCommand

from zerver.lib.actions import do_update_message_flags
from zerver.models import UserProfile, Message, get_user_profile_by_email

class Command(BaseCommand):
    help = """Bankrupt one or many users."""

    def add_arguments(self, parser):
        # type: (ArgumentParser) -> None
        parser.add_argument('emails', metavar='<email>', type=str, nargs='+',
                            help='email address to bankrupt')

    def handle(self, *args, **options):
        # type: (*Any, **str) -> None
        for email in options['emails']:
            try:
                user_profile = get_user_profile_by_email(email)
            except UserProfile.DoesNotExist:
                print("e-mail %s doesn't exist in the system, skipping" % (email,))
                continue
            do_update_message_flags(user_profile, "add", "read", None, True, None, None)

            messages = Message.objects.filter(
                usermessage__user_profile=user_profile).order_by('-id')[:1]
            if messages:
                old_pointer = user_profile.pointer
                new_pointer = messages[0].id
                user_profile.pointer = new_pointer
                user_profile.save(update_fields=["pointer"])
                print("%s: %d => %d" % (email, old_pointer, new_pointer))
            else:
                print("%s has no messages, can't bankrupt!" % (email,))
Python
0.000007
@@ -171,27 +171,28 @@ import -Base Command +Error %0A%0Afrom z @@ -256,69 +256,80 @@ ver. -models import UserProfile, Message, get_user_profile_by_email +lib.management import ZulipBaseCommand%0Afrom zerver.models import Message %0A%0Acl @@ -340,16 +340,21 @@ Command( +Zulip BaseComm @@ -622,16 +622,58 @@ nkrupt') +%0A self.add_realm_args(parser, True) %0A%0A de @@ -736,32 +736,72 @@ **str) -%3E None%0A + realm = self.get_realm(options)%0A for emai @@ -876,16 +876,21 @@ e = +self. get_user _pro @@ -889,76 +889,54 @@ user -_profile_by_email(email)%0A except UserProfile.DoesNotExist +(email, realm)%0A except CommandError :%0A @@ -991,14 +991,16 @@ the -system +realm %25s , sk @@ -1016,16 +1016,22 @@ (email, + realm ))%0A @@ -1050,17 +1050,16 @@ ontinue%0A -%0A
3135bda8970a2fdefa92b932c15cf5c559392c9c
allow to specify db session callable directly
ziggurat_foundations/ext/pyramid/get_user.py
ziggurat_foundations/ext/pyramid/get_user.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import importlib
import logging

from ziggurat_foundations.models.base import get_db_session
from ziggurat_foundations.models.services.user import UserService

CONFIG_KEY = "ziggurat_foundations"

log = logging.getLogger(__name__)


def includeme(config):
    settings = config.registry.settings
    session_provider_callable_config = settings.get(
        "%s.session_provider_callable" % CONFIG_KEY
    )
    if not session_provider_callable_config:

        def session_provider_callable(request):
            return get_db_session()

        test_session_callable = None
    else:
        parts = session_provider_callable_config.split(":")
        _tmp = importlib.import_module(parts[0])
        session_provider_callable = getattr(_tmp, parts[1])
        test_session_callable = "session exists"

    # This function is bundled into the request, so for each request you can
    # do request.user
    def get_user(request):
        userid = request.unauthenticated_userid
        if test_session_callable is None:
            # set db_session to none to pass to the UserModel.by_id
            db_session = None
        else:
            # Else assign the request.session
            db_session = session_provider_callable(request)
        if userid is not None:
            return UserService.by_id(userid, db_session=db_session)

    # add in request.user function
    config.add_request_method(get_user, "user", reify=True, property=True)
Python
0
@@ -636,32 +636,178 @@ None%0A else:%0A + if callable(session_provider_callable_config):%0A session_provider_callable = session_provider_callable_config%0A else:%0A parts = @@ -858,16 +858,20 @@ + + _tmp = i @@ -903,16 +903,20 @@ rts%5B0%5D)%0A + @@ -967,16 +967,20 @@ rts%5B1%5D)%0A +
fe01408283b0a0a8667214fcb1f5a58162ad7507
Update leave_allocation.py
erpnext/hr/doctype/leave_allocation/leave_allocation.py
erpnext/hr/doctype/leave_allocation/leave_allocation.py
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt

from __future__ import unicode_literals
import frappe
from frappe.utils import flt, date_diff, formatdate
from frappe import _
from frappe.model.document import Document
from erpnext.hr.utils import set_employee_name
from erpnext.hr.doctype.leave_application.leave_application import get_approved_leaves_for_period

class OverlapError(frappe.ValidationError): pass
class BackDatedAllocationError(frappe.ValidationError): pass
class OverAllocationError(frappe.ValidationError): pass
class LessAllocationError(frappe.ValidationError): pass
class ValueMultiplierError(frappe.ValidationError): pass

class LeaveAllocation(Document):
	def validate(self):
		self.validate_period()
		self.validate_new_leaves_allocated_value()
		self.validate_allocation_overlap()
		self.validate_back_dated_allocation()
		self.set_total_leaves_allocated()
		self.validate_total_leaves_allocated()
		set_employee_name(self)

	def on_update_after_submit(self):
		self.validate_new_leaves_allocated_value()
		self.set_total_leaves_allocated()

		frappe.db.set(self,'carry_forwarded_leaves', flt(self.carry_forwarded_leaves))
		frappe.db.set(self,'total_leaves_allocated',flt(self.total_leaves_allocated))

		self.validate_against_leave_applications()

	def validate_period(self):
		if date_diff(self.to_date, self.from_date) <= 0:
			frappe.throw(_("To date cannot be before from date"))

	def validate_new_leaves_allocated_value(self):
		"""validate that leave allocation is in multiples of 0.5"""
		if flt(self.new_leaves_allocated) % 0.5:
			frappe.throw(_("Leaves must be allocated in multiples of 0.5"), ValueMultiplierError)

	def validate_allocation_overlap(self):
		leave_allocation = frappe.db.sql("""
			select name from `tabLeave Allocation`
			where employee=%s and leave_type=%s and docstatus=1
			and to_date >= %s and from_date <= %s""",
			(self.employee, self.leave_type, self.from_date, self.to_date))

		if leave_allocation:
			frappe.msgprint(_("{0} already allocated for Employee {1} for period {2} to {3}")
				.format(self.leave_type, self.employee, formatdate(self.from_date), formatdate(self.to_date)))

			frappe.throw(_('Reference') + ': <a href="#Form/Leave Allocation/{0}">{0}</a>'
				.format(leave_allocation[0][0]), OverlapError)

	def validate_back_dated_allocation(self):
		future_allocation = frappe.db.sql("""select name, from_date from `tabLeave Allocation`
			where employee=%s and leave_type=%s and docstatus=1
			and from_date > %s and carry_forward=1""",
			(self.employee, self.leave_type, self.to_date), as_dict=1)

		if future_allocation:
			frappe.throw(_("Leave cannot be allocated before {0}, as leave balance has already been carry-forwarded in the future leave allocation record {1}")
				.format(formatdate(future_allocation[0].from_date), future_allocation[0].name), BackDatedAllocationError)

	def set_total_leaves_allocated(self):
		self.carry_forwarded_leaves = get_carry_forwarded_leaves(self.employee,
			self.leave_type, self.from_date, self.carry_forward)

		self.total_leaves_allocated = flt(self.carry_forwarded_leaves) + flt(self.new_leaves_allocated)

		if not self.total_leaves_allocated:
			frappe.throw(_("Total leaves allocated is mandatory"))

	def validate_total_leaves_allocated(self):
		# Adding a day to include To Date in the difference
		date_difference = date_diff(self.to_date, self.from_date) + 1
		if date_difference < self.total_leaves_allocated:
			frappe.throw(_("Total allocated leaves are more than days in the period"), OverAllocationError)

	def validate_against_leave_applications(self):
		leaves_taken = get_approved_leaves_for_period(self.employee, self.leave_type,
			self.from_date, self.to_date)

		if flt(leaves_taken) > flt(self.total_leaves_allocated):
			if frappe.db.get_value("Leave Type", self.leave_type, "allow_negative")
				frappe.msgprint(_("Note: Total allocated leaves {0} shouldn't be less than already approved leaves {1} for the period").format(self.total_leaves_allocated, leaves_taken), LessAllocationError)
			else:
				frappe.throw(_("Total allocated leaves {0} cannot be less than already approved leaves {1} for the period").format(self.total_leaves_allocated, leaves_taken), LessAllocationError)

@frappe.whitelist()
def get_carry_forwarded_leaves(employee, leave_type, date, carry_forward=None):
	carry_forwarded_leaves = 0
	if carry_forward:
		validate_carry_forward(leave_type)

		previous_allocation = frappe.db.sql("""
			select name, from_date, to_date, total_leaves_allocated
			from `tabLeave Allocation`
			where employee=%s and leave_type=%s and docstatus=1 and to_date < %s
			order by to_date desc limit 1
		""", (employee, leave_type, date), as_dict=1)
		if previous_allocation:
			leaves_taken = get_approved_leaves_for_period(employee, leave_type,
				previous_allocation[0].from_date, previous_allocation[0].to_date)

			carry_forwarded_leaves = flt(previous_allocation[0].total_leaves_allocated) - flt(leaves_taken)

	return carry_forwarded_leaves

def validate_carry_forward(leave_type):
	if not frappe.db.get_value("Leave Type", leave_type, "is_carry_forward"):
		frappe.throw(_("Leave Type {0} cannot be carry-forwarded").format(leave_type))
Python
0.000001
@@ -3945,16 +3945,17 @@ gative%22) +: %0A%09%09%09%09fra
07def114287bc3488e76e2516ca7682954ba4a09
Use default alphabet
APITaxi/extensions.py
APITaxi/extensions.py
#coding: utf-8
from flask_sqlalchemy import SQLAlchemy as BaseSQLAlchemy
from sqlalchemy.pool import QueuePool as BaseQueuePool


class SQLAlchemy(BaseSQLAlchemy):
    def apply_driver_hacks(self, app, info, options):
        BaseSQLAlchemy.apply_driver_hacks(self, app, info, options)

        class QueuePool(BaseQueuePool):
            def __init__(self, creator, pool_size=5, max_overflow=10, timeout=30, **kw):
                kw['use_threadlocal'] = True
                BaseQueuePool.__init__(self, creator, pool_size, max_overflow, timeout, **kw)

        options.setdefault('poolclass', QueuePool)

db = SQLAlchemy(session_options={"autoflush":False})

from .utils.redis_geo import GeoRedis
from flask.ext.redis import FlaskRedis
redis_store = FlaskRedis.from_custom_provider(GeoRedis)

from flask.ext.celery import Celery
celery = Celery()

from dogpile.cache import make_region
region_taxi = make_region('taxis')
region_hails = make_region('hails')
region_zupc = make_region('zupc')

def user_key_generator(namespace, fn, **kw):
    def generate_key(*args, **kwargs):
        return fn.__name__ +\
               "_".join(str(s) for s in args) +\
               "_".join(k+"_"+str(v) for k,v in kwargs.iteritems())
    return generate_key

region_users = make_region('users', function_key_generator=user_key_generator)

from flask.ext.uploads import (UploadSet, configure_uploads, DOCUMENTS, DATA,
                               ARCHIVES, IMAGES)
documents = UploadSet('documents', DOCUMENTS + DATA + ARCHIVES)
images = UploadSet('images', IMAGES)

from .index_zupc import IndexZUPC
index_zupc = IndexZUPC()

from .utils.cache_user_datastore import CacheUserDatastore
from .models import security
user_datastore = CacheUserDatastore(db, security.User, security.Role)

import shortuuid
suid = shortuuid.ShortUUID(alphabet=
      '0123456789abcdefghijklmnopqrstuvwxyzABDEFGHIJKLOMNOPQRSTUVWXYZ')

def get_short_uuid():
    return suid.uuid()[:7]
Python
0.000652
@@ -1820,86 +1820,8 @@ UID( -alphabet=%0A '0123456789abcdefghijklmnopqrstuvwxyzABDEFGHIJKLOMNOPQRSTUVWXYZ' )%0A%0Ad
9f500668555292add5d87c942e0cd804aefa6df2
Replace cat usage with fgrep
fuel_health/tests/cloudvalidation/test_disk_space_db.py
fuel_health/tests/cloudvalidation/test_disk_space_db.py
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from fuel_health import cloudvalidation


class DBSpaceTest(cloudvalidation.CloudValidationTest):
    """Cloud Validation Test class for free space for DB."""

    def _check_db_disk_expectation_warning(self, host):
        """Checks whether DB expects less free space than actually
           is presented on the controller node
        """
        scheduler_log = 'nova-scheduler.log'

        if self.config.compute.deployment_os.lower() == 'centos':
            scheduler_log = 'scheduler.log'

        err_msg = "Cannot check {scheduler_log} at {host}".format(
            host=host, scheduler_log=scheduler_log)

        warning_msg = "Host has more disk space than database expected"
        cmd = ("cat /var/log/nova/{scheduler_log} "
               "| grep '{msg}' | tail -1").format(
                msg=warning_msg, scheduler_log=scheduler_log)

        out, err = self.verify(5, self._run_ssh_cmd, 1, err_msg,
                               'check nova-scheduler.log', host, cmd)
        self.verify_response_true(not err, err_msg, 1)

        return out

    def test_db_expectation_free_space(self):
        """Check disk space allocation for databases on controller nodes

        Target component: Nova

        Scenario:
            1. Check disk space allocation for databases on controller nodes

        Duration: 20 s.

        Deployment tags: disabled

        Available since release: 2014.2-6.1
        """
        hosts = filter(self._check_db_disk_expectation_warning,
                       self.controllers)

        self.verify_response_true(not hosts,
                                  ("Free disk space cannot be used "
                                   "by database on node(s): {hosts}"
                                   ).format(hosts=hosts), 1)
Python
0.000003
@@ -1274,13 +1274,25 @@ d = -(%22cat +%22fgrep '%7Bmsg%7D' -q /va @@ -1321,53 +1321,9 @@ log%7D - %22%0A %22%7C grep '%7Bmsg%7D' %7C tail -1%22) +%22 .for @@ -1323,31 +1323,24 @@ g%7D%22.format(%0A -
2284f9f944ef72c7e2f6c9a4e93e395b09196719
modify initial config
golive/management/commands/create_config.py
golive/management/commands/create_config.py
from django.core.management import BaseCommand
from fabric.state import output
import sys
from golive.stacks.stack import StackFactory, Stack
import yaml


class Command(BaseCommand):
    help = 'Creates a basic exampe configuration file'
    output['stdout'] = False

    example = """CONFIG:
  PLATFORM: DEDICATED
  STACK: CLASSIC
ENVIRONMENTS:
  DEFAULTS:
    INIT_USER: fatrix
    PROJECT_NAME: django_example
    PUBKEY: $HOME/user.pub
  TESTING:
    SERVERNAME: golive-sandbox1
    ROLES:
      APP_HOST:
        - testbox1
      DB_HOST:
        - testbox1
      WEB_HOST:
        - testbox1"""

    def handle(self, *args, **options):
        example_file = open(Stack.CONFIG, 'w')
        example_file.write(Command.example)
        example_file.close()

    def end(self):
        self.stdout.write('Done\n')
Python
0.000002
@@ -382,14 +382,12 @@ ER: -fatrix +root %0A @@ -415,16 +415,15 @@ ango -_example +project %0A @@ -445,12 +445,19 @@ OME/ -user +.ssh/id_dsa .pub @@ -494,23 +494,18 @@ ME: -golive-sandbox1 +testserver %0A @@ -552,36 +552,38 @@ - test -box1 +server %0A DB_ @@ -610,20 +610,22 @@ - test -box1 +server %0A @@ -665,12 +665,14 @@ test -box1 +server %22%22%22%0A
bc85dffa594c292094d2aa1f5a456e0a0690ea79
Remove debug code
grumpy-tools-src/tests/test_grumpy_tools.py
grumpy-tools-src/tests/test_grumpy_tools.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Tests for `grumpy_tools` package."""
import tempfile
import unittest

import pytest
from click.testing import CliRunner

from grumpy_tools import cli


@pytest.fixture
def response():
    """Sample pytest fixture.

    See more at: http://doc.pytest.org/en/latest/fixture.html
    """
    # import requests
    # return requests.get('https://github.com/audreyr/cookiecutter-pypackage')


def test_content(response):
    """Sample pytest test function with the pytest fixture as an argument."""
    # from bs4 import BeautifulSoup
    # assert 'GitHub' in BeautifulSoup(response.content).title.string


@pytest.mark.xfail
def test_command_line_interface(capfd):
    """Test the CLI."""
    runner = CliRunner()
    out, err = capfd.readouterr()

    help_result = runner.invoke(cli.main, ['--help'])
    assert help_result.exit_code == 0

    result = runner.invoke(cli.main)
    assert result.exit_code == 0
    assert '>>> ' in out, (result.output, out, err)


def test_run_input_inline(capfd):
    runner = CliRunner()
    result = runner.invoke(cli.main, ['run', '-c', "print('Hello World')",])
    # import wdb; wdb.set_trace()
    out, err = capfd.readouterr()

    assert out == 'Hello World\n', (err, result.output)
    assert result.exit_code == 0


def test_run_input_stdin(capfd):
    runner = CliRunner()
    result = runner.invoke(cli.main, ['run'], input="print('Hello World')")
    out, err = capfd.readouterr()

    assert out == 'Hello World\n', (err, result.output)
    assert result.exit_code == 0


def test_run_input_file(capfd):
    runner = CliRunner()
    with tempfile.NamedTemporaryFile() as script_file:
        script_file.write("print('Hello World')")
        script_file.flush()
        result = runner.invoke(cli.main, ['run', script_file.name])
    out, err = capfd.readouterr()

    assert out == 'Hello World\n', (err, result.output)
    assert result.exit_code == 0
Python
0.000299
@@ -1147,42 +1147,8 @@ ,%5D)%0A - # import wdb; wdb.set_trace()%0A
6c0047b49c65f3b3c06582ad83db6819ad60aa34
load corpus reference, too
dictionaria/lib/submission.py
dictionaria/lib/submission.py
# coding: utf8
from __future__ import unicode_literals
import re

from clldutils.path import Path, md5
from clldutils.jsonlib import load
from clldutils.misc import nfilter
from clld.db.meta import DBSession
from clld.db.models import common
from clld.lib import bibtex
from clld.scripts.util import bibtex2source

from dictionaria.lib import sfm
from dictionaria.lib import cldf
from dictionaria.lib.ingest import Examples
from dictionaria import models
import dictionaria

REPOS = Path(dictionaria.__file__).parent.joinpath('..', '..', 'dictionaria-intern')


class Submission(object):
    def __init__(self, path):
        self.dir = path
        self.id = path.name
        self.cdstar = load(REPOS.joinpath('cdstar.json'))
        print(self.dir)
        assert self.dir.exists()
        desc = self.dir.joinpath('md.html')
        if desc.exists():
            with desc.open(encoding='utf8') as fp:
                self.description = fp.read()
        else:
            self.description = None
        md = self.dir.joinpath('md.json')
        self.md = load(md) if md.exists() else None
        self.props = self.md.get('properties', {}) if self.md else {}
        bib = self.dir.joinpath('sources.bib')
        self.bib = bibtex.Database.from_file(bib) if bib.exists() else None

    @property
    def dictionary(self):
        d = self.dir.joinpath('processed')
        if d.joinpath('cldf-md.json').exists():
            impl = cldf.Dictionary
        elif d.joinpath('db.sfm').exists():
            impl = sfm.Dictionary
        else:
            raise ValueError('unknown dictionary format')
        return impl(d)

    def add_file(self, type_, checksum, file_cls, obj, attrs=None):
        if checksum in self.cdstar:
            jsondata = {k: v for k, v in self.props.get(type_, {}).items()}
            jsondata.update(self.cdstar[checksum])
            if attrs:
                jsondata.update(attrs)
            f = file_cls(
                id='%s-%s' % (obj.id, checksum),
                name=self.cdstar[checksum]['original'],
                object_pk=obj.pk,
                mime_type=self.cdstar[checksum]['mimetype'],
                jsondata=jsondata)
            DBSession.add(f)
            DBSession.flush()
            DBSession.refresh(f)
            return
        print('{0} file missing: {1}'.format(type_, checksum))
        return

    def load_sources(self, dictionary, data):
        if self.bib:
            for rec in self.bib.records:
                src = bibtex2source(rec, models.DictionarySource)
                src.dictionary = dictionary
                src.id = '%s-%s' % (self.id, src.id)
                data.add(models.DictionarySource, rec.id, _obj=bibtex2source(rec))

    def load_examples(self, dictionary, data, lang):
        abbr_p = re.compile('\$(?P<abbr>[a-z1-3][a-z]*(\.[a-z]+)?)')
        if hasattr(self.dictionary, 'cldf'):
            #ID,Language_ID,Primary_Text,Analyzed_Word,Gloss,Translated_Text,Meta_Language_ID,Comment,Sense_IDs,Analyzed,Media_IDs
            #XV000001,tzh,lek a lok',,,salió bien,,,SN000001,,
            colmap = {}
            for k in ['id', 'primaryText', 'analyzedWord', 'gloss', 'translatedText']:
                try:
                    colmap[k] = self.dictionary.cldf['ExampleTable', k].name
                except KeyError:
                    pass
            for i, ex in enumerate(self.dictionary.cldf['ExampleTable']):
                obj = data.add(
                    models.Example,
                    ex[colmap['id']],
                    id='%s-%s' % (self.id, ex[colmap['id']].replace('.', '_')),
                    name=ex[colmap['primaryText']],
                    number='{0}'.format(i + 1),
                    source=None,
                    language=lang,
                    serialized='{0}'.format(ex),
                    dictionary=dictionary,
                    analyzed='\t'.join(nfilter(ex[colmap['analyzedWord']] or []))
                        if 'analyzedWord' in colmap else None,
                    gloss='\t'.join([abbr_p.sub(lambda m: m.group('abbr').upper(), g) for g in ex[colmap['gloss']]]) \
                        if 'gloss' in colmap and ex[colmap['gloss']] \
                        else ((ex[colmap['gloss']] or None) if 'gloss' in colmap else None),
                    description=ex[colmap['translatedText']],
                    alt_translation1=ex.get('alt_translation1'),
                    alt_translation_language1=self.props.get('metalanguages', {}).get('gxx'),
                    alt_translation2=ex.get('alt_translation2'),
                    alt_translation_language2=self.props.get('metalanguages', {}).get('gxy'),
                )
                DBSession.flush()
                for md5 in sorted(set(ex.get('Media_IDs', []))):
                    self.add_file(None, md5, common.Sentence_files, obj)
        elif self.dir.joinpath('processed', 'examples.sfm').exists():
            for i, ex in enumerate(
                    Examples.from_file(self.dir.joinpath('processed', 'examples.sfm'))):
                obj = data.add(
                    models.Example,
                    ex.id,
                    id='%s-%s' % (self.id, ex.id.replace('.', '_')),
                    name=ex.text,
                    number='{0}'.format(i + 1),
                    source=ex.corpus_ref,
                    language=lang,
                    serialized='{0}'.format(ex),
                    dictionary=dictionary,
                    analyzed=ex.morphemes,
                    gloss=abbr_p.sub(lambda m: m.group('abbr').upper(), ex.gloss)
                        if ex.gloss else ex.gloss,
                    description=ex.translation,
                    alt_translation1=ex.alt_translation,
                    alt_translation_language1=self.props.get('metalanguages', {}).get('gxx'),
                    alt_translation2=ex.alt_translation2,
                    alt_translation_language2=self.props.get('metalanguages', {}).get('gxy'))
                DBSession.flush()
                if ex.soundfile:
                    self.add_file('audio', ex.soundfile, common.Sentence_files, obj)
Python
0
@@ -3738,20 +3738,46 @@ source= -None +ex.get('Corpus_Reference', %5B%5D) ,%0A
dc9c5021c022108fd9ca2c87e9064b385abd26cf
Fix style
didyoumean/readme_examples.py
didyoumean/readme_examples.py
# -*- coding: utf-8
"""Code to generate examples in README.md."""
from didyoumean import add_suggestions_to_exception
import sys


def get_exception(code):
    """Helper function to run code and get what it throws."""
    try:
        exec(code)
    except:
        return sys.exc_info()
    assert False


def main():
    """Main."""
    # Different examples :
    # Code examples are groupes by error type then by suggestion type
    # Numbers have been added in dict keys just to be able to iterate
    # over them and have the result in the wanted order.
    examples = {
        (1, NameError): {
            (1, "Fuzzy matches on existing names "
            "(local, builtin, keywords, modules, etc)"): [
                "def my_func(foo, bar):\n\treturn foob\nmy_func(1, 2)",
                "def my_func(lst):\n\treturn leng(foo)\nmy_func([0])",
                "import math\nmaths.pi",
                "def my_func():\n\tpasss\nmy_func()",
                "def my_func():\n\tfoo = 1\n\tfoob +=1\nmy_func()"
            ],
            (2, "Checking if name is the attribute of a defined object"): [
                "class Duck():\n\tdef __init__(self):\n\t\tquack()"
                "\n\tdef quack(self):\n\t\tpass\nd = Duck()",
                "import math\npi",
            ],
            (3, "Looking for missing imports"): [
                "functools.wraps()",
            ],
        },
        (2, AttributeError): {
            (1, "Fuzzy matches on existing attributes"): [
                "lst = [1, 2, 3]\nlst.appendh(4)",
                "import math\nmath.pie",
            ],
            (2, "Detection of mis-used builtins"): [
                "lst = [1, 2, 3]\nlst.max()",
            ],
            (3, "Trying to find method with similar meaning (hardcoded)"): [
                "lst = [1, 2, 3]\nlst.add(4)",
            ],
        },
        (3, ImportError): {
            (1, "Fuzzy matches on existing modules"): [
                "from maths import pi",
            ],
            (2, "Fuzzy matches on elements of the module"): [
                "from math import pie",
            ],
            (3, "Looking for import from wrong module"): [
                "from itertools import pi",
            ],
        },
        (4, TypeError): {
            (1, "Fuzzy matches on keyword arguments"): [
                "def my_func(abcde):\n\tpass\nmy_func(abcdf=1)",
            ],
        },
        (5, SyntaxError): {
            (1, "Fuzzy matches when importing from __future__"): [
                "from __future__ import divisio",
            ],
            (2, "Various"): [
                "return",
            ],
        },
    }
    str_func = repr  # could be str or repr
    for (_, exc_type), exc_examples in sorted(examples.items()):
        print("### %s\n" % exc_type.__name__)
        for (_, desc), codes in sorted(exc_examples.items()):
            print("##### %s\n" % desc)
            for code in codes:
                type_, value, traceback = get_exception(code)
                assert issubclass(type_, exc_type)
                before = str_func(value)
                add_suggestions_to_exception(type_, value, traceback)
                after = str_func(value)
                assert before != after
                print("""```python
%s
#>>> Before: %s
#>>> After: %s
```""" % (code, before, after))


if __name__ == '__main__':
    main()
Python
0.000001
@@ -658,16 +658,20 @@ + %22(local,
f42fdde5404c3025236ad7dcade4b08529e7ce36
repair Noneuser_bug
app/delete.py
app/delete.py
from .models import User
from . import db



def deletenone():
    noneuser=User.query.filter_by(username=None).all()
    for user in noneuser:
        db.session.delete(user)
    db.session.commit()
Python
0.000003
@@ -37,18 +37,16 @@ ort db%0A%0A -%0A%0A def dele
7012d3f63044ee0b61ea012809293c56830c10db
Modify model definitions
app/models.py
app/models.py
#!/usr/bin/env python3.5
from tools.column import Column
from tools.field import String,Int,Float,Text,Boolean
from tools.model import Model
from tools.database import *
from tools.log import *
import time

class User(Model):
	__table__='users'
	id=Column(Int(4,unsigned=True),primary_key=True,null=False,auto_increment=True)
	user_name=Column(String(50),unique_key=True,null=False)
	password=Column(String(100),null=False)
	email=Column(String(50),unique_key=True,null=False)
	user_image=Column(String(300))
	last_login=Column(String(20))
	create_at=Column(Float(),default=time.time())
	gender=Column(Int(1,unsigned=True))
	location=Column(String(50))
	desc=Column(String(600))

class Article(Model):
	__table__='articles'
	id=Column(Int(4,unsigned=True),primary_key=True,null=False,auto_increment=True)
	uid=Column(Int(4,unsigned=True),null=False)
	cate_id=Column(Int(4,unsigned=True),null=False)
	content=Column(Text())
	post_at=Column(Float(),default=time.time())
	modify_at=Column(String(20))
	auth_password=Column(String(100),default="")
	abstract=Column(String(400))
	view_num=Column(Int(4,unsigned=True),default=0)

class Category(Model):
	__table__='categorys'
	id=Column(Int(4,unsigned=True),primary_key=True,auto_increment=True)
	uid=Column(Int(4,unsigned=True))
	cate_text=Column(String(100))
	cate_image=Column(String(200))
	creat_at=Column(Flot(),default=time.time())
	article_num=Column(Int(4,unsigned=True),default=0)
	cate_desc=Column(String(400),default='')

class Comment(Model):
	__table__='comments'
	id=Column(Int(4,unsigned=True),primary_key=True,auto_increment=True)
	uid=Column(Int(4,unsigned=True),null=False)
	article_id=Column(Int(4,unsigned=True))
	comment_text=Column(String(1000))

class Image(Model):
	__table__='images'
	id=Column(Int(4,unsigned=True),primary_key=True,auto_increment=True)
	uid=Column(Int(4,unsigned=True))
	type_id=Column(Int(2,unsigned=True))
	belong_id=Column(Int(4,unsigned=True))
	url=Column(String(300))

class Music(Model):
	__table__='musics'
	id=Column(Int(4,unsigned=True),primary_key=True,auto_increment=True)
	uid=Column(Int(4,unsigned=True))
	type_id=Column(Int(4,unsigned=True))
	belong_id=Column(Int(4,unsigned=True))
	url=Column(String(300))

#class Need(Model):
#	__table__='needs'
#	id=Column(Int(4,unsigned=True),primary_key=True,null=False,auto_increment=True)
#	user_id=Column(Int(4,unsigned=True),null=False)
#	content=Column(Text(),null=False)
#	create_at=Column(Float(),default=time.time())
#	is_solved=Column(Boolean(),default=False)
#	solved_user_id=Column(Int(4,unsigned=True),default=0)

if __name__=='__main__':
	print(Need().__table__)
	print(Need().__columns__)
	print(User().__columns__)
	print(User().__table__)
Python
0
@@ -551,25 +551,25 @@ lumn(String( -2 +3 0))%0A%09create_ @@ -570,38 +570,41 @@ reate_at=Column( -Float( +String(30 ),default=time.t @@ -953,38 +953,41 @@ %09post_at=Column( -Float( +String(30 ),default=time.t @@ -1018,17 +1018,17 @@ (String( -2 +3 0))%0A%09aut @@ -1074,16 +1074,12 @@ %22)%0A%09 -abstract +desc =Col @@ -1363,16 +1363,17 @@ )%0A%09creat +e _at=Colu @@ -1379,13 +1379,17 @@ umn( -Flot( +String(30 ),de @@ -1660,16 +1660,67 @@ =False)%0A +%09comment_at=Column(String(30),default=time.time())%0A %09article @@ -2002,32 +2002,82 @@ unsigned=True))%0A +%09upload_at=Column(String(30),default=time.time())%0A %09url=Column(Stri @@ -2221,32 +2221,82 @@ unsigned=True))%0A +%09upload_at=Column(String(30),default=time.time())%0A %09type_id=Column( @@ -2765,16 +2765,17 @@ in__':%0A%09 +# print(Ne @@ -2792,16 +2792,17 @@ le__)%09%0A%09 +# print(Ne @@ -2812,32 +2812,33 @@ ).__columns__)%0A%09 +# print(User().__c @@ -2847,16 +2847,42 @@ umns__)%0A +%09#print(User().__table__)%0A %09print(U @@ -2899,9 +2899,37 @@ ble__)%0A%09 +print(User().__columns__)%0A%09%0A %0A
738210dfd6cb74b04b731e85f2e235024d64ff93
format improvement
digitalocean_plugin/security.py
digitalocean_plugin/security.py
# #######
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import random

import requests

from cloudify.exceptions import NonRecoverableError


class DigitalOceanSecurity(object):

    pubkey_stub = "Cloudify Key {0}"
    api_endpoint = 'https://api.digitalocean.com/v2/'
    token_file_name = 'token.txt'
    key_count_limit = 1024

    def __init__(self):
        self.digitalocean_security_token \
            = self._load_digitalocean_account_token()
        self.rand = random

    def _build_url(self, end_of_url):
        if end_of_url.startswith("/"):
            end_of_url = end_of_url[1:]
        return "%s%s" % (self.api_endpoint, end_of_url.replace("//", "/"))

    def _common_headers(self):
        return {
            'Content-Type': 'application/json',
            'Authorization': "Bearer %s" % self.digitalocean_security_token
        }

    def delete_pubkey_from_account_by_fingerprint(self, fingerprint, **_):
        """
        :param fingerprint: key fingerprint
        :param _: varargs
        :return: True upon success
        """
        self._destroy_key(fingerprint)
        return True

    def _destroy_key(self, identifier):
        url = self._build_url("account/keys/%s" % identifier)
        h = self._common_headers()
        response = requests.delete(url, headers=h)
        code = response.status_code
        if code != 204:
            raise NonRecoverableError(
                "Error on server. Expected status code = '204'"
                ", but received '%s' instead." % str(code)
            )
        return

    def delete_pubkey_from_account_by_keyid(self, keyid, **_):
        """
        :param keyid: id assigned to key (when imported) by digital ocean
        :param _: varargs
        :return: True upon success
        """
        self._destroy_key(keyid)
        return True

    def add_pubkey_to_digitalocean_account(self, pubkey_file, key_name, **_):
        """
        Uploads a public key file to the DigitalOcean account.

        :param pubkey_file: full path to a public key file
        :param name: an optional name for your key -
                     will be assigned randomly if not provided
        :param _:
        :return: The id and fingerprint of the uploaded key,
                 returned as a tuple (id, fingerprint)
        """
        if not os.path.isfile(pubkey_file):
            raise NonRecoverableError("Unknown public key file: '{0}'."
                                      .format(pubkey_file))

        with open(pubkey_file, 'r') as f:
            pubkey = f.read()

        key_name = self._make_key_name(key_name)

        assert pubkey, "A non-empty public key file must be provided."

        payload = {
            "name": key_name,
            "public_key": pubkey
        }

        url = self._build_url('account/keys')
        h = self._common_headers()

        # try:
        # because it's a POST, it should be only 1 response
        # if we were to call GET, it would return all associated keys
        response = requests.post(url, headers=h, data=payload)
        code = response.status_code
        if code < 200 or code > 299:
            raise NonRecoverableError(
                "Error on server for %(URL)s. Status code = '%(CODE)d'."
                % {'URL': url, 'CODE': code}
            )

        r = response.json()['ssh_key']
        return r['id'], r['fingerprint']

    def _make_key_name(self, proposed_name):
        """
        :param proposed_name: a name to test for emptiness
        :return: proposed_name, stripped (if non-empty),
                 else a randomly-generated name
        """
        if proposed_name:
            return proposed_name.strip()
        r = self.rand.randint(0, self.key_count_limit + 1)
        return self.pubkey_stub.format(r)

    def cache_pubkey_copy_from_digitalocean_account(self,
                                                    fingerprint, **_):
        raise NonRecoverableError("Not implemented yet.")

    def _load_digitalocean_account_token(self, **_):
        """
        This will load a security token from a local file called
        token.txt.

        A token can be obtained from DigitalOcean by Registering a
        New Developer or Authorized Application.

        :return: the security token, as a string
        :raises: NonRecoverableError if token.txt file is not present
        """
        def cwd():
            return os.path.dirname(__file__)

        token_path = os.path.join(cwd(), self.token_file_name)
        if not os.path.isfile(token_path):
            raise NonRecoverableError('Missing security token file "%s".'
                                      % token_path)
        with open(token_path, 'r') as f:
            return f.read()
Python
0.000001
@@ -4332,24 +4332,26 @@ rmat(r)%0A%0A + # def cache_p @@ -4392,24 +4392,26 @@ nt(self,%0A + # @@ -4466,20 +4466,27 @@ , **_):%0A + # %0A + # rai @@ -4532,16 +4532,21 @@ yet.%22)%0A + # %0A def @@ -5135,16 +5135,33 @@ leError( +%0A 'Missing @@ -5191,46 +5191,8 @@ s%22.' -%0A %25 t @@ -5200,16 +5200,29 @@ ken_path +%0A )%0A
c12df9f8f0c73577c122fc65bd11314b7231179c
Add type
test_runner/environments.py
test_runner/environments.py
import logging
import re
import sys

from glanceclient.v1.client import Client as glance_client
from keystoneclient.v2_0.client import Client as keystone_client
from neutronclient.v2_0.client import Client as neutron_client
from novaclient.v1_1 import client as nova_client

from .utils import rand_name

LOG = logging.getLogger(__name__)

CIRROS_URL='http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img'


class Environment(object):
    def __init__(self, username, password, auth_url):
        self.keystone = keystone_client(
            username=username,
            password=password,
            tenant_name=username,
            auth_url=auth_url)
        self.endpoints = self.keystone.service_catalog.get_endpoints()
        self.token = self.keystone.auth_ref['token']['id']

        self.glance = glance_client(
            endpoint=self.endpoints['image'][0]['internalURL'],
            token=self.token)

        self.neutron = neutron_client(
            username=username,
            password=password,
            tenant_name=username,
            auth_url=auth_url,
            endpoint_type='internalURL')

        self.nova = nova_client.Client(
            username=username,
            api_key=password,
            project_id=username,
            auth_url=auth_url,
            endpoint_type='internalURL')

        self.images = []
        self.network = {}
        self.router = {}
        self.users = {}
        self.admin = {'username': username, 'password': password}

    def build(self):
        self.create_guests()
        self.get_images()
        self.get_network()
        self.get_router()

    def destroy(self):
        LOG.info('Destroying environment')
        if self.guests:
            map(self.keystone.users.delete, self.guests)
        if self.tenant:
            self.keystone.tenants.delete(self.tenant)
        if self.role:
            self.keystone.roles.delete(self.role)
        if self.images:
            self.glance.images.delete(self.images[0])

    def create_guests(self, password='secrete'):
        LOG.info('Creating guest users')
        self.tenant = self.keystone.tenants.create(rand_name('guest'))
        try:
            roles = self.keystone.roles.list()
            self.role = self._find_resource(roles, 'Member')
        except:
            self.role = self.keystone.roles.create('Member')
        self.guests = []
        for _ in range(2):
            user = self.keystone.users.create(name=rand_name('guest'),
                                              password=password,
                                              tenant_id=self.tenant.id)
            user.password = password
            user.tenant_name = self.tenant.name
            self.guests.append(user)

    def get_images(self):
        LOG.info('Fetching image metadata')
        try:
            filters = {'name': 'cirros'}
            image = next(self.glance.images.list(filters=filters))
            self.images = [image, image]
        except StopIteration:
            image = self.glance.images.create(
                name='cirros',
                disk_format='qcow2',
                container_format='bare',
                location=CIRROS_URL,
                is_public='True')
            self.images = [image, image]

    @staticmethod
    def _find_resource(resources, name):
        if type(resources) is dict:
            return next(resource for resource in resources
                        if name in resource['name'])
        else:
            return next(resource for resource in resources
                        if name in resource.name)

    def get_network(self):
        LOG.info('Fetching networks')
        networks = self.neutron.list_networks()['networks']
        self.network = self._find_resource(networks, 'private')

    def get_router(self):
        LOG.info('Fetching routers')
        routers = self.neutron.list_routers()['routers']
        self.router = self._find_resource(routers, 'public-private')
Python
0.000003
@@ -3338,16 +3338,24 @@ is dict + or list :%0A
7404233b4e286335c9959539ef9ea15ae20c8f27
Correct table name
assessment.py
assessment.py
# -*- coding: utf-8 -*-
"""
Article assessment-related worklists for WikiProjects.
Copyright (C) 2015 James Hare
Licensed under MIT License: http://mitlicense.org
"""

import requests
import pywikibot
import json
from project_index import WikiProjectTools


class WikiProjectAssess:
    def __init__(self):
        self.bot = pywikibot.Site('en', 'wikipedia')
        self.wptools = WikiProjectTools()
        self.projects = []
        self.predictorseed = {}
        self.unknownquality = {}
        self.unknownpriority = {}

        self.config = self.wptools.query('index', 'select json from config;', None)
        self.config = json.loads(self.config[0][0])

        for project in self.config['projects']:
            if 'assessment_tools' in project \
            and 'at_category' in project \
            and 'at_unknown_quality' in project \
            and 'at_unknown_priority' in project:
                projectname = project['name'][10:]  # Normalizing title
                self.projects.append(projectname)
                self.predictorseed[projectname] = project['at_category'].replace(' ', '_')
                self.unknownquality[projectname] = project['at_unknown_quality'].replace(' ', '_')
                self.unknownpriority[projectname] = project['at_unknown_priority'].replace(' ', '_')

    def qualitypredictor(self, pagetitles):
        '''
        Makes a query to ORES that predicts the quality of an article.
        Takes list *pagetitles* as input, returns a list of tuples
        (title, prediction)
        Input MUST be a list. If only one title, enter it as [title]
        '''

        output = []

        # Split into packages
        packages = [pagetitles[i:i+50] for i in range(0, len(pagetitles), 50)]
        for package in packages:
            if len(package) > 1:
                q = 'select page_title, page_latest from page where page_namespace = 0 and page_title in {0} order by page_title limit 100;'.format(tuple(package))
            else:
                q = 'select page_title, page_latest from page where page_namespace = 0 and page_title = "{0}";'.format(package[0])
            revision_ids = {str(row[1]):row[0].decode('utf-8') for row in self.wptools.query('wiki', q, None)}
            api_input = [rev_id for rev_id in revision_ids.keys()]

            api_url = "http://ores.wmflabs.org/scores/enwiki/wp10/?revids="
            for rev_id in api_input:
                api_url += rev_id + "|"
            api_url = api_url[:-1]  # Truncating extra vertical pipe

            query = requests.get(api_url)
            query = query.json()

            for rev_id, result in query.items():
                pair = (revision_ids[rev_id], result['prediction'])
                output.append(pair)

        return output

    def qualitylist(self):
        for wikiproject, category in self.unknownquality.items():
            save_to = "User:Reports bot/" + wikiproject + "/Assessment/Assess for quality"
            q = 'select page_title from categorylinks join page on cl_from = page_id where cl_to = "{0}";'.format(category.replace(' ', '_'))
            to_process = [row[0].decode('utf-8') for row in self.wptools.query('wiki', q, None)]
            to_process = self.qualitypredictor(to_process)

            contents = ("====Assess for quality====\n"
                        "Determine the quality of each article, then go to the "
                        "article's talk page and update the quality assessment "
                        "in the WikiProject's banner. Automated predictions are"
                        " provided to help you.\n\n"
                        "{{#invoke:<includeonly>random|bulleted_list|limit=5"
                        "</includeonly><noinclude>list|bulleted</noinclude>|")

            for pair in to_process:
                article = pair[0].replace("_", " ")
                prediction = pair[1]
                contents += "<b>[[" + article + "]]</b> ([[Talk:" + article + \
                            "|talk]])<br />Predicted class: " + \
                            prediction + "|"

            contents = contents[:-1] + "}}<includeonly>\n\n[[" + save_to + "|View more]]</includeonly>"
            page = pywikibot.Page(self.bot, save_to)
            page.text = contents
            page.save("Updating listing", minor=False, async=True)

    def scopepredictor(self):
        for wikiproject, category in self.predictorseed.items():
            category_recs = []
            article_recs = []

            # This query produces a list of pages that belong to categories that
            # have been tagged by the WikiProject
            q = ('select page_namespace, page_title from page '
                 'join categorylinks on categorylinks.cl_from = page.page_id '
                 'where page_namespace in (0, 14) '
                 'and cl_to in ( '
                 'select page.page_title from page '
                 'join categorylinks on categorylinks.cl_from = page.page_id '
                 'where page_namespace = 15 '
                 'and cl_to = "{0}");').format(category)

            for row in self.wptools.query('wiki', q, None):
                ns = row[0]
                page = row[1].decode('utf-8')
                if ns == 0:
                    article_recs.append(page)
                elif ns == 14:
                    category_recs.append(page)

            # Filter against these lists:
            q = 'select pi_page from project_index where pi_project = "{0}";'
            q = q.format(wikiproject.replace(' ', '_'))
            article_filter = [row[0].replace('Talk:', '') for row in self.wptools.query('index', q, None) if row[0].startswith('Talk')]

            q = ('select page_title from page '
                 'join categorylinks on cl_from = page_id '
                 'where page_namespace = 15 '
                 'and cl_to = "{0}";').format(category)
            category_filter = [row[0].decode('utf-8') for row in self.wptools.query('wiki', q, None)]

            # Now do the filtering...
            category_recs = list(set(category_recs) - set(category_filter))
            article_recs = list(set(article_recs) - set(article_filter))

            # Unite them together...
            recommendations = [':Category:' + name for name in category_recs] \
                              + [name for name in article_recs]

            # And lop it off at 100!
            recommendations = recommendations[:100]

            # Class prediction
            predicted_class = self.qualitypredictor([page for page in recommendations if page.startswith(':Category:') == False]) + \
                              [(page, 'Category') for page in recommendations if page.startswith(':Category:') == True]
            predicted_class = {pair[0]:pair[1] for pair in predicted_class}

            save_to = "User:Reports bot/" + wikiproject + "/Assessment/Not tagged"
            contents = ("====Not tagged by the WikiProject====\n"
                        "The WikiProject has not tagged these pages. If you "
                        "believe they should be tagged, add the WikiProject "
                        "banner to the talk pages of these articles. Automated"
                        "class predictions are provided to help you.\n\n"
                        "{{#invoke:<includeonly>random|bulleted_list|limit=5"
                        "</includeonly><noinclude>list|bulleted</noinclude>|")

            for recommendation in recommendations:
                contents += "<b>[[" + recommendation.replace('_', ' ') \
                            + "]]</b> ([[Talk:" + recommendation \
                            + "|talk]])<br />Predicted class:" \
                            + predicted_class[recommendation] + "|"

            contents = contents.replace("Talk::Category:", "Category talk:")
            contents = contents[:-1] + "}}<includeonly>\n\n[[" + save_to + "|View more]]</includeonly>"
            page = pywikibot.Page(self.bot, save_to)
            page.text = contents
            page.save("Updating listing", minor=False, async=True)


if __name__ == "__main__":
    run = WikiProjectAssess()
    run.qualitylist()
    run.scopepredictor()
Python
0.009686
@@ -5464,25 +5464,24 @@ from project -_ index where
c68287e17619ab93d14f8289773ec15e8ff92634
Enable volume backup tests
tempest/api/volume/test_volumes_backup.py
tempest/api/volume/test_volumes_backup.py
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import testtools
from testtools import matchers

from tempest.api.volume import base
from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators

CONF = config.CONF


class VolumesBackupsTest(base.BaseVolumeTest):

    @classmethod
    def skip_checks(cls):
        super(VolumesBackupsTest, cls).skip_checks()
        if not CONF.volume_feature_enabled.backup:
            raise cls.skipException("Cinder backup feature disabled")

    def restore_backup(self, backup_id):
        # Restore a backup
        restored_volume = self.backups_client.restore_backup(
            backup_id)['restore']

        # Delete backup
        self.addCleanup(self.delete_volume, self.volumes_client,
                        restored_volume['volume_id'])
        self.assertEqual(backup_id, restored_volume['backup_id'])
        waiters.wait_for_volume_resource_status(self.backups_client,
                                                backup_id, 'available')
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                restored_volume['volume_id'],
                                                'available')
        return restored_volume

    @decorators.skip_because(bug="1483434")
    @testtools.skipIf(CONF.volume.storage_protocol == 'ceph',
                      'ceph does not support arbitrary container names')
    @decorators.idempotent_id('a66eb488-8ee1-47d4-8e9f-575a095728c6')
    def test_volume_backup_create_get_detailed_list_restore_delete(self):
        # Create a volume with metadata
        metadata = {"vol-meta1": "value1",
                    "vol-meta2": "value2",
                    "vol-meta3": "value3"}
        volume = self.create_volume(metadata=metadata)
        self.addCleanup(self.delete_volume, self.volumes_client,
                        volume['id'])

        # Create a backup
        backup_name = data_utils.rand_name(
            self.__class__.__name__ + '-Backup')
        description = data_utils.rand_name("volume-backup-description")
        backup = self.create_backup(volume_id=volume['id'],
                                    name=backup_name,
                                    description=description,
                                    container='container')
        self.assertEqual(backup_name, backup['name'])
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'available')

        # Get a given backup
        backup = self.backups_client.show_backup(backup['id'])['backup']
        self.assertEqual(backup_name, backup['name'])
        self.assertEqual(description, backup['description'])
        self.assertEqual('container', backup['container'])

        # Get all backups with detail
        backups = self.backups_client.list_backups(
            detail=True)['backups']
        for backup_info in backups:
            self.assertIn('created_at', backup_info)
            self.assertIn('links', backup_info)
        self.assertIn((backup['name'], backup['id']),
                      [(m['name'], m['id']) for m in backups])

        restored_volume = self.restore_backup(backup['id'])

        restored_volume_metadata = self.volumes_client.show_volume(
            restored_volume['volume_id'])['volume']['metadata']

        # Verify the backups has been restored successfully
        # with the metadata of the source volume.
        self.assertThat(restored_volume_metadata.items(),
                        matchers.ContainsAll(metadata.items()))

    @decorators.idempotent_id('07af8f6d-80af-44c9-a5dc-c8427b1b62e6')
    @utils.services('compute')
    def test_backup_create_attached_volume(self):
        """Test backup create using force flag.

        Cinder allows to create a volume backup, whether the volume status
        is "available" or "in-use".
        """
        # Create a server
        volume = self.create_volume()
        self.addCleanup(self.delete_volume, self.volumes_client,
                        volume['id'])
        server = self.create_server()
        # Attach volume to instance
        self.attach_volume(server['id'], volume['id'])
        # Create backup using force flag
        backup_name = data_utils.rand_name(
            self.__class__.__name__ + '-Backup')
        backup = self.create_backup(volume_id=volume['id'],
                                    name=backup_name, force=True)
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'in-use')
        self.assertEqual(backup_name, backup['name'])

    @decorators.idempotent_id('2a8ba340-dff2-4511-9db7-646f07156b15')
    @utils.services('image')
    def test_bootable_volume_backup_and_restore(self):
        # Create volume from image
        img_uuid = CONF.compute.image_ref
        volume = self.create_volume(imageRef=img_uuid)

        volume_details = self.volumes_client.show_volume(
            volume['id'])['volume']
        self.assertTrue(volume_details['bootable'])

        # Create a backup
        backup = self.create_backup(volume_id=volume['id'])
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'available')

        # Restore the backup
        restored_volume_id = self.restore_backup(backup['id'])['volume_id']

        # Verify the restored backup volume is bootable
        restored_volume_info = self.volumes_client.show_volume(
            restored_volume_id)['volume']
        self.assertTrue(restored_volume_info['bootable'])


class VolumesBackupsV39Test(base.BaseVolumeTest):

    _api_version = 3
    min_microversion = '3.9'
    max_microversion = 'latest'

    @classmethod
    def skip_checks(cls):
        super(VolumesBackupsV39Test, cls).skip_checks()
        if not CONF.volume_feature_enabled.backup:
            raise cls.skipException("Cinder backup feature disabled")

    @decorators.idempotent_id('9b374cbc-be5f-4d37-8848-7efb8a873dcc')
    def test_update_backup(self):
        # Create volume and backup
        volume = self.create_volume()
        backup = self.create_backup(volume_id=volume['id'])
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'available')

        # Update backup and assert response body for update_backup method
        update_kwargs = {
            'name': data_utils.rand_name(self.__class__.__name__ + '-Backup'),
            'description': data_utils.rand_name("volume-backup-description")
        }
        update_backup = self.backups_client.update_backup(
            backup['id'], **update_kwargs)['backup']
        self.assertEqual(backup['id'], update_backup['id'])
        self.assertEqual(update_kwargs['name'], update_backup['name'])
        self.assertIn('links', update_backup)

        # Assert response body for show_backup method
        retrieved_backup = self.backups_client.show_backup(
            backup['id'])['backup']
        for key in update_kwargs:
            self.assertEqual(update_kwargs[key], retrieved_backup[key])
Python
0.000041
@@ -1932,52 +1932,8 @@ me%0A%0A - @decorators.skip_because(bug=%221483434%22)%0A
06d71ede1c1feaa597b442f4ead63d2b2e31e715
fix `trigger` -> `__call__`
chainer/training/triggers/once_trigger.py
chainer/training/triggers/once_trigger.py
class OnceTrigger(object):

    """Trigger based on the starting point of the iteration.

    This trigger accepts only once at starting point of the iteration. There
    are two ways to specify the starting point: only starting point in whole
    iteration or called again when training resumed.

    Args:
        call_on_resume (bool): Whether the extension is called again or not
            when restored from a snapshot. It is set to ``False`` by default.
    """

    def __init__(self, call_on_resume=False):
        self._flag_first = True
        self._flag_resumed = call_on_resume

    def trigger(self, trainer):
        flag = self._flag_first or self._flag_resumed
        self._flag_resumed = False
        self._flag_first = False
        return flag

    def serialize(self, serializer):
        self._flag_first = serializer('_flag_first', self._flag_first)
Python
0.000003
@@ -596,23 +596,24 @@ def -trigger +__call__ (self, t
ddec6067054cc4408ac174e3ea4ffeca2a962201
Remove unnecessary assert from view for Notice home.
regulations/views/notice_home.py
regulations/views/notice_home.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from operator import itemgetter
import logging

from django.http import Http404
from django.template.response import TemplateResponse
from django.views.generic.base import View

from regulations.generator.api_reader import ApiReader
from regulations.views.preamble import (
    notice_data, CommentState)

logger = logging.getLogger(__name__)


class NoticeHomeView(View):
    """
    Basic view that provides a list of regulations and notices to the context.
    """
    template_name = None  # We should probably have a default notice template.

    def get(self, request, *args, **kwargs):
        notices = ApiReader().notices().get("results", [])
        context = {}

        notices_meta = []
        for notice in notices:
            try:
                if notice.get("document_number"):
                    _, meta, _ = notice_data(notice["document_number"])
                    notices_meta.append(meta)
            except Http404:
                pass

        notices_meta = sorted(notices_meta,
                              key=itemgetter("publication_date"),
                              reverse=True)
        context["notices"] = notices_meta

        # Django templates won't show contents of CommentState as an Enum, so:
        context["comment_state"] = {state.name: state.value
                                    for state in CommentState}

        assert self.template_name
        template = self.template_name

        return TemplateResponse(request=request, template=template,
                                context=context)
Python
0
@@ -1401,42 +1401,8 @@ e%7D%0A%0A - assert self.template_name%0A
7881c90200069d4774f41e46212bb06771eafae7
Fix ignore list
botbot/checker.py
botbot/checker.py
"""Base class for checking file trees""" import stat import os import time from fnmatch import fnmatch from . import fileinfo as fi from . import report as rep from . import ignore as ig from . import sqlcache as sql class Checker: """ Holds a set of checks that can be run on a file to make sure that it's suitable for the shared directory. Runs checks recursively on a given path. """ # checks is a set of all the checking functions this checker knows of. All # checkers return a number signifying a specific problem with the # file specified in the path. def __init__(self, outpath, dbpath): self.checks = set() # All checks to perform self.checklist = list() # List of FileInfos to check at some point self.checked = list() self.status = { 'files': 0, 'checked': 0, 'time': 0, 'probcount': 0 } # Information about the previous check self.db = sql.FileDatabase(dbpath) self.reporter = rep.Reporter(self, out=outpath) self.path = '' def register(self, *funcs): """ Add a new checking function to the set, or a list/tuple of functions. """ for fn in funcs: self.checks.add(fn) def build_new_checklist(self, path, link=False, verbose=True): """ Build a list of files to check. If link is True, follow symlinks. """ to_add = [path] checklist = [] while len(to_add) > 0: try: apath = fi.FileInfo(to_add.pop(), link=link) if is_link(apath['path']): if not link: continue else: to_add.append(apath['path']) elif apath['isdir']: new = [os.path.join(apath['path'], f) for f in os.listdir(apath['path'])] to_add.extend(new) checklist.append(apath) # TODO: Fix these... except FileNotFoundError: pass except PermissionError: pass except OSError: pass self.checklist = checklist self.status['files'] = len(self.checklist) def update_checklist(self, cached, link=False, verbose=True): """ Take a cached list of files to check and make a list of directories and files that need to be rechecked. A file is rechecked if its last check time is earlier than its last change. """ prunelist = [] recheck = [] for finfo in cached: try: recent = fi.FileInfo(finfo['path']) if recent['lastmod'] > finfo['lastcheck']: if recent['isfile']: recent['problems'] = None recheck.append(recent) else: path = recent['path'] recheck.extend(fi.FileInfo(p) for p in os.listdir(path)) else: self.checked.append(finfo) except FileNotFoundError: # Cached path no longer exists prunelist.append(finfo) self.db.prune(prunelist) self.checklist = list(recheck) self.status['files'] = len(self.checklist) def check_all(self, path, shared=False, link=False, verbose=False, fmt='generic', ignore=None, cached=False, force=False): """Pretty much do everything.""" def remove_ignored(fi, ignore): """Remove files if they're in the ignore file""" fn = os.path.basename(fi['path']) for rule in ignore: if fnmatch(fn, rule): print('Ignoring {}...'.format(fn)) return False return True """Check the file list generated before.""" # Start timing starttime = time.time() # Munge that path boys! 
path = os.path.abspath(path) path = os.path.expanduser(path) self.path = path # Get a list of files checklist = self.db.get_cached_filelist(path) # If no cached tree exists, build one if we need one if not cached: if force or len(checklist) == 0: self.build_new_checklist(path) else: # Otherwise, see if we need to recheck any files self.status['probcount'] = len(checklist) self.update_checklist(checklist) # Remove ignored files checklist = [fi for fi in checklist if remove_ignored(fi, ignore)] if not cached: for finfo in self.checklist: if finfo['isfile']: finfo['lastcheck'] = int(time.time()) self.check_file(finfo, status=verbose) self.db.store_file_problems(self.checked) self.status['time'] = time.time() - starttime self.reporter.write_report(fmt, shared) def check_file(self, finfo, status=True): """ Check a file against all checkers, write status to stdout if status is True """ result = dict(finfo) for check in self.checks: prob = check(finfo) if prob is not None: if result['problems'] is None: result['problems'] = {prob} else: result['problems'].add(prob) self.status['probcount'] += 1 self.checked.append(result) self.status['checked'] += 1 if status: self.reporter.write_status(40) def is_link(path): """Check if the given path is a symbolic link""" return os.path.islink(path) or os.path.abspath(path) != os.path.realpath(path)
Python
0.000009
@@ -4659,32 +4659,37 @@ d files%0A +self. checklist = %5Bfi @@ -4698,16 +4698,21 @@ r fi in +self. checklis
79ae93beb2502143f78a182b1c28c5aa008b6f9a
Fix scoping
botbot/checker.py
botbot/checker.py
"""Base class for checking file trees""" import stat import os import time from fnmatch import fnmatch from . import fileinfo as fi from . import report as rep from . import sqlcache as sql from . import ignore as ig class CheckerBase: """ Defines a foundation for other checker objects. Allows for checking individual files against a list of check functions. """ def __init__(self, dbpath): self.checks = set() self.db = sql.FileDatabase(dbpath) # Information about # previous check, updated # after every check self.path = '' # Base path we're checking self.checked = [] # List of checked files def register(self, *funcs): """ Add a new checking function to the set, or a list/tuple of functions. """ for fn in funcs: self.checks.add(fn) def check_file(self, finfo): """ Check a file against all checkers, write status to stdout if status is True """ for check in self.checks: prob = check(finfo) if prob is not None: finfo['problems'].add(prob) finfo['lastcheck'] = int(time.time()) def process_checked_file(self, result): """ Helper function to record that a file was checked and to increment the counter. """ self.checked.append(result) class OneshotChecker(CheckerBase): """ Intended to run checks recursively on a given path, once. Useful for one-off check runs, not for daemon mode. """ # checks is a set of all the checking functions this checker knows of. All # checkers return a number signifying a specific problem with the # file specified in the path. def __init__(self, outpath, dbpath): super().__init__(dbpath) self.checks = set() # All checks to perform self.checklist = list() # List of FileInfos to check at some point self.status = { 'files': 0, 'checked': 0, 'time': 0, 'probcount': 0 } # Information about the previous check self.reporter = rep.OneshotReporter(self, out=outpath) # Formats and # writes # information def build_new_checklist(self, path, link=False, verbose=True): """ Build a list of files to check. If link is True, follow symlinks. """ self.path = path to_add = [path] # Acts like a stack, this does checklist = [] while len(to_add) > 0: try: apath = fi.FileInfo(to_add.pop(), link=link) # If this path is a directory, push all files and # subdirectories to the stack if apath['isdir']: new = [os.path.join(apath['path'], f) for f in os.listdir(apath['path'])] to_add.extend(new) else: # Otherwise just add that file to the checklist checklist.append(apath) except PermissionError as err: # We couldn't read the file or directory because # permissions were wrong global apath apath['problems'] = {'PROB_DIR_NOT_ACCESSIBLE'} self.checked.append(apath) except OSError as err: # Probably a dangling link global apath apath['problems'] = {'PROB_BROKEN_LINK'} self.checked.append(apath) # Update checker records self.checklist = checklist self.status['files'] = len(self.checklist) def update_checklist(self, cached, link=False, verbose=True): """ Take a cached list of files to check and make a list of directories and files that need to be rechecked. A file is rechecked if its last check time is earlier than its last change. """ prunelist = [] recheck = [] for finfo in cached: try: # If the ctime of the given file is later than the # last check, the file needs to be rechecked. recent = fi.FileInfo(finfo['path']) if recent['lastmod'] > finfo['lastcheck']: if recent['isfile']: recent['problems'] = set() # We'll regenerate # the list later. 
recheck.append(recent) else: path = recent['path'] # Add all the paths to the recheck list try: for f in os.listdir(path): self.checklist.append(fi.FileInfo(f)) except PermissionError: # Probably means we can't execute this # directory. (although we probably should # abolish capital punishment anyway) recent['problems'] = {'PROB_DIR_NOT_ACCESSIBLE'} self.checked.append(recent) else: # The file's in the same condition as its last # check. Don't recheck it pls self.checked.append(finfo) except FileNotFoundError: # Cached path no longer exists, prune it bb prunelist.append(finfo) self.db.prune(*prunelist) # Update that shizznik self.checklist = recheck self.status['files'] = len(self.checklist) self.status['probcount'] = len(self.checked) def populate_checklist(self, path, force=False): """Populate the list of files to check""" # Get a list of files from last time checklist = self.db.get_cached_filelist(path) # Recheck if explicitly stated or if we have no cached files if force or len(checklist) == 0: self.build_new_checklist(path) else: # Otherwise, see if we need to recheck any files self.update_checklist(checklist) def remove_ignored(fi, ignore): """Check if a file matches a pattern from the ignore file""" fn = os.path.basename(fi['path']) # Check each file against every ignore rule. Return True # for matching files. for rule in ignore: if fnmatch(fn, rule): print('Ignoring {}...'.format(fn)) return False return True # Remove ignored files and move to object ignore = ig.parse_ignore_rules(ig.find_ignore_file()) self.checklist = [fi for fi in self.checklist if remove_ignored(fi, ignore)] def check_all(self, path, shared=False, link=False, verbose=False, fmt='generic', ignore=None, cached=False, force=False): """Pretty much do everything.""" # Start timing starttime = time.time() # Munge that path boys! path = os.path.abspath(path) path = os.path.expanduser(path) self.path = path # If no cached tree exists, (or if we explicitly want to build # a new one) build one if we need one if not cached: # Build the checklist self.populate_checklist(force=force) # Check all the files against every check. for finfo in self.checklist: if finfo['isfile']: self.check_file(finfo) self.process_checked_file(finfo) if verbose: pass self.db.store_file_problems(*self.checked) # Record stats and write the report. We out! self.status['time'] = time.time() - starttime self.reporter.write_report(fmt, shared) def process_checked_file(self, finfo): super().process_checked_file(finfo) self.status['probcount'] += len(finfo['problems']) self.status['checked'] += 1 def is_link(path): """Check if the given path is a symbolic link""" return os.path.islink(path) or os.path.abspath(path) != os.path.realpath(path)
Python
0.000001
@@ -3238,33 +3238,26 @@ missionError - as err :%0A + @@ -3354,37 +3354,8 @@ ong%0A - global apath%0A @@ -3487,15 +3487,8 @@ rror - as err :%0A @@ -3532,37 +3532,8 @@ ink%0A - global apath%0A
91946410f14b21e510a104b105a6f5036cc8944f
build updated
python/common/core/globalVariables.py
python/common/core/globalVariables.py
'''
Author: Jason Parks
Created: Apr 22, 2012
Module: common.core.globalVariables
Purpose: to import globalVariables
'''

# Location of Toolset
toolsLocation = 'C:/Users/jason/git/PipelineConstructionSet'

# NOTE!: It is necessary to manually add the above location's
# python directory, i.e-
#
# PYTHONPATH = 'C:/Users/jason/git/PipelineConstructionSet/python'
#
# to the PYTHONPATH environment variable on all user's
# machines whom want to use Pipeline Construction set

# Location of setup schema data file
schemaLocation = 'C:/Users/jason/remotePCS'
# "schemaLocation" should probably be in a shared folder on the network
# so all users can get updates the T.A. makes to the file called
# pcsSchema.xml in this location. You can find a sample version of
# this file in ./PipelineConstructionSet/schemas/pcsSchemaSample.xml

# Name your games here:
teamA = 'GreatGameA'
teamB = 'GreatGameB'
teamC = 'GreatGameC'
teamD = 'GreatGameD'
# You need to change the name of the file
# ./PipelineConstructionSet/schemas/GreatGameA.xml
# and the xml header info in the file as well
# If you are making tools for more than one team,
# you'll need to make more GreatGame*.xml files

# manually update to date/time
build = '042212-20.27'
# This will show up in the PythonEditor or ScriptEditor
# when our DCC app first launches the toolMenu.

print "common.core.globalVariables imported"
Python
0
@@ -1222,19 +1222,19 @@ = '0 -422 +506 12-2 -0.27 +1.01 '%0D#
a4043cad930ba94c2a3152a2900537a7ede4a86d
fix bug with sockjs handling of UnsynchronizedViewer
python/neuroglancer/sockjs_handler.py
python/neuroglancer/sockjs_handler.py
# @license
# Copyright 2017 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import

import json
import re

import six
import sockjs.tornado

from . import trackable_state, viewer_config_state
from .json_utils import decode_json, encode_json

SOCKET_PATH_REGEX_WITHOUT_GROUP = r'^/socket/(?:[^/]+)'
SOCKET_PATH_REGEX = r'^/socket/(?P<viewer_token>[^/]+)'


class ClientCredentialsHandler(object):
    def __init__(self, io_loop, private_state, config_state, credentials_manager):
        self.private_state = private_state
        self.config_state = config_state
        self._on_changed_callback = (lambda: io_loop.add_callback(self._on_changed))
        private_state.add_changed_callback(self._on_changed_callback)
        self._previous_invalid_credentials = dict()
        self.credentials_manager = credentials_manager
        self._closed = False
        self.io_loop = io_loop

    def close(self):
        self.private_state.remove_changed_callback(self._on_changed_callback)
        self._closed = True

    def _on_changed(self):
        credentials = self.private_state.state.credentials
        try:
            for key, value in credentials.iteritems():
                prev_value = self._previous_invalid_credentials.get(key, 'invalid')
                if prev_value != value:
                    parsed_key = json.loads(key)
                    provider = self.credentials_manager.get(
                        parsed_key['key'], parsed_key.get('parameters'))
                    if provider is not None:
                        def handle_credentials(f, key=key):
                            if self._closed:
                                return
                            try:
                                credentials = f.result()
                                def func(s):
                                    s.credentials[key] = credentials
                                self.config_state.retry_txn(func)
                            except:
                                import traceback
                                traceback.print_exc()
                        provider.get(value).add_done_callback(handle_credentials)
        except:
            import traceback
            traceback.print_exc()


class StateHandler(object):
    def __init__(self, state, io_loop, send_update, receive_updates=True):
        self.state = state
        self._send_update = send_update
        self._receive_updates = receive_updates
        self.io_loop = io_loop
        self._last_generation = None
        if send_update is not None:
            self._on_state_changed_callback = (
                lambda: self.io_loop.add_callback(self._on_state_changed))
            self.state.add_changed_callback(self._on_state_changed_callback)

    def _on_state_changed(self):
        """Invoked when the viewer state changes."""
        raw_state, generation = self.state.raw_state_and_generation
        if generation != self._last_generation:
            self._last_generation = generation
            self._send_update(raw_state, generation)

    def request_send_state(self, generation):
        if self._send_update is not None:
            self._last_generation = generation
            self._on_state_changed()

    def receive_update(self, raw_state, generation):
        if self._receive_updates:
            self._last_generation = generation
            self.state.set_state(raw_state, generation)

    def close(self):
        if self._send_update is not None:
            self.state.remove_changed_callback(self._on_state_changed_callback)
            del self._on_state_changed_callback


class SockJSHandler(sockjs.tornado.SockJSConnection):
    @property
    def io_loop(self):
        return self.session.server.io_loop

    def on_open(self, info):
        server = self.session.server.neuroglancer_server
        m = re.match(SOCKET_PATH_REGEX, info.path)
        if m is None:
            self.close()
            return
        viewer_token = self.viewer_token = m.group('viewer_token')
        viewer = self.viewer = server.viewers.get(viewer_token)
        if viewer is None:
            self.close()
            return
        private_state = self.private_state = trackable_state.TrackableState(
            viewer_config_state.PrivateState)
        managed_states = [
            dict(key='s', state=viewer.shared_state, send_updates=True, receive_updates=True),
            dict(key='c', state=viewer.config_state, send_updates=True, receive_updates=False),
            dict(key='p', state=private_state, send_updates=False, receive_updates=True),
        ]
        self._state_handlers = dict()
        from .default_credentials_manager import default_credentials_manager
        self._credentials_handler = ClientCredentialsHandler(
            io_loop=self.io_loop,
            private_state=private_state,
            config_state=viewer.config_state,
            credentials_manager=default_credentials_manager)

        def make_state_handler(key, state, send_updates, receive_updates):
            def send_update(raw_state, generation):
                if not self.is_open:
                    return
                message = {'t': 'setState', 'k': key, 's': raw_state, 'g': generation}
                self.send(encode_json(message))

            handler = StateHandler(
                state=state,
                io_loop=self.io_loop,
                send_update=send_update if send_updates else None,
                receive_updates=receive_updates)
            self._state_handlers[key] = handler

        for x in managed_states:
            make_state_handler(**x)
        self.is_open = True

    def on_message(self, message_text):
        if self.viewer is None:
            return
        try:
            message = decode_json(message_text)
            if isinstance(message, dict):
                t = message['t']
                if t == 'getState':
                    handler = self._state_handlers[message['k']]
                    handler.request_send_state(six.text_type(message['g']))
                    return
                if t == 'setState':
                    handler = self._state_handlers[message['k']]
                    handler.receive_update(message['s'], six.text_type(message['g']))
                    return
                if t == 'action':
                    for action in message['actions']:
                        self.io_loop.add_callback(self.viewer.actions.invoke,
                                                  action['action'], action['state'])
                    self.io_loop.add_callback(
                        self.send, json.dumps({'t': 'ackAction', 'id': message['id']}))
        except:
            # import pdb
            # pdb.post_mortem()
            import traceback
            traceback.print_exc()
            # Ignore malformed JSON

    def on_close(self):
        viewer = self.viewer
        self.is_open = False
        if viewer is not None:
            for state_handler in six.itervalues(self._state_handlers):
                state_handler.close()
            self._credentials_handler.close()
            del self._state_handlers
Python
0.000001
@@ -4426,32 +4426,34 @@ None:%0A + self.close()%0A @@ -4441,32 +4441,34 @@ self.close()%0A + return @@ -4828,103 +4828,8 @@ = %5B%0A - dict(key='s', state=viewer.shared_state, send_updates=True, receive_updates=True),%0A @@ -5019,16 +5019,194 @@ %5D +%0A if hasattr(viewer, 'shared_state'):%0A managed_states.append(%0A dict(key='s', state=viewer.shared_state, send_updates=True, receive_updates=True)) %0A%0A
451e020a80b0c4f21bc613a9f281ac3660069278
Add complete URL to comments notifications.
astrobin/signals.py
astrobin/signals.py
# Django
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse as reverse_url
from django.db.models.signals import m2m_changed
from django.db.models.signals import post_save

# Third party apps
from actstream import action as act
from rest_framework.authtoken.models import Token

# Other AstroBin apps
from nested_comments.models import NestedComment
from rawdata.models import (
    PrivateSharedFolder,
    PublicDataPool,
    RawImage,
    TemporaryArchive,
)

# This app
from .notifications import push_notification
from .models import Image, Gear, UserProfile
from .gear import get_correct_gear


def nested_comment_post_save(sender, instance, created, **kwargs):
    if created:
        model_class = instance.content_type.model_class()
        obj = instance.content_type.get_object_for_this_type(id = instance.object_id)
        url = instance.get_absolute_url()

        if model_class == Image:
            if instance.author != obj.user:
                push_notification(
                    [obj.user], 'new_comment',
                    {
                        'url': url,
                        'user': instance.author,
                    }
                )

            if instance.parent and instance.parent.author != instance.author:
                push_notification(
                    [instance.parent.author], 'new_comment_reply',
                    {
                        'url': url,
                        'user': instance.author,
                    }
                )
        elif model_class == Gear:
            if not instance.parent:
                gear, gear_type = get_correct_gear(obj.id)
                user_attr_lookup = {
                    'Telescope': 'telescopes',
                    'Camera': 'cameras',
                    'Mount': 'mounts',
                    'FocalReducer': 'focal_reducers',
                    'Software': 'software',
                    'Filter': 'filters',
                    'Accessory': 'accessories',
                }

                recipients = [x.user for x in UserProfile.objects.filter(
                    **{user_attr_lookup[gear_type]: gear})]
                notification = 'new_gear_discussion'
            else:
                notification = 'new_comment_reply'
                recipients = [instance.parent.author]

            push_notification(
                recipients, notification,
                {
                    'url': url,
                    'user': instance.author,
                }
            )


def rawdata_publicdatapool_post_save(sender, instance, created, **kwargs):
    verb = "created a new public data pool"
    if created:
        act.send(instance.creator, verb = verb, target = instance)


def create_auth_token(sender, instance, created, **kwargs):
    if created:
        Token.objects.get_or_create(user = instance)


def rawdata_publicdatapool_data_added(sender, instance, action, reverse, model, pk_set, **kwargs):
    if action == 'post_add' and len(pk_set) > 0:
        contributors = [i.user for i in instance.images.all()]
        users = [instance.creator] + contributors
        submitter = RawImage.objects.get(pk = list(pk_set)[0]).user
        users[:] = [x for x in users if x != submitter]
        push_notification(
            users, 'rawdata_posted_to_pool',
            {
                'user_name': submitter.username,
                'user_url': reverse_url('user_page', kwargs = {'username': submitter.username}),
                'pool_name': instance.name,
                'pool_url': reverse_url('rawdata.publicdatapool_detail', kwargs = {'pk': instance.pk}),
            },
        )

        verb = "added new data to public data pool"
        act.send(instance.creator, verb = verb, target = instance)


def rawdata_publicdatapool_image_added(sender, instance, action, reverse, model, pk_set, **kwargs):
    if action == 'post_add' and len(pk_set) > 0:
        contributors = [i.user for i in instance.images.all()]
        users = [instance.creator] + contributors
        submitter = Image.objects.get(pk = list(pk_set)[0]).user
        users[:] = [x for x in users if x != submitter]
        push_notification(
            users, 'rawdata_posted_image_to_public_pool',
            {
                'user_name': submitter.username,
                'user_url': reverse_url('user_page', kwargs = {'username': submitter.username}),
                'pool_name': instance.name,
                'pool_url': reverse_url('rawdata.publicdatapool_detail', kwargs = {'pk': instance.pk}),
            },
        )

        verb = "added a new processed image to public data pool"
        act.send(instance.creator, verb = verb, target = instance)


def rawdata_privatesharedfolder_data_added(sender, instance, action, reverse, model, pk_set, **kwargs):
    if action == 'post_add' and len(pk_set) > 0:
        invitees = instance.users.all()
        users = [instance.creator] + list(invitees)
        submitter = RawImage.objects.get(pk = list(pk_set)[0]).user
        users[:] = [x for x in users if x != submitter]
        push_notification(
            users, 'rawdata_posted_to_private_folder',
            {
                'user_name': submitter.username,
                'user_url': reverse_url('user_page', kwargs = {'username': submitter.username}),
                'folder_name': instance.name,
                'folder_url': reverse_url('rawdata.privatesharedfolder_detail', kwargs = {'pk': instance.pk}),
            },
        )


def rawdata_privatesharedfolder_image_added(sender, instance, action, reverse, model, pk_set, **kwargs):
    if action == 'post_add' and len(pk_set) > 0:
        invitees = instance.users.all()
        users = [instance.creator] + list(invitees)
        submitter = Image.objects.get(pk = list(pk_set)[0]).user
        users[:] = [x for x in users if x != submitter]
        push_notification(
            users, 'rawdata_posted_image_to_private_folder',
            {
                'user_name': submitter.username,
                'user_url': reverse_url('user_page', kwargs = {'username': submitter.username}),
                'folder_name': instance.name,
                'folder_url': reverse_url('rawdata.privatesharedfolder_detail', kwargs = {'pk': instance.pk}),
            },
        )


def rawdata_privatesharedfolder_user_added(sender, instance, action, reverse, model, pk_set, **kwargs):
    if action == 'post_add' and len(pk_set) > 0:
        user = User.objects.get(pk = list(pk_set)[0])
        push_notification(
            [user], 'rawdata_invited_to_private_folder',
            {
                'folder_name': instance.name,
                'folder_url': reverse_url('rawdata.privatesharedfolder_detail', kwargs = {'pk': instance.pk}),
            },
        )


post_save.connect(nested_comment_post_save, sender = NestedComment)
post_save.connect(rawdata_publicdatapool_post_save, sender = PublicDataPool)
post_save.connect(create_auth_token, sender = User)

m2m_changed.connect(rawdata_publicdatapool_data_added, sender = PublicDataPool.images.through)
m2m_changed.connect(rawdata_publicdatapool_image_added, sender = PublicDataPool.processed_images.through)
m2m_changed.connect(rawdata_privatesharedfolder_data_added, sender = PrivateSharedFolder.images.through)
m2m_changed.connect(rawdata_privatesharedfolder_image_added, sender = PrivateSharedFolder.processed_images.through)
m2m_changed.connect(rawdata_privatesharedfolder_user_added, sender = PrivateSharedFolder.users.through)
Python
0
@@ -871,16 +871,41 @@ url = + %22http://astrobin.com/%22 + instanc
08960bea9735921ce15160067533d19e850217bd
Save and load the autohost prio list
autohost_manager.py
autohost_manager.py
""" TODO Mon?: Autohost manager for AliCatFiberarts (and others). * Have a list of high priority streams, in order (or with priorities) * Listen for, or poll for, streams going live * If (a) a high priority stream has just gone live, and (b) you are currently hosting, and (c) the hosted stream has lower priority * Then send "/unhost" to the channel. * Have a very very simple GUI (tkinter?) * "Optional: Rename this to autohost_manager.pyw to hide the black box" """ # Components needed: # 1) Hosting control via IRC - mostly done # 2) Going-live detection # 2a) Poll at a set interval eg 15 mins - need # 2b) Receive webhook notifications from Twitch - nice to have # 3) Authentication, mainly for IRC - done # 3b) Optionally allow user to override channel name (in case you're an editor) - not done # 4) Configuration of channel priorities, since we can't query Twitch - done # 5) JSON config storage - done # Goal: Make this a single-file download with no deps other than Python 3.7+. import json from pprint import pprint import socket import threading import webbrowser import tkinter as tk import urllib.request try: with open("autohost_manager.json") as f: config = json.load(f) if not isinstance(config, dict): config = {} except (FileNotFoundError, json.decoder.JSONDecodeError): config = {} def save_config(): with open("autohost_manager.json", "w") as f: json.dump(config, f) def checkauth(oauth): print("Checking auth...") with urllib.request.urlopen(urllib.request.Request( "https://api.twitch.tv/kraken/user", headers={"Authorization": "OAuth " + oauth}, )) as f: data = json.load(f) pprint(data) config.update(oauth=oauth, login=data["name"], display=data["display_name"], channel=data["name"]) save_config() def unhost(): print("Unhosting...") sock = socket.create_connection(("irc.chat.twitch.tv", 6667)) sock.send("""PASS oauth:{oauth} NICK {login} CAP REQ :twitch.tv/commands JOIN #{channel} MARKENDOFTEXT1 """.format(**config).encode("UTF-8")) endmarker = "MARKENDOFTEXT1" for line in sock.makefile(encoding="UTF-8"): if line.startswith(":tmi.twitch.tv HOSTTARGET #"): # VERY VERY rudimentary IRC parsing hosting = line.split(" ")[3] assert hosting and hosting[0] == ":" hosting = hosting[1:] if hosting == "-": print("Not hosting") else: print("Currently hosting:", hosting) sock.send("PRIVMSG #{channel} :/unhost\nMARKENDOFTEXT2\n".format(**config).encode("UTF-8")) endmarker = "MARKENDOFTEXT2" if endmarker in line: sock.send(b"quit\n") print("Closed") class Application(tk.Frame): def __init__(self, master=None): super().__init__(master) self.pack() # TODO: Fix layout later and make things prettier self.login_frame = tk.LabelFrame(self, text="Authenticate with Twitch") self.login_frame.pack(side="top") self.login_lbl = tk.Label(self.login_frame, text="OAuth token:") self.login_lbl.pack(side="left") self.login_ef = tk.Entry(self.login_frame) self.login_ef.insert(0, config.get("oauth", "")) self.login_ef.pack(side="left") self.login_go_browser = tk.Button(self.login_frame, text="Get a token", command=self.cmd_login_go_browser) self.login_go_browser.pack(side="left") self.login_check_auth = tk.Button(self.login_frame, text="Verify token", command=self.cmd_login_check_auth) self.login_check_auth.pack(side="left") # To prepopulate this, go to https://www.twitch.tv/rosuav/dashboard/settings/autohost # and enter this into the console: # document.querySelector(".autohost-list-edit").innerText # Sadly, the API call /kraken/autohost/list is not documented anywhere and does # not appear to be easily callable :( 
self.hostlist_frame = tk.LabelFrame(self, text="Autohost list in priority order") self.hostlist_frame.pack(side="top") self.hostlist = tk.Text(self.hostlist_frame, width=30, height=20) self.hostlist.pack() # self.hostlist.get(1.0, tk.END).split("\n") self.unhost = tk.Button(self, text="Unhost now", command=self.cmd_unhost) self.unhost.pack(side="top") def cmd_unhost(self): threading.Thread(target=unhost).start() def cmd_login_go_browser(self): webbrowser.open("https://id.twitch.tv/oauth2/authorize?response_type=token&client_id=q6batx0epp608isickayubi39itsckt&redirect_uri=https://twitchapps.com/tmi/&scope=chat:read+chat:edit+channel_editor+user_read") def cmd_login_check_auth(self): oauth = self.login_ef.get() if oauth.startswith("oauth:"): oauth = oauth[6:] threading.Thread(target=checkauth, args=(oauth,)).start() win = tk.Tk() win.title("Autohost manager") app = Application(master=win) app.mainloop()
Python
0
@@ -3858,27 +3858,75 @@ ostlist. -pack( +insert(tk.END, %22%5Cn%22.join(config.get(%22hosttargets%22, %22%22)) )%0A%09%09 -# self.hos @@ -3935,38 +3935,121 @@ ist. -get(1.0, tk.END).split(%22%5Cn +pack()%0A%0A%09%09self.save = tk.Button(self, text=%22Save host list%22, command=self.cmd_save)%0A%09%09self.save.pack(side=%22top %22)%0A -%0A %09%09se @@ -4148,24 +4148,158 @@ ide=%22top%22)%0A%0A +%09def cmd_save(self):%0A%09%09config%5B%22hosttargets%22%5D = %5Bname for name in self.hostlist.get(1.0, tk.END).split(%22%5Cn%22) if name%5D%0A%09%09save_config()%0A%0A %09def cmd_unh
b0e91b820913c7b46d04f946267903d9785fc2ca
Fix test
experiments/tests/test_counter.py
experiments/tests/test_counter.py
from __future__ import absolute_import

from unittest import TestCase

from experiments import counters
from mock import patch

TEST_KEY = 'CounterTestCase'


class CounterTestCase(TestCase):
    def setUp(self):
        self.counters = counters.Counters()
        self.counters.reset(TEST_KEY)
        self.assertEqual(self.counters.get(TEST_KEY), 0)

    def tearDown(self):
        self.counters.reset(TEST_KEY)

    def test_add_item(self):
        self.counters.increment(TEST_KEY, 'fred')
        self.assertEqual(self.counters.get(TEST_KEY), 1)

    def test_add_multiple_items(self):
        self.counters.increment(TEST_KEY, 'fred')
        self.counters.increment(TEST_KEY, 'barney')
        self.counters.increment(TEST_KEY, 'george')
        self.counters.increment(TEST_KEY, 'george')
        self.assertEqual(self.counters.get(TEST_KEY), 3)

    def test_add_duplicate_item(self):
        self.counters.increment(TEST_KEY, 'fred')
        self.counters.increment(TEST_KEY, 'fred')
        self.counters.increment(TEST_KEY, 'fred')
        self.assertEqual(self.counters.get(TEST_KEY), 1)

    def test_get_frequencies(self):
        self.counters.increment(TEST_KEY, 'fred')
        self.counters.increment(TEST_KEY, 'barney')
        self.counters.increment(TEST_KEY, 'george')
        self.counters.increment(TEST_KEY, 'roger')
        self.counters.increment(TEST_KEY, 'roger')
        self.counters.increment(TEST_KEY, 'roger')
        self.counters.increment(TEST_KEY, 'roger')
        self.assertEqual(self.counters.get_frequencies(TEST_KEY), {1: 3, 4: 1})

    def test_delete_key(self):
        self.counters.increment(TEST_KEY, 'fred')
        self.counters.reset(TEST_KEY)
        self.assertEqual(self.counters.get(TEST_KEY), 0)

    def test_clear_value(self):
        self.counters.increment(TEST_KEY, 'fred')
        self.counters.increment(TEST_KEY, 'fred')
        self.counters.increment(TEST_KEY, 'fred')
        self.counters.increment(TEST_KEY, 'barney')
        self.counters.increment(TEST_KEY, 'barney')
        self.counters.clear(TEST_KEY, 'fred')
        self.assertEqual(self.counters.get(TEST_KEY), 1)
        self.assertEqual(self.counters.get_frequencies(TEST_KEY), {2: 1})

    @patch('experiments.counters.Counter._redis')
    def test_should_return_tuple_if_failing(self, patched__redis):
        patched__redis.side_effect = Exception
        self.assertEqual(self.counters.get_frequencies(TEST_KEY), dict())
Python
0.000004
@@ -2253,16 +2253,17 @@ .Counter +s ._redis'
92279518979c3fc4c62609624dd245ac4f4ad6c4
Rename logger to log
figgypy/config.py
figgypy/config.py
import logging
import os

import seria
import yaml

logger = logging.getLogger('figgypy')
if len(logger.handlers) == 0:
    logger.addHandler(logging.NullHandler())

gpg_loaded = False
try:
    import gnupg
    gpg_loaded = True
except ImportError:
    logging.info('could not load gnupg, will be unable to unpack secrets')
    pass


class Config(object):
    """Configuration object

    Object can be created with a filename only, relative path, or absolute path.
    If only name or relative path is provided, look in this order:

    1. current directory
    2. `~/.config/<file_name>`
    3. `/etc/<file_name>`

    It is a good idea to include you __package__ in the file name.
    For example, `cfg = Config(os.path.join(__package__, 'config.yaml'))`.
    This way it will look for your_package/config.yaml,
    ~/.config/your_package/config.yaml, and /etc/your_package/config.yaml.
    """
    _dirs = [
        os.curdir,
        os.path.join(os.path.expanduser("~"), '.config'),
        "/etc/"
    ]

    def __init__(self, f):
        self._f = self._get_file(f)
        self._cfg = self._get_cfg(self._f)

    def _get_cfg(self, f):
        """Get configuration from config file"""
        try:
            with open(f, 'r') as _fo:
                try:
                    _seria_in = seria.load(_fo)
                    _y = _seria_in.dump('yaml')
                except Exception as e:
                    raise
        except IOError:
            raise FiggypyError("could not open configuration file")

        _cfg = yaml.load(_y)
        self._post_load_process(_cfg)
        for k, v in _cfg.items():
            setattr(self, k, v)

    def _decrypt_and_update(self, obj):
        """Decrypt and update configuration.

        Do this only from _post_load_process so that we can verify
        gpg is ready. If we did them in the same function we would
        end up calling the gpg checks several times, potentially,
        since we are calling this recursively.
        """
        if isinstance(obj, list):
            res_v = []
            for item in obj:
                res_v.append(self._decrypt_and_update(item))
            return res_v
        elif isinstance(obj, dict):
            for k, v in obj.items():
                obj[k] = self._decrypt_and_update(v)
        else:
            try:
                if 'BEGIN PGP' in obj:
                    try:
                        decrypted = self._gpg.decrypt(obj)
                        if decrypted.ok:
                            obj = decrypted.data.decode('utf-8')
                        else:
                            logger.error("gpg error unpacking secrets %s"
                                         % decrypted.stderr)
                    except Exception as e:
                        logger.error("error unpacking secrets %s" % e)
            except TypeError as e:
                logger.info('Pass on decryption. Only decrypt strings')
        return obj

    def _post_load_process(self, cfg):
        if gpg_loaded:
            gpgbinary='gpg'
            gnupghome=None
            try:
                if 'FIGGY_GPG_BINARY' in os.environ:
                    gpgbinary = os.environ['FIGGY_GPG_BINARY']
                if 'FIGGY_GPG_HOME' in os.environ:
                    gnupghome = os.environ['FIGGY_GPG_HOME']
                self._gpg = gnupg.GPG(gpgbinary=gpgbinary, gnupghome=gnupghome)
                return self._decrypt_and_update(cfg)
            except OSError as e:
                if len(e.args) == 2:
                    if (e.args[1] == 'The system cannot find the file specified'
                            or 'No such file or directory' in e.args[1]):
                        # frobnicate
                        if not 'FIGGY_GPG_BINARY' in os.environ:
                            logger.error(
                                "cannot find gpg executable, path=%s, try setting GPG_BINARY env variable"
                                % gpgbinary)
                        else:
                            logger.error("cannot find gpg executable, path=%s" % gpgbinary)
                else:
                    logger.error("cannot setup gpg, %s" % e)
        return cfg

    def _get_file(self, f):
        """Get a config file if possible"""
        if os.path.isabs(f):
            return f
        else:
            for d in Config._dirs:
                _f = os.path.join(d, f)
                if os.path.isfile(_f):
                    return _f
            raise FiggypyError("could not find configuration file {} in dirs {}"
                               .format(f, Config._dirs))
Python
0.999296
@@ -48,122 +48,8 @@ ml%0A%0A -logger = logging.getLogger('figgypy')%0Aif len(logger.handlers) == 0:%0A logger.addHandler(logging.NullHandler())%0A%0A gpg_ @@ -213,16 +213,120 @@ pass%0A%0A%0A +log = logging.getLogger('figgypy')%0Aif len(log.handlers) == 0:%0A log.addHandler(logging.NullHandler())%0A %0A%0Aclass
b745b6f26bb55e36984e6600293073a39367a523
version 2 Added icon to the Files menu
plugins/Python_editor.py
plugins/Python_editor.py
# Created by: Storm Shadow  http://www.techbliss.org
# WARNING! All changes made in this file will be lost!
import re
import idaapi
import idc
from idc import *
from idaapi import *
import idautils
class ripeye(idaapi.plugin_t):
    flags = idaapi.PLUGIN_FIX
    comment = "This is a comment"
    help = "Python Editor"
    wanted_name = "Python Editor"
    wanted_hotkey = ""

    def init(self):
        idaapi.msg("Python Editor Is Found GoTo File Menu \n")
        return idaapi.PLUGIN_OK

    def run(self, arg):
        idaapi.msg("run() called with %d!\n" % arg)

    def term(self):
        idaapi.msg("")

    def AddMenuElements(self):
        idaapi.add_menu_item("File/", "Code editor", "Alt-E", 0, self.popeye, ())

    def run(self, arg = 0):
        idaapi.msg("Python Editor Loaded Shortcut Alt+E to Load")
        self.AddMenuElements()

    def popeye(self):
        g = globals()
        idahome = idaapi.idadir("plugins\\Code editor")
        IDAPython_ExecScript(idahome + "\\pyeditor.py", g)

def PLUGIN_ENTRY():
    return ripeye()
Python
0
@@ -192,17 +192,122 @@ dautils%0A +import sys%0Asys.path.insert(0 , idaapi.idadir(%22plugins%5C%5CCode editor%5C%5Cicons%22))%0Aimport ico%0Afrom ico import * %0A - class ri @@ -597,16 +597,17 @@ GIN_OK%0A%0A +%0A def @@ -715,24 +715,26 @@ pi.msg(%22%22)%0A%0A +%0A%0A def AddM @@ -834,16 +834,115 @@ ye, ())%0A + idaapi.set_menu_item_icon(%22File/Code editor%22, idaapi.load_custom_icon(%22:/ico/engine.png%22))%0A %0A%0A%0A%0A @@ -1031,17 +1031,16 @@ Load%22)%0A -%0A @@ -1163,16 +1163,16 @@ ditor%22)%0A - @@ -1224,16 +1224,17 @@ y%22, g)%0A%0A +%0A %0A%0Adef PL
c573263511bcbf0ffe37f538142aedd9064f8ae0
Remove copying devdata.env as it's only used for the Google API key we've removed
bin/devdata.py
bin/devdata.py
"""Download .devdata.env from github.com:hypothesis/devdata.git.""" import os from pathlib import Path from shutil import copyfile from subprocess import check_call from tempfile import TemporaryDirectory def _get_devdata(): # The directory that we'll clone the devdata git repo into. with TemporaryDirectory() as tmp_dir_name: git_dir = os.path.join(tmp_dir_name, "devdata") check_call(["git", "clone", "git@github.com:hypothesis/devdata.git", git_dir]) # Copy devdata env file into place. for source, target in ( ("via/devdata.env", ".devdata.env"), ( "via/devdata/google_drive_credentials.json", ".devdata/google_drive_credentials.json", ), ( "via/devdata/google_drive_resource_keys.json", ".devdata/google_drive_resource_keys.json", ), ): copyfile( os.path.join(git_dir, source), os.path.join(Path(__file__).parent.parent, target), ) if __name__ == "__main__": _get_devdata()
Python
0
@@ -13,20 +13,16 @@
 .devdata
-.env
  from gi
@@ -553,57 +553,8 @@
 n (%0A
-            (%22via/devdata.env%22, %22.devdata.env%22),%0A
    
3ed05d1a5d08d60bcc79db49d4f82e97be09c9c6
Fix conf_name handling
fabdeploy/fabd.py
fabdeploy/fabd.py
import os
import shutil
import logging

from fabric.api import env, run, sudo, puts, abort

from . import users, ssh
from .containers import conf as conf_dec
from .task import Task


__all__ = [
    'mkdirs',
    'remove_src',
    'debug',
    'conf',
    'default_conf',
    'create_user',
    'create_configs',
]


logger = logging.getLogger('fabdeploy.fabd')


class Mkdirs(Task):
    """
    Create all known remote dirs.

    We treat config variables ending with ``_path`` postfix as dir.
    """

    def do(self):
        home_dirs, sudo_dirs = [], []
        for k, v in self.conf.items():
            if k.endswith('_path'):
                if v.startswith(self.conf.home_path):
                    home_dirs.append(v)
                else:
                    sudo_dirs.append(v)
        run('mkdir --parents %s' % ' '.join(home_dirs))
        sudo('mkdir --parents %s' % ' '.join(sudo_dirs))

mkdirs = Mkdirs()


class RemoveSrc(Task):
    """
    Remove ``src_path`` dir.

    This is usefull when you want to perform clean deploy.
    See also ``virtualenv.remove``.
    """

    def do(self):
        sudo('rm --recursive --force %(src_path)s' % self.conf)

remove_src = RemoveSrc()


class Debug(Task):
    """Print config variable."""

    def do(self):
        if 'var' in self.conf:
            puts(self.conf[self.conf.var])
        else:
            out = '\n'
            for k, v in self.conf.items():
                out += '%s = %s\n' % (k, v)
            puts(out)

    def run(self, var=None, **kwargs):
        if var is not None:
            kwargs.setdefault('var', var)
        super(Debug, self).run(**kwargs)

debug = Debug()


class Conf(Task):
    def _conf_name(self, name):
        return ''.join([p[:1].upper() + p[1:] for p in name.split('_')]) + 'Conf'

    def get_conf(self):
        try:
            import fabconf as config
        except ImportError:
            abort('Can not import fabconf.py.')
        name = self._conf_name(self.conf.name)
        conf = getattr(config, name)(name='fabd.conf')

        if self.conf.conf_name == 'default':
            conf.set_globally('conf_name', self.conf.name)
        return conf

    def create_conf(self):
        conf = self.get_conf()
        for k, v in self.task_kwargs.items():
            conf[k] = v
        return conf

    def do(self):
        env.conf = self.create_conf()
        env.hosts = [env.conf.address]

    def run(self, name, **kwargs):
        kwargs.setdefault('name', name)
        return super(Conf, self).run(**kwargs)

conf = Conf()


class DefaultConf(Conf):
    def get_conf(self):
        from .containers import DefaultConf
        return DefaultConf(name='default')

    def run(self, **kwargs):
        return super(Conf, self).run(**kwargs)

default_conf = DefaultConf()


class CreateUser(Task):
    @conf_dec
    def fabd_user(self):
        return 'fabdeploy'

    def do(self):
        users.create.run(user=self.conf.fabd_user)
        ssh.push_key.run(
            user=self.conf.fabd_user,
            pub_key_file='~/.ssh/id_rsa.pub')
        users.grant_sudo.run(user=self.conf.fabd_user)

create_user = CreateUser()


class CreateConfigs(Task):
    """Creates config_templates directory with all available configs."""

    @conf_dec
    def configs_src(self):
        return os.path.join(
            os.path.dirname(__file__), 'config_templates')

    @conf_dec
    def configs_target(self):
        return os.path.join(os.getcwd(), 'config_templates')

    def do(self):
        for (dirpath, dirnames, filenames) in os.walk(self.conf.configs_src):
            for filename in filenames:
                src_filepath = os.path.join(dirpath, filename)
                name = src_filepath.replace(self.conf.configs_src + '/', '')
                target_filepath = os.path.join(
                    self.conf.configs_target, name)
                if os.path.exists(target_filepath):
                    continue
                puts('Copying %s...' % filename)

                try:
                    os.makedirs(os.path.dirname(target_filepath))
                except OSError, exc:
                    logger.debug('CreateConfigs: %s' % exc)
                shutil.copyfile(src_filepath, target_filepath)

create_configs = CreateConfigs()
Python
0.000152
@@ -2041,25 +2041,24 @@
 .conf')%0A
-%0A
         if self.
@@ -2053,74 +2053,27 @@
-if self.conf.conf_name == 'default':%0A            conf.set_globally
+conf.set_conf_value
 ('co
@@ -2096,16 +2096,38 @@
 onf.name
+, keep_user_value=True
 )%0A%0A
81b5961cdf4b9ca7e20920eda3c7f76f96a35a9b
Bump version
filer/__init__.py
filer/__init__.py
#-*- coding: utf-8 -*-
# version string following pep-0396 and pep-0386
__version__ = '0.9pbs.105.dev1' # pragma: nocover

default_app_config = 'filer.apps.FilerConfig'
Python
0
@@ -94,13 +94,8 @@
 .105
-.dev1
 ' #
ae6892be0bac3ef40cbf8e666a61562faec8f1f0
one command is enough
fabfile/travis.py
fabfile/travis.py
import glob
import os

from fabric.api import task, local
from fabric.context_managers import lcd


class Test(object):
    def __init__(self, func, deps=[], needs_pypy=True, needs_rubyspec=False):
        self.func = func
        self.deps = deps
        self.needs_pypy = needs_pypy
        self.needs_rubyspec = needs_rubyspec

    def install_deps(self):
        local("pip install --use-mirrors {}".format(" ".join(self.deps)))

    def download_pypy(self):
        local("wget https://bitbucket.org/pypy/pypy/get/default.tar.bz2 -O `pwd`/../pypy.tar.bz2")
        local("bunzip2 `pwd`/../pypy.tar.bz2")
        local("tar -xf `pwd`/../pypy.tar -C `pwd`/../")
        [path_name] = glob.glob("../pypy-pypy*")
        path_name = os.path.abspath(path_name)
        with open("pypy_marker", "w") as f:
            f.write(path_name)

    def download_mspec(self):
        with lcd(".."):
            local("git clone --depth=100 --quiet https://github.com/rubyspec/mspec")

    def download_rubyspec(self):
        with lcd(".."):
            local("git clone --depth=100 --quiet https://github.com/rubyspec/rubyspec")

    def run_tests(self):
        env = {}
        if self.needs_pypy:
            with open("pypy_marker") as f:
                env["pypy_path"] = f.read()
        self.func(env)


@task
def install_requirements():
    t = TEST_TYPES[os.environ["TEST_TYPE"]]
    if t.deps:
        t.install_deps()
    if t.needs_pypy:
        t.download_pypy()
    if t.needs_rubyspec:
        t.download_mspec()
        t.download_rubyspec()


@task
def run_tests():
    t = TEST_TYPES[os.environ["TEST_TYPE"]]
    t.run_tests()


def run_own_tests(env):
    local("PYTHONPATH=$PYTHONPATH:{pypy_path} py.test".format(**env))


def run_rubyspec_untranslated(env):
    run_specs("bin/topaz_untranslated.py", prefix="PYTHONPATH=$PYTHONPATH:{pypy_path} ".format(**env))


def run_translate_tests(env):
    local("PYTHONPATH={pypy_path}:$PYTHONPATH python {pypy_path}/pypy/translator/goal/translate.py --batch -Ojit targetrupypy.py".format(**env))
    run_specs("`pwd`/topaz-c")


def run_specs(binary, prefix=""):
    # TODO: this list is temporary until we have all the machinery necessary to
    # run the full rubyspec directory (including the tagging feature)
    rubyspec_tests = [
        "language/and_spec.rb",
        "language/array_spec.rb",
        "language/match_spec.rb",
        "language/module_spec.rb",
        "language/not_spec.rb",
        "language/numbers_spec.rb",
        "language/order_spec.rb",
        "language/splat_spec.rb",
        "language/undef_spec.rb",
        "language/unless_spec.rb",
        "language/yield_spec.rb",
        "language/regexp/grouping_spec.rb",
        "language/regexp/repetition_spec.rb",
        "core/array/array_spec.rb",
        "core/array/empty_spec.rb",
        "core/basicobject/ancestors_spec.rb",
        "core/basicobject/class_spec.rb",
        "core/basicobject/new_spec.rb",
        "core/basicobject/superclass_spec.rb",
        "core/comparable/between_spec.rb",
        "core/false/and_spec.rb",
        "core/false/inspect_spec.rb",
        "core/false/or_spec.rb",
        "core/false/to_s_spec.rb",
        "core/false/xor_spec.rb",
        "core/fixnum/comparison_spec.rb",
        "core/fixnum/even_spec.rb",
        "core/fixnum/hash_spec.rb",
        "core/fixnum/odd_spec.rb",
        "core/fixnum/to_f_spec.rb",
        "core/fixnum/zero_spec.rb",
        "core/hash/empty_spec.rb",
        "core/nil/and_spec.rb",
        "core/nil/inspect_spec.rb",
        "core/nil/nil_spec.rb",
        "core/nil/or_spec.rb",
        "core/nil/to_a_spec.rb",
        "core/nil/to_i_spec.rb",
        "core/nil/to_s_spec.rb",
        "core/nil/xor_spec.rb",
        "core/regexp/casefold_spec.rb",
        "core/regexp/source_spec.rb",
        "core/true/and_spec.rb",
        "core/true/inspect_spec.rb",
        "core/true/or_spec.rb",
        "core/true/to_s_spec.rb",
        "core/true/xor_spec.rb",
    ]
    local("{prefix}../mspec/bin/mspec -t {binary} {spec_files}".format(
        prefix=prefix,
        binary=binary,
        spec_files=" ".join(os.path.join("../rubyspec", p) for p in rubyspec_tests),
    ))


def run_docs_tests(env):
    local("sphinx-build -W -b html docs/ docs/_build/")

TEST_TYPES = {
    "own": Test(run_own_tests, deps=["pytest", "-r requirements.txt"]),
    "rubyspec_untranslated": Test(run_rubyspec_untranslated, deps=["-r requirements.txt"], needs_rubyspec=True),
    "translate": Test(run_translate_tests, deps=["-r requirements.txt"], needs_rubyspec=True),
    "docs": Test(run_docs_tests, deps=["sphinx"], needs_pypy=False),
}
Python
0.999908
@@ -559,55 +559,8 @@
 2%22)%0A
-        local(%22bunzip2 %60pwd%60/../pypy.tar.bz2%22)%0A
    
@@ -595,16 +595,20 @@
 pypy.tar
+.bz2
  -C %60pwd
444f51c72d4ac92ad99c6e0466dfcf8073b03b19
Update tesseract.py
apps/tesseract.py
apps/tesseract.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
Python-tesseract is an optical character recognition (OCR) tool for python.
That is, it will recognize and "read" the text embedded in images.

Python-tesseract is a wrapper for google's Tesseract-OCR
( http://code.google.com/p/tesseract-ocr/ ). It is also useful as a
stand-alone invocation script to tesseract, as it can read all image types
supported by the Python Imaging Library, including jpeg, png, gif, bmp, tiff,
and others, whereas tesseract-ocr by default only supports tiff and bmp.
Additionally, if used as a script, Python-tesseract will print the recognized
text in stead of writing it to a file. Support for confidence estimates and
bounding box data is planned for future releases.


USAGE:
From the shell:
 $ ./tesseract.py test.png                  # prints recognized text in image
 $ ./tesseract.py -l fra test-european.jpg  # recognizes french text
In python:
 > import Image
 > from tesseract import image_to_string
 > print image_to_string(Image.open('test.png'))
 > print image_to_string(Image.open('test-european.jpg'), lang='fra')


INSTALLATION:
* Python-tesseract requires python 2.5 or later.
* You will need the Python Imaging Library (PIL). Under Debian/Ubuntu, this is
  the package "python-imaging".
* Install google tesseract-ocr from http://code.google.com/p/tesseract-ocr/ .
  You must be able to invoke the tesseract command as "tesseract". If this
  isn't the case, for example because tesseract isn't in your PATH, you will
  have to change the "tesseract_cmd" variable at the top of 'tesseract.py'.

COPYRIGHT:
Python-tesseract is released under the GPL v3.
Copyright (c) Samuel Hoffstaetter, 2009
http://wiki.github.com/hoffstaetter/python-tesseract

'''

# CHANGE THIS IF TESSERACT IS NOT IN YOUR PATH, OR IS NAMED DIFFERENTLY
tesseract_cmd = 'tesseract'

import Image
import StringIO
import subprocess
import sys
import os

__all__ = ['image_to_string']


class TesseractError(Exception):
    def __init__(self, status, message):
        self.status = status
        self.message = message
        self.args = (status, message)


def run_tesseract(input_filename, output_filename_base, lang=None, boxes=False):
    '''
    runs the command:
        `tesseract_cmd` `input_filename` `output_filename_base`

    returns the exit status of tesseract, as well as tesseract's stderr output
    '''
    command = [tesseract_cmd, input_filename, output_filename_base]
    if lang is not None:
        command += ['-l', lang]
    if boxes:
        command += ['batch.nochop', 'makebox']
    proc = subprocess.Popen(command, stderr=subprocess.PIPE)
    return (proc.wait(), proc.stderr.read())


def cleanup(filename):
    ''' tries to remove the given filename. Ignores non-existent files '''
    try:
        os.remove(filename)
    except OSError:
        pass


def get_errors(error_string):
    '''
    returns all lines in the error_string that start with the string "error"
    '''
    lines = error_string.splitlines()
    error_lines = tuple(line for line in lines if line.find('Error') >= 0)
    if len(error_lines) > 0:
        return '\n'.join(error_lines)
    else:
        return error_string.strip()


def tempnam():
    ''' returns a temporary file-name '''
    # prevent os.tmpname from printing an error...
    stderr = sys.stderr
    try:
        sys.stderr = StringIO.StringIO()
        return os.tempnam(None, 'tess_')
    finally:
        sys.stderr = stderr


def image_to_string(image, lang=None, boxes=False):
    '''
    Runs tesseract on the specified image. First, the image is written to disk,
    and then the tesseract command is run on the image. Resseract's result is
    read, and the temporary files are erased.
    '''
    input_file_name = '%s.bmp' % tempnam()
    output_file_name_base = tempnam()
    if not boxes:
        output_file_name = '%s.txt' % output_file_name_base
    else:
        output_file_name = '%s.box' % output_file_name_base
    try:
        image.save(input_file_name)
        status, error_string = run_tesseract(input_file_name,
                                             output_file_name_base,
                                             lang=lang,
                                             boxes=boxes)
        if status:
            errors = get_errors(error_string)
            raise TesseractError(status, errors)
        f = file(output_file_name)
        try:
            return f.read().strip()
        finally:
            f.close()
    finally:
        cleanup(input_file_name)
        cleanup(output_file_name)


if __name__ == '__main__':
    if len(sys.argv) == 2:
        filename = sys.argv[1]
        try:
            image = Image.open(filename)
        except IOError:
            sys.stderr.write('ERROR: Could not open file "%s"\n' % filename)
            exit(1)
        print image_to_string(image)
    elif len(sys.argv) == 4 and sys.argv[1] == '-l':
        lang = sys.argv[2]
        filename = sys.argv[3]
        try:
            image = Image.open(filename)
        except IOError:
            sys.stderr.write('ERROR: Could not open file "%s"\n' % filename)
            exit(1)
        print image_to_string(image, lang=lang)
    else:
        sys.stderr.write('Usage: python tesseract.py [-l language] input_file\n')
        exit(2)
Python
0
@@ -1849,24 +1849,28 @@
 ct'%0A%0Aimport 
+PIL.
 Image%0Aimport
2aba25d394f722761af6e4d114d91d2824c17ee9
Fix social media plugin error on save
shoop/xtheme/plugins/social_media_links.py
shoop/xtheme/plugins/social_media_links.py
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import re
from collections import defaultdict

from django import forms
from django.utils.translation import ugettext_lazy as _

from shoop.xtheme import TemplatedPlugin
from shoop.xtheme.plugins.forms import GenericPluginForm, TranslatableField


class SocialMediaLinksPluginForm(GenericPluginForm):
    """
    Form for the social media links xtheme plugin. One field is provided for
    each entry in the plugin's icon_classes attribute, which maps social media
    site names to font-awesome icon classes by default.
    """

    def populate(self):
        """
        Populates form with default plugin fields as well as any social media
        link type included in the plugin's ``icon_classes`` attribute.

        Also adds an ordering field for each link type to change display order.
        """
        icon_classes = self.plugin.icon_classes
        links = self.plugin.config.get("links", {})
        for name, icon_class in sorted(icon_classes.items()):
            url = links[name]["url"] if name in links else ""
            ordering = links[name]["ordering"] if name in links else None
            self.fields[name] = forms.URLField(
                label=name,
                required=False,
                widget=forms.TextInput(attrs={"placeholder": "URL"}),
                initial=url)
            self.fields["%s-ordering" % name] = forms.IntegerField(
                label="",
                required=False,
                min_value=0,
                max_value=len(icon_classes)*2,
                initial=ordering,
                widget=forms.NumberInput(attrs={"placeholder": "Ordering"}))
        super(SocialMediaLinksPluginForm, self).populate()

    def clean(self):
        """
        Returns cleaned data from default plugin fields and any link fields.

        Processed link configuration information is stored and returned as a
        dictionary (``links``).
        """
        links_dict = defaultdict(dict)
        cleaned_data = {}
        precleaned_data = super(SocialMediaLinksPluginForm, self).clean()
        plugin_fields = [field for (field, value) in self.plugin.fields]
        for field in plugin_fields:
            cleaned_data[field] = precleaned_data.pop(field)
        for link_info in precleaned_data:
            link_name = re.sub("-ordering", "", link_info)
            if not link_info.endswith("-ordering"):
                links_dict[link_name]["url"] = precleaned_data[link_info]
            else:
                links_dict[link_name]["ordering"] = precleaned_data[link_info]
        cleaned_data["links"] = {k: v for (k, v) in links_dict.items() if v["url"]}
        return cleaned_data


class SocialMediaLinksPlugin(TemplatedPlugin):
    """
    An xtheme plugin for displaying site links to common social media sites.
    """
    identifier = "social_media_links"
    name = _("Social Media Links")
    template_name = "shoop/xtheme/plugins/social_media_links.jinja"
    editor_form_class = SocialMediaLinksPluginForm
    fields = [
        ("topic", TranslatableField(label=_("Topic"), required=False, initial="")),
        ("text", TranslatableField(label=_("Title"), required=False, initial="")),
        ("icon_size", forms.ChoiceField(label=_("Icon Size"), required=False, choices=[
            ("", "Default"),
            ("lg", "Large"),
            ("2x", "2x"),
            ("3x", "3x"),
            ("4x", "4x"),
            ("5x", "5x"),
        ], initial="")),
        ("alignment", forms.ChoiceField(label=_("Alignment"), required=False, choices=[
            ("", "Default"),
            ("left", "Left"),
            ("center", "Center"),
            ("right", "Right"),
        ], initial="")),
    ]

    icon_classes = {
        "Facebook": "facebook-square",
        "Flickr": "flickr",
        "Google Plus": "google-plus-square",
        "Instagram": "instagram",
        "Linkedin": "linkedin-square",
        "Pinterest": "pinterest",
        "Tumbler": "tumblr",
        "Twitter": "twitter",
        "Vimeo": "vimeo",
        "Vine": "vine",
        "Yelp": "yelp",
        "Youtube": "youtube",
    }

    def get_context_data(self, context):
        """
        Returns plugin settings and a sorted list of social media links

        :return: Plugin context data
        :rtype: dict
        """
        links = self.get_links()
        return {
            "links": links,
            "request": context["request"],
            "topic": self.get_translated_value("topic"),
            "text": self.get_translated_value("text"),
            "icon_size": self.config.get("icon_size", ""),
            "alignment": self.config.get("alignment", ""),
        }

    def get_links(self):
        """
        Returns the list of social media links sorted according to ordering

        :return: List of link tuples (ordering, icon class, url)
        :rtype: [(int, str, str)]
        """
        links = self.config.get("links", {})
        return sorted([
            (v["ordering"] or 0, self.icon_classes[k], v["url"])
            for (k, v) in links.items()
        ])
Python
0
@@ -2782,23 +2782,33 @@
 s() if v
-%5B%22url%22%5D
+.get(%22url%22, None)
 %7D%0A
eef9d75a7d019a397d2026612ece76d217747e5b
mark oddity
src/zeit/edit/browser/tests/test_form.py
src/zeit/edit/browser/tests/test_form.py
# Copyright (c) 2012 gocept gmbh & co. kg
# See also LICENSE.txt

from mock import Mock
import zeit.cms.testing
import zeit.edit.browser.form
import zope.formlib.form
import zope.interface
import zope.publisher.browser
import zope.schema


class IExample(zope.interface.Interface):

    foo = zope.schema.TextLine(title=u'foo')


class InlineForm(zeit.cms.testing.FunctionalTestCase):

    def render_form(self, form_class):
        ANY_CONTEXT = Mock()
        zope.interface.alsoProvides(ANY_CONTEXT, IExample)
        request = zope.publisher.browser.TestRequest()
        form = form_class(ANY_CONTEXT, request)
        return form()

    def test_css_class_on_widget_is_rendered_to_html(self):
        class ExampleForm(zeit.edit.browser.form.InlineForm):
            form_fields = zope.formlib.form.FormFields(IExample)
            legend = 'Legend'

            def setUpWidgets(self):
                super(ExampleForm, self).setUpWidgets()
                self.widgets['foo'].vivi_css_class = 'barbaz qux'

        self.assertEllipsis("""\
...<div class="field fieldname-foo required barbaz qux">
<div class="label">...""", self.render_form(ExampleForm))

    def test_widget_without_css_class_does_not_break(self):
        class ExampleForm(zeit.edit.browser.form.InlineForm):
            form_fields = zope.formlib.form.FormFields(IExample)
            legend = 'Legend'

        self.assertEllipsis("""\
...<div class="field fieldname-foo required">
<div class="label">...""", self.render_form(ExampleForm))
Python
0.00151
@@ -375,24 +375,262 @@
 TestCase):%0A%0A
+    # XXX This test should be moved to zeit.cms.browser, but it seems nearly%0A    # impossible to instantiate an EditForm, so we punt on this for now;%0A    # InlineForms are friendlier (since they don't pull in the%0A    # main_template.pt)%0A%0A
     def rend
1b94e5564b7940139e56310f18c58999f0c598b2
validate by casting
filestore/core.py
filestore/core.py
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)
import six

from document import Document
from jsonschema import validate as js_validate


class DatumNotFound(Exception):
    pass


def get_datum(col, eid, _DATUM_CACHE, get_spec_handler, logger):
    try:
        datum = _DATUM_CACHE[eid]
    except KeyError:
        keys = ['datum_kwargs', 'resource']
        # find the current document
        edoc = col.find_one({'datum_id': eid})
        if edoc is None:
            raise DatumNotFound(
                "No datum found with datum_id {!r}".format(eid))
        # save it for later
        datum = {k: edoc[k] for k in keys}

        res = edoc['resource']
        count = 0
        for dd in col.find({'resource': res}):
            count += 1
            d_id = dd['datum_id']
            if d_id not in _DATUM_CACHE:
                _DATUM_CACHE[d_id] = {k: dd[k] for k in keys}
        if count > _DATUM_CACHE.max_size:
            logger.warn("More datum in a resource than your "
                        "datum cache can hold.")

    handler = get_spec_handler(datum['resource'])
    return handler(**datum['datum_kwargs'])


def bulk_insert_datum(col, resource, datum_ids, datum_kwarg_list):
    resource_id = resource['id']

    def datum_factory():
        for d_id, d_kwargs in zip(datum_ids, datum_kwarg_list):
            datum = dict(resource=resource_id,
                         datum_id=d_id,
                         datum_kwargs=d_kwargs)
            yield datum

    bulk = col.initialize_ordered_bulk_op()
    for dm in datum_factory():
        bulk.insert(dm)

    return bulk.execute()


def insert_datum(col, resource, datum_id, datum_kwargs, known_spec):
    spec = resource['spec']

    if spec in known_spec:
        js_validate(datum_kwargs, known_spec[spec]['datum'])

    datum = dict(resource=resource['id'],
                 datum_id=datum_id,
                 datum_kwargs=datum_kwargs)

    col.insert_one(datum)
    # do not leak mongo objectID
    datum.pop('_id', None)

    return Document('datum', datum)


def insert_resource(col, spec, resource_path, resource_kwargs, known_spec):
    if spec in known_spec:
        js_validate(resource_kwargs, known_spec[spec]['resource'])

    resource_object = dict(spec=spec,
                           resource_path=resource_path,
                           resource_kwargs=resource_kwargs)

    col.insert_one(resource_object)
    # rename to play nice with ME
    resource_object['id'] = resource_object.pop('_id')
    return resource_object
Python
0.000001
@@ -191,16 +191,42 @@
 alidate%0A
+from bson import ObjectId%0A
 %0A%0Aclass 
@@ -1305,16 +1305,25 @@
 ce_id = 
+ObjectId(
 resource
@@ -1328,16 +1328,17 @@
 ce%5B'id'%5D
+)
 %0A%0A    de
@@ -1501,20 +1501,25 @@
 atum_id=
+str(
 d_id
+)
 ,%0A
@@ -1550,16 +1550,21 @@
 _kwargs=
+dict(
 d_kwargs
@@ -1564,16 +1564,17 @@
 _kwargs)
+)
 %0A
@@ -1922,32 +1922,41 @@
  = dict(resource=
+ObjectId(
 resource%5B'id'%5D,
@@ -1953,17 +1953,35 @@
 ce%5B'id'%5D
-,
+),%0A                 
 datum_i
@@ -1982,24 +1982,28 @@
 atum_id=
+str(
 datum_id
 ,%0A
@@ -1994,16 +1994,17 @@
 datum_id
+)
 ,%0A
@@ -2027,16 +2027,21 @@
 _kwargs=
+dict(
 datum_kw
@@ -2037,32 +2037,33 @@
 ct(datum_kwargs)
+)
 %0A%0A    col.insert
@@ -2265,24 +2265,68 @@
 nown_spec):%0A
+    resource_kwargs = dict(resource_kwargs)%0A
     if spec
401f55929eff1e48e68694ca8b49aa64005fd411
remove data directories in nIR_run
bin/nIR_run.py
bin/nIR_run.py
#!/usr/bin/python
# Script to perform a convolution on a spectrum.
# Can take a number of parameters if needed
from __future__ import division, print_function
import argparse
import sys
from datetime import datetime as dt

from eniric.nIRanalysis import convolve_spectra
from eniric.resample import resampler
from eniric.utilities import get_spectrum_name
import eniric


def _parser():
    """Take care of all the argparse stuff.

    :returns: the args
    """
    parser = argparse.ArgumentParser(description='Helpful discription')
    parser.add_argument("-s", '--startype', help='Spectral Type e.g "MO"',
                        type=str, nargs="+")
    parser.add_argument("-v", "--vsini", help="Rotational velocity of source",
                        type=float, nargs="+")
    parser.add_argument("-R", "--resolution", help="Observational resolution",
                        type=float, nargs="+")
    parser.add_argument("-b", "--band", type=str, default="ALL",
                        choices=["ALL", "VIS", "GAP", "Z", "Y", "J", "H", "K"],
                        help="Wavelength band to select", nargs="+")
    parser.add_argument('-d', '--data_dir', help='Data directory', type=str, default=None)
    parser.add_argument('--sample_rate', default=[3.0], type=float, nargs="+",
                        help="Resample rate, pixels per FWHM. Default=3.0")
    parser.add_argument('--results', default=None, type=str,
                        help='Result directory Default=data_dir+"/results/"')
    parser.add_argument('--resamples', default=None, type=str,
                        help='Resample directory. Default=data_dir+"/resampled/"')
    parser.add_argument('--noresample', help='Resample output', default=False,
                        action="store_true")
    parser.add_argument('--normalize', help='Normalize for wavelength step',
                        default=True, action="store_false")
    # This logic needs fixed. (--flag shoud be "unnormalize" to turn normalization off)
    parser.add_argument('--org', help='Only use original .dat files, (temporary option)',
                        default=False, action="store_true")
    args = parser.parse_args()
    return args


def main(startype, vsini, resolution, band, data_dir=None, results=None,
         resamples=None, sample_rate=3.0, noresample=False, normalize=True,
         org=False):
    """Run convolutions of NIR spectra for the range of given parameters.

    Multiple values of startype, vsini, resolution, band, and sample_rate can be provided.

    Read files from data_dir + "PHOENIX_ACES_spectra/"
    Saves results to data_dir + "results/"
    Resamples results to data_dir + "resamples/"

    Parameters
    ----------
    startype: list of strings
    vsini: list of floats
    resolution: list of floats
    band: list of strings
    data_dir: str, default=None
    results: str, default=None
    resample: str, default=None
    sample_rate: list of floats default=[3.0]
    noresample: bool default=False
    normalize: bool default=True

    """
    # vsini, resolution, band and sample_rate can all be a series of values
    start_time = dt.now()
    if data_dir is None:
        data_dir = "../data/"

    if results is None:
        results_dir = data_dir + "results/"
    else:
        results_dir = results
    phoenix_path = eniric.path["phoenix_dat"]

    if resamples is None:
        resampled_dir = data_dir + "resampled/"
    else:
        resampled_dir = resamples

    counter = 0
    for star in startype:
        spectrum_name = os.path.join(phoenix_path, get_spectrum_name(star, org=org))
        for b in band:
            for vel in vsini:
                for R in resolution:
                    for sample in sample_rate:
                        if normalize:
                            # when normalize ation is confirmed then can
                            result_name = "Spectrum_{0}-PHOENIX-ACES_{1}band_vsini{2}_R{3}k.txt".format(star, b, vel, int(R / 1000))
                        else:
                            result_name = "Spectrum_{0}-PHOENIX-ACES_{1}band_vsini{2}_R{3}k_unnormalized.txt".format(star, b, vel, int(R / 1000))
                        print("Name to be result file", result_name)

                        convolve_spectra(data_dir + spectrum_name, b, vel, R,
                                         epsilon=0.6, plot=False, fwhm_lim=5.0,
                                         num_procs=None, data_rep=data_dir,
                                         results_dir=results_dir,
                                         normalize=normalize,
                                         output_name=result_name)

                        # Resample only the file just made
                        if noresample:
                            pass
                        else:
                            resampler(result_name, results_dir=results_dir,
                                      resampled_dir=resampled_dir,
                                      sampling=sample)
                        counter += 1

    print("Time to convolve {0:d} combinations = {1}".format(counter, dt.now() - start_time))
    return 0


if __name__ == '__main__':
    args = vars(_parser())
    # startype = args.pop("startype")  # positional arguments
    opts = {k: args[k] for k in args}
    # sys.exit(main(startype, **opts))
    sys.exit(main(**opts))
Python
0.000016
@@ -510,17 +510,19 @@
 on='
-H
+Unh
 elpful d
@@ -517,17 +517,17 @@
 elpful d
-i
+e
 scriptio
@@ -1096,99 +1096,8 @@
 +%22)%0A
-    parser.add_argument('-d', '--data_dir', help='Data directory', type=str, default=None)%0A
    
@@ -1251,293 +1251,8 @@
 0%22)%0A
-    parser.add_argument('--results', default=None, type=str,%0A                        help='Result directory Default=data_dir+%22/results/%22')%0A    parser.add_argument('--resamples', default=None, type=str,%0A                        help='Resample directory. Default=data_dir+%22/resampled/%22')%0A
    
@@ -1839,62 +1839,8 @@
 and,
- data_dir=None, results=None,%0A         resamples=None,
 sam
@@ -1852,16 +1852,25 @@
 ate=3.0,
+%0A         
 noresam
@@ -1895,25 +1895,16 @@
 ze=True,
-%0A         
 org=Fal
@@ -2137,100 +2137,8 @@
 ra/%22
-%0A    Saves results to data_dir + %22results/%22%0A    Resamples results to data_dir + %22resamples/%22
 %0A%0A
@@ -2282,103 +2282,8 @@
 ngs%0A
-    data_dir: str, default=None%0A    results: str, default=None%0A    resample: str, default=None%0A
    
@@ -2507,307 +2507,97 @@
 w()%0A
-    if data_dir is None:%0A        data_dir = %22../data/%22%0A%0A    if results is None:%0A        results_dir = data_dir + %22results/%22%0A    else:%0A        results_dir = results%0A    phoenix_path = eniric.path%5B%22phoenix_dat%22%5D%0A%0A    if resamples is None:%0A        resampled_dir = data_dir + %22resampled/%22%0A    else:%0A        
+%0A    phoenix_path = eniric.path%5B%22phoenix_dat%22%5D%0A%0A    results_dir = eniric.path%5B%22results%22%5D%0A    
@@ -2612,24 +2612,38 @@
 d_dir = 
+eniric.paths%5B%22
 resample
 s%0A%0A    c
@@ -2634,17 +2634,19 @@
 resample
-s
+d%22%5D
 %0A%0A    co
@@ -3572,27 +3572,8 @@
 one,
- data_rep=data_dir,
 %0A
0f41838d07c15bb22861e884949306a8498ead58
Support move
archive_images.py
archive_images.py
#!/usr/bin/env python
"""
Sorts image files by time - copies them into folders by year and month.

Written by Friedrich C. Kischkel.
"""

import os
import re
import shutil
import time
import argparse

IMAGE_FILE = re.compile(r"""\.(jpe?g)|(png)|(tiff?)$""", re.IGNORECASE)
EXIF_TIME_FORMAT = "%Y:%m:%d %H:%M:%S"


def time_taken(path):
    """Get time a picture was taken or at least file c/mtime."""
    times = [
        time.localtime(os.path.getctime(path)),
        time.localtime(os.path.getmtime(path)),
        time.localtime()  # now
    ]
    import exifread
    with open(path, 'rb') as imagefile:
        tags = exifread.process_file(imagefile, details=False)
    for tag in ['Image DateTime', 'EXIF DateTimeOriginal', 'EXIF DateTimeDigitized']:
        try:
            times.append(time.strptime(str(tags[tag]), EXIF_TIME_FORMAT))
        except KeyError, err:
            print \
                "WARNING: tag %(tag)s could not be retrieved from %(file)s" % \
                {"tag": err, "file": path}
    times.sort()
    return times[0]


def archive_image(srcpath, filename, dstpath, overwrite=False):
    """Copy image "filename" in "path" into a subfolder "dstpath"."""
    if re.search(IMAGE_FILE, filename):
        srcpath = os.path.join(srcpath, filename)
        ctime = time_taken(srcpath)
        dst = os.path.join(
            dstpath,
            time.strftime("%Y", ctime),
            time.strftime("%m", ctime))
        try:
            os.makedirs(dst)
        except OSError:
            pass
        if not overwrite and os.path.exists(os.path.join(dst, filename)):
            raise IOError('"%(path)s" already exists' % \
                {'path': os.path.join(dst, filename)})
        shutil.copy2(srcpath, dst)


def archive_all(srcpath, dstpath, overwrite=False, max_depth=None):
    """Copy files by creation time into sub-folders"""
    iteration = 0
    for current, _, files in os.walk(srcpath):
        for filename in files:
            try:
                archive_image(current, filename, dstpath, overwrite)
            except IOError, err:
                print "ERROR: copying image: %(msg)s" % {'msg': str(err)}
        iteration += 1
        if max_depth != None and iteration > max_depth:
            return


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""\
Copy images into year/month sub-folders by time they were taken.

Useful to get some chronological orientations when copying a bulk of images
from a camera's memory card to a local pictures folder.""")
    parser.add_argument('SOURCE', nargs='+', help='source path(s)')
    parser.add_argument('DESTINATION', nargs=1, help='destination path')
    parser.add_argument('-f', '--force', action='store_true', default=False,
        help='force overwriting of existing files (default: do not overwrite)')
    parser.add_argument('-d', '--depth', type=int,
        help='descend this deep into SOURCE directories')
    #parser.add_argument('--exec', help='execute command with args SRC DST')
    ARGS = parser.parse_args()
    for source in ARGS.SOURCE:
        archive_all(\
            source,\
            ARGS.DESTINATION[0],\
            overwrite=ARGS.force,\
            max_depth=ARGS.depth)
Python
0
@@ -1096,16 +1096,44 @@
 te=False
+, file_function=shutil.copy2
 ):%0A    %22
@@ -1730,28 +1730,29 @@
-shutil.copy2
+file_function
 (srcpath
@@ -1824,16 +1824,44 @@
 pth=None
+, file_function=shutil.copy2
 ):%0A    %22
@@ -2090,16 +2090,31 @@
 verwrite
+, file_function
 )%0A
@@ -2737,16 +2737,148 @@
 path')%0A
+    parser.add_argument('-m', '--move', action='store_true', default=False, help='move file instead of copying it (default: copy)')%0A
     pars
@@ -3227,16 +3227,83 @@
 _args()%0A
+    fct = shutil.copy2%0A    if ARGS.move:%0A        fct = shutil.move%0A
     for
@@ -3469,10 +3469,42 @@
 GS.depth
+,%5C%0A            file_function=fct
 )%0A
28b77053d633c0762854b8feeee393300668bff1
Update df_status.py
apps/tinyosGW/df_status/df_status.py
apps/tinyosGW/df_status/df_status.py
#!/usr/bin/env python
#-*- coding: utf-8 -*-
# Author : jeonghoonkang, https://github.com/jeonghoonkang

## 필독
'''
  ./out 디렉토리 생성해야 합니다
  ./out 권한은 sudo chgrp www-data out 으로 그룹 허가 추가
        sudo chmod 775 out
'''

from __future__ import print_function
import cgi
import cgitb
from subprocess import *
from types import *
import platform
import sys
import os
import datetime


def run_cmd(cmd):
    p = Popen(cmd, shell=True, stdout=PIPE)
    output = p.communicate()[0]
    return output

def hostname():
    cmd = "hostname"
    ret = run_cmd(cmd)
    return ret

def get_df():
    cmd = "df -h"
    ret = run_cmd(cmd)
    return ret

def getip():
    cmd = "curl http://checkip.amazonaws.com"
    ip = run_cmd(cmd)
    print ('[get-public-ip]', ip)
    return ip

def getiip():
    cmd="/sbin/ifconfig"
    _os_type = platform.system()
    _os_ver = os.uname()
    if (_os_ver[0] == 'Linux') :
        if (_os_ver[-1] == 'x86_64') :
            _os_type = 'Linux'
            cmd = "ifconfig"
    print ('os-type', _os_type)
    if _os_type.find('Cygwin') > 0:
        cmd = "ipconfig"
    iip = run_cmd(cmd)
    print (iip)
    return iip, _os_type

def get_ostype():
    #_os_type = platform.system()
    #_os_machine = platform.machine()
    _os_ver = os.uname()
    #print (_os_ver)
    #출력예
    #('Linux', 'gate', '4.1.19+', '#858 Tue Mar 15 15:52:03 GMT 2016','armv6l')
    if (_os_ver[0] == 'Linux') :
        if (_os_ver[-1] == 'x86_64') :
            _os_type = 'Linux'
        if (_os_ver[-1] == 'armv6l') :
            _os_type = 'Rasbian'
    return _os_type

def checkifexist(fname):
    cmd='ls ' + fname
    print (run_cmd(cmd))

def writefile(_in, fn="ip.txt"):
    f = open(fn, 'w')
    f.write(_in)
    f.flush()
    f.close()
    return

def args_proc():
    msg = "usage : python %s {server_IP_ADD} {server_PORT} {server_id} {passwd_for_server}" %__file__
    msg += " => user should input arguments {} "
    print (msg, '\n')
    if len(sys.argv) < 2:
        exit("[bye] you need to input args, ip / port / id")
    arg1 = sys.argv[1]
    arg2 = sys.argv[2]
    arg3 = sys.argv[3]
    arg4 = sys.argv[4]
    ip = arg1
    port = arg2
    id = arg3
    passwd = arg4
    print ("... start running, inputs are ", ip, port, id, passwd)
    return ip, port, id, passwd

if __name__ == '__main__':

    cgitb.enable()
    print ("Content-type: text\n")
    #print ("Content-type: text/html\n")

    os_type = get_ostype()
    info = get_df()
    #print (os_type)
    hostn = hostname()

    if os_type == 'Rasbian':
        name = 'pi'
    if os_type == 'Linux':
        name = 'tinyos'

    if (os_type == "Linux") or (os_type == 'Rasbian'):
        fname = '/home/%s/' %name
    elif os_type == 'Win' :
        fname = '/home/tinyos/' #수동설정해야 함
    elif os_type == "Darwin":
        fname = '/Users/%s/' %name
        sshpass = '/usr/local/bin/'

    fname = './out/%s_df.txt' %(hostn[:-1])
    writefile (info, fname)
    checkifexist(fname)

    #print ("finish and return string")
    print (info)
Python
0.000005
@@ -629,24 +629,282 @@
 return ret%0A%0A
+def get_file_count():%0A    cmd = %22ls /var/www/html/cam/motion %7C wc -l%22%0A    ret = run_cmd(cmd)%0A%0A    cmd = %22ls /var/www/html/cam/motion %7C grep avi %7C wc -l%22%0A    val = run_cmd(cmd)%0A    ret = %22 Total -%3E %22 + ret%0A    ret = ret + %22 AVI -%3E %22 + val%0A%0A    return ret%0A
 %0A
 def getip():
22c727e0e38953f3647a8a825b01fcf142c06c64
Bump version.
armet/_version.py
armet/_version.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import, division

__version_info__ = (0, 4, 16)
__version__ = '.'.join(map(str, __version_info__))
Python
0
@@ -116,9 +116,9 @@
 4, 1
-6
+7
 )%0A__
7a1ddf38db725f0696482a271c32fa297d629316
Set the version to the next patch release number (in dev mode)
backlog/__init__.py
backlog/__init__.py
__version__ = (0, 2, 1, '', 0)


def get_version():
    version = '%d.%d.%d' % __version__[0:3]
    if __version__[3]:
        version = '%s-%s%s' % (version, __version__[3],
                               (__version__[4] and str(__version__[4])) or '')
    return version
Python
0
@@ -18,12 +18,15 @@
 2, 
-1
+2
 , '
+dev
 ', 0
b808784711242099d8fbf9f0f1c7d13ca5a5a1d7
Bump the version to 0.3.2
backlog/__init__.py
backlog/__init__.py
"""A Simple Note Manager""" from __future__ import absolute_import from backlog.backlog import Backlog __version__ = '0.3.1'
Python
0.999999
@@ -121,7 +121,7 @@
 0.3.
-1
+2
 '%0A
d0c6ae0dbb68fad31c5f3e51d934b8c7f5e8534f
Add ability to override issue JQL in runner
jzb/runner.py
jzb/runner.py
from argparse import ArgumentParser
import logging
import sys

import jira
from redis import StrictRedis
import yaml

import zendesk
from jzb import LOG
from jzb.bridge import Bridge
from jzb.util import objectize


def configure_logger(level):
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    LOG.addHandler(handler)
    LOG.setLevel(level)


def main():
    parser = ArgumentParser()
    parser.add_argument('-c', '--config-file', default='config.yml')
    parser.add_argument('-v', '--verbose', action='store_true')

    args = parser.parse_args()

    if args.verbose:
        configure_logger(logging.DEBUG)
    else:
        configure_logger(logging.INFO)

    with open(args.config_file) as fp:
        config = objectize(yaml.load(fp))

    redis = StrictRedis(host=config.redis_host, port=config.redis_port)

    jira_client = jira.JIRA(server=config.jira_url,
                            basic_auth=(config.jira_username,
                                        config.jira_password))

    zd_client = zendesk.Client(url=config.zd_url,
                               username=config.zd_username,
                               password=config.zd_password)

    bridge = Bridge(jira_client=jira_client,
                    zd_client=zd_client,
                    redis=redis,
                    config=config)

    bridge.sync()


if __name__ == '__main__':
    main()
Python
0
@@ -626,16 +626,57 @@
 e_true')
+%0A    parser.add_argument('-Q', '--query')
 %0A%0A    ar
@@ -1431,16 +1431,79 @@
 onfig)%0A%0A
+    if args.query:%0A        bridge.jira_issue_jql = args.query%0A%0A
     brid
c08870b838a842e66dd46ebd790571c3ec336fc8
Fix call to collect bears from section
coalib/coala_main.py
coalib/coala_main.py
from pyprint.ConsolePrinter import ConsolePrinter
import os

from coalib import coala_delete_orig
from coalib.output.printers.LogPrinter import LogPrinter
from coalib.processes.Processing import execute_section, simplify_section_result
from coalib.settings.ConfigurationGathering import gather_configuration
from coalib.misc.Exceptions import get_exitcode
from coalib.collecting.Collectors import collect_all_bears_from_sections
from coalib.output.Interactions import fail_acquire_settings
from coalib.output.Tagging import tag_results, delete_tagged_results


do_nothing = lambda *args: True


def run_coala(log_printer=None,
              print_results=do_nothing,
              acquire_settings=fail_acquire_settings,
              print_section_beginning=do_nothing,
              nothing_done=do_nothing,
              show_bears=do_nothing,
              autoapply=True):
    """
    This is a main method that should be usable for almost all purposes and
    reduces executing coala to one function call.

    :param log_printer:             A LogPrinter object to use for logging.
    :param print_results:           A callback that takes a LogPrinter, a
                                    section, a list of results to be printed,
                                    the file dict and the mutable file diff
                                    dict.
    :param acquire_settings:        The method to use for requesting settings.
                                    It will get a parameter which is a
                                    dictionary with the settings name as key
                                    and a list containing a description in [0]
                                    and the names of the bears who need this
                                    setting in all following indexes.
    :param print_section_beginning: A callback that will be called with a
                                    section name string whenever analysis of a
                                    new section is started.
    :param nothing_done:            A callback that will be called with only a
                                    log printer that shall indicate that
                                    nothing was done.
    :param show_bears:              A callback that will be called with first
                                    a list of local bears, second a list of
                                    global bears to output them. A third bool
                                    parameter may be used to indicate if a
                                    compressed output (True) or a normal
                                    output (False) is desired, the former
                                    being used for showing all available bears
                                    to the user.
    :param autoapply:               Set to False to autoapply nothing by
                                    default; this is overridable via any
                                    configuration file/CLI.
    :return:                        A dictionary containing a list of results
                                    for all analyzed sections as key.
    """
    log_printer = log_printer or LogPrinter(ConsolePrinter())

    exitcode = 0
    results = None
    try:
        yielded_results = yielded_unfixed_results = False
        did_nothing = True
        sections, local_bears, global_bears, targets = gather_configuration(
            acquire_settings,
            log_printer,
            autoapply=autoapply)

        tag = str(sections['default'].get('tag', None))
        dtag = str(sections['default'].get('dtag', None))

        config_file = os.path.abspath(str(sections["default"].get("config")))

        show_all_bears = bool(sections['default'].get('show_all_bears', False))
        show_bears_ = bool(sections["default"].get("show_bears", "False"))

        # Deleting all .orig files, so the latest files are up to date!
        coala_delete_orig.main(log_printer, sections["default"])

        delete_tagged_results(dtag, config_file, log_printer)

        if show_bears_ or show_all_bears:
            if show_all_bears:
                (local_bears,
                 global_bears) = collect_all_bears_from_sections(sections)
            show_bears(local_bears, global_bears, show_all_bears)
            did_nothing = False
        else:
            results = {}
            for section_name, section in sections.items():
                if not section.is_enabled(targets):
                    continue

                print_section_beginning(section)
                section_result = execute_section(
                    section=section,
                    global_bear_list=global_bears[section_name],
                    local_bear_list=local_bears[section_name],
                    print_results=print_results,
                    log_printer=log_printer)
                yielded, yielded_unfixed, results[section_name] = (
                    simplify_section_result(section_result))

                yielded_results = yielded_results or yielded
                yielded_unfixed_results = (
                    yielded_unfixed_results or yielded_unfixed)
                did_nothing = False

            tag_results(tag, config_file, results, log_printer)

        if did_nothing:
            nothing_done(log_printer)
        elif yielded_unfixed_results:
            exitcode = 1
        elif yielded_results:
            exitcode = 5
    except BaseException as exception:  # pylint: disable=broad-except
        exitcode = exitcode or get_exitcode(exception, log_printer)

    return results, exitcode
Python
0.000001
@@ -4252,16 +4252,94 @@
 sections
+,%0A                                                                 log_printer
 )%0A
122b0982d1e10aada383bbd373518d049e54b906
Prepare for release 0.9pbs.107
filer/__init__.py
filer/__init__.py
#-*- coding: utf-8 -*-
# version string following pep-0396 and pep-0386
__version__ = '0.9pbs.107.dev1' # pragma: nocover

default_app_config = 'filer.apps.FilerConfig'
Python
0
@@ -94,13 +94,8 @@
 .107
-.dev1
 ' #
fc6694686b5b928580c3e8d682b3b6496b12d006
Refactor pop method
binary_heap.py
binary_heap.py
from __future__ import unicode_literals


class BinaryHeap(object):
    """A class for a binary heap."""
    def __init__(self, iterable=()):
        self.tree = []
        for val in iterable:
            self.push(val)

    def __repr__(self):
        return repr(self.tree)

    def __len__(self):
        return len(self.tree)

    def __iter__(self):
        return iter(self.tree)

    def pop(self):
        """Pop the head from the heap and return."""
        if len(self.tree) == 1:
            to_return = self.tree.pop()
        else:
            self.tree[0], self.tree[len(self.tree) - 1] = self.tree[len(self.tree) - 1], self.tree[0]
            to_return = self.tree.pop()  # Should raise error on empty
        self._bubbledown(0)
        return to_return

    def push(self, value):
        """Push a value onto a stack.

        args:
            value: the value to add
        """
        self.tree.append(value)  # Add protecion for different types case
        if len(self.tree) > 1:
            self._bubbleup(len(self.tree)-1)

    def _bubbleup(self, pos):
        """Perform one step of heap sort up the tree.

        args:
            pos: the index position to inspect
        """
        parent = self._find_parent(pos)
        if pos == 0:  # find_parent will return -1 at end of list
            return
        elif self.tree[pos] < self.tree[parent]:
            self.tree[pos], self.tree[parent] = self.tree[parent], self.tree[pos]
            self._bubbleup(parent)

    def _bubbledown(self, pos):
        """Perform one step of heap sort down the tree.

        args:
            pos: the index position to inspect
        """
        lchild = self._find_lchild(pos)
        rchild = lchild + 1
        try:  # Evaluating whether lchild exists; may refactor
            lval = self.tree[lchild]
            try:
                rval = self.tree[rchild]
            except IndexError:  # Case of left_child only
                if lval < self.tree[pos]:
                    self.tree[lchild], self.tree[pos] = self.tree[pos], self.tree[lchild]
            else:  # Case of left_child and right_child
                if lval < rval:
                    target = lchild
                else:
                    target = rchild
                if self.tree[target] < self.tree[pos]:
                    self.tree[target], self.tree[pos] = self.tree[pos], self.tree[target]
                    self._bubbledown(target)
        except IndexError:  # Case of no lchild
            return

    def _find_parent(self, pos):
        """Returns the parent index of given position.

        args:
            pos: the index position to inspect

        Returns: index of the parent
        """
        parent = (pos - 1) // 2
        return parent

    def _find_lchild(self, pos):
        """Returns the left child index of given position.

        args:
            pos: the index position to inspect

        Returns: index of the left child
        """
        lchild = (pos * 2) + 1
        return lchild

    def compare_values(self, parent_value=None, child_value=None, minheap=True):
        """Compares the values of child and parent according to heap type.

        For a minheap, checks if child value is greater than parent value.
        For a maxheap, checks if child value is less than parent value.

        args:
            child_pos: the pos of the child
            parent: the pos of the parent
            min: heap type comparison, defaults to minheap

        Returns: True if heap type comparison matches
        """
        if minheap is True:
            return child_value > parent_value
        else:
            return child_value < parent_value
Python
0.000001
@@ -479,17 +479,17 @@
 f.tree) 
-=
+%3C
 = 1:%0A
@@ -684,39 +684,8 @@
 op()
- # Should raise error on empty
 %0A
6c9da65a2083ff81dfe446896026fc4f449428e8
Fix typo
folder_cleanup.py
folder_cleanup.py
#!/usr/bin/env python3
"""Clean up the files in a folder.

This is based on an old Mac program called Folder Clean-Up, and tries to use
the same API.

All files are put into a subdirectory called _cleanup, sorted by extension.
For example, a file called "myfile.txt" would be put in "_cleanup/txts/".
Files without an extension are put in a directory called "unsorted", and
directories are put in a directory called "folders".

Note that due to the way that os.path.splittext works, files that start with
a '.' and contain only one '.' (like ".DS_Store" or ".profile") will be put
in the "unsorted" directory. I do not know if this corresponds to the API of
Folder Clean-Up.

For example:

YourFolder:
|- _cleanup:
|-|- avis:
|-|-|- sound.avi
|-|- folders:
|-|-|- a_directory:
|-|-|-|- stuff.txt
|-|- txts:
|-|-|- myfile.txt
|-|- unsorted:
|-|-|- .DS_Store
|-|-|- noextension

Run like

$ python3 folder_cleanup.py Directory_to_clean/

"""

import sys
# This file contains syntax that isn't Python 2 compatible anyway
# if sys.version[0] < '3':
#     print("This script requires Python 3")
#     sys.exit(1)

import argparse
import os

parser = argparse.ArgumentParser(description=__doc__,
    formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("directories", nargs='+', help="The directories to be cleaned.")
parser.add_argument("--dry-run", "-d", dest="dry_run", action="store_true",
    help="Print what would happen, but don't actually move the "
    "files.", default=False)  # Change this when ready to ship
parser.add_argument("--interactive", "-i", action="store_true",
    help="Interactively ask before moving each file. Ignored "
    "if --dry-run is passed.")
args = parser.parse_args()


def main(args):
    dry_run = args.dry_run
    interactive = args.interactive
    if dry_run:
        print("Dry run mode: None of the following operations actually "
              "occur.")
        verb = 'Would move'
    else:
        if interactive:
            verb = "Move"
        else:
            verb = "Moving"

    for directory in args.directories:
        if not os.path.isdir(directory):
            print("%(file)s is not a directory." % {'file': directory},
                  file=sys.stderr)
            return False

    for directory in args.directories:
        print("Cleaning %(directory)s" % {'directory':directory})
        cleanup = os.path.join(directory, "_cleanup")
        if not dry_run:
            try:
                os.mkdir(cleanup)
            except OSError:
                # _cleanup directory already exists
                if not os.path.isdir(cleanup):
                    print("ERROR: Could not clean %(directory)s, %(cleanup)s "
                          "exists and is not a directory." %
                          {'directory': directory, 'cleanup': cleanup},
                          file=sys.stderr)
                    continue
                pass
            else:
                print("Creating cleanup directory: %(cleanup)s" % {'cleanup': cleanup})
        else:
            # At least check if it would give an error
            if os.path.exists(cleanup) and not os.path.isdir(cleanup):
                print("ERROR: Could not clean %(directory)s, %(cleanup)s "
                      "exists and is not a directory." %
                      {'directory': directory, 'cleanup': cleanup},
                      file=sys.stderr)
                continue

        for file in os.listdir(directory):
            if os.path.isdir(os.path.join(directory, file)):
                if file == '_cleanup':
                    continue
                ftype = 'directory'
                folder = 'folders'
            else:
                root, ext = os.path.splitext(file)
                if not ext:
                    ftype = 'extensionless file'
                    folder = 'unsorted'
                else:
                    ftype = 'file'
                    folder = ext[1:] + 's'

            newpath = os.path.join(cleanup, folder, file)
            formatd = {'ftype': ftype, 'file': os.path.join(directory, file),
                       'newpath': newpath, 'verb':verb}
            if os.path.exists(newpath):
                print("WARNING: Could not move %(ftype)s %(file)s to "
                      "%(newpath)s/, file already exists."
                      % formatd, file=sys.stderr)
            else:
                print("%(verb)s %(ftype)s %(file)s to %(newpath)s" % formatd,
                      end='')
                if not dry_run:
                    if interactive:
                        print(" ? [Y/n/q] ", end='')
                        ans = input()
                        if ans.strip().lower() == 'q':
                            print("Exiting...")
                            sys.exit(1)
                        move = ans.lower() == 'y' or ans == ''
                    else:
                        move = True
                    if move:
                        os.renames(os.path.join(directory, file), newpath)
                    else:
                        print("Skipping")
                if not interactive:
                    print()

    print("Done")
    return True

# XXX: This is probably not needed because of os.renames
def mkdir(path):
    """
    Wrapper to os.mkdir that does nothing if the directory already exists.
    """
    try:
        os.mkdir(path)
    except OSError:
        pass
    return path

if __name__ == '__main__':
    if main(args):
        sys.exit(0)
    else:
        sys.exit(1)
Python
0.999999
@@ -4401,17 +4401,16 @@
 ewpath)s
-/
 , file a