Dataset schema:

    column        type            range / values
    commit        stringlengths   40 .. 40
    subject       stringlengths   1 .. 3.25k
    old_file      stringlengths   4 .. 311
    new_file      stringlengths   4 .. 311
    old_contents  stringlengths   0 .. 26.3k
    lang          stringclasses   3 values
    proba         float64         0 .. 1
    diff          stringlengths   0 .. 7.82k
b74a835bfc5a5686f7b0102a2ed94591e7e9c2cd
Handle unset callback function in the python socket
python/Arcus/__init__.py
python/Arcus/__init__.py
import threading
import struct
import socket
import time

## Raised when an unknown message type is received.
class UnknownMessageError(Exception):
    pass

## Threaded socket communication class.
#
#
class Socket(threading.Thread):
    InitialState = 1
    ConnectingState = 2
    ConnectedState = 3
    OpeningState = 4
    ListeningState = 5
    ClosingState = 6
    ClosedState = 7

    def __init__(self):
        super().__init__()
        self._state = self.InitialState
        self._next_state = self.InitialState
        self._server_socket = None
        self._data_socket = None

        self._message_type = -1
        self._message_size = 0
        self._partial_message = None
        self._amount_received = 0

        self._send_queue = []
        self._send_queue_lock = threading.Lock()
        self._received_queue = []
        self._received_queue_lock = threading.Lock()

        self._message_types = {}
        self._message_type_mapping = {}

        self._stateChangedCallback = None
        self._messageAvailableCallback = None
        self._errorCallback = None

    ## Get the current state of the socket.
    def getState(self):
        return self._state

    def setStateChangedCallback(self, func):
        self._stateChangedCallback = func

    def setMessageAvailableCallback(self, func):
        self._messageAvailableCallback = func

    def setErrorCallback(self, func):
        self._errorCallback = func

    ## Register a message type to handle.
    def registerMessageType(self, id, type):
        self._message_types[id] = type
        self._message_type_mapping[type] = id;

    ## Listen for connections on a specified address and port.
    def listen(self, address, port):
        self._address = (address, port)
        self._next_state = self.OpeningState
        self.start()

    ## Connect to an address of a specified address and port.
    def connect(self, address, port):
        self._address = (address, port)
        self._next_state = self.ConnectingState
        self.start()

    ## Close the connection and stop the thread.
    def close(self):
        self._next_state = self.ClosingState

    ## Queue a message to be sent to the other side.
    def sendMessage(self, message):
        with self._send_queue_lock:
            self._send_queue.append(message)

    ## Take the next available message from the received message queue.
    #
    #  \return The next available message or False if the queue is empty.
    def takeNextMessage(self):
        with self._received_queue_lock:
            if not self._received_queue:
                return False
            return self._received_queue.pop(0)

    ## Reimplemented from threading.Thread.run()
    def run(self):
        while True:
            if self._state == self.InitialState:
                time.sleep(0.25) #Prevent uninitialized thread from overloading CPU
            elif self._state == self.ConnectingState:
                self._data_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self._data_socket.connect(self._address)
                self._data_socket.settimeout(1.0)
                self._next_state = self.ConnectedState
            elif self._state == self.OpeningState:
                self._server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self._server_socket.bind(self._address)
                self._next_state = self.ListeningState
            elif self._state == self.ListeningState:
                print("listening on", self._address)
                self._server_socket.listen(1)
                self._data_socket, address = self._server_socket.accept()
                print("connected on", address)
                self._data_socket.settimeout(1.0)
                self._next_state = self.ConnectedState
            elif self._state == self.ClosingState:
                self._server_socket.close()
                self._data_socket.close()
                self._state = self.ClosedState
                break; # Exit the infinite loop.
            elif self._state == self.ConnectedState:
                self._send_queue_lock.acquire()
                messages_to_send = []
                for message in self._send_queue:
                    messages_to_send.append(self._send_queue.pop(0))
                self._send_queue_lock.release()

                try:
                    for message in messages_to_send:
                        self._sendMessage(message)

                    self._receiveNextMessage()
                except OSError as e:
                    print(e)

            if self._next_state != self._state:
                self._state = self._next_state
                self._stateChangedCallback()

    ## private:

    #
    def _sendMessage(self, message):
        self._sendBytes(struct.pack('!i', self._message_type_mapping[type(message)]))
        self._sendBytes(struct.pack('!i', message.ByteSize()))
        self._sendBytes(message.SerializeToString())

    # Send a byte array across the socket.
    def _sendBytes(self, data):
        amount_to_send = len(data)
        while amount_to_send > 0:
            try:
                n = self._data_socket.send(data)
                amount_to_send -= n
            except socket.timeout:
                continue

    # Try and receive the next message.
    def _receiveNextMessage(self):
        # Handle continuation of message receive
        if self._partial_message:
            data = self._receiveBytes(self._message_size - self._amount_received)
            self._partial_message += data
            self._amount_received += len(data)
            if self._amount_received >= self._message_size:
                self._handleMessage(self._partial_message)
                self._message_size = 0
                self._partial_message = None
            else:
                return

        self._message_type = self._receiveUInt32()
        if self._message_type == -1:
            return

        self._message_size = self._receiveUInt32()
        if self._message_size == -1:
            return

        data = self._receiveBytes(self._message_size)
        if len(data) != self._message_size:
            self._partial_message = data
            self._amount_received = len(data)
            return

        self._handleMessage(data)

    # Parse message from a bytearray and put it onto the received messages queue.
    def _handleMessage(self, data):
        if not self._message_type in self._message_types:
            raise UnknownMessageError("Unknown message type {0}".format(self._message_type))

        message = self._message_types[self._message_type]()
        message.ParseFromString(bytes(data))

        with self._received_queue_lock:
            self._received_queue.append(message)

        self._messageAvailableCallback()

    # Receive an integer from the socket
    def _receiveUInt32(self):
        try:
            data = self._data_socket.recv(4)
            if data:
                return struct.unpack('@i', data)[0]
        except socket.timeout:
            pass
        return -1

    # Receive an amount of bytes from the socket and write it into dest.
    def _receiveBytes(self, maxlen = 0):
        try:
            return self._data_socket.recv(maxlen)
        except socket.timeout:
            pass
Python
0.000001
@@ -4697,24 +4697,76 @@ _next_state%0A +%0A if self._stateChangedCallback:%0A
@@ -4788,32 +4788,43 @@ ChangedCallback( +self._state )%0A%0A ## privat
@@ -6868,32 +6868,79 @@ ppend(message)%0A%0A + if self._messageAvailableCallback:%0A self._me
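The diff above guards both callback invocations, so a Socket whose user never registered callbacks no longer dies with "TypeError: 'NoneType' object is not callable". A minimal sketch of the resulting pattern (attribute names taken from the record, surrounding class elided):

    # Fire a callback only when one was registered; the defaults are None.
    if self._next_state != self._state:
        self._state = self._next_state
        if self._stateChangedCallback:
            self._stateChangedCallback(self._state)

    # later, in _handleMessage, the same guard:
    if self._messageAvailableCallback:
        self._messageAvailableCallback()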
c8baf23086b937a24046fd8dd0b27544dc2aadfb
Add single letter flag option to trap-images
recipes/trap-images.py
recipes/trap-images.py
#!/usr/bin/python
"""
This main recipe accepts a list of images (through images_to_process.py).

Images should be prepared with the correct keywords.
"""
from __future__ import with_statement
import sys
import os
import datetime
from pyrap.quanta import quantity

import lofarpipe.support
from lofarpipe.support.control import control
from lofarpipe.support.utilities import log_time
from lofarpipe.support.parset import patched_parset
import lofarpipe.support.lofaringredient as ingredient

from tkp.database import DataBase
from tkp.database import DataSet


class TrapImages(control):
    inputs = {
        'dataset_id': ingredient.IntField(
            '--dataset-id',
            help='Specify a previous dataset id to append the results to.',
            default=-1
        ),
        'monitor_coords': ingredient.ListField(
            '--monitor-coords',
            #Unfortunately the ingredient system cannot handle spaces in
            #parameter fields
            # I have tried enclosing with quotes, switching to StringField,
            #still no good.
            help='Specify a list of RA,DEC co-ordinates to monitor\n'
                 '(decimal degrees, no spaces), e.g.:\n'
                 '--monitor-coords=[137.01,14.02,137.05,15.01]',
            default=[]
        ),
    }

    def pipeline_logic(self):
        from images_to_process import images

        # Create the dataset
        database = DataBase()
        dataset = self.initialise_dataset(database)
        self.add_manual_monitoringlist_entries(dataset)

        with log_time(self.logger):
            if len(images):
                self.logger.info("Processing images ...")
                outputs = self.run_task("source_extraction", images,
                                        dataset_id=dataset.id,
                                        # nproc = self.config.get('DEFAULT', 'default_nproc')
                                        nproc=1 #Force nproc =1 until issue #3357 is fixed.
                                        )
                outputs.update(
                    self.run_task("monitoringlist", [dataset.id],
                                  nproc=1 #Force nproc =1 until certain concurrent database access OK.
                                  )
                )
                outputs.update(
                    self.run_task("transient_search", [dataset.id],
                                  image_ids=outputs['image_ids']))
                #FIXME: These need updating too... *sigh*
                # outputs.update(
                #     self.run_task("feature_extraction", outputs['transients']))
                #
                # outputs.update(
                #     self.run_task("classification", outputs['transients']))
                # self.run_task("prettyprint", outputs['transients'])
            else:
                self.logger.warn("No images found, check parameter files.")

        dataset.process_ts = datetime.datetime.utcnow()
        database.close()

    def initialise_dataset(self, database):
        """Either inits a fresh dataset, or grabs the dataset specified
        at command line"""
        if self.inputs['dataset_id'] == -1:
            dataset = DataSet(data={'description': self.inputs['job_name']},
                              database=database)
        else:
            dataset = DataSet(id = self.inputs['dataset_id'],
                              database=database)
            self.logger.info("Appending results to previously entered dataset")
        self.logger.info("dataset id = %d", dataset.id)
        return dataset

    def add_manual_monitoringlist_entries(self, dataset):
        """Parses co-ords from self.inputs, loads them into the monitoringlist"""
        mon_coords = self.parse_monitoringlist_coords()
        for c in mon_coords:
            dataset.add_manual_entry_to_monitoringlist(c[0],c[1])

    def parse_monitoringlist_coords(self):
        """Returns a list of coord 2-tuples, format is: [(RA,DEC)] """
        if len(self.inputs['monitor_coords']):
            raw_monitor_list = self.inputs['monitor_coords']
            if len(raw_monitor_list)%2 != 0:
                raise ValueError("Odd number of monitor co-ordinates supplied: "
                                 "please supply RA,DEC pairs *with commas but no spaces*.")
            ra_list = raw_monitor_list[0::2]
            dec_list = raw_monitor_list[1::2]
            monitor_coords = zip(ra_list, dec_list)
            print "You specified monitoring at coords:"
            for i in monitor_coords:
                print "RA,", i[0]," ; Dec, " , i[1]
            return monitor_coords
        return []


if __name__ == '__main__':
    sys.exit(TrapImages().main())
Python
0.000001
@@ -838,32 +838,38 @@ eld(%0A + '-m', '--monitor-coor
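The diff adds a single-letter alias in front of the existing long option, so the ingredient accepts both spellings. Sketch of the changed field as the commit leaves it (help text abbreviated):

    'monitor_coords': ingredient.ListField(
        '-m', '--monitor-coords',  # short flag added alongside the long option
        help='Specify a list of RA,DEC co-ordinates to monitor ...',
        default=[]
    ),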
3bce3110f145789818f5a85328263f55c09e0ebc
print stack trace in registering extensions
glim/app.py
glim/app.py
""" This module is responsible for instantiating a typical glim framework app. It registers glim framework components, extensions and wsgi app. """ # application initiation script import os import sys import traceback from glim import Config, Log, GlimLog from glim.utils import import_module, empty from glim.dispatch import Glim import glim.paths as paths from werkzeug.serving import run_simple from werkzeug.wsgi import SharedDataMiddleware from termcolor import colored class App: """ This class is responsible for registering the components of a typical glim framework app Attributes ---------- commandadapter (glim.command.CommandAdapter): The commandadapter object which is responsible for dispatching commands env (string): application environment variable passed from command line mconfig (module): The configuration module imported from app.config.<env> config (dict): The configuration dictionary by environment which resides in app.config.<env> before (method): The before hook function for registering a function before app starts """ def __init__(self, commandadapter, mconfig=None, env='default', before=None): self.commandadapter = commandadapter self.config = mconfig.config self.register_config() self.register_log() self.register_extensions() self.before = before def register_config(self): """Function registers the Config facade using Config(Registry).""" Config.register(self.config) def register_extensions(self): """ Function registers extensions given extensions list Args ---- extensions (list) : the extensions dict on app.config.<env> Raises ------ Exception: Raises exception when extension can't be loaded properly. """ try: for extension, config in self.config['extensions'].items(): extension_bstr = '' # gather package name if exists extension_pieces = extension.split('.') # if the extensions is not in glim_extensions package if len(extension_pieces) > 1: extension_bstr = '.'.join(extension_pieces) else: # if the extension is in glim_extensions package extension_bstr = 'glim_extensions.%s' % extension_pieces[0] extension_module = import_module(extension_bstr) if extension_module: extension_startstr = '%s.%s' % (extension_bstr, 'start') extension_start = import_module(extension_startstr, pass_errors=True) extension_cmdsstr = '%s.%s' % (extension_bstr, 'commands') extension_cmds = import_module(extension_cmdsstr, pass_errors=True) if extension_start is not None: before = extension_start.before before(config) if extension_cmds is not None: self.commandadapter.register_extension(extension_cmds, extension_pieces[0]) else: GlimLog.error('Extension %s could not be loaded' % extension) except Exception as e: GlimLog.error(e) def register_log(self): """ Function registers Log facade using configuration in app.config.<env>. Note: The Log facade will be registered using default configuration if there isn't any 'log' key in app.config.<env>. """ if not empty('log', self.config): if not empty('glim', self.config['log']): GlimLog.boot(name='glim', config=self.config['log']['glim']) else: GlimLog.boot(name='glim') if not empty('app', self.config['log']): Log.boot(name='app', config=self.config['log']['app']) else: Log.boot(name='app') else: Log.boot(name='app') GlimLog.boot(name='glim') def start(self, host='127.0.0.1', port='8080', env='development'): """ Function initiates a werkzeug wsgi app using app.routes module. Note: Function will register a static path for css, js, img, etc. files using SharedDataMiddleware, else it won't register any static script path. 
Args ---- host (string): the host ip address to start the web server port (string): the port of ip address env (string): the application environment Raises ------ Exception: Raises any exception coming from werkzeug's web server """ try: self.before() mroutes = import_module('app.routes') app = Glim(mroutes.urls, self.config['app']) if 'assets' in self.config['app']: app.wsgi_app = SharedDataMiddleware(app.wsgi_app, { self.config['app']['assets']['url']: self.config['app']['assets']['path'] }) run_simple(host, int(port), app, use_debugger=self.config['app']['debugger'], use_reloader=self.config['app']['reloader']) except Exception as e: print(traceback.format_exc()) exit()
Python
0
@@ -3397,17 +3397,38 @@ g.error( -e +traceback.format_exc() )%0A%0A d
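Decoded, the hunk swaps the bare exception object for the full stack trace in the extension-loading error path. Sketch of the changed handler (the call inside try stands in for the registration loop above):

    except Exception:
        # traceback.format_exc() captures the whole stack, not just str(e),
        # which is what the commit subject asks for.
        GlimLog.error(traceback.format_exc())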
83bd72110a39f5b5909f50ac3529c09dc9b54cd7
Update rng.py
cogs/rng.py
cogs/rng.py
from discord.ext import commands
import random as rng
import copy

class RNG:
    """Utilities that provide pseudo-RNG."""

    el_fractions=['ходоки', 'лорды', 'некрофаги', 'маги', 'хранители', 'драккны', 'забитые', 'кланы']

    def __init__(self, bot):
        self.bot = bot
        self.el_pull = copy.copy(RNG.el_fractions)
        self.ban.aliases += RNG.el_fractions

    @commands.command()
    async def random(self, minimum=0, maximum=100):
        """Выбрать случайное число в заданном диапазоне.

        Минимум должен быть меньше максимума, а максимум — меньше 1000.
        """
        maximum = min(maximum, 1000)
        if minimum >= maximum:
            await self.bot.say('Максимум меньше минимума.')
            return
        await self.bot.say(rng.randint(minimum, maximum))

    #@random.command()
    #async def lenny(self):
    #    """Displays a random lenny face."""
    #    lenny = rng.choice([
    #        "( ͡° ͜ʖ ͡°)", "( ͠° ͟ʖ ͡°)", "ᕦ( ͡° ͜ʖ ͡°)ᕤ", "( ͡~ ͜ʖ ͡°)",
    #        "( ͡o ͜ʖ ͡o)", "͡(° ͜ʖ ͡ -)", "( ͡͡ ° ͜ ʖ ͡ °)", "(ง ͠° ͟ل͜ ͡°)ง",
    #        "ヽ༼ຈل͜ຈ༽ノ"
    #    ])
    #    await self.bot.say(lenny)

    @commands.group(pass_context=True, aliases=['ел'])
    async def el(self, ctxб *, players_count : int = 0):
        """Выбор фракции в Endless Legend.

        Здесь был Vinyl.
        """
        if ctx.invoked_subcommand is None:
            if players_count is 0:
                str_answer = ''
                for idx, fract in enumerate(self.el_pull, 1):
                    str_answer += '{}. {}\n'.format(idx, fract)
                await self.bot.say(str_answer)
            else:
                ctx.invoke(self.roll, players_count)

    @el.command(pass_context=True, aliases=['репул'])
    async def repull(self, ctx):
        self.el_pull = copy.copy(RNG.el_fractions)
        ctx.invoke(self.el)

    @el.command(pass_context=True, aliases=[], hidden=True)
    async def ban(self, ctx):
        if ctx.invoked_with in self.el_pull:
            self.el_pull.remove(fraction)
            ctx.invoke(self.el)
        else:
            await self.bot.say('Нет такой фракции.')

    @el.command(pass_context=True, aliases=['ролл', 'я создал', 'выбор'])
    async def roll(self, ctx, *, count : int):
        choice = rng.sample(self.el_pull, count)
        str_answer = ''
        for idx, fract in enumerate(choice, 1):
            str_answer += '{}. {}\n'.format(idx, fract)
        await self.bot.say(str_answer)

    @commands.command(aliases=['выбери', 'вибери'])
    async def choose(self, *, choices : str):
        """Есть два стула...

        Варианты должны быть разделены с помощью `or` или `или`
        """
        choices_list = list()
        for choice in choices.split('or'):
            choices_list += choice.split('или')
        if len(choices_list) < 2:
            await self.bot.say('Шо то хуйня, шо это хуйня.')
        else:
            await self.bot.say(rng.choice(choices_list).lstrip())

def setup(bot):
    bot.add_cog(RNG(bot))
Python
0.000001
@@ -1230,17 +1230,17 @@ elf, ctx -%D0%B1 +, *, play
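Decoded, the hunk shows the actual bug this commit fixes: a Cyrillic "б" was typed where the comma after `ctx` belongs, which makes the whole def line a SyntaxError. The corrected signature:

    # before: async def el(self, ctxб *, players_count : int = 0):
    async def el(self, ctx, *, players_count : int = 0):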
58a69bf2dd93027f083fe54721847c438f861f10
Fix import of new data after rebase
statsmodels/datasets/statecrime/data.py
statsmodels/datasets/statecrime/data.py
#! /usr/bin/env python
"""Statewide Crime Data"""

__docformat__ = 'restructuredtext'

COPYRIGHT = """Public domain."""
TITLE = """Statewide Crime Data 2009"""
SOURCE = """
All data is for 2009 and was obtained from the American Statistical Abstracts except as indicated below.
"""

DESCRSHORT = """State crime data 2009"""

DESCRLONG = DESCRSHORT

#suggested notes
NOTE = """
Number of observations: 51
Number of variables: 8
Variable name definitions:

state
    All 50 states plus DC.
violent
    Rate of violent crimes / 100,000 population. Includes murder, forcible
    rape, robbery, and aggravated assault. Numbers for Illinois and
    Minnesota do not include forcible rapes. Footnote included with the
    American Statistical Abstract table reads: "The data collection
    methodology for the offense of forcible rape used by the Illinois and
    the Minnesota state Uniform Crime Reporting (UCR) Programs (with the
    exception of Rockford, Illinois, and Minneapolis and St. Paul,
    Minnesota) does not comply with national UCR guidelines. Consequently,
    their state figures for forcible rape and violent crime (of which
    forcible rape is a part) are not published in this table."
murder
    Rate of murders / 100,000 population.
hs_grad
    Precent of population having graduated from high school or higher.
poverty
    % of individuals below the poverty line
white
    Percent of population that is one race - white only. From 2009
    American Community Survey
single
    Calculated from 2009 1-year American Community Survey obtained
    obtained from Census. Variable is Male householder, no wife present,
    family household combined with Female household, no husband prsent,
    family household, divided by the total number of Family households.
urban
    % of population in Urbanized Areas as of 2010 Census. Urbanized
    Areas are area of 50,000 or more people."""

import numpy as np
from statsmodels.tools import datautils as du
from os.path import dirname, abspath

def load():
    """
    Load the statecrime data and return a Dataset class instance.

    Returns
    -------
    Dataset instance:
        See DATASET_PROPOSAL.txt for more information.
    """
    data = _get_data()
    ##### SET THE INDICES #####
    #NOTE: None for exog_idx is the complement of endog_idx
    return du.process_recarray(data, endog_idx=2, exog_idx=[7, 4, 3, 5],
                               dtype=float)

def load_pandas():
    data = _get_data()
    ##### SET THE INDICES #####
    #NOTE: None for exog_idx is the complement of endog_idx
    return du.process_recarray_pandas(data, endog_idx=2, exog_idx=[7,4,3,5],
                                      dtype=float, index_idx=0)

def _get_data():
    filepath = dirname(abspath(__file__))
    ##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
    data = np.recfromtxt(open(filepath + '/statecrime.csv', 'rb'),
                         delimiter=",", names=True, dtype=None)
    return data
Python
0
@@ -1955,12 +1955,15 @@ els. -tool +dataset s im
@@ -1967,20 +1967,16 @@ import -data utils as
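Decoded, the two hunks rewrite one import to the module's post-rebase location. Before and after:

    # from statsmodels.tools import datautils as du   # old path, broken after the rebase
    from statsmodels.datasets import utils as du      # new path applied by the diff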
ea92aeed4dc606def49df643cadc696fec6452b3
fix docstring (again)
python/mapbox_geocode.py
python/mapbox_geocode.py
import __future__

import os, sys, json

try:  # python 3
    from urllib.request import urlopen as urlopen
    from urllib.parse import quote_plus as quote_plus
except:  # python 2
    from urllib import quote_plus as quote_plus
    from urllib2 import urlopen as urlopen


def geocode(mapbox_access_token, query):
    """
    Submit a geocoding query to Mapbox's permanent geocoding endpoint.

    Args:
        mapbox_access_token (str): valid Mapbox access token with geocoding permissions
        query (str): input text to geocode
    """
    resp = urlopen('https://api.tiles.mapbox.com/v4/geocode/mapbox.places/{query}.json?access_token={token}'.format(query=quote_plus(query), token=mapbox_access_token))
    return json.loads(resp.read().decode('utf-8'))


if __name__ == '__main__':
    token = os.environ.get('MapboxAccessToken', False)
    if not token:
        print('environment variable MapboxAccessToken must be set')
        sys.exit(1)

    # geocode
    result = geocode(token, sys.argv[1])

    # print result
    print(json.dumps(result, indent=2))
Python
0
@@ -365,36 +365,16 @@ x's -permanent geocoding endpoint +geocoder .%0A
fa704d7eebb879dc739446c1cc811dc4cd505ae9
Update documentation for mx.callback.Speedometer. (#6058)
python/mxnet/callback.py
python/mxnet/callback.py
# coding: utf-8
"""Callback functions that can be used to track various status during epoch."""
from __future__ import absolute_import

import logging
import math
import sys
import time
from .model import save_checkpoint


def module_checkpoint(mod, prefix, period=1, save_optimizer_states=False):
    """Callback to checkpoint Module to prefix every epoch.

    Parameters
    ----------
    mod : subclass of BaseModule
        The module to checkpoint.
    prefix : str
        The file prefix for this checkpoint.
    period : int
        How many epochs to wait before checkpointing. Defaults to 1.
    save_optimizer_states : bool
        Indicates whether or not to save optimizer states for continued training.

    Returns
    -------
    callback : function
        The callback function that can be passed as iter_end_callback to fit.
    """
    period = int(max(1, period))
    # pylint: disable=unused-argument
    def _callback(iter_no, sym=None, arg=None, aux=None):
        """The checkpoint function."""
        if (iter_no + 1) % period == 0:
            mod.save_checkpoint(prefix, iter_no + 1, save_optimizer_states)
    return _callback


def do_checkpoint(prefix, period=1):
    """Callback to checkpoint the model to prefix every epoch.

    Parameters
    ----------
    prefix : str
        The file prefix for this checkpoint.
    period : int
        How many epochs to wait before checkpointing. Defaults to 1.

    Returns
    -------
    callback : function
        The callback function that can be passed as ``iter_end_callback`` to fit.
    """
    period = int(max(1, period))
    def _callback(iter_no, sym, arg, aux):
        """The checkpoint function."""
        if (iter_no + 1) % period == 0:
            save_checkpoint(prefix, iter_no + 1, sym, arg, aux)
    return _callback


def log_train_metric(period, auto_reset=False):
    """Callback to log the training evaluation result every period.

    Parameters
    ----------
    period : int
        The number of batch to log the training evaluation metric.
    auto_reset : bool
        Reset the metric after each log.

    Returns
    -------
    callback : function
        The callback function that can be passed as iter_epoch_callback to fit.
    """
    def _callback(param):
        """The checkpoint function."""
        if param.nbatch % period == 0 and param.eval_metric is not None:
            name_value = param.eval_metric.get_name_value()
            for name, value in name_value:
                logging.info('Iter[%d] Batch[%d] Train-%s=%f',
                             param.epoch, param.nbatch, name, value)
            if auto_reset:
                param.eval_metric.reset()
    return _callback


class Speedometer(object):
    """Calculate and log training speed periodically.

    Parameters
    ----------
    batch_size: int
        batch_size of data.
    frequent: int
        How many batches between calculations.
        Defaults to calculating & logging every 50 batches.
    auto_reset : bool
        Reset the metric after each log.
    """
    def __init__(self, batch_size, frequent=50, auto_reset=True):
        self.batch_size = batch_size
        self.frequent = frequent
        self.init = False
        self.tic = 0
        self.last_count = 0
        self.auto_reset = auto_reset

    def __call__(self, param):
        """Callback to Show speed."""
        count = param.nbatch
        if self.last_count > count:
            self.init = False
        self.last_count = count

        if self.init:
            if count % self.frequent == 0:
                speed = self.frequent * self.batch_size / (time.time() - self.tic)
                if param.eval_metric is not None:
                    name_value = param.eval_metric.get_name_value()
                    if self.auto_reset:
                        param.eval_metric.reset()
                    for name, value in name_value:
                        logging.info('Epoch[%d] Batch [%d]\tSpeed: %.2f samples/sec\tTrain-%s=%f',
                                     param.epoch, count, speed, name, value)
                else:
                    logging.info("Iter[%d] Batch [%d]\tSpeed: %.2f samples/sec",
                                 param.epoch, count, speed)
                self.tic = time.time()
        else:
            self.init = True
            self.tic = time.time()


class ProgressBar(object):
    """Show a progress bar.

    Parameters
    ----------
    total: int
        total batch size
    length: int
        length or progress bar
    """
    def __init__(self, total, length=80):
        self.bar_len = length
        self.total = total

    def __call__(self, param):
        """Callback to Show progress bar."""
        count = param.nbatch
        filled_len = int(round(self.bar_len * count / float(self.total)))
        percents = math.ceil(100.0 * count / float(self.total))
        prog_bar = '=' * filled_len + '-' * (self.bar_len - filled_len)
        sys.stdout.write('[%s] %s%s\r' % (prog_bar, percents, '%'))


class LogValidationMetricsCallback(object):
    """Just logs the eval metrics at the end of an epoch."""
    def __call__(self, param):
        if not param.eval_metric:
            return
        name_value = param.eval_metric.get_name_value()
        for name, value in name_value:
            logging.info('Epoch[%d] Validation-%s=%f', param.epoch, name, value)
Python
0
@@ -2744,25 +2744,12 @@ %22%22%22 -Calculate and l +L og +s tra
@@ -2760,16 +2760,39 @@ g speed +and evaluation metrics periodic
@@ -2852,30 +2852,30 @@ int%0A -b +B atch -_ + size of data
@@ -2906,88 +2906,126 @@ -How many batches between calculations.%0A Defaults to calculating & logging +Specifies how frequently training speed and evaluation metrics%0A must be logged. Default behavior is to log once eve
@@ -3071,38 +3071,50 @@ Reset the +evaluation metric +s after each log.
@@ -3113,16 +3113,478 @@ ach log. +%0A%0A Example:%0A --------%0A %3E%3E%3E # Print training speed and evaluation metrics every ten batches. Batch size is one.%0A ...%0A %3E%3E%3E module.fit(iterator, num_epoch=n_epoch,%0A ... batch_end_callback=mx.callback.Speedometer(1, 10))%0A Epoch%5B0%5D Batch %5B10%5D Speed: 1910.41 samples/sec Train-accuracy=0.200000%0A Epoch%5B0%5D Batch %5B20%5D Speed: 1764.83 samples/sec Train-accuracy=0.400000%0A Epoch%5B0%5D Batch %5B30%5D Speed: 1740.59 samples/sec Train-accuracy=0.500000 %0A %22%22%22
d6ffb7c91d3cfd9b9e0caeec41921ec3ddce6efa
rewrite custom command for django 1.10 compatibility
students/management/commands/stcount.py
students/management/commands/stcount.py
from django.core.management.base import BaseCommand

from django.contrib.auth.models import User

from students.models import Student, Group


class Command(BaseCommand):
    args = '<model_name model_name ...>'
    help = 'Prints to console number of students related in database.'

    models = (('student', Student), ('group', Group), ('user', User))

    def handle(self, *args, **options):
        for name, model in self.models:
            if name in args:
                self.stdout.write('Number of {}s in database: {:d}'.format(name, model.objects.count()))
Python
0.000001
@@ -167,49 +167,8 @@ d):%0A - args = '%3Cmodel_name model_name ...%3E'%0A
@@ -306,16 +306,102 @@ User))%0A%0A + def add_arguments(self, parser):%0A parser.add_argument('model', nargs='+')%0A%0A def
@@ -495,20 +495,32 @@ name in -args +options%5B'model'%5D :%0A
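Django 1.10 removed the old `args` attribute on management commands; positional arguments must be declared via add_arguments and read back from options. Sketch of the converted command, following the three hunks above:

    class Command(BaseCommand):
        help = 'Prints to console number of students related in database.'
        models = (('student', Student), ('group', Group), ('user', User))

        def add_arguments(self, parser):
            # replaces the removed `args = '<model_name model_name ...>'`
            parser.add_argument('model', nargs='+')

        def handle(self, *args, **options):
            for name, model in self.models:
                if name in options['model']:  # was: `if name in args:`
                    self.stdout.write('Number of {}s in database: {:d}'.format(name, model.objects.count()))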
42ad2c26368dfaa19efcc5ea57902857aae3e2cf
fix horizon metrics
src/horizon/protocols.py
src/horizon/protocols.py
from twisted.internet.error import ConnectionDone
from twisted.internet.protocol import DatagramProtocol, ServerFactory
from twisted.protocols.basic import LineOnlyReceiver, Int32StringReceiver
from twisted.python import log

from utils import SafeUnpickler
from cache import MetricCache
from regexlist import WhiteList, BlackList


def emit(metric, value):
    log.msg(metric + " " + value)


class MetricReceiver:
    """ Base class for all metric receiving protocols, handles flow
    control events and connection state logging.
    """
    def connectionMade(self):
        self.peerName = self.transport.getPeer()
        log.msg("%s connection with %s established" % (self.__class__.__name__, self.peerName))

    def connectionLost(self, reason):
        if reason.check(ConnectionDone):
            log.msg("%s connection with %s closed cleanly" % (self.__class__.__name__, self.peerName))
        else:
            log.msg("%s connection with %s lost: %s" % (self.__class__.__name__, self.peerName, reason.value))

    def metricReceived(self, metric, datapoint):
        if BlackList and metric in BlackList:
            emit('blacklistMatches', metric)
            return
        if WhiteList and metric not in WhiteList:
            emit('whiteListRejects ', metric)
            return
        MetricCache.store(metric, datapoint)


class MetricLineReceiver(MetricReceiver, LineOnlyReceiver):
    delimiter = '\n'

    def lineReceived(self, line):
        try:
            metric, value, timestamp = line.strip().split()
            self.metricReceived(metric, (float(timestamp), float(value)))
        except:
            log.msg('invalid line (%s) received from client %s, ignoring' % (line.strip(), self.peerName))


class MetricPickleReceiver(MetricReceiver, Int32StringReceiver):
    MAX_LENGTH = 2 ** 20

    def connectionMade(self):
        MetricReceiver.connectionMade(self)
        ##Use the safe unpickler that comes with carbon rather than standard python pickle/cpickle
        self.unpickler = SafeUnpickler

    def stringReceived(self, data):
        try:
            datapoints = self.unpickler.loads(data)
        except:
            log.msg('invalid pickle received from %s, ignoring' % self.peerName)
            return

        for (metric, datapoint) in datapoints:
            try:
                datapoint = ( float(datapoint[0]), float(datapoint[1]) ) #force proper types
            except:
                continue

            self.metricReceived(metric, datapoint)


class MetricDatagramReceiver(MetricReceiver, DatagramProtocol):
    def datagramReceived(self, data, (host, port)):
        for line in data.splitlines():
            try:
                metric, value, timestamp = line.strip().split()
                self.metricReceived(metric, (float(timestamp), float(value)))
            except:
                log.msg('invalid line (%s) received from %s, ignoring' % (line, host))


class MetricLineFactory(ServerFactory):
    protocol = MetricLineReceiver


class MetricPickleFactory(ServerFactory):
    protocol = MetricPickleReceiver
Python
0.000022
@@ -1079,16 +1079,32 @@ emit(' +skyline.horizon. blacklis
@@ -1193,16 +1193,32 @@ emit(' +skyline.horizon. whiteLis
@@ -1225,17 +1225,16 @@ tRejects - ', metri
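The diff prefixes the two internal counters with a full metric namespace and drops a stray trailing space from one name. The resulting calls in metricReceived:

    emit('skyline.horizon.blacklistMatches', metric)
    emit('skyline.horizon.whiteListRejects', metric)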
454c7d322af3328279582aef629736b92c87e869
Revert "It seems the mechanism to declare a namespace package changed."
backports/__init__.py
backports/__init__.py
# This file is part of a backport of 'lzma' included with Python 3.3,
# exposed under the namespace of backports.lzma following the conventions
# laid down here: http://pypi.python.org/pypi/backports/1.0
# Backports homepage: http://bitbucket.org/brandon/backports

# A Python "namespace package" http://www.python.org/dev/peps/pep-0382/
# This always goes inside of a namespace package's __init__.py
try:
    import pkg_resources
    pkg_resources.declare_namespace(__name__)
except ImportError:
    import pkgutil
    __path__ = pkgutil.extend_path(__path__, __name__)
Python
0
@@ -399,127 +399,40 @@ py%0A%0A -try:%0A import pkg_resources%0A pkg_resources.declare_namespace(__name__)%0Aexcept ImportError:%0A import pkgutil%0A +from pkgutil import extend_path%0A __pa
@@ -442,16 +442,8 @@ _ = -pkgutil. exte
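The revert restores the older pkgutil-only namespace declaration in place of the pkg_resources try/except fallback. The file body after the diff:

    from pkgutil import extend_path
    __path__ = extend_path(__path__, __name__)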
d1be7f345529594ba25ed5d0f22e544735a64404
Add a custom admin site header.
qubs_data_centre/urls.py
qubs_data_centre/urls.py
from django.conf.urls import url, include
from django.contrib import admin

urlpatterns = [
    url(r'^api/', include('api.urls')),
    url(r'^admin/', admin.site.urls),
]
Python
0
@@ -69,16 +69,68 @@ admin%0A%0A +%0Aadmin.site.site_header = 'QUBS Data Centre Admin'%0A%0A urlpatte
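Decoded, the diff inserts a single assignment between the imports and urlpatterns; `site_header` replaces Django's default "Django administration" heading:

    admin.site.site_header = 'QUBS Data Centre Admin'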
914f95b8acc84828c8a5aea1138415542b066a62
switch order
web3/urls.py
web3/urls.py
"""web3 URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.10/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url, include from django.contrib import admin from django.conf import settings from .apps.authentication import views as auth_views from .apps.users import views as user_views from .apps.error.views import (handle_404_view, handle_500_view, handle_503_view) urlpatterns = [ url('', include('social.apps.django_app.urls', namespace='social')), url(r'^$', auth_views.index_view, name='index'), url('^about$', auth_views.about_view, name='about'), url(r'^login/$', auth_views.login_view, name='login'), url(r'^login/superuser/$', auth_views.login_view, name='login_superuser'), url(r'^logout/$', auth_views.logout_view, name='logout'), url(r'^wsauth$', auth_views.node_auth_view, name='node_auth'), url(r"^user/", include("web3.apps.users.urls")), url(r"^site/", include("web3.apps.sites.urls")), url(r'^vm/', include("web3.apps.vms.urls")), url(r'^admin/', admin.site.urls), url(r'^github_oauth/$', user_views.github_oauth_view) ] if settings.DEBUG: import debug_toolbar urlpatterns += [ url(r'^__debug__/', include(debug_toolbar.urls)), ] handler404 = handle_404_view handler500 = handle_500_view handler503 = handle_503_view
Python
0.000005
@@ -1137,16 +1137,26 @@ '%5Elogin/ +superuser/ $', auth
@@ -1185,16 +1185,26 @@ e='login +_superuser '),%0A
@@ -1212,34 +1212,24 @@ rl(r'%5Elogin/ -superuser/ $', auth_vie
@@ -1250,34 +1250,24 @@ name='login -_superuser '),%0A url(
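Django resolves urlpatterns top to bottom and dispatches on the first regex that matches, so placing the more specific `login/superuser/` route before plain `login/` is a common defensive ordering (here both patterns are anchored with `$`, so the swap changes convention rather than behavior). After the diff:

    url(r'^login/superuser/$', auth_views.login_view, name='login_superuser'),
    url(r'^login/$', auth_views.login_view, name='login'),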
7b6a2a9f4e24f50e1f8aaee482d4ccec4294161b
Update vso.py
tasks/vso.py
tasks/vso.py
__author__ = 'sachinpatney'

import json
import base64
from urllib.request import urlopen
from urllib.request import Request
from common import ITask
from common import BuildNotifier
from common import sync_read_status_file
from common import Timeline
from common import safe_read_dictionary
from common import Icons
from common import IconBackgrounds


class VSO_API_Templates:
    getBuilds = "https://{0}.visualstudio.com/defaultcollection/{1}/_apis/build/builds?api-version={2}"


class VSO(ITask):
    def get_auth(self):
        d = sync_read_status_file()
        return [
            safe_read_dictionary(d, 'vso_username'),
            safe_read_dictionary(d, 'vso_password')
        ]

    def is_broken(self, build):
        print(build['status'].lower())
        if build['status'] == 'failed':
            return True
        if build['status'].lower() == 'partiallysucceeded':
            return True
        return False

    def get_broken_builds(self, data):
        broken_builds = []
        print(data['value'])
        for build in data['value']:
            if self.is_broken(build):
                if build['definition']['name'] == 'CI':
                    broken_builds.append(build)
            else:
                # We only want broken builds after last success
                break
        return broken_builds

    def get_build_info(self):
        request = Request(VSO_API_Templates.getBuilds.format('pbix', 'powerbiclients', '1.0'))
        auth = self.get_auth()
        username_password = base64.b64encode(("%s:%s" % (auth[0], auth[1])).encode('utf-8')).decode("ascii")
        request.add_header("Authorization", "Basic %s" % username_password)
        result = urlopen(request)
        response = result.read().decode('ascii')
        return json.loads(response)

    def __run__(self, time):
        broken = self.get_broken_builds(self.get_build_info())
        if len(broken) == 0:
            if BuildNotifier.build_was_broken():
                BuildNotifier.update_build_status(False)
                BuildNotifier.notify_all_clear()
                Timeline.add_item_from_bot('BUILD BREAK FIXED', 'Thank you for taking care of it', '',
                                           Icons.Wrench, IconBackgrounds.Green)
                print('Sent all clear notification')
            else:
                print('Was not broken previously too, so do nothing new')
        else:
            if not BuildNotifier.build_was_broken():
                culprits = []
                for b in broken:
                    culprits.append(b['requests'][0]['requestedFor'])
                BuildNotifier.notify_build_break(culprits)
                BuildNotifier.update_build_status(True)
                Timeline.add_item_from_bot('BUILD BREAK', '{0} broke the build. Change was requested by {1}'.format(
                    broken[len(broken) - 1]['buildNumber'],
                    broken[len(broken) - 1]['requests'][0]['requestedFor']['displayName']),
                    '', Icons.Ambulance, IconBackgrounds.Red)
                print('Sent build break notification')
            else:
                print('Was broken previously too, so do nothing')
Python
0.000001
@@ -727,47 +727,8 @@ d):%0A - print(build%5B'status'%5D.lower())%0A
@@ -976,22 +976,29 @@ -print( +for build in data%5B'va
@@ -1002,26 +1002,27 @@ 'value'%5D -)%0A +:%0A %0A
@@ -1005,32 +1005,57 @@ lue'%5D:%0A + print(build%5B'status'%5D) %0A for bui
242c739aa93604e6a19e115598d5c87a5cfe513c
remove unicode py2
mpcontribs-utils/mpcontribs/io/archieml/mpfile.py
mpcontribs-utils/mpcontribs/io/archieml/mpfile.py
from __future__ import unicode_literals, print_function
import archieml, textwrap
from mpcontribs.config import mp_level01_titles, symprec, replacements
from mpcontribs.io.core.mpfile import MPFileCore
from mpcontribs.io.core.recdict import RecursiveDict, Quantity
from mpcontribs.io.core.utils import nest_dict, normalize_root_level
from mpcontribs.io.core.utils import read_csv, make_pair
from mpcontribs.io.core.components.tdata import Table
from pandas import MultiIndex

class MPFile(MPFileCore):
    """Object for representing a MP Contribution File in ArchieML format."""

    @staticmethod
    def from_string(data):
        # use archieml-python parse to import data
        rdct = RecursiveDict(archieml.loads(data))
        rdct.rec_update()

        # post-process internal representation of file contents
        for key in list(rdct.keys()):
            is_general, root_key = normalize_root_level(key)

            if is_general:
                # make part of shared (meta-)data, i.e. nest under `general` at
                # the beginning of the MPFile
                if mp_level01_titles[0] not in rdct:
                    rdct[mp_level01_titles[0]] = RecursiveDict()
                    rdct.move_to_end(mp_level01_titles[0], last=False)

            # normalize identifier key (pop & insert)
            # using rec_update since we're looping over all entries
            # also: support data in bare tables (marked-up only by
            #       root-level identifier) by nesting under 'data'
            value = rdct.pop(key)
            keys = [mp_level01_titles[0]] if is_general else []
            keys.append(root_key)
            if isinstance(value, list):
                keys.append('table')
            rdct.rec_update(nest_dict(value, keys))

            # reference to section to iterate or parse as CIF
            section = rdct[mp_level01_titles[0]][root_key] \
                if is_general else rdct[root_key]

            # iterate to find CSV sections to parse
            # also parse propnet quantities
            if isinstance(section, dict):
                scope = []
                for k, v in section.iterate():
                    level, key = k
                    key = ''.join([replacements.get(c, c) for c in key])
                    level_reduction = bool(level < len(scope))
                    if level_reduction:
                        del scope[level:]
                    if v is None:
                        scope.append(key)
                    elif isinstance(v, list) and isinstance(v[0], dict):
                        table = ''
                        for row_dct in v:
                            table = '\n'.join([table, row_dct['value']])
                        pd_obj = read_csv(table)
                        d = nest_dict(pd_obj.to_dict(), scope + [key])
                        section.rec_update(d, overwrite=True)
                        if not is_general and level == 0:
                            section.insert_default_plot_options(pd_obj, key)
                    elif Quantity is not None and isinstance(v, six.string_types) and ' ' in v:
                        quantity = Quantity.from_key_value(key, v)
                        d = nest_dict(quantity.as_dict(), scope + [key])
                        # TODO quantity.symbol.name
                        section.rec_update(d, overwrite=True)

            # convert CIF strings into pymatgen structures
            if mp_level01_titles[3] in section:
                from pymatgen.io.cif import CifParser
                for name in section[mp_level01_titles[3]].keys():
                    cif = section[mp_level01_titles[3]].pop(name)
                    parser = CifParser.from_string(cif)
                    structure = parser.get_structures(primitive=False)[0]
                    section[mp_level01_titles[3]].rec_update(nest_dict(
                        structure.as_dict(), [name]
                    ))

        return MPFile.from_dict(rdct)

    def get_string(self, df_head_only=False):
        from pymatgen import Structure
        lines, scope = [], []
        for key, value in self.document.iterate():
            if isinstance(value, Table):
                lines[-1] = lines[-1].replace('{', '[+').replace('}', ']')
                header = any([isinstance(col, str) for col in value])
                if isinstance(value.index, MultiIndex):
                    value.reset_index(inplace=True)
                if df_head_only:
                    value = value.head()
                csv_string = value.to_csv(
                    index=False, header=header, float_format='%g', encoding='utf-8'
                )[:-1]
                lines += csv_string.split('\n')
                if df_head_only:
                    lines.append('...')
            elif isinstance(value, Structure):
                from pymatgen.io.cif import CifWriter
                cif = CifWriter(value, symprec=symprec).__str__()
                lines.append(make_pair(
                    ''.join([replacements.get(c, c) for c in key]), cif+':end'
                ))
            elif Quantity is not None and isinstance(value, Quantity):
                lines.append(make_pair(
                    value.display_symbols[0], value.pretty_string()
                ))
            else:
                level, key = key

                # truncate scope
                level_reduction = bool(level < len(scope))
                if level_reduction:
                    del scope[level:]

                # append scope
                if value is None:
                    scope.append(''.join([
                        replacements.get(c, c) for c in key
                    ]))

                # correct scope to omit internal 'general' section
                scope_corr = scope
                if scope[0] == mp_level01_titles[0]:
                    scope_corr = scope[1:]

                # insert scope line
                if (value is None and scope_corr) or \
                        (value is not None and level_reduction):
                    lines.append('\n{' + '.'.join(scope_corr) + '}')

                # insert key-value line
                if value is not None:
                    val = unicode(value) if not isinstance(value, str) else value
                    value_lines = [val] if val.startswith('http') \
                        else textwrap.wrap(val)
                    if len(value_lines) > 1:
                        value_lines = [''] + value_lines + [':end']
                    lines.append(make_pair(
                        ''.join([replacements.get(c, c) for c in key]),
                        '\n'.join(value_lines)
                    ))

        return '\n'.join(lines) + '\n'

MPFileCore.register(MPFile)
Python
0.99863
@@ -6211,63 +6211,18 @@ l = -unicode(value) if not isinstance(value, str) else value +str(value) %0A
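Under Python 3 the builtin `unicode` no longer exists, so the conditional collapses to a plain str() call. Before and after:

    # val = unicode(value) if not isinstance(value, str) else value  # Python 2 only
    val = str(value)  # works on both interpreters, which is what the diff applies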
b8af2bcac83686a169deb631a466a75086a45904
fix format
webserver.py
webserver.py
#!/usr/bin/env python
"""
Raspberry Camera Web Server main file.
"""
#------------------------------------------------------------------------------
import web
import config
import json
import multiprocessing
import time
import shutil
import os.path
import cv2

try:
    import picamera
    found_picamera = True
except ImportError:
    found_picamera = False
#------------------------------------------------------------------------------
DEF_BRIGHTNESS = 50
DEF_CONTRAST = 0
#------------------------------------------------------------------------------
URLS = (
    '/', 'Main',
    '/lastimage.jpg', 'LastImage',
    '/filterimage.jpg', 'FilterImage',
    '/ajax/camera', 'AjaxCamera',
    '/ajax/filter', 'AjaxFilter',
)

WEB_ENV = {'version': config.VERSION,
           'camera_name': config.CAMERA_NAME}

TMPLS = web.template.render('templates', globals=WEB_ENV)

web.config.debug = config.WEB_DEBUG

camera_sleep = config.CAMERA_SLEEP

face_cascade = cv2.CascadeClassifier('hc_ff.xml')
eye_cascade = cv2.CascadeClassifier('hc_eye.xml')
#------------------------------------------------------------------------------
def getInt(string_value, default_value=0):
    try:
        int_value = int(string_value)
        return int_value
    except ValueError:
        return default_value
#------------------------------------------------------------------------------
class Main(object):
    """Class to Handle root urls."""
    def GET(self):
        """http GET response method."""
        return TMPLS.main()
#------------------------------------------------------------------------------
class LastImage(object):
    """Class to handle image queries."""
    def GET(self):
        """http GET response method."""
        web.header("Content-Type", "images/jpeg")
        return open("lastimage.jpg", "rb").read()
#------------------------------------------------------------------------------
class FilterImage(object):
    """Class to handle image queries."""
    def GET(self):
        """http GET response method."""
        web.header("Content-Type", "images/jpeg")
        return open("filterimage.jpg", "rb").read()
#------------------------------------------------------------------------------
class AjaxCamera(object):
    """Class to handle camera ajax queries."""
    def GET(self):
        """http GET response method."""
        web.header('Content-Type', 'application/json')
        cam_config = web.camera_config
        params = {}
        params.update(cam_config)
        return json.dumps({'ok': True, 'params': params})

    def PUT(self):
        """http PUT response method."""
        web.header('Content-Type', 'application/json')
        params = web.input()
        cam_config = web.camera_config
        if 'brightness' in params:
            old_val = cam_config['brightness']
            cam_config['brightness'] = getInt(params['brightness'], old_val)
        if 'contrast' in params:
            old_val = cam_config['contrast']
            cam_config['contrast'] = getInt(params['contrast'], old_val)
        if 'hflip' in params and params.hflip:
            cam_config['hflip'] = not cam_config.get('hflip', False)
        if 'vflip' in params and params.vflip:
            cam_config['vflip'] = not cam_config.get('vflip', False)
        return json.dumps({'ok': True})
#------------------------------------------------------------------------------
class AjaxFilter(object):
    """Class to handle camera ajax filter queries."""

    def POST(self):
        """http POST response method."""
        web.header('Content-Type', 'application/json')
        params = web.input()
        print params
        if 'filter_function' in params:
            if params['filter_function'] == 'edges':
                img = cv2.imread('lastimage.jpg')
                edges = cv2.Canny(img, 100, 100 )
                cv2.imwrite('filterimage.jpg', edges)
            elif params['filter_function'] == 'laplacian':
                img = cv2.imread('lastimage.jpg')
                laplacian = cv2.Laplacian(img, cv2.CV_64F)
                cv2.imwrite('filterimage.jpg', laplacian)
            elif params['filter_function'] == 'faces':
                img = cv2.imread('lastimage.jpg')
                gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                faces = face_cascade.detectMultiScale(gray, 1.3, 5)
                for (x,y,w,h) in faces:
                    cv2.rectangle(img, (x,y), (x+w,y+h), (255,0,0), 2)
                    roi_gray = gray[y:y+h, x:x+w]
                    roi_color = img[y:y+h, x:x+w]
                    eyes = eye_cascade.detectMultiScale(roi_gray)
                    for (ex,ey,ew,eh) in eyes:
                        cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
                cv2.imwrite('filterimage.jpg', img)
        return json.dumps({'ok': True})
#------------------------------------------------------------------------------
def camera_loop(camera_config):
    """Camera update loop."""
    camera = None
    if found_picamera:
        camera = picamera.PiCamera()

    while 1 == 1:
        if camera:
            camera.brightness = camera_config.get('brightness', DEF_BRIGHTNESS)
            camera.contrast = camera_config.get('contrast', DEF_CONTRAST)
            camera.hflip = camera_config.get('hflip', False)
            camera.vflip = camera_config.get('vflip', False)
            camera.capture('lastimage0.jpg')
            shutil.copy('lastimage0.jpg', 'lastimage.jpg')
        else:
            print "camera: ", camera_config
        time.sleep(camera_sleep)
#------------------------------------------------------------------------------
def main():
    """Main function."""
    if found_picamera:
        print "picamera founded"
    else:
        print "picamera not founded"

    if not os.path.isfile("lastimage.jpg"):
        shutil.copy("empty.jpg", "lastimage.jpg");

    if not os.path.isfile("filterimage.jpg"):
        shutil.copy("empty.jpg", "filterimage.jpg");

    manager = multiprocessing.Manager()
    camera_config = manager.dict()
    camera_config['brightness'] = DEF_BRIGHTNESS
    camera_config['contrast'] = DEF_CONTRAST
    camera_config['hflip'] = False
    camera_config['vflip'] = False

    app = web.application(URLS, globals())
    web.camera_config = camera_config

    loop = multiprocessing.Process(target=camera_loop, args=(camera_config,))
    loop.start()

    app.run()

    loop.terminate()
#------------------------------------------------------------------------------
if __name__ == "__main__":
    main()
#------------------------------------------------------------------------------
Python
0.00006
@@ -1029,16 +1029,17 @@ .xml')%0A%0A +%0A #-------
@@ -3482,20 +3482,16 @@ ies.%22%22%22%0A - %0A def
@@ -3840,17 +3840,16 @@ 100, 100 - )%0A
@@ -4385,12 +4385,15 @@ (x, + y, + w, + h) i
@@ -4443,16 +4443,17 @@ img, (x, + y), (x+w
@@ -4453,16 +4453,17 @@ ), (x+w, + y+h), (2
@@ -4465,18 +4465,20 @@ ), (255, + 0, + 0), 2)%0A
@@ -4674,14 +4674,17 @@ (ex, + ey, + ew, + eh)
@@ -4744,40 +4744,86 @@ lor, + (ex, + ey), + (ex+ew, + ey+eh), +%0A (0, + 255, + 0), + 2)%0A%0A
@@ -5961,25 +5961,24 @@ timage.jpg%22) -; %0A%0A if not
@@ -6068,9 +6068,8 @@ pg%22) -; %0A%0A
553cd2cf48ed7be12021b2d9718a1d6fa6cdd2f4
Fix a method call to roll in reordering.
incuna_test_utils/testcases/integration.py
incuna_test_utils/testcases/integration.py
from django.core.exceptions import ImproperlyConfigured
from django.shortcuts import render

from .request import BaseRequestTestCase


class BaseIntegrationTestCase(BaseRequestTestCase):
    """
    A TestCase that operates similarly to a Selenium test.

    Contains methods that access pages and render them to strings full of
    HTML. Can be used to assert the contents of templates as well as doing
    normal TestCase things.

    Must be subclassed with the following attributes in order to work:
    * user_factory
    * view_class (class-based view) or view (function-based view)
    """
    def get_view(self):
        """
        Returns the class's attached view.

        Checks self.view_class, then self.view. Throws an ImproperlyConfigured
        exception if neither exist.
        """
        try:
            return self.view_class.as_view()
        except AttributeError:
            # Continue on to the next try/catch
            pass

        try:
            return self.view
        except AttributeError:
            message = "This test must have a 'view_class' or 'view' attribute."
            raise ImproperlyConfigured(message)

    def access_view(self, request=None, *args, **kwargs):
        """
        Helper method that accesses the test's view.

        Accepts an optional request parameter. If this isn't supplied,
        access_view creates a basic request on your behalf.

        Returns a HTTPResponse object with the request (created or otherwise)
        attached.
        """
        if request is None:
            request = self.create_request()

        view = self.get_view()
        response = view(request, *args, **kwargs)

        # Add the request to the response.
        # This is a weird-looking but compact way of ensuring we have access to
        # the request everywhere we need it, without doing clunky things like
        # returning tuples all the time.
        response.request = request
        return response

    def render_to_str(self, response, request=None):
        """
        Render a HTTPResponse into a string that holds the HTML content.

        Accepts an optional request parameter, and looks for a request
        attached to the response if the optional parameter isn't specified.
        """
        if request is None:
            request = response.request

        response = render(request, response.template_name, response.context_data)
        return str(response.content)

    def access_view_and_render_response(self, request=None, expected_status=200, *view_args, **view_kwargs):
        """
        Accesses the view and returns a string of HTML.

        Combines access_view, an assertion on the returned status, and
        render_to_str.

        Accepts an optional request (but will create a simple one if the
        parameter isn't supplied), an expected status code for the response
        (which defaults to 200), and args and kwargs for the view method.
        """
        response = self.access_view(*view_args, request=request, **view_kwargs)

        # Assert that the response has the correct status code before we go
        # any further. Throwing accurately descriptive failures when something
        # goes wrong is better than trying to run assertions on the content
        # of a HTML response for some random 404 page.
        self.assertEqual(expected_status, response.status_code)

        # Render the response and return it.
        return self.render_to_str(response)

    def assert_count(self, needle, haystack, count):
        """
        Assert that 'needle' occurs exactly 'count' times in 'haystack'.

        Used as a snazzier, stricter version of unittest.assertIn. Outputs a
        verbose error message when it fails.
        """
        actual_count = haystack.count(needle)

        # Build a verbose error message in case we need it.
        plural = '' if count == 1 else 's'
        message = 'Expected {count} instance{plural} of {needle}, but found {actual_count}, in {haystack}'
        message = message.format_map(locals())

        # Make the assertion.
        self.assertEqual(count, actual_count, message)
Python
0
@@ -3017,16 +3017,25 @@ ss_view( +request, *view_ar
@@ -3041,25 +3041,8 @@ rgs, - request=request, **v
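The original call passed `request` as a keyword after `*view_args`; since access_view declares `request` as its first (defaulted) parameter, any positional view arg would also bind to `request` and Python would raise "got multiple values for argument 'request'". The fix passes it positionally up front:

    # response = self.access_view(*view_args, request=request, **view_kwargs)  # clashes
    response = self.access_view(request, *view_args, **view_kwargs)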
b2270d751146ed8f27a0d0cc85a10a15ea28dab3
Fix float to byte conversion.
avena/np.py
avena/np.py
#!/usr/bin/env python

import numpy
import sys


_eps = 10.0 * sys.float_info.epsilon


# Map of NumPy array type strings to types
_np_dtypes = {
    'int8':    numpy.int8,
    'int16':   numpy.int16,
    'int32':   numpy.int32,
    'int64':   numpy.int64,
    'uint8':   numpy.uint8,
    'uint16':  numpy.uint16,
    'uint32':  numpy.uint32,
    'uint64':  numpy.uint64,
    'float32': numpy.float32,
    'float64': numpy.float64,
}


_dtype_bounds = {
    'float32': (0.0, 1.0),
    'float64': (0.0, 1.0),
    'uint8':   (0, 255),
}


def from_uint8(array):
    float_array = array.astype(numpy.float32)
    float_array *= 1.0 / 256.0
    return float_array


def to_uint8(array):
    uint8_array = numpy.empty(array.shape, dtype=numpy.uint8)
    numpy.around(array * 255, out=uint8_array)
    return uint8_array


def clip(array, bounds):
    """Clip the values of an array to the given interval."""
    (min, max) = bounds
    x = array < min + _eps
    y = array > max - _eps
    array[x] = min
    array[y] = max
    return


def normalize(array):
    """Normalize an array to the interval [0,1]."""
    mu = numpy.mean(array)
    rho2 = numpy.std(array)
    min = mu - 1.5 * rho2
    max = mu + 1.5 * rho2
    array -= min
    if max - min > _eps:
        array /= max - min
    return


def peak(array):
    """Return the index of the peak value of an array."""
    return numpy.unravel_index(numpy.argmax(array), array.shape)


def _zeropad(array, size):
    m, n = array.shape
    p, q = size
    z = numpy.zeros((p, q), dtype=array.dtype)
    z[:m, :n] = array
    return z


if __name__ == '__main__':
    pass
Python
0
@@ -692,21 +692,21 @@ y):%0A -uint8 +float _array =
@@ -716,91 +716,78 @@ mpy. -empty(array.shape, dtype=numpy.uint8)%0A numpy.around(array * 255, out=uint8_array +around(array * 255.0)%0A uint8_array = float_array.astype(numpy.uint8 )%0A
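numpy.around refuses to write float results into a uint8 `out` buffer (it is an unsafe cast), so the original to_uint8 failed at runtime. The diff rounds in float space first and only then casts down:

    def to_uint8(array):
        float_array = numpy.around(array * 255.0)      # round while still floating point
        uint8_array = float_array.astype(numpy.uint8)  # then cast to bytes
        return uint8_array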
2b21a07ad1a26f7006809936e5a58e5af710f61b
bump version: 1.0.1
admin_footer/__init__.py
admin_footer/__init__.py
# Copyright Collab 2015-2016
# See LICENSE for details.

"""
`django-admin-footer` application.
"""

from __future__ import unicode_literals

#: Application version.
__version__ = (1, 0, 0)


def short_version(version=None):
    """
    Return short application version. For example: `1.0.0`.
    """
    v = version or __version__
    return '.'.join([str(x) for x in v[:3]])


def get_version(version=None):
    """
    Return full version nr, inc. rc, beta etc tags.

    For example: `2.0.0a1`

    :rtype: str
    """
    v = version or __version__
    if len(v) == 4:
        return '{0}{1}'.format(short_version(v), v[3])
    return short_version(v)

#: Full version number.
version = get_version()
Python
0.000002
@@ -181,17 +181,17 @@ (1, 0, -0 +1 )%0A%0A%0Adef
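The bump touches only the version tuple; the helpers shown in the record derive the strings from it. Expected values after this commit (doctest-style, based on the functions above):

    >>> short_version((1, 0, 1))
    '1.0.1'
    >>> get_version((1, 0, 1))
    '1.0.1'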
64619c465378ee34299961a225f0a3efc22c3d41
Remove unused import.
app/handlers/tests/test_stats_handler.py
app/handlers/tests/test_stats_handler.py
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""Test module for the JobHandler handler."""

try:
    import simplejson as json
except ImportError:
    import json

import mock
import tornado

import urls

from handlers.tests.test_handler_base import TestHandlerBase


class TestStatsHandler(TestHandlerBase):

    def get_app(self):
        return tornado.web.Application([urls._STATS_URL], **self.settings)

    def test_post(self):
        headers = {"Authorization": "foo", "Content-Type": "application/json"}
        body = json.dumps({"foo": "bar"})

        response = self.fetch(
            "/statistics", method="POST", headers=headers, body=body)

        self.assertEqual(response.code, 501)
        self.assertEqual(
            response.headers["Content-Type"], self.content_type)

    def test_post_wrong_token(self):
        self.validate_token.return_value = (False, None)
        headers = {"Authorization": "foo", "Content-Type": "application/json"}
        body = json.dumps({"foo": "bar"})

        response = self.fetch(
            "/statistics", method="POST", headers=headers, body=body)

        self.assertEqual(response.code, 403)
        self.assertEqual(
            response.headers["Content-Type"], self.content_type)

    def test_put(self):
        headers = {"Authorization": "foo", "Content-Type": "application/json"}
        body = json.dumps({"foo": "bar"})

        response = self.fetch(
            "/statistics", method="PUT", headers=headers, body=body)

        self.assertEqual(response.code, 501)
        self.assertEqual(
            response.headers["Content-Type"], self.content_type)

    def test_put_wrong_token(self):
        self.validate_token.return_value = (False, None)
        headers = {"Authorization": "foo", "Content-Type": "application/json"}
        body = json.dumps({"foo": "bar"})

        response = self.fetch(
            "/statistics", method="PUT", headers=headers, body=body)

        self.assertEqual(response.code, 403)
        self.assertEqual(
            response.headers["Content-Type"], self.content_type)

    def test_delete(self):
        headers = {"Authorization": "foo"}

        response = self.fetch(
            "/statistics", method="DELETE", headers=headers)

        self.assertEqual(response.code, 501)
        self.assertEqual(
            response.headers["Content-Type"], self.content_type)

    def test_delete_wrong_token(self):
        self.validate_token.return_value = (False, None)
        headers = {"Authorization": "foo"}

        response = self.fetch("/statistics", method="DELETE", headers=headers)

        self.assertEqual(response.code, 403)
        self.assertEqual(
            response.headers["Content-Type"], self.content_type)
Python
0
@@ -772,20 +772,8 @@ on%0A%0A -import mock%0A impo
62af60d69a8a3eeb5bc70c80a3a8dc2d863a62b1
fix empty password registrations.
modules/websession/lib/webaccount_forms.py
modules/websession/lib/webaccount_forms.py
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

"""WebAccount Forms"""

from invenio.webinterface_handler_flask_utils import _
from invenio.wtforms_utils import InvenioBaseForm, FilterForm, DateTimePickerWidget, FilterTextField
from flask.ext.wtf import Form, SubmitField, BooleanField, TextField, \
    TextAreaField, PasswordField, Required, \
    HiddenField, validators
from invenio.websession_model import User
from invenio.webuser import email_valid_p, nickname_valid_p
from sqlalchemy.exc import SQLAlchemyError

from websession_webinterface import wash_login_method

from invenio.config import \
    CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS, \
    CFG_ACCESS_CONTROL_LEVEL_GUESTS, \
    CFG_ACCESS_CONTROL_LEVEL_SITE, \
    CFG_ACCESS_CONTROL_LIMIT_REGISTRATION_TO_DOMAIN, \
    CFG_ACCESS_CONTROL_NOTIFY_ADMIN_ABOUT_NEW_ACCOUNTS, \
    CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_NEW_ACCOUNT, \
    CFG_SITE_SUPPORT_EMAIL
from invenio.access_control_config import CFG_EXTERNAL_AUTHENTICATION


def validate_nickname_or_email(form, field):
    try:
        User.query.filter(User.nickname == field.data).one()
    except SQLAlchemyError:
        try:
            User.query.filter(User.email == field.data).one()
        except SQLAlchemyError:
            raise validators.ValidationError(
                _('Not valid nickname or email: %s') % (field.data, ))


class LoginForm(Form):
    nickname = TextField(
        _("Nickname"),
        validators=[Required(message=_("Nickname not provided")),
                    validate_nickname_or_email])
    password = PasswordField(_("Password"))
    remember = BooleanField(_("Remember Me"))
    referer = HiddenField()
    login_method = HiddenField()
    submit = SubmitField(_("Sign in"))

    def validate_login_method(self, field):
        field.data = wash_login_method(field.data)


class ChangeUserEmailSettingsForm(InvenioBaseForm):
    email = TextField(_("New email"))


class RegisterForm(Form):
    """
    User registration form
    """
    email = TextField(
        _("Email address"),
        validators=[Required(message=_("Email not provided"))],
        description=_("Example") + ": john.doe@example.com")
    nickname = TextField(
        _("Nickname"),
        validators=[Required(message=_("Nickname not provided"))],
        description=_("Example") + ": johnd")
    password = PasswordField(
        _("Password"),
        description=_("The password phrase may contain punctuation, spaces, etc."))
    password2 = PasswordField(_("Confirm password"),)
    referer = HiddenField()
    action = HiddenField(default='login')
    submit = SubmitField(_("Register"))

    def validate_nickname(self, field):
        if nickname_valid_p(field.data) != 1:
            raise validators.ValidationError(
                _("Desired nickname %s is invalid.") % field.data
            )
        # is nickname already taken?
        try:
            User.query.filter(User.nickname == field.data).one()
            raise validators.ValidationError(
                _("Desired nickname %s already exists in the database.")
                % field.data
            )
        except SQLAlchemyError:
            pass

    def validate_email(self, field):
        field.data = field.data.lower()
        if email_valid_p(field.data.lower()) != 1:
            raise validators.ValidationError(
                _("Supplied email address %s is invalid.") % field.data
            )
        # is email already taken?
        try:
            User.query.filter(User.email == field.data).one()
            raise validators.ValidationError(
                _("Supplied email address %s already exists in the database.")
                % field.data
            )
        except SQLAlchemyError:
            pass

    def validate_password2(self, field):
        if field.data != self.password.data:
            raise validators.ValidationError(_("Both passwords must match."))
Python
0
@@ -4443,32 +4443,202 @@ pass%0A%0A + def validate_password(self, field):%0A if len(field.data) %3C= 6:%0A raise validators.ValidationError(_(%22Password must be at least 6 characters long.%22))%0A%0A def validate
9968247d4a73549f1c5b02abf8976f11662b46f7
Add a default logger. Specifically log repeated regulation node labels
regcore/settings/base.py
regcore/settings/base.py
"""Base settings file; used by manage.py. All settings can be overridden via local_settings.py""" import os from django.utils.crypto import get_random_string INSTALLED_APPS = [ 'haystack', 'regcore', 'regcore_read', 'regcore_write', 'south' ] SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', get_random_string(50)) DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'eregs.db' } } TEST_RUNNER = 'django_nose.runner.NoseTestSuiteRunner' ROOT_URLCONF = 'regcore.urls' DEBUG = True BACKENDS = { 'regulations': 'regcore.db.django_models.DMRegulations', 'layers': 'regcore.db.django_models.DMLayers', 'notices': 'regcore.db.django_models.DMNotices', 'diffs': 'regcore.db.django_models.DMDiffs' } NOSE_ARGS = [ '--with-coverage', '--cover-package=regcore,regcore_read,regcore_write' ] ELASTIC_SEARCH_URLS = [] ELASTIC_SEARCH_INDEX = 'eregs' HAYSTACK_CONNECTIONS = { 'default': { 'ENGINE': 'haystack.backends.solr_backend.SolrEngine', 'URL': 'http://localhost:8983/solr' } } try: from local_settings import * except ImportError: pass
Python
0
@@ -1091,16 +1091,469 @@ %7D%0A%7D%0A%0A +LOGGING = %7B%0A 'version': 1,%0A 'disable_existing_loggers': False,%0A 'handlers': %7B%0A 'console': %7B%0A 'level': 'INFO',%0A 'class': 'logging.StreamHandler',%0A %7D%0A %7D,%0A 'loggers': %7B%0A '': %7B%0A 'handlers': %5B'console'%5D,%0A 'level': 'INFO',%0A %7D,%0A 'django.request': %7B%0A 'handlers': %5B'console'%5D,%0A 'propagate': False,%0A 'level': 'ERROR'%0A %7D%0A %7D%0A%7D%0A%0A try:%0A
16eda1aac6183f612c678ae555367113f1326c0a
Mark upcoming release number.
registration/__init__.py
registration/__init__.py
VERSION = (2, 1, 2, 'final', 0)


def get_version():
    """
    Returns a PEP 386-compliant version number from VERSION.
    """
    assert len(VERSION) == 5
    assert VERSION[3] in ('alpha', 'beta', 'rc', 'final')

    # Now build the two parts of the version number:
    # main = X.Y[.Z]
    # sub = .devN - for pre-alpha releases
    #     | {a|b|c}N - for alpha, beta and rc releases

    parts = 2 if VERSION[2] == 0 else 3
    main = '.'.join(str(x) for x in VERSION[:parts])

    sub = ''
    if VERSION[3] != 'final':
        mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
        sub = mapping[VERSION[3]] + str(VERSION[4])

    return str(main + sub)
Python
0
@@ -11,20 +11,20 @@ (2, -1, 2, 'final +2, 0, 'alpha ', 0
cdb1af30a0f48adcb7d94b444642f5a43f5905dd
Make release-notes update timing configurable
bin/cron.py
bin/cron.py
#!/usr/bin/env python
from __future__ import print_function, unicode_literals

import datetime
import os
import sys
from subprocess import check_call

import requests
from apscheduler.schedulers.blocking import BlockingScheduler
from decouple import config
from pathlib2 import Path


schedule = BlockingScheduler()
DEAD_MANS_SNITCH_URL = config('DEAD_MANS_SNITCH_URL', default='')

# ROOT path of the project. A pathlib.Path object.
ROOT_PATH = Path(__file__).resolve().parents[1]
ROOT = str(ROOT_PATH)
MANAGE = str(ROOT_PATH / 'manage.py')


def call_command(command):
    check_call('python {0} {1}'.format(MANAGE, command), shell=True)


class scheduled_job(object):
    """Decorator for scheduled jobs. Takes same args as apscheduler.schedule_job."""

    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs

    def __call__(self, fn):
        self.name = fn.__name__
        self.callback = fn
        schedule.add_job(self.run, id=self.name, *self.args, **self.kwargs)
        self.log('Registered')
        return self.run

    def run(self):
        self.log('starting')
        try:
            self.callback()
        except Exception as e:
            self.log('CRASHED: {}'.format(e))
            raise
        else:
            self.log('finished successfully')

    def log(self, message):
        msg = '[{}] Clock job {}@{}: {}'.format(
            datetime.datetime.utcnow(), self.name,
            os.getenv('DEIS_APP', 'default_app'), message)
        print(msg, file=sys.stderr)


def ping_dms(function):
    """Pings Dead Man's Snitch after job completion if URL is set."""
    def _ping():
        function()
        if DEAD_MANS_SNITCH_URL:
            utcnow = datetime.datetime.utcnow()
            payload = {'m': 'Run {} on {}'.format(function.__name__,
                                                  utcnow.isoformat())}
            requests.get(DEAD_MANS_SNITCH_URL, params=payload)
    _ping.__name__ = function.__name__
    return _ping


def schedule_database_jobs():
    @scheduled_job('interval', minutes=15)
    @ping_dms
    def update_product_details():
        call_command('update_product_details_files --database bedrock')

    @scheduled_job('interval', minutes=30)
    def update_externalfiles():
        call_command('update_externalfiles')

    @scheduled_job('interval', minutes=30)
    def update_security_advisories():
        call_command('update_security_advisories')

    @scheduled_job('interval', hours=6)
    def update_tweets():
        call_command('cron update_tweets')

    @scheduled_job('interval', hours=1)
    def ical_feeds():
        call_command('cron update_ical_feeds')
        call_command('cron cleanup_ical_events')

    @scheduled_job('interval', hours=1)
    def update_blog_feeds():
        call_command('update_wordpress --database bedrock')

    @scheduled_job('interval', minutes=5)
    def update_release_notes():
        call_command('update_release_notes --quiet')


def schedul_l10n_jobs():
    @scheduled_job('interval', minutes=10)
    def update_locales():
        call_command('l10n_update')


if __name__ == '__main__':
    args = sys.argv[1:]
    has_jobs = False
    if 'db' in args:
        schedule_database_jobs()
        has_jobs = True

    if 'l10n' in args:
        schedul_l10n_jobs()
        has_jobs = True

    if has_jobs:
        try:
            schedule.start()
        except (KeyboardInterrupt, SystemExit):
            pass
Python
0
@@ -375,16 +375,101 @@ ault='') +%0AREL_NOTES_UPDATE_MINUTES = config('REL_NOTES_UPDATE_MINUTES', default='5', cast=int) %0A%0A# ROOT @@ -2930,17 +2930,40 @@ minutes= -5 +REL_NOTES_UPDATE_MINUTES )%0A de
18e4e457752051dc4d5f57e78e83572638c4fe62
Refactor syncdb replacement. Clone existing schemata if they don't exist at syncdb time.
multi_schema/management/commands/syncdb.py
multi_schema/management/commands/syncdb.py
from django.core.management.commands import syncdb

from django.db import models, connection, transaction

try:
    from south.management.commands import syncdb
except ImportError:
    pass

class Command(syncdb.Command):
    def handle_noargs(self, **options):
        cursor = connection.cursor()

        # Ensure we have a __template__ schema.
        cursor.execute("SELECT schema_name FROM information_schema.schemata WHERE schema_name = '__template__';")
        if not cursor.fetchone():
            cursor.execute("CREATE SCHEMA __template__;")
            transaction.commit_unless_managed()

        # Set the search path, so we find created models correctly
        cursor.execute("SET search_path TO public,__template__;")

        super(Command, self).handle_noargs(**options)
Python
0
@@ -183,16 +183,63 @@ pass%0A%0A +from ...models import Schema, template_schema%0A%0A class Co @@ -305,53 +305,8 @@ s):%0A - cursor = connection.cursor() %0A @@ -361,247 +361,37 @@ -cursor.execute(%22SELECT schema_name FROM information_schema.schemata WHERE schema_name = '__template__';%22)%0A if not cursor.fetchone():%0A cursor.execute(%22CREATE SCHEMA __template__;%22)%0A transaction.commit_unless_managed +template_schema.create_schema ()%0A @@ -465,16 +465,53 @@ rrectly%0A + cursor = connection.cursor()%0A @@ -569,32 +569,32 @@ e__;%22)%0A %0A - super(Co @@ -622,16 +622,204 @@ oargs(**options) +%0A %0A # Ensure all existing schemata exist (in case we imported them using loaddata or something)%0A for schema in Schema.objects.all():%0A schema.create_schema()
2665aa46702175a0d33ae76cfccdbbbddf42d316
Allow for comments in the sql file that do not start the line.
multi_schema/management/commands/syncdb.py
multi_schema/management/commands/syncdb.py
import os.path

from django.core.management.commands import syncdb

from django.db import models, connection, transaction

try:
    from south.management.commands import syncdb
except ImportError:
    pass

from ...models import Schema, template_schema

class Command(syncdb.Command):
    def handle_noargs(self, **options):
        # Ensure we have the clone_schema() function
        clone_schema_file = os.path.join(os.path.abspath(__file__ + '/../../../'), 'sql', 'clone_schema.sql')
        clone_schema_function = " ".join([x.strip() for x in open(clone_schema_file).readlines() if not x.startswith('--')])
        clone_schema_function = clone_schema_function.replace("'%'", "'%%'")
        cursor = connection.cursor()
        cursor.execute(clone_schema_function)

        # Ensure we have a __template__ schema.
        template_schema.create_schema()

        # Set the search path, so we find created models correctly
        cursor = connection.cursor()
        cursor.execute("SET search_path TO public,__template__;")

        super(Command, self).handle_noargs(**options)

        # Ensure all existing schemata exist (in case we imported them using loaddata or something)
        for schema in Schema.objects.all():
            schema.create_schema()
Python
0
@@ -586,16 +586,24 @@ f not x. +strip(). startswi
74f016d343fe270ab3affe79cc82266d94120e5c
Remove now unused pick_server_from_list
synapse/http/federation/srv_resolver.py
synapse/http/federation/srv_resolver.py
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import random
import time

import attr

from twisted.internet import defer
from twisted.internet.error import ConnectError
from twisted.names import client, dns
from twisted.names.error import DNSNameError, DomainError

from synapse.logging.context import make_deferred_yieldable

logger = logging.getLogger(__name__)

SERVER_CACHE = {}


@attr.s(slots=True, frozen=True)
class Server(object):
    """
    Our record of an individual server which can be tried to reach a destination.

    Attributes:
        host (bytes): target hostname
        port (int):
        priority (int):
        weight (int):
        expires (int): when the cache should expire this record - in *seconds* since
            the epoch
    """

    host = attr.ib()
    port = attr.ib()
    priority = attr.ib(default=0)
    weight = attr.ib(default=0)
    expires = attr.ib(default=0)


def pick_server_from_list(server_list):
    """Randomly choose a server from the server list

    Args:
        server_list (list[Server]): list of candidate servers

    Returns:
        Tuple[bytes, int]: (host, port) pair for the chosen server
    """
    if not server_list:
        raise RuntimeError("pick_server_from_list called with empty list")

    # TODO: currently we only use the lowest-priority servers. We should maintain a
    # cache of servers known to be "down" and filter them out

    min_priority = min(s.priority for s in server_list)
    eligible_servers = list(s for s in server_list if s.priority == min_priority)
    total_weight = sum(s.weight for s in eligible_servers)
    target_weight = random.randint(0, total_weight)

    for s in eligible_servers:
        target_weight -= s.weight

        if target_weight <= 0:
            return s.host, s.port

    # this should be impossible.
    raise RuntimeError("pick_server_from_list got to end of eligible server list.")


def _sort_server_list(server_list):
    """Given a list of SRV records sort them into priority order and shuffle
    each priority with the given weight.
    """
    priority_map = {}

    for server in server_list:
        priority_map.setdefault(server.priority, []).append(server)

    results = []
    for priority in sorted(priority_map):
        servers = priority_map[priority]

        # This algorithms follows the algorithm described in RFC2782.
        #
        # N.B. Weights can be zero, which means that you should pick that server
        # last *or* that its the only server in this priority.

        # We sort to ensure zero weighted items are first.
        servers.sort(key=lambda s: s.weight)

        total_weight = sum(s.weight for s in servers)
        while servers:
            target_weight = random.randint(0, total_weight)

            for s in servers:
                target_weight -= s.weight

                if target_weight <= 0:
                    break

            results.append(s)
            servers.remove(s)
            total_weight -= s.weight

    return results


class SrvResolver(object):
    """Interface to the dns client to do SRV lookups, with result caching.

    The default resolver in twisted.names doesn't do any caching (it has a CacheResolver,
    but the cache never gets populated), so we add our own caching layer here.

    Args:
        dns_client (twisted.internet.interfaces.IResolver): twisted resolver impl
        cache (dict): cache object
        get_time (callable): clock implementation. Should return seconds since the epoch
    """

    def __init__(self, dns_client=client, cache=SERVER_CACHE, get_time=time.time):
        self._dns_client = dns_client
        self._cache = cache
        self._get_time = get_time

    @defer.inlineCallbacks
    def resolve_service(self, service_name):
        """Look up a SRV record

        Args:
            service_name (bytes): record to look up

        Returns:
            Deferred[list[Server]]:
                a list of the SRV records, or an empty list if none found
        """
        now = int(self._get_time())

        if not isinstance(service_name, bytes):
            raise TypeError("%r is not a byte string" % (service_name,))

        cache_entry = self._cache.get(service_name, None)
        if cache_entry:
            if all(s.expires > now for s in cache_entry):
                servers = list(cache_entry)
                return _sort_server_list(servers)

        try:
            answers, _, _ = yield make_deferred_yieldable(
                self._dns_client.lookupService(service_name)
            )
        except DNSNameError:
            # TODO: cache this. We can get the SOA out of the exception, and use
            # the negative-TTL value.
            return []
        except DomainError as e:
            # We failed to resolve the name (other than a NameError)
            # Try something in the cache, else rereaise
            cache_entry = self._cache.get(service_name, None)
            if cache_entry:
                logger.warn(
                    "Failed to resolve %r, falling back to cache. %r", service_name, e
                )
                return list(cache_entry)
            else:
                raise e

        if (
            len(answers) == 1
            and answers[0].type == dns.SRV
            and answers[0].payload
            and answers[0].payload.target == dns.Name(b".")
        ):
            raise ConnectError("Service %s unavailable" % service_name)

        servers = []

        for answer in answers:
            if answer.type != dns.SRV or not answer.payload:
                continue

            payload = answer.payload

            servers.append(
                Server(
                    host=payload.target.name,
                    port=payload.port,
                    priority=payload.priority,
                    weight=payload.weight,
                    expires=now + answer.ttl,
                )
            )

        self._cache[service_name] = list(servers)
        return _sort_server_list(servers)
Python
0
@@ -1516,1011 +1516,8 @@ )%0A%0A%0A -def pick_server_from_list(server_list):%0A %22%22%22Randomly choose a server from the server list%0A%0A Args:%0A server_list (list%5BServer%5D): list of candidate servers%0A%0A Returns:%0A Tuple%5Bbytes, int%5D: (host, port) pair for the chosen server%0A %22%22%22%0A if not server_list:%0A raise RuntimeError(%22pick_server_from_list called with empty list%22)%0A%0A # TODO: currently we only use the lowest-priority servers. We should maintain a%0A # cache of servers known to be %22down%22 and filter them out%0A%0A min_priority = min(s.priority for s in server_list)%0A eligible_servers = list(s for s in server_list if s.priority == min_priority)%0A total_weight = sum(s.weight for s in eligible_servers)%0A target_weight = random.randint(0, total_weight)%0A%0A for s in eligible_servers:%0A target_weight -= s.weight%0A%0A if target_weight %3C= 0:%0A return s.host, s.port%0A%0A # this should be impossible.%0A raise RuntimeError(%22pick_server_from_list got to end of eligible server list.%22)%0A%0A%0A def
e2555422c12f0b4cf59d8c636a087eddc3150948
allow CR
anaconda_verify/utils.py
anaconda_verify/utils.py
import sys
import collections

from anaconda_verify.const import MAGIC_HEADERS, DLL_TYPES


def get_object_type(data):
    head = data[:4]
    if head not in MAGIC_HEADERS:
        return None
    lookup = MAGIC_HEADERS.get(head)
    if lookup == 'DLL':
        pos = data.find('PE\0\0')
        if pos < 0:
            return "<no PE header found>"
        i = ord(data[pos + 4]) + 256 * ord(data[pos + 5])
        return "DLL " + DLL_TYPES.get(i)
    elif lookup.startswith('MachO'):
        return lookup
    elif lookup == 'ELF':
        return "ELF" + {'\x01': '32', '\x02': '64'}.get(data[4])


def all_ascii(data):
    for c in data:
        n = ord(c) if sys.version_info[0] == 2 else c
        if not (n == 10 or 32 <= n < 127):
            return False
    return True


class memoized(object):
    """Decorator. Caches a function's return value each time it is called.
    If called later with the same arguments, the cached value is returned
    (not reevaluated).
    """
    def __init__(self, func):
        self.func = func
        self.cache = {}

    def __call__(self, *args):
        if not isinstance(args, collections.Hashable):
            # uncacheable. a list, for instance.
            # better to not cache than blow up.
            return self.func(*args)
        if args in self.cache:
            return self.cache[args]
        else:
            value = self.func(*args)
            self.cache[args] = value
            return value


if __name__ == '__main__':
    print(sys.version)
    print(all_ascii(b'Hello\x00'), all_ascii(b"Hello World!"))
Python
0.00006
@@ -711,13 +711,19 @@ (n -== 10 +in (10, 13) or
cdefb28463a90be54b3b77f1d6d76fb8a2201da9
Update models.py
analytics_kits/models.py
analytics_kits/models.py
from __future__ import unicode_literals

from django.db import models
from django.apps import apps
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.utils.translation import ugettext_lazy as _

import logging


# A model to save absolute URL for each objects.
# It helps to lookup in different models for an object with an specific URL.
class ObjectUrl(models.Model):
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    url = models.TextField()


# Mixin Model to record Urls into ObjectUrl
class AnalyiticsKitsMixin(object):

    def save(self, force_insert=False, force_update=False, using=None,
             update_fields=None):
        super(AnalyiticsKitsMixin, self).save(
            force_insert=force_insert, force_update=force_update,
            using=using, update_fields=update_fields)
        content_type = ContentType.objects.get_for_model(self)
        try:
            url = self.get_absolute_url()
        except:
            logging.error('get_absolute_url method is not defined')
            return False

        obj_url = ObjectUrl.objects.get_or_create(
            content_type=content_type,
            object_id=self.pk,
            defaults={'url': url}
        )
        obj_url.url = url
        obj_url.save()


# Most popular abstract model
class AnalyticsResult(models.Model):
    pulled_date = models.DateTimeField(
        'Date pulled from analytics', null=True, blank=True)
    title = models.TextField()
    item_url = models.CharField(
        max_length=255, null=True, blank=True, unique=True)
    no_of_views = models.IntegerField()
    object_id = models.PositiveIntegerField(
        db_index=True, null=True, blank=True)
    content_type = models.ForeignKey(
        ContentType,
        verbose_name=_('Content type'),
        related_name="%(app_label)s_%(class)s",
        null=True,
        blank=True)
    content_object = GenericForeignKey()

    # You can add extra fields on your inherited model to customise it
    # For example you may want to add publish_date and is_published fields
    #
    # publish_date = models.DateTimeField(blank=True, null=True)
    # is_published = models.BooleanField(default=True)

    class Meta:
        abstract = True

    @staticmethod
    def get_metrics():
        return 'ga:pageviews'

    @staticmethod
    def get_dimensions():
        return 'ga:pagePath,ga:pageTitle'

    @staticmethod
    def get_sort():
        return "-ga:pageviews"

    @staticmethod
    def get_filters():
        return 'ga:pagePath!~^/$;ga:pagePath!~^/search/*;ga:pagePath!~^/accounts*;ga:pagePath!~^/iw-admin*;ga:pagePath!~^/[A-z-]+/$'

    # save data passed by the mnagement command to the model
    @classmethod
    def process_data(self, data, date):
        # types = self.retrieve_types()
        for page_path, page_title, page_views in data['rows']:
            obj = self.get_object(page_path)
            # print page_path
            if obj is not None:
                data, created = self.objects.get_or_create(
                    item_url=page_path,
                    defaults={'no_of_views': page_views})
                data.pulled_date = date
                data.content_type = ContentType.objects.get_for_model(obj)
                data.object_id = obj.id
                data.content_object = obj
                data.title = obj.title
                data.save()

    @classmethod
    def get_object(self, url):
        try:
            obj_info = ObjectUrl.objects.get(url=url)
            content_type = obj_info.content_type
            model = apps.get_model(content_type.app_label, content_type.model)
            return model.objects.get(pk=obj_info.object_id)
        except:
            return None


# A Model to define Google Analytic
class Account(models.Model):
    account_name = models.CharField(max_length=255)
    # Service_account and Private key should be stored encrypted
    # Encryption code is defined on the admin save method.
    service_account = models.TextField()
    private_key = models.TextField()

    def __unicode__(self):
        return self.account_name
Python
0
@@ -2572,16 +2572,82 @@ views%22%0A%0A + @staticmethod%0A def get_results_count():%0A return 50%0A%0A @sta
04a3544a6bc9d9d02c4f7d5f1ba2168a45807f83
Version bump
pushjournal/__version__.py
pushjournal/__version__.py
__version__ = "0.1.1"
Python
0.000001
@@ -14,9 +14,9 @@ %220. -1.1 +2.0 %22%0A
9382b15993bd8a77ab35cbec8e1f8d2304513c5d
remove attrname doc
pyardrone/navdata/types.py
pyardrone/navdata/types.py
import collections
import functools
import io
import itertools
import struct
import types

from pyardrone.utils.dochelper import DocFile


def _grouper(iterable, n, fillvalue=None):
    "Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx"
    args = [iter(iterable)] * n
    return itertools.zip_longest(*args, fillvalue=fillvalue)


class Type:

    def get_size(self):
        return struct.calcsize(self.code)

    def __getitem__(self, item):
        return ArrayType(self, item)

    def __repr__(self):
        return '<{self.__class__.__name__} {self.name!r}>'.format(self=self)


class ValueType(Type):

    def __init__(self, code, name):
        self.code = code
        self.name = name

    def unpack(self, buffer):
        return struct.unpack(self.code, buffer)[0]

    def unpack_file(self, file):
        return self.unpack(file.read(struct.calcsize(self.code)))


bool_t = ValueType('?', 'bool_t')
char = ValueType('c', 'char')
int8_t = ValueType('b', 'int8_t')
int16_t = ValueType('h', 'int16_t')
int32_t = ValueType('i', 'int32_t')
int64_t = ValueType('q', 'int64_t')
uint8_t = ValueType('B', 'uint8_t')
uint16_t = ValueType('H', 'uint16_t')
uint32_t = ValueType('I', 'uint32_t')
uint64_t = ValueType('Q', 'uint64_t')
float32_t = ValueType('f', 'float32_t')
float64_t = ValueType('d', 'float64_t')


class Embed:

    def __init__(self, option):
        self.option = option


class ContainerType(Type):

    def unpack_file(self, file):
        return self.unpack(file.read(struct.calcsize(self.code)))


class MatrixType(ContainerType):

    def __init__(self, a, b):
        self.a = a
        self.b = b
        self.count = a * b
        self.code = 'f' * self.count

    @property
    def name(self):
        return 'matrix{self.a}{self.b}_t'.format(self=self)

    def unpack(self, buffer):
        return tuple(_grouper(struct.unpack(self.code, buffer), self.a))


class VectorType(ContainerType):

    def __init__(self, name, vargs, type=float32_t):
        count = len(vargs)
        self.code = type.code * count
        self.name = name
        self.container = collections.namedtuple(
            name,
            vargs
        )

    def unpack(self, buffer):
        return self.container(*struct.unpack(self.code, buffer))


class ArrayType(ContainerType):

    def __init__(self, value_type, count):
        self.value_type = value_type
        self.count = count
        self.code = self.value_type.code * count

    @property
    def name(self):
        return '{}[{}]'.format(self.value_type.name, self.count)

    def unpack(self, buffer):
        return struct.unpack(self.code, buffer)


class OptionNamespace(dict):

    def __init__(self):
        self['_fields'] = []

    def __setitem__(self, key, value):
        if isinstance(value, Type):
            self['_fields'].append((key, value))
        elif isinstance(value, Embed):
            self['_fields'].extend(value.option._fields)
        super().__setitem__(key, value)


class OptionType(type, Type):

    @classmethod
    def __prepare__(cls, name, bases):
        return OptionNamespace()

    def __new__(cls, name, bases, namespace):
        cls.update_doc(namespace)
        return type.__new__(cls, name, bases, dict(namespace))

    def unpack(self, buffer):
        return self.unpack_file(io.BytesIO(buffer))

    def unpack_file(self, file):
        obj = super().__call__()
        for name, type_ in self._fields:
            setattr(obj, name, type_.unpack_file(file))
        return obj

    @staticmethod
    def update_doc(namespace):
        # TODO: This should be done by a sphinx extension instead of a
        # metaclass hack
        if '__doc__' not in namespace or '_attrname' not in namespace:
            return
        df = DocFile(namespace['__doc__'])
        df.write('\n')
        df.writeline(
            "available via :py:class:`~pyardrone.navdata.NavData`'s "
            "attribute: ``{}``".format(
                namespace['_attrname']
            )
        )
        namespace['__doc__'] = df.getvalue()

    @property
    def code(self):
        return ''.join(field[1].code for field in self._fields)

    __call__ = unpack


class Option(types.SimpleNamespace, metaclass=OptionType):
    '''
    Base class of all NavData options.

    Corresponds to C struct ``navdata_option_t``.

    .. py:data:: attrname

        The attribute name the get this option from
        :py:class:`~pyardrone.navdata.NavData`
    '''


class OptionHeader(Option):
    tag = uint16_t
    size = uint16_t


class OptionIndex(dict):

    def register(self, tag):
        return functools.partial(self._register, tag)

    def _register(self, tag, function):
        if tag in self:
            raise KeyError('Key {!r} conflict with existing item {}'.format(
                tag, self[tag]))
        self[tag] = function
        return function
Python
0.000001
@@ -4382,135 +4382,8 @@ t%60%60. -%0A%0A .. py:data:: attrname%0A%0A The attribute name the get this option from%0A :py:class:%60~pyardrone.navdata.NavData%60 %0A
20260402fcdab75ddc98c6ad91c412e8a5fe8d01
add test for empty functions
pychecker2/utest/unused.py
pychecker2/utest/unused.py
from pychecker2.TestSupport import WarningTester
from pychecker2 import VariableChecks

class UnusedTestCase(WarningTester):
    def testUnusedBasic(self):
        self.warning('def f(i, j): return i * 2\n', 1,
                     VariableChecks.UnusedCheck.unused, 'j')
        self.warning('def _unused(): pass\n', 1,
                     VariableChecks.UnusedCheck.unused, '_unused')

    def testUnusedAbstract(self):
        self.silent('def f(i): assert 0\n')
        self.silent('def f(i): assert None\n')
        self.silent('def f(i): return\n')
        self.silent('def f(i): return 7\n')
        self.silent('def f(i): pass\n')
        self.silent('def f(i): raise NotImplementedError\n')

    def testUnusedScopeNotSelf(self):
        self.silent('class A:\n'
                    ' def f(self, j): return j * 2\n')
        self.argv = ['--reportUnusedSelf']
        self.warning('class A:\n'
                     ' def f(self, j): return j * 2\n',
                     2, VariableChecks.UnusedCheck.unused, 'self')

    def testUnusedScope(self):
        self.warning('class A:\n'
                     ' def f(self, j): return self\n',
                     2, VariableChecks.UnusedCheck.unused, 'j')
        self.silent('def f(a, b):\n'
                    ' def g(x):\n'
                    '  return x * a\n'
                    ' return g(b)\n')

    def testUnusedIgnore(self):
        self.warning('def f(a, xyzzySilly): return a', 1,
                     VariableChecks.UnusedCheck.unused, 'xyzzySilly')
        self.argv = ['--unusedPrefixes=["xyzzy"]']
        self.silent('def f(a, xyzzySilly): return a')

    def testGlobal(self):
        self.silent('x = 1\ndef f(x=x): return 7\n')
        self.silent('def f(x):\n'
                    ' global _y\n'
                    ' _y = _y + x\n')

    def testUnpack(self):
        self.silent('_x, _y = 1, 2\n')
        self.silent('def f(a, (b, c)): print a, b\n')
        self.argv = ['--no-unpackedUsed']
        self.warning('_x, _y = 1, 2\n'
                     'print _x\n',
                     1, VariableChecks.UnusedCheck.unused, '_y')
        self.warning('def f(a, (b, c)): print a, b\n',
                     1, VariableChecks.UnusedCheck.unused, 'c')
Python
0.000003
@@ -2267,16 +2267,106 @@ ed, 'c') +%0A self.silent('def f(a):%5Cn'%0A ' %22this is an empty function%22%5Cn') %0A%0A
bfaa56817fddbe698d2fe29268185ec6dff5dbe4
Remove two more items from drop-down list
pycon/sponsorship/forms.py
pycon/sponsorship/forms.py
from django import forms
from django.contrib.admin.widgets import AdminFileWidget
from django.forms.models import inlineformset_factory, BaseInlineFormSet
from django.utils.translation import ugettext_lazy as _

from multi_email_field.forms import MultiEmailField

from pycon.sponsorship.models import Sponsor, SponsorBenefit, SponsorLevel


class SponsorDetailsForm(forms.ModelForm):
    contact_emails = MultiEmailField(
        help_text=_(u"Please enter one email address per line.")
    )

    class Meta:
        model = Sponsor
        fields = ["name",
                  "contact_name",
                  "contact_emails",
                  "contact_phone",
                  "contact_address",
                  "external_url",
                  "display_url",
                  "web_description",
                  "web_logo",
                  ]
        widgets = {
            'web_description': forms.widgets.Textarea(attrs={'cols': 40, 'rows': 5}),
        }


class SponsorApplicationForm(SponsorDetailsForm):

    class Meta(SponsorDetailsForm.Meta):
        fields = SponsorDetailsForm.Meta.fields + [
            "level",
            "wants_table",
            "wants_booth",
        ]

    def __init__(self, *args, **kwargs):
        self.user = kwargs.pop("user")
        kwargs.update({
            "initial": {
                "contact_name": self.user.get_full_name(),
                "contact_emails": [self.user.email],
            }
        })
        super(SponsorApplicationForm, self).__init__(*args, **kwargs)
        # TODO: there should be a way to turn off each level as it
        # fills, instead of having to edit code.  Plus, this should
        # really be a radio button, where the unavailable ones stay
        # visible but grayed out with "Full" or "Out" next to them.
        self.fields['level'].queryset = SponsorLevel.objects.exclude(
            name='Open Source and Community').exclude(
            name='Silver').exclude(
            name='Gold').exclude(
            name='Diamond')

    def save(self, commit=True):
        obj = super(SponsorApplicationForm, self).save(commit=False)
        obj.applicant = self.user
        if commit:
            obj.save()
        return obj


class SponsorBenefitsInlineFormSet(BaseInlineFormSet):

    def _construct_form(self, i, **kwargs):
        form = super(SponsorBenefitsInlineFormSet, self)._construct_form(i, **kwargs)

        # only include the relevant data fields for this benefit type
        fields = form.instance.data_fields()
        form.fields = dict((k, v) for (k, v) in form.fields.items() if k in fields + ["id"])

        for field in fields:
            # don't need a label, the form template will label it with the benefit name
            form.fields[field].label = ""

            # provide word limit as help_text
            if form.instance.benefit.type in ["text", "richtext"] and form.instance.max_words:
                form.fields[field].help_text = u"maximum %s words" % form.instance.max_words

            # use admin file widget that shows currently uploaded file
            if field == "upload":
                form.fields[field].widget = AdminFileWidget()

        return form


SponsorBenefitsFormSet = inlineformset_factory(
    Sponsor, SponsorBenefit,
    formset=SponsorBenefitsInlineFormSet,
    can_delete=False, extra=0,
    fields=["text", "upload"]
)


class SponsorEmailForm(forms.Form):
    from_ = forms.EmailField(widget=forms.TextInput(attrs={'class': 'fullwidth-input'}))
    cc = forms.CharField(help_text=_(u"(comma-separated addresses)"),
                         required=False,
                         widget=forms.TextInput(attrs={'class': 'fullwidth-input'}))
    bcc = forms.CharField(help_text=_(u"(comma-separated addresses)"),
                          required=False,
                          widget=forms.TextInput(attrs={'class': 'fullwidth-input'}))
    subject = forms.CharField(widget=forms.TextInput(attrs={'class': 'fullwidth-input'}))
    body = forms.CharField(widget=forms.Textarea(attrs={'class': 'fullwidth-textarea'}))
Python
0.000003
@@ -1843,16 +1843,30 @@ eryset = + (%0A Sponsor @@ -1878,25 +1878,16 @@ .objects -.exclude( %0A @@ -1887,24 +1887,33 @@ +.exclude( name='Open S @@ -1933,25 +1933,52 @@ munity') -.exclude( +%0A .exclude(name='Patron') %0A @@ -1978,24 +1978,33 @@ +.exclude( name='Silver @@ -2005,25 +2005,16 @@ Silver') -.exclude( %0A @@ -2014,24 +2014,33 @@ +.exclude( name='Gold') @@ -2039,25 +2039,54 @@ ='Gold') -.exclude( +%0A .exclude(name='Platinum') %0A @@ -2090,16 +2090,25 @@ +.exclude( name='Di @@ -2114,16 +2114,30 @@ iamond') +%0A ) %0A%0A de
f3be5abebca4cdb83cc23cf765c94ae25adfa8e6
Fix a typo
pytablereader/ltsv/core.py
pytablereader/ltsv/core.py
# encoding: utf-8

"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""

from __future__ import absolute_import, unicode_literals

import io

import pathvalidate as pv
import typepy

from pytablereader import DataError, InvalidHeaderNameError

from .._common import get_file_encoding
from .._constant import TableNameTemplate as tnt
from .._logger import FileSourceLogger, TextSourceLogger
from .._validator import FileValidator, TextValidator
from ..interface import TableLoader
from ..json.formatter import SingleJsonTableConverterA


class LtsvTableLoader(TableLoader):
    """
    Abstract class of
    `Labeled Tab-separated Values (LTSV) <http://ltsv.org/>`__
    format table loaders.

    .. py:attribute:: encoding

        Encoding of the LTSV data.
    """

    @property
    def format_name(self):
        return "ltsv"

    def __init__(self, source, quoting_flags, type_hints):
        super(LtsvTableLoader, self).__init__(source, quoting_flags, type_hints)

        self._ltsv_input_stream = None

    def _to_data_matrix(self):
        from collections import OrderedDict

        data_matrix = []

        for row_idx, row in enumerate(self._ltsv_input_stream):
            if typepy.is_empty_sequence(row):
                continue

            ltsv_record = OrderedDict()
            for col_idx, ltsv_item in enumerate(row.strip().split("\t")):
                try:
                    label, value = ltsv_item.split(":")
                except ValueError:
                    raise DataError(
                        "invalid lstv item found: line={}, col={}, item='{}'".format(
                            row_idx, col_idx, ltsv_item
                        )
                    )

                label = label.strip('"')

                try:
                    pv.validate_ltsv_label(label)
                except (pv.NullNameError, pv.InvalidCharError):
                    raise InvalidHeaderNameError(
                        "invalid label found (acceptable chars are [0-9A-Za-z_.-]): "
                        "line={}, col={}, label='{}'".format(row_idx, col_idx, label)
                    )

                ltsv_record[label] = value

            data_matrix.append(ltsv_record)

        # using generator to prepare for future enhancement to support
        # iterative load.
        yield data_matrix


class LtsvTableFileLoader(LtsvTableLoader):
    """
    `Labeled Tab-separated Values (LTSV) <http://ltsv.org/>`__
    format file loader class.

    :param str file_path: Path to the loading LTSV file.

    .. py:attribute:: table_name

        Table name string. Defaults to ``%(filename)s``.
    """

    def __init__(self, file_path, quoting_flags=None, type_hints=None):
        super(LtsvTableFileLoader, self).__init__(file_path, quoting_flags, type_hints)

        self.encoding = None

        self._validator = FileValidator(file_path)
        self._logger = FileSourceLogger(self)

        self.__file = None

    def load(self):
        """
        Extract tabular data as |TableData| instances from a LTSV file.
        |load_source_desc_file|

        :return:
            Loaded table data.
            |load_table_name_desc|

            ===================  ========================================
            Format specifier     Value after the replacement
            ===================  ========================================
            ``%(filename)s``     |filename_desc|
            ``%(format_name)s``  ``"ltsv"``
            ``%(format_id)s``    |format_id_desc|
            ``%(global_id)s``    |global_id|
            ===================  ========================================
        :rtype: |TableData| iterator
        :raises pytablereader.InvalidHeaderNameError:
            If an invalid label name is included in the LTSV file.
        :raises pytablereader.DataError:
            If the LTSV data is invalid.
        """

        self._validate()
        self._logger.logging_load()

        self.encoding = get_file_encoding(self.source, self.encoding)

        self._ltsv_input_stream = io.open(self.source, "r", encoding=self.encoding)

        for data_matrix in self._to_data_matrix():
            formatter = SingleJsonTableConverterA(data_matrix)
            formatter.accept(self)

            return formatter.to_table_data()

    def _get_default_table_name_template(self):
        return tnt.FILENAME


class LtsvTableTextLoader(LtsvTableLoader):
    """
    `Labeled Tab-separated Values (LTSV) <http://ltsv.org/>`__
    format text loader class.

    :param str text: LTSV text to load.

    .. py:attribute:: table_name

        Table name string. Defaults to ``%(format_name)s%(format_id)s``.
    """

    def __init__(self, text, quoting_flags=None, type_hints=None):
        super(LtsvTableTextLoader, self).__init__(text, quoting_flags, type_hints)

        self._validator = TextValidator(text)
        self._logger = TextSourceLogger(self)

    def load(self):
        """
        Extract tabular data as |TableData| instances from a LTSV text object.
        |load_source_desc_text|

        :return:
            Loaded table data.
            |load_table_name_desc|

            ===================  ========================================
            Format specifier     Value after the replacement
            ===================  ========================================
            ``%(filename)s``     ``""``
            ``%(format_name)s``  ``"ltsv"``
            ``%(format_id)s``    |format_id_desc|
            ``%(global_id)s``    |global_id|
            ===================  ========================================
        :rtype: |TableData| iterator
        :raises pytablereader.InvalidHeaderNameError:
            If an invalid label name is included in the LTSV file.
        :raises pytablereader.DataError:
            If the LTSV data is invalid.
        """

        self._validate()
        self._logger.logging_load()

        self._ltsv_input_stream = self.source.splitlines()

        for data_matrix in self._to_data_matrix():
            formatter = SingleJsonTableConverterA(data_matrix)
            formatter.accept(self)

            return formatter.to_table_data()

    def _get_default_table_name_template(self):
        return "{:s}{:s}".format(tnt.FORMAT_NAME, tnt.FORMAT_ID)
Python
1
@@ -1560,18 +1560,18 @@ nvalid l -s t +s v item f
57431b251ec17a13e42cd29640da0fbe2e949cf2
Update version to 0.0.1
qdarkgraystyle/__init__.py
qdarkgraystyle/__init__.py
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) <2013-2014> <Colin Duquesnoy>
# Copyright (c) <2017> <Michell Stuttgart>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""
Initialise the QDarkGrayStyleSheet module when used with python.

This modules provides a function to transparently load the stylesheets
with the correct rc file.
"""
import logging
import platform

__version__ = '1.0.0'


def _logger():
    return logging.getLogger('qdarkgraystyle')


def load_stylesheet(pyside=True):
    """
    Loads the stylesheet. Takes care of importing the rc module.

    :param pyside: True to load the pyside rc file, False to load
                   the PyQt rc file

    :return the stylesheet string
    """
    # Smart import of the rc file
    if pyside:
        import qdarkgraystyle.pyside_style_rc
    else:
        import qdarkgraystyle.pyqt_style_rc

    # Load the stylesheet content from resources
    if not pyside:
        from PyQt4.QtCore import QFile, QTextStream
    else:
        from PySide.QtCore import QFile, QTextStream

    f = QFile(':qdarkgraystyle/style.qss')
    if not f.exists():
        _logger().error('Unable to load stylesheet, file not found in '
                        'resources')
        return ''
    else:
        f.open(QFile.ReadOnly | QFile.Text)
        ts = QTextStream(f)
        stylesheet = ts.readAll()
        if platform.system().lower() == 'darwin':  # see issue #12 on github
            mac_fix = '''
            QDockWidget::title
            {
                background-color: #31363b;
                text-align: center;
                height: 12px;
            }
            '''
            stylesheet += mac_fix
        return stylesheet


def load_stylesheet_pyqt5():
    """
    Loads the stylesheet for use in a pyqt5 application.

    :return the stylesheet string
    """
    # Smart import of the rc file
    import qdarkgraystyle.pyqt5_style_rc

    # Load the stylesheet content from resources
    from PyQt5.QtCore import QFile, QTextStream

    f = QFile(':qdarkgraystyle/style.qss')
    if not f.exists():
        _logger().error('Unable to load stylesheet, file not found in '
                        'resources')
        return ''
    else:
        f.open(QFile.ReadOnly | QFile.Text)
        ts = QTextStream(f)
        stylesheet = ts.readAll()
        if platform.system().lower() == 'darwin':  # see issue #12 on github
            mac_fix = '''
            QDockWidget::title
            {
                background-color: #31363b;
                text-align: center;
                height: 12px;
            }
            '''
            stylesheet += mac_fix
        return stylesheet
Python
0
@@ -1416,13 +1416,13 @@ = ' -1.0.0 +0.0.1 '%0A%0A%0A
d4ff0f80f065b6f3efa79a5cf17bc4e81a6bb6f2
Add TODO comment.
qipipe/staging/__init__.py
qipipe/staging/__init__.py
""" Image processing preparation. The staging package defines the functions used to prepare the study image files for import into XNAT, submission to the TCIA QIN collections and pipeline processing. """ # The ohsu module creates the OHSU QIN collections. # TODO - this should be a config item. from . import ohsu
Python
0
@@ -200,16 +200,23 @@ .%0A%22%22%22%0A%0A# + OHSU - The ohs
db608f092192aa9552e5e75096ab4cb11323a8df
use slice
chainercv/datasets/cub/cub_keypoint_dataset.py
chainercv/datasets/cub/cub_keypoint_dataset.py
import collections
import numpy as np
import os

from chainercv.datasets.cub.cub_utils import CUBDatasetBase
from chainercv import utils


class CUBKeypointDataset(CUBDatasetBase):

    """`Caltech-UCSD Birds-200-2011`_ dataset with annotated keypoints.

    .. _`Caltech-UCSD Birds-200-2011`:
        http://www.vision.caltech.edu/visipedia/CUB-200-2011.html

    An index corresponds to each image.

    When queried by an index, this dataset returns the corresponding
    :obj:`img, keypoint, kp_mask`, a tuple of an image, keypoints
    and a keypoint mask that indicates visible keypoints in the image.
    The data type of the three elements are :obj:`float32, float32, bool`.
    If :obj:`return_mask = True`, :obj:`mask` will be returned as well,
    making the returned tuple to be of length four. :obj:`mask` is a
    :obj:`uint8` image which indicates the region of the image
    where a bird locates.

    keypoints are packed into a two dimensional array of shape
    :math:`(K, 2)`, where :math:`K` is the number of keypoints.
    Note that :math:`K=15` in CUB dataset. Also note that not all fifteen
    keypoints are visible in an image. When a keypoint is not visible,
    the values stored for that keypoint are undefined. The second axis
    corresponds to the :math:`y` and :math:`x` coordinates of the
    keypoints in the image.

    A keypoint mask array indicates whether a keypoint is visible in the
    image or not. This is a boolean array of shape :math:`(K,)`.

    A mask image of the bird shows how likely the bird is located at a
    given pixel. If the value is close to 255, more likely that a bird
    locates at that pixel. The shape of this array is :math:`(1, H, W)`,
    where :math:`H` and :math:`W` are height and width of the image
    respectively.

    Args:
        data_dir (string): Path to the root of the training data. If this is
            :obj:`auto`, this class will automatically download data for you
            under :obj:`$CHAINER_DATASET_ROOT/pfnet/chainercv/cub`.
        crop_bbox (bool): If true, this class returns an image cropped
            by the bounding box of the bird inside it.
        mask_dir (string): Path to the root of the mask data. If this is
            :obj:`auto`, this class will automatically download data
            for you under :obj:`$CHAINER_DATASET_ROOT/pfnet/chainercv/cub`.
        return_mask (bool): Decide whether to include mask image of the bird
            in a tuple served for a query.
    """

    def __init__(self, data_dir='auto', crop_bbox=True,
                 mask_dir='auto', return_mask=False):
        super(CUBKeypointDataset, self).__init__(
            data_dir=data_dir, crop_bbox=crop_bbox)
        self.return_mask = return_mask

        # load keypoint
        parts_loc_file = os.path.join(self.data_dir, 'parts', 'part_locs.txt')
        self.kp_dict = collections.OrderedDict()
        self.kp_mask_dict = collections.OrderedDict()
        for loc in open(parts_loc_file):
            values = loc.split()
            id_ = int(values[0]) - 1

            if id_ not in self.kp_dict:
                self.kp_dict[id_] = []
            if id_ not in self.kp_mask_dict:
                self.kp_mask_dict[id_] = []

            # (y, x) order
            keypoint = [float(v) for v in values[3:1:-1]]
            kp_mask = bool(int(values[4]))

            self.kp_dict[id_].append(keypoint)
            self.kp_mask_dict[id_].append(kp_mask)

    def get_example(self, i):
        # this i is transformed to id for the entire dataset
        img = utils.read_image(
            os.path.join(self.data_dir, 'images', self.filenames[i]),
            color=True)
        keypoint = np.array(self.kp_dict[i], dtype=np.float32)
        kp_mask = np.array(self.kp_mask_dict[i], dtype=np.bool)

        if self.crop_bbox:
            # (y_min, x_min, y_max, x_max)
            bbox = self.bboxes[i].astype(np.int32)
            img = img[:, bbox[0]: bbox[2], bbox[1]: bbox[3]]
            keypoint[:, :2] = keypoint[:, :2] - np.array([bbox[0], bbox[1]])

        if not self.return_mask:
            return img, keypoint, kp_mask

        mask = utils.read_image(
            os.path.join(self.mask_dir, self.filenames[i][:-4] + '.png'),
            dtype=np.uint8,
            color=False)

        if self.crop_bbox:
            mask = mask[:, bbox[0]: bbox[2], bbox[1]: bbox[3]]

        return img, keypoint, kp_mask, mask
Python
0.000033
@@ -4039,36 +4039,16 @@ %5D - -np.array(%5Bbbox%5B0%5D, bbox%5B1%5D%5D) +bbox%5B:2%5D %0A%0A
082b1f542501b6f3ae496fb3fe50c9e50d770b2f
Update godeps to github
git_gate.py
git_gate.py
#!/usr/bin/python
"""Merge gating script for git go projects."""

from __future__ import print_function

import argparse
import os
import subprocess
import sys

from utility import (
    print_now,
    temp_dir,
)


class SubcommandError(Exception):

    def __init__(self, command, subcommand, error):
        self.command = command
        self.subcommand = subcommand
        self.error = error

    def __str__(self):
        return "Subprocess {} {} failed with code {}".format(
            self.command, self.subcommand, self.error.returncode)


class SubcommandRunner(object):

    def __init__(self, command, environ=None):
        self.command = command
        self.subprocess_kwargs = {}
        if environ is not None:
            self.subprocess_kwargs["env"] = environ

    def __call__(self, subcommand, *args):
        cmdline = [self.command, subcommand]
        cmdline.extend(args)
        try:
            subprocess.check_call(cmdline, **self.subprocess_kwargs)
        except subprocess.CalledProcessError as e:
            raise SubcommandError(self.command, subcommand, e)


def go_test(args, gopath):
    """Download, build and test a go package."""
    goenv = dict(os.environ)
    goenv["GOPATH"] = gopath
    go = SubcommandRunner("go", goenv)
    git = SubcommandRunner("git")
    final_project = args.project
    if args.feature_branch:
        final_project = from_feature_dir(args.project)
    project_ellipsis = final_project + "/..."
    directory = os.path.join(gopath, "src", final_project)
    if args.tsv_path:
        print_now("Getting and installing godeps")
        go("get", "-v", "-d", "launchpad.net/godeps/...")
        go("install", "launchpad.net/godeps/...")
    if args.project_url:
        print_now("Cloning {} from {}".format(final_project, args.project_url))
        git("clone", args.project_url, directory)
    if args.go_get_all and not (args.project_url and args.merge_url):
        print_now("Getting {} and dependencies using go".format(args.project))
        go("get", "-v", "-d", "-t", project_ellipsis)
    os.chdir(directory)
    if args.project_ref:
        print_now("Switching repository to {}".format(args.project_ref))
        git("checkout", args.project_ref)
    if args.merge_url:
        print_now("Merging {} ref {}".format(args.merge_url, args.merge_ref))
        git("fetch", args.merge_url, args.merge_ref)
        git("merge", "--no-ff", "-m", "Merged " + args.merge_ref, "FETCH_HEAD")
        if args.go_get_all:
            print_now("Updating {} dependencies using go".format(args.project))
            go("get", "-v", "-d", "-t", project_ellipsis)
    if args.dependencies:
        for dep in args.dependencies:
            print_now("Getting {} and dependencies using go".format(dep))
            go("get", "-v", "-d", dep)
    if args.tsv_path:
        tsv_path = os.path.join(gopath, "src", final_project, args.tsv_path)
        print_now("Getting dependencies using godeps from {}".format(tsv_path))
        godeps = SubcommandRunner(os.path.join(gopath, "bin", "godeps"), goenv)
        godeps("-u", tsv_path)
    go("build", project_ellipsis)
    go("test", project_ellipsis)


def from_feature_dir(directory):
    """
    For feature branches on repos that are versioned with gopkg.in, we need
    to do some special handling, since the test code expects the branch name
    to be appended to the reponame with a ".". However, for a feature branch,
    the branchname is different than the base gopkg.in branch. To account for
    this, we use the convention of base_branch_name.featurename, and thus this
    code can know that it needs to strip out the featurename when locating the
    code on disk.

    Thus, the feature branch off of gopkg.in/juju/charm.v6 would be a branch
    named charm.v6.myfeature, which should end up in
    $GOPATH/src/gokpg.in/juju/charm.v6
    """
    name = os.path.basename(directory)
    parts = name.split(".")
    if len(parts) == 3:
        return directory[:-len(parts[2]) - 1]
    return directory


def parse_args(args=None):
    """Parse arguments for gating script."""
    parser = argparse.ArgumentParser()
    project_group = parser.add_argument_group()
    project_group.add_argument(
        "--keep", action="store_true",
        help="Do not remove working dir after testing.")
    project_group.add_argument(
        "--project", required=True, help="Go import path of package to test.")
    project_group.add_argument(
        "--project-url", help="URL to git repository of package.")
    project_group.add_argument(
        "--project-ref", help="Branch name or tag to use as basis.")
    project_group.add_argument(
        "--feature-branch", action="store_true",
        help="Use special handling for pending feature branches.")
    merge_group = parser.add_argument_group()
    merge_group.add_argument(
        "--merge-url", help="URL to git repository to merge before testing.")
    merge_group.add_argument(
        "--merge-ref", default="HEAD",
        help="Branch name or tag to merge before testing.")
    dep_group = parser.add_mutually_exclusive_group()
    dep_group.add_argument(
        "--dependencies", nargs="+",
        help="Any number of package import paths needed for build or testing.")
    dep_group.add_argument(
        "--go-get-all", action="store_true",
        help="Go import path of package needed to for build or testing.")
    dep_group.add_argument(
        "--tsv-path",
        help="Path to dependencies.tsv file relative to project dir.")
    args = parser.parse_args(args)
    if args.project_url is None and not args.go_get_all:
        parser.error("Must supply either --project-url or --go-get-all")
    if args.feature_branch and args.go_get_all:
        parser.error("Cannot use --feature-branch and --go-get-all together")
    return args


def main():
    args = parse_args()
    with temp_dir(keep=args.keep) as d:
        try:
            go_test(args, d)
        except SubcommandError as err:
            print(err, file=sys.stderr)
            return 1
    return 0


if __name__ == '__main__':
    sys.exit(main())
Python
0
@@ -1623,37 +1623,43 @@ -v%22, %22-d%22, %22 -launchpad.net +github.com/rogpeppe /godeps/...%22 @@ -1687,21 +1687,27 @@ %22, %22 -launchpad.net +github.com/rogpeppe /god
0d0041678b598e623b3479942c3dd4fc23c5ab23
Upgrade Pip
perfkitbenchmarker/linux_packages/pip.py
perfkitbenchmarker/linux_packages/pip.py
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Module containing pip installation and cleanup functions.

Uninstalling the pip package will also remove all python packages
added after installation.
"""

from perfkitbenchmarker import vm_util


def _Install(vm):
  """Install pip on the VM."""
  vm.InstallPackages('python-pip')
  vm.RemoteCommand('mkdir -p {0} && pip freeze > {0}/requirements.txt'.format(
      vm_util.VM_TMP_DIR))


def YumInstall(vm):
  """Installs the pip package on the VM."""
  vm.InstallEpelRepo()
  _Install(vm)


def AptInstall(vm):
  """Installs the pip package on the VM."""
  _Install(vm)


def _Uninstall(vm):
  """Uninstalls the pip package on the VM."""
  vm.RemoteCommand('pip freeze | grep --fixed-strings --line-regexp '
                   '--invert-match --file {0}/requirements.txt | '
                   'xargs --no-run-if-empty sudo pip uninstall -y'.format(
                       vm_util.VM_TMP_DIR))


def YumUninstall(vm):
  """Uninstalls the pip package on the VM."""
  _Uninstall(vm)


def AptUninstall(vm):
  """Uninstalls the pip package on the VM."""
  _Uninstall(vm)
Python
0
@@ -889,16 +889,86 @@ n-pip')%0A + vm.RemoteCommand('sudo pip install -U pip') # Make pip upgrade pip%0A vm.Rem
43ce3068d8ca97aef018caf2b53eea018e7e447e
Fix typo
nupic/research/frameworks/pytorch/lr_scheduler.py
nupic/research/frameworks/pytorch/lr_scheduler.py
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
#
import copy
from bisect import bisect

from torch.optim.lr_scheduler import OneCycleLR, _LRScheduler


class ScaledLR(_LRScheduler):
    """
    Multiply the learning rate of each parameter group by a specific factor
    assigned to the epoch. This LR scheduler could be chained together with
    other schedulers. This is useful when scaling the LR to the batch size.

    .. seealso:: See https://arxiv.org/pdf/1706.02677.pdf

    :param optimizer: Wrapped optimizer
    :param lr_scale: dict mapping initial epoch to LR scale
    :param last_epoch: The index of last epoch. Default: -1.
    """

    def __init__(self, optimizer, lr_scale, last_epoch=-1):
        self.lr_scale = lr_scale
        self.epochs = sorted(self.lr_scale.keys())
        super().__init__(optimizer=optimizer, last_epoch=last_epoch)

    def get_lr(self):
        scale = self.lr_scale[self.epochs[bisect(self.epochs, self.last_epoch) - 1]]
        return map(lambda group: group["lr"] * scale, self.optimizer.param_groups)


class ComposedLRScheduler(_LRScheduler):
    """
    Learning scheduler composed of different LR schedulers and optimizer
    parameters to be effective once the number of epochs reaches the specified
    epoch milestone. Similar to :class:`torch.optim.lr_scheduler.MultiStepLR`
    but instead of just updating the LR at the epoch milestone it replaces the
    LR Scheduler and update other optimizer parameters.

    For example::

        # Use "OneCycleLR" for the first 35 epochs and "StepLR" for the rest
        lr_scheduler = ComposedLRScheduler(schedulers={
            0: dict(
                lr_scheduler_class=torch.optim.lr_scheduler.OneCycleLR,
                lr_scheduler_args=dict(
                    max_lr=6.0,
                    div_factor=6,  # initial_lr = 1.0
                    final_div_factor=4000,  # min_lr = 0.00025
                    pct_start=4.0 / 35.0,
                    epochs=35,
                    steps_per_epoch=len(train_loader),
                    anneal_strategy="linear",
                    max_momentum=0.01,
                    cycle_momentum=False,
                ),
                optimizer_args=dict(
                    lr=0.1,
                    weight_decay=0.0001,
                    momentum=0.0,
                    nesterov=False,
                ),
            ),
            35: dict(
                lr_scheduler_class=torch.optim.lr_scheduler.StepLR,
                lr_scheduler_args=dict(
                    gamma=0.1,
                    step_size=10,
                ),
                optimizer_args=dict(
                    lr=0.1,
                    weight_decay=1e-04,
                    momentum=0.9,
                    dampening=0,
                    nesterov=True
                ),
            ),
        })

    :param optimizer: Wrapped optimizer
    :type optimizer: torch.optim.optimizer.Optimizer
    :param schedulers: dict mapping epoch milestones to LRScheduler and
        Optimizer parameters with the following fields:

        - "optimizer_args": Optimizer arguments to override
        - "lr_scheduler_class": LR Scheduler class
        - "lr_scheduler_args": LR Scheduler class constructor args in
          addition to optimizer

    :type schedulers: dict[int, dict]
    :param steps_per_epoch: Number of batches/steps per epoch. Must be
        specified when the LR is updated on every batch. Default 1
    :type steps_per_epoch: int
    :param last_step: The index of last step. Default: -1.
    :type last_epoch: int
    """

    def __init__(self, optimizer, schedulers, steps_per_epoch=1, last_epoch=-1):
        self.schedulers = schedulers
        self.steps_per_epoch = steps_per_epoch
        self.lr_scheduler = None
        self.active_milestone = None
        self.milestones = sorted(self.schedulers.keys())
        assert len(self.milestones) > 0
        super().__init__(optimizer=optimizer, last_epoch=last_epoch)

    def step(self, epoch=None):
        """
        Step should be called after every batch update is OneCycleLR is one of
        the mapped LR Schedulers. Make sure to specify "steps_per_epoch" when
        """
        # Get milestone for current step
        current_step = epoch
        if current_step is None:
            current_step = self.last_epoch + 1
        current_epoch = current_step // self.steps_per_epoch
        current_batch = current_step % self.steps_per_epoch
        current_milestone = self.milestones[bisect(self.milestones, current_epoch) - 1]

        # Update LR scheduler and optimizer once the milestone changes
        if current_batch == 0 and self.active_milestone != current_milestone:
            self.active_milestone = current_milestone
            self._update_optimizer()
            self._update_lr_scheduler()
        elif isinstance(self.lr_scheduler, OneCycleLR):
            # Step every batch
            self.lr_scheduler.step()
        elif current_batch == 0 and self.lr_scheduler is not None:
            # Step once per epoch
            self.lr_scheduler.step()

        super().step(epoch)

    def get_lr(self):
        return self.lr_scheduler.get_lr()

    def _update_optimizer(self):
        params = self.schedulers[self.active_milestone]

        # Re-initialize optimizer using the default values
        args = copy.deepcopy(self.optimizer.defaults)

        # Override parameters for this milestone
        args.update(params.get("optimizer_args", {}))

        # Update parameters for all parameter groups
        for group in self.optimizer.param_groups:
            group.update(args)

    def _update_lr_scheduler(self):
        params = self.schedulers[self.active_milestone]
        lr_scheduler_class = params.get("lr_scheduler_class", None)
        if lr_scheduler_class is not None:
            lr_scheduler_args = params.get("lr_scheduler_args", None)
            for group in self.optimizer.param_groups:
                # reset initial_lr for new scheduler
                group.pop("initial_lr", None)
            self.lr_scheduler = lr_scheduler_class(self.optimizer,
                                                   **lr_scheduler_args)
Python
0.99232
@@ -4948,17 +4948,17 @@ update i -s +f OneCycl
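The ScaledLR class in the record above multiplies each parameter group's LR by an epoch-keyed factor. A minimal usage sketch, assuming PyTorch is installed and the module path from the record is importable; the model and scale table are made up, and note the active scale is applied on every step, so it compounds across epochs (the class is intended to be chained with another scheduler):

# Sketch only: drives the ScaledLR scheduler from the record above.
# The model, optimizer settings, and lr_scale mapping are hypothetical.
import torch
from torch import nn
from nupic.research.frameworks.pytorch.lr_scheduler import ScaledLR

model = nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# Keys are the epochs at which each scale factor takes effect.
lr_scale = {0: 1.0, 5: 0.5, 10: 0.1}
scheduler = ScaledLR(optimizer, lr_scale)

for epoch in range(15):
    # ... one epoch of training would go here ...
    scheduler.step()
    print(epoch, [group["lr"] for group in optimizer.param_groups])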
d25bfd459bfc03ea7a3a84a26d80b9db8036c168
Add new NAMES_TO_EDITIONS mapping
reporters_db/__init__.py
reporters_db/__init__.py
import datetime
import json
import os
import six

from .utils import suck_out_variations_only
from .utils import suck_out_editions


# noinspection PyBroadException
def datetime_parser(dct):
    for k, v in dct.items():
        if isinstance(v, six.string_types):
            try:
                dct[k] = datetime.datetime.strptime(v, "%Y-%m-%dT%H:%M:%S")
            except:
                pass
    return dct

db_root = os.path.dirname(os.path.realpath(__file__))

with open(os.path.join(db_root, 'data', 'reporters.json')) as f:
    REPORTERS = json.load(f, object_hook=datetime_parser)

with open(os.path.join(db_root, 'data', 'state_abbreviations.json')) as f:
    STATE_ABBREVIATIONS = json.load(f)

with open(os.path.join(db_root, 'data', 'case_name_abbreviations.json')) as f:
    CASE_NAME_ABBREVIATIONS = json.load(f)

VARIATIONS_ONLY = suck_out_variations_only(REPORTERS)
EDITIONS = suck_out_editions(REPORTERS)
Python
0.000002
@@ -74,60 +74,58 @@ out_ -variations_only%0Afrom .utils import suck_out_editions +editions, suck_out_names, suck_out_variations_only %0A%0A%0A# @@ -404,16 +404,17 @@ rn dct%0A%0A +%0A db_root @@ -906,20 +906,66 @@ editions(REPORTERS)%0A +NAMES_TO_EDITIONS = suck_out_names(REPORTERS)%0A
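The diff above adds NAMES_TO_EDITIONS built by a suck_out_names helper whose implementation is not shown in this record. A hedged sketch of what such a name-to-editions mapper might look like; the REPORTERS layout assumed here (top-level key to a list of reporter dicts with "name" and "editions") is an assumption, not taken from the record:

# Hypothetical sketch of a suck_out_names-style helper; the real one
# lives in reporters_db.utils and is not shown in this record.
def build_names_to_editions(reporters):
    """Map each reporter name to the edition keys filed under it."""
    names = {}
    for reporter_list in reporters.values():  # assumed structure
        for reporter in reporter_list:
            name = reporter.get("name")
            if name:
                names.setdefault(name, []).extend(
                    reporter.get("editions", {}).keys())
    return names

sample = {
    "U.S.": [{"name": "United States Reports", "editions": {"U.S.": {}}}],
}
print(build_names_to_editions(sample))
# {'United States Reports': ['U.S.']}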
bffdc451e8dc9df2219158349b60f082ab087a27
add proposal pk to serializer to give votes unique id
meinberlin/apps/budgeting/serializers.py
meinberlin/apps/budgeting/serializers.py
from rest_framework import serializers

from adhocracy4.categories.models import Category

from .models import Proposal


class CategoryField(serializers.Field):

    def to_internal_value(self, category):
        if category:
            return Category.objects.get(pk=category)
        else:
            return None

    def to_representation(self, category):
        return {'id': category.pk, 'name': category.name}


class ProposalSerializer(serializers.ModelSerializer):
    creator = serializers.SerializerMethodField()
    comment_count = serializers.SerializerMethodField()
    positive_rating_count = serializers.SerializerMethodField()
    negative_rating_count = serializers.SerializerMethodField()
    category = CategoryField()
    url = serializers.SerializerMethodField()

    class Meta:
        model = Proposal
        fields = ('budget', 'category', 'comment_count', 'created',
                  'creator', 'is_archived', 'name', 'negative_rating_count',
                  'positive_rating_count', 'url')
        read_only_fields = ('budget', 'category', 'comment_count', 'created',
                            'creator', 'is_archived', 'name',
                            'negative_rating_count', 'positive_rating_count',
                            'url')

    def get_creator(self, proposal):
        return proposal.creator.username

    def get_comment_count(self, proposal):
        if hasattr(proposal, 'comment_count'):
            return proposal.comment_count
        else:
            return 0

    def get_positive_rating_count(self, proposal):
        if hasattr(proposal, 'positive_rating_count'):
            return proposal.positive_rating_count
        else:
            return 0

    def get_negative_rating_count(self, proposal):
        if hasattr(proposal, 'negative_rating_count'):
            return proposal.negative_rating_count
        else:
            return 0

    def get_url(self, proposal):
        return proposal.get_absolute_url()
Python
0
@@ -1017,16 +1017,22 @@ ', 'url' +, 'pk' )%0A @@ -1276,16 +1276,22 @@ 'url' +, 'pk' )%0A%0A d
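The diff above adds 'pk' to both field tuples so each proposal carries a stable identifier the vote UI can key on. A minimal sketch of the same pattern, assuming Django REST Framework; the Thing model is hypothetical and stands in for any model:

# Minimal sketch of the pattern the diff applies: exposing the primary
# key on a read-only DRF ModelSerializer.  `Thing` is a hypothetical
# Django model used only for illustration.
from rest_framework import serializers


class ThingSerializer(serializers.ModelSerializer):
    class Meta:
        model = Thing  # hypothetical model
        fields = ('name', 'pk')            # 'pk' gives clients a stable id
        read_only_fields = ('name', 'pk')  # keep the API read-only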
4715f20655bd0cfac5f2cfad43ae09d600deb4bf
Raise exceptions on unimplemented statements
raco/myrial/interpreter.py
raco/myrial/interpreter.py
#!/usr/bin/python import raco.myrial.parser as parser import raco.algebra import raco.expression as colexpr import raco.catalog import collections import random import sys import types class ExpressionProcessor: '''Convert syntactic expressions into a relational algebra operation''' def __init__(self, symbols, db): self.symbols = symbols self.db = db def evaluate(self, expr): method = getattr(self, expr[0].lower()) return method(*expr[1:]) def alias(self, _id): return self.symbols[_id] def scan(self, relation_key, scheme): if not scheme: scheme = self.db.get_scheme(relation_key) rel = raco.catalog.Relation(relation_key, scheme) return raco.algebra.Scan(rel) def load(self, path, schema): raise NotImplementedError() def table(self, tuple_list, schema): raise NotImplementedError() def bagcomp(self, from_expression, where_clause, emit_clause): # Evaluate the nested expression to get a RA operator op = self.evaluate(from_expression) if where_clause: op = raco.algebra.Select(condition=where_clause, input=op) if emit_clause: op = raco.algebra.Apply(mappings=emit_clause, input=op) return op def distinct(self, expr): op = self.evaluate(expr) return raco.algebra.Distinct(input=op) def __process_bitop(self, _type, id1, id2): left = self.symbols[id1] right = self.symbols[id2] raise NotImplementedError() def unionall(self, id1, id2): left = self.symbols[id1] right = self.symbols[id2] # TODO: Figure out set/bag semantics here return raco.algebra.Union(left, right) def countall(self, _id): op = self.symbols[_id] return raco.algebra.GroupBy(groupinglist=[], aggregatelist=[colexpr.COUNT()], input=op) def intersect(self, id1, id2): raise NotImplementedError() def diff(self, id1, id2): raise NotImplementedError() def limit(self, _id, count): raise NotImplementedError() def cross(self, left_target, right_target): left = self.evaluate(left_target) right = self.evaluate(right_target) return raco.algebra.CrossProduct(left, right) def join(self, left_target, right_target): """Convert parser.JoinTarget arguments into a Join operation""" left = self.evaluate(left_target.expr) right = self.evaluate(right_target.expr) assert len(left_target.columns) == len(right_target.columns) def get_attribute_ref(column_ref, scheme, offset): """Convert a string or int into an attribute ref on the new table""" if type(column_ref) == types.IntType: index = column_ref else: index = scheme.getPosition(column_ref) return raco.expression.UnnamedAttributeRef(index + offset) left_scheme = left.scheme() left_refs = [get_attribute_ref(c, left_scheme, 0) for c in left_target.columns] right_scheme = right.scheme() right_refs = [get_attribute_ref(c, right_scheme, len(left_scheme)) for c in right_target.columns] join_conditions = [colexpr.EQ(x,y) for x,y in zip(left_refs, right_refs)] # Merge the join conditions into a big AND expression def andify(x,y): """Merge two column expressions with an AND""" return colexpr.AND(x,y) condition = reduce(andify, join_conditions[1:], join_conditions[0]) return raco.algebra.Join(condition, left, right) class StatementProcessor: '''Evaluate a list of statements''' def __init__(self, db=None): # Map from identifiers (aliases) to raco.algebra.Operation instances self.symbols = {} # Identifiers that the user has asked us to materialize # (via store, dump, etc.). 
Contains tuples of the form: # (id, raco.algebra.Operation) self.output_symbols = [] self.db = db self.ep = ExpressionProcessor(self.symbols, db) def evaluate(self, statements): '''Evaluate a list of statements''' for statement in statements: # Switch on the first tuple entry method = getattr(self, statement[0].lower()) method(*statement[1:]) def assign(self, _id, expr): '''Assign to a variable by modifying the symbol table''' op = self.ep.evaluate(expr) self.symbols[_id] = op def store(self, _id, relation_key): child_op = self.symbols[_id] op = raco.algebra.Store(relation_key, child_op) self.output_symbols.append((_id, op)) def dump(self, _id): child_op = self.symbols[_id] self.output_symbols.append((_id, child_op)) @property def output_symbols(self): return self.output_symbols def explain(self, _id): pass def describe(self, _id): pass def dowhile(self, statement_list, termination_ex): pass
Python
0.000002
@@ -5092,36 +5092,59 @@ , _id):%0A -pass +raise NotImplementedError() %0A%0A def descri @@ -5166,20 +5166,43 @@ -pass +raise NotImplementedError() %0A%0A de @@ -5262,9 +5262,32 @@ -pass +raise NotImplementedError() %0A
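The diff above replaces silent `pass` stubs with `raise NotImplementedError()`. A standalone illustration of why that matters: a silent stub lets an unfinished feature appear to succeed, while raising surfaces the gap at the call site:

# Standalone illustration only; not part of the raco codebase.
class Interpreter(object):
    def explain(self, _id):
        raise NotImplementedError()   # fails loudly

    def describe(self, _id):
        pass                          # fails silently: returns None

interp = Interpreter()
print(interp.describe("x"))           # None -- easy to miss
try:
    interp.explain("x")
except NotImplementedError:
    print("explain is not implemented yet")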
655134fa5bebdc86985fd40ec770153bf133f362
Handle case where value is an empty string
IPython/nbconvert/exporters/exporter.py
IPython/nbconvert/exporters/exporter.py
"""This module defines a base Exporter class. For Jinja template-based export, see templateexporter.py. """ from __future__ import print_function, absolute_import import io import os import copy import collections import datetime from IPython.config.configurable import LoggingConfigurable from IPython.config import Config from IPython import nbformat from IPython.utils.traitlets import MetaHasTraits, Unicode, List, TraitError from IPython.utils.importstring import import_item from IPython.utils import text, py3compat class ResourcesDict(collections.defaultdict): def __missing__(self, key): return '' class FilenameExtension(Unicode): """A trait for filename extensions.""" default_value = u'' info_text = 'a filename extension, beginning with a dot' def validate(self, obj, value): # cast to proper unicode value = super(FilenameExtension, self).validate(obj, value) # check that it starts with a dot if not value.startswith('.'): msg = "FileExtension trait '{}' does not begin with a dot: {!r}" raise TraitError(msg.format(self.name, value)) return value class Exporter(LoggingConfigurable): """ Class containing methods that sequentially run a list of preprocessors on a NotebookNode object and then return the modified NotebookNode object and accompanying resources dict. """ file_extension = FilenameExtension( '.txt', config=True, help="Extension of the file that should be written to disk" ) # MIME type of the result file, for HTTP response headers. # This is *not* a traitlet, because we want to be able to access it from # the class, not just on instances. output_mimetype = '' #Configurability, allows the user to easily add filters and preprocessors. preprocessors = List(config=True, help="""List of preprocessors, by name or namespace, to enable.""") _preprocessors = List() default_preprocessors = List(['IPython.nbconvert.preprocessors.coalesce_streams', 'IPython.nbconvert.preprocessors.SVG2PDFPreprocessor', 'IPython.nbconvert.preprocessors.ExtractOutputPreprocessor', 'IPython.nbconvert.preprocessors.CSSHTMLHeaderPreprocessor', 'IPython.nbconvert.preprocessors.RevealHelpPreprocessor', 'IPython.nbconvert.preprocessors.LatexPreprocessor', 'IPython.nbconvert.preprocessors.ClearOutputPreprocessor', 'IPython.nbconvert.preprocessors.ExecutePreprocessor', 'IPython.nbconvert.preprocessors.HighlightMagicsPreprocessor'], config=True, help="""List of preprocessors available by default, by name, namespace, instance, or type.""") def __init__(self, config=None, **kw): """ Public constructor Parameters ---------- config : config User configuration instance. """ with_default_config = self.default_config if config: with_default_config.merge(config) super(Exporter, self).__init__(config=with_default_config, **kw) self._init_preprocessors() @property def default_config(self): return Config() def from_notebook_node(self, nb, resources=None, **kw): """ Convert a notebook from a notebook node instance. Parameters ---------- nb : :class:`~IPython.nbformat.NotebookNode` Notebook node (dict-like with attr-access) resources : dict Additional resources that can be accessed read/write by preprocessors and filters. **kw Ignored (?) """ nb_copy = copy.deepcopy(nb) resources = self._init_resources(resources) if 'language' in nb['metadata']: resources['language'] = nb['metadata']['language'].lower() # Preprocess nb_copy, resources = self._preprocess(nb_copy, resources) return nb_copy, resources def from_filename(self, filename, resources=None, **kw): """ Convert a notebook from a notebook file. 
Parameters ---------- filename : str Full filename of the notebook file to open and convert. """ # Pull the metadata from the filesystem. if resources is None: resources = ResourcesDict() if not 'metadata' in resources or resources['metadata'] == '': resources['metadata'] = ResourcesDict() basename = os.path.basename(filename) notebook_name = basename[:basename.rfind('.')] resources['metadata']['name'] = notebook_name modified_date = datetime.datetime.fromtimestamp(os.path.getmtime(filename)) resources['metadata']['modified_date'] = modified_date.strftime(text.date_format) with io.open(filename, encoding='utf-8') as f: return self.from_notebook_node(nbformat.read(f, as_version=4), resources=resources, **kw) def from_file(self, file_stream, resources=None, **kw): """ Convert a notebook from a notebook file. Parameters ---------- file_stream : file-like object Notebook file-like object to convert. """ return self.from_notebook_node(nbformat.read(file_stream, as_version=4), resources=resources, **kw) def register_preprocessor(self, preprocessor, enabled=False): """ Register a preprocessor. Preprocessors are classes that act upon the notebook before it is passed into the Jinja templating engine. preprocessors are also capable of passing additional information to the Jinja templating engine. Parameters ---------- preprocessor : preprocessor """ if preprocessor is None: raise TypeError('preprocessor') isclass = isinstance(preprocessor, type) constructed = not isclass # Handle preprocessor's registration based on it's type if constructed and isinstance(preprocessor, py3compat.string_types): # Preprocessor is a string, import the namespace and recursively call # this register_preprocessor method preprocessor_cls = import_item(preprocessor) return self.register_preprocessor(preprocessor_cls, enabled) if constructed and hasattr(preprocessor, '__call__'): # Preprocessor is a function, no need to construct it. # Register and return the preprocessor. if enabled: preprocessor.enabled = True self._preprocessors.append(preprocessor) return preprocessor elif isclass and isinstance(preprocessor, MetaHasTraits): # Preprocessor is configurable. Make sure to pass in new default for # the enabled flag if one was specified. self.register_preprocessor(preprocessor(parent=self), enabled) elif isclass: # Preprocessor is not configurable, construct it self.register_preprocessor(preprocessor(), enabled) else: # Preprocessor is an instance of something without a __call__ # attribute. raise TypeError('preprocessor') def _init_preprocessors(self): """ Register all of the preprocessors needed for this exporter, disabled unless specified explicitly. """ self._preprocessors = [] # Load default preprocessors (not necessarly enabled by default). for preprocessor in self.default_preprocessors: self.register_preprocessor(preprocessor) # Load user-specified preprocessors. Enable by default. for preprocessor in self.preprocessors: self.register_preprocessor(preprocessor, enabled=True) def _init_resources(self, resources): #Make sure the resources dict is of ResourcesDict type. 
if resources is None: resources = ResourcesDict() if not isinstance(resources, ResourcesDict): new_resources = ResourcesDict() new_resources.update(resources) resources = new_resources #Make sure the metadata extension exists in resources if 'metadata' in resources: if not isinstance(resources['metadata'], ResourcesDict): resources['metadata'] = ResourcesDict(resources['metadata']) else: resources['metadata'] = ResourcesDict() if not resources['metadata']['name']: resources['metadata']['name'] = 'Notebook' #Set the output extension resources['output_extension'] = self.file_extension return resources def _preprocess(self, nb, resources): """ Preprocess the notebook before passing it into the Jinja engine. To preprocess the notebook is to apply all of the Parameters ---------- nb : notebook node notebook that is being exported. resources : a dict of additional resources that can be accessed read/write by preprocessors """ # Do a copy.deepcopy first, # we are never safe enough with what the preprocessors could do. nbc = copy.deepcopy(nb) resc = copy.deepcopy(resources) #Run each preprocessor on the notebook. Carry the output along #to each preprocessor for preprocessor in self._preprocessors: nbc, resc = preprocessor(nbc, resc) return nbc, resc
Python
0.001642
@@ -971,24 +971,34 @@ %0A if +value and not value.st
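The diff above changes the validator to `if value and not value.startswith('.')`, so the default empty-string extension passes validation instead of raising. A standalone sketch of the guard, mimicking the trait's behaviour without depending on IPython:

# Sketch of the empty-string-safe guard the diff introduces.
def validate_extension(value):
    if value and not value.startswith('.'):
        raise ValueError("extension %r does not begin with a dot" % value)
    return value

print(repr(validate_extension('')))     # '' is allowed (no extension yet)
print(validate_extension('.txt'))       # ok
# validate_extension('txt')             # would raise ValueError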
b8fb50f9f1b7c85811ed3550418eae2b72d30faf
Add index
radar/radar/models/logs.py
radar/radar/models/logs.py
from datetime import datetime

from sqlalchemy import event, DDL, Column, Integer, DateTime, String, text
from sqlalchemy.dialects import postgresql
from sqlalchemy.orm import relationship

from radar.database import db


class Log(db.Model):
    __tablename__ = 'logs'

    id = Column(Integer, primary_key=True)
    date = Column(DateTime(timezone=True), nullable=False, default=datetime.utcnow, server_default=text('now()'))
    type = Column(String, nullable=False)
    user_id = Column(Integer)
    user = relationship('User', primaryjoin='User.id == Log.user_id', foreign_keys=[user_id])
    table_name = Column(String)
    original_data = Column(postgresql.JSONB)
    new_data = Column(postgresql.JSONB)
    statement = Column(String)
    data = Column(postgresql.JSONB)


def log_changes(cls):
    event.listen(cls.__table__, 'after_create', DDL("""
        CREATE TRIGGER {0}_log_changes
        AFTER INSERT OR UPDATE OR DELETE ON {0}
        FOR EACH ROW EXECUTE PROCEDURE log_changes()
    """.format(cls.__tablename__)))
    return cls


event.listen(db.Model.metadata, 'before_create', DDL("""
    CREATE OR REPLACE FUNCTION log_changes() RETURNS TRIGGER AS $body$
    DECLARE
        user_id INTEGER;
    BEGIN
        BEGIN
            user_id = current_setting('radar.user_id');
        EXCEPTION WHEN OTHERS THEN
            user_id = NULL;
        END;

        IF (TG_OP = 'UPDATE') THEN
            INSERT INTO logs (
                type, user_id, table_name, original_data, new_data, statement
            ) VALUES (
                'UPDATE', user_id, TG_TABLE_NAME,
                row_to_json(OLD)::jsonb, row_to_json(NEW)::jsonb,
                current_query()
            );
            RETURN NEW;
        ELSIF (TG_OP = 'DELETE') THEN
            INSERT INTO logs (
                type, user_id, table_name, original_data, statement
            ) VALUES (
                'DELETE', user_id, TG_TABLE_NAME,
                row_to_json(OLD)::jsonb, current_query()
            );
            RETURN OLD;
        ELSIF (TG_OP = 'INSERT') THEN
            INSERT INTO logs (
                type, user_id, table_name, new_data, statement
            ) VALUES (
                'INSERT', user_id, TG_TABLE_NAME,
                row_to_json(NEW)::jsonb, current_query()
            );
            RETURN NEW;
        ELSE
            RAISE WARNING '[log_action] Unknown action: %% at %%', TG_OP, now();
            RETURN NULL;
        END IF;
    END;
    $body$
    LANGUAGE plpgsql
"""))

event.listen(db.Model.metadata, 'after_drop', DDL("""
    DROP FUNCTION IF EXISTS log_changes()
"""))
Python
0.000017
@@ -98,16 +98,23 @@ ng, text +, Index %0Afrom sq @@ -782,16 +782,50 @@ JSONB)%0A%0A +Index('logs_date_idx', Log.date)%0A%0A %0Adef log
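The diff above declares a standalone `Index('logs_date_idx', Log.date)` so date-range queries on the log table can use an index. A sketch of the same pattern on a throwaway model, assuming SQLAlchemy 1.4+ and an in-memory SQLite database rather than the project's setup:

# Sketch of the pattern the diff adds: a named Index declared outside
# the model class.  Uses a disposable in-memory database.
from sqlalchemy import Column, DateTime, Index, Integer, create_engine
from sqlalchemy.orm import declarative_base  # SQLAlchemy 1.4+

Base = declarative_base()

class LogRow(Base):
    __tablename__ = 'logs'
    id = Column(Integer, primary_key=True)
    date = Column(DateTime)

# Same shape as the diff: a named index on the date column.
Index('logs_date_idx', LogRow.date)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)  # emits CREATE INDEX logs_date_idx ...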
fcafa9704003c300be3486584773a0b13a057460
Fix Flakes Errors: openspending/ui/test/functional/test_dimension.py
openspending/ui/test/functional/test_dimension.py
openspending/ui/test/functional/test_dimension.py
import json from .. import ControllerTestCase, url, helpers as h from openspending.ui.lib.helpers import member_url from openspending.model import Dataset, CompoundDimension, meta as db class TestDimensionController(ControllerTestCase): def setup(self): super(TestDimensionController, self).setup() h.load_fixture('cra') h.clean_and_reindex_solr() self.cra = Dataset.by_name('cra') for dimension in self.cra.dimensions: if isinstance(dimension, CompoundDimension) and \ dimension.name == 'cofog1': members = list(dimension.members( dimension.alias.c.name == '3', limit=1)) self.member = members.pop() break def test_index(self): response = self.app.get(url(controller='dimension', dataset='cra', action='index')) h.assert_true('Paid by' in response, "'Paid by' not in response!") h.assert_true('Paid to' in response, "'Paid to' not in response!") h.assert_true('Programme Object Group' in response, "'Programme Object Group' not in response!") h.assert_true('CG, LG or PC' in response, "'CG, LG or PC' not in response!") def test_index_json(self): response = self.app.get(url(controller='dimension', dataset='cra', action='index', format='json')) obj = json.loads(response.body) h.assert_equal(len(obj), 12) h.assert_equal(obj[0]['key'], 'cap_or_cur') h.assert_equal(obj[0]['label'], 'CG, LG or PC') def test_index_csv(self): h.skip("CSV dimension index not yet implemented!") def test_view(self): response = self.app.get(url(controller='dimension', dataset='cra', action='view', dimension='from')) h.assert_true('Paid by' in response, "'Paid by' not in response!") h.assert_true('The entity that the money was paid from.' in response.body, "'The entity that the money was paid from.' not in response!") def test_view_json(self): response = self.app.get(url(controller='dimension', dataset='cra', action='view', dimension='from', format='json')) obj = json.loads(response.body) h.assert_equal(obj['key'], 'from') def test_distinct_json(self): response = self.app.get(url(controller='dimension', dataset='cra', action='distinct', dimension='from', format='json')) obj = json.loads(response.body)['results'] assert len(obj) == 5, obj assert obj[0]['name'] == 'Dept032', obj[0] q = 'Ministry of Ju' response = self.app.get(url(controller='dimension', dataset='cra', action='distinct', dimension='from', format='json', q=q)) obj = json.loads(response.body)['results'] assert len(obj) == 1, obj assert obj[0]['label'].startswith(q), obj[0] def test_view_csv(self): h.skip("CSV dimension view not yet implemented!") def test_view_member_html(self): url_ = member_url(self.cra.name, 'cofog1', self.member) result = self.app.get(url_) h.assert_equal(result.status, '200 OK') # Links to entries json and csv and entries listing h.assert_true('<a href="/cra/cofog1/3.json">' in result) h.assert_true('<a href="/cra/cofog1/3/entries">Search</a>' in result) def test_view_member_json(self): url_ = member_url(self.cra.name, 'cofog1', self.member, format='json') result = self.app.get(url_) h.assert_equal(result.status, '200 OK') h.assert_equal(result.content_type, 'application/json') json_data = json.loads(result.body) h.assert_equal(json_data['name'], u'3') h.assert_equal(json_data['label'], self.member['label']) h.assert_equal(json_data['id'], self.member['id']) def test_view_entries_json(self): url_ = url(controller='dimension', action='entries', format='json', dataset=self.cra.name, dimension='cofog1', name=self.member['name']) result = self.app.get(url_) result = result.follow() h.assert_equal(result.status, '200 OK') 
h.assert_equal(result.content_type, 'application/json') json_data = json.loads(result.body).get('results') h.assert_equal(len(json_data), 5) def test_view_entries_csv(self): url_ = url(controller='dimension', action='entries', format='csv', dataset=self.cra.name, dimension='cofog1', name=self.member['name']) result = self.app.get(url_) result = result.follow() h.assert_equal(result.status, '200 OK') h.assert_equal(result.content_type, 'text/csv') h.assert_true('amount,' in result.body) # csv headers h.assert_true('id,' in result.body) # csv headers def test_view_entries_html(self): url_ = url(controller='dimension', action='entries', format='html', dataset=self.cra.name, dimension='cofog1', name=self.member['name']) result = self.app.get(url_) h.assert_equal(result.status, '200 OK') h.assert_equal(result.content_type, 'text/html') # Content is filled in by client-side code.
Python
0.000046
@@ -171,20 +171,8 @@ sion -, meta as db %0A%0A%0Ac
628655791fc41e5496dd8bed05beb232d7c0d104
Store calculated intermediate values and use them for inheritance
UM/Settings/SettingOverrideDecorator.py
UM/Settings/SettingOverrideDecorator.py
# Copyright (c) 2015 Ultimaker B.V. # Uranium is released under the terms of the AGPLv3 or higher. from UM.Scene.SceneNodeDecorator import SceneNodeDecorator from UM.Signal import Signal, SignalEmitter from UM.Application import Application from copy import deepcopy ## A decorator that can be used to override individual setting values. class SettingOverrideDecorator(SceneNodeDecorator, SignalEmitter): def __init__(self): super().__init__() self._settings = {} self._setting_values = {} settingAdded = Signal() settingRemoved = Signal() settingValueChanged = Signal() def getAllSettings(self): return self._settings def getAllSettingValues(self): values = {} settings = Application.getInstance().getMachineManager().getActiveMachineInstance().getMachineDefinition().getAllSettings(include_machine = False) for setting in settings: key = setting.getKey() if key in self._setting_values: values[key] = setting.parseValue(self._setting_values[key]) continue values[key] = setting.getDefaultValue(self) return values def addSetting(self, key): instance = Application.getInstance().getMachineManager().getActiveMachineInstance() setting = instance.getMachineDefinition().getSetting(key) if not setting: return self._settings[key] = setting self.settingAdded.emit() Application.getInstance().getController().getScene().sceneChanged.emit(self.getNode()) def setSettingValue(self, key, value): if key not in self._settings: return self._setting_values[key] = value self.settingValueChanged.emit(self._settings[key]) Application.getInstance().getController().getScene().sceneChanged.emit(self.getNode()) def getSetting(self, key): if key not in self._settings: parent = self._node.getParent() # It could be that the parent does not have a decoration but it's parent parent does. while parent is not None: if parent.hasDecoration("getSetting"): return parent.callDecoration("getSetting") else: parent = parent.getParent() else: return self._settings[key] def getSettingValue(self, key): if key not in self._settings: if self.getNode().callDecoration("getProfile"): return self.getNode().callDecoration("getProfile").getSettingValue(key) return Application.getInstance().getMachineManager().getActiveProfile().getSettingValue(key) setting = self._settings[key] if key in self._setting_values: return setting.parseValue(self._setting_values[key]) return setting.getDefaultValue(self) def removeSetting(self, key): if key not in self._settings: return del self._settings[key] if key in self._setting_values: del self._setting_values[key] self.settingRemoved.emit() Application.getInstance().getController().getScene().sceneChanged.emit(self.getNode())
Python
0
@@ -513,24 +513,56 @@ alues = %7B%7D%0A%0A + self._temp_values = %7B%7D%0A%0A settingA @@ -743,24 +743,35 @@ f):%0A +self._temp_ values = %7B%7D%0A @@ -1047,32 +1047,43 @@ +self._temp_ values%5Bkey%5D = se @@ -1164,16 +1164,27 @@ +self._temp_ values%5Bk @@ -1216,24 +1216,90 @@ alue(self)%0A%0A + values = self._temp_values%0A self._temp_values = %7B%7D%0A retu @@ -2565,32 +2565,65 @@ n self._settings + and key not in self._temp_values :%0A if @@ -2855,32 +2855,112 @@ tingValue(key)%0A%0A + if key in self._temp_values:%0A return self._temp_values%5Bkey%5D%0A%0A setting
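The diff above stashes values computed in getAllSettingValues into a `_temp_values` dict so that later point lookups can reuse them instead of recomputing inherited values. A standalone sketch of that caching pattern; the names and the `expensive_parse` stand-in are illustrative, not taken from Uranium:

# Illustrative sketch: compute all values once, keep the intermediates,
# and let single-key lookups fall back to the stash before recomputing.
class Settings(object):
    def __init__(self, defaults):
        self._defaults = defaults
        self._temp_values = {}

    def all_values(self):
        self._temp_values = {}  # reset, as the diff does on each call
        for key, value in self._defaults.items():
            self._temp_values[key] = expensive_parse(value)
        return dict(self._temp_values)

    def value(self, key):
        if key in self._temp_values:   # reuse the cached intermediate
            return self._temp_values[key]
        return expensive_parse(self._defaults[key])

def expensive_parse(v):
    return str(v)  # stand-in for real parsing work

s = Settings({"speed": 50})
s.all_values()
print(s.value("speed"))  # served from _temp_values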
9df2420f152e48a0e99598220e4560fe25c9fd36
add an argument to TblTreeEntries.__init__()
AlphaTwirl/HeppyResult/TblTreeEntries.py
AlphaTwirl/HeppyResult/TblTreeEntries.py
# Tai Sakuma <tai.sakuma@cern.ch>
from ..mkdir_p import mkdir_p
from ..listToAlignedText import listToAlignedText
import os
import ROOT

##__________________________________________________________________||
class TblTreeEntries(object):
    def __init__(self, analyzerName, fileName, treeName, outPath):
        self.analyzerName = analyzerName
        self.fileName = fileName
        self.treeName = treeName
        self.outPath = outPath
        self._rows = [['component', 'n']]

    def begin(self): pass

    def read(self, component):
        inputPath = os.path.join(getattr(component, self.analyzerName).path, self.fileName)
        file = ROOT.TFile.Open(inputPath)
        tree = file.Get(self.treeName)
        row = [component.name, tree.GetEntries()]
        self._rows.append(row)

    def end(self):
        f = self._open(self.outPath)
        f.write(listToAlignedText(self._rows))
        self._close(f)

    def _open(self, path):
        mkdir_p(os.path.dirname(path))
        return open(path, 'w')

    def _close(self, file): file.close()

##__________________________________________________________________||
Python
0.000551
@@ -295,16 +295,34 @@ outPath +, columnName = 'n' ):%0A @@ -490,19 +490,26 @@ onent', -'n' +columnName %5D%5D%0A%0A
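The diff above adds a `columnName` keyword argument with a default of `'n'`, so every existing call site keeps working while new callers can customize the header. A tiny standalone sketch of that backward-compatible change:

# Sketch of the backward-compatible kwarg the diff introduces.
class Tbl(object):
    def __init__(self, outPath, columnName='n'):
        self.outPath = outPath
        self._rows = [['component', columnName]]

print(Tbl('out.txt')._rows)                         # [['component', 'n']]
print(Tbl('out.txt', columnName='entries')._rows)   # custom header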
1fcaeb69048a5124ad2a74ce8761685e2d76a360
normalize elem start in wp_parser.py
wp_parser.py
wp_parser.py
""" parse data from huge Wikipedia XML Dump """ __author__ = "siznax" __verion__ = "29 Sep 2015" class WPParser: MAX_ELEM_BYTES = 1024**2 _ebfr = "" _found_end = False _found_start = False _sbfr = "" elem = "" elems_found = 0 elems_processed = 0 def __init__(self, start="<page>", end="</page>"): self.start = start self.end = end def process(self, elem): """override this method to process elements""" pass def parse(self, chunk): """scan uncompressed chunk for start/end""" if len(self.elem) > self.MAX_ELEM_BYTES: print self.elem[:1024] raise RuntimeError("elem grew too big!") for char in chunk: self._scan(char) def _scan(self, char): if self._found_start: self.elem += char self._find_end(char) else: self._find_start(char) if self._found_end: self._found_end = False self.process(self.elem) self.elems_processed += 1 self.elem = "" def _find_start(self, char): if len(self._sbfr) == len(self.start): if self._sbfr == self.start: if self._found_start: raise RuntimeError("already found start!") self._found_start = True self.elem = self.start + "\n" self.elems_found += 1 # print self._sbfr self._sbfr = "" if self._sbfr: self._sbfr += char if char == self.start[0]: self._sbfr = char def _find_end(self, char): if len(self._ebfr) == len(self.end): if self._ebfr == self.end: if self._found_end: raise RuntimeError("already found end!") self._found_end = True self._found_start = False # print self._ebfr self._ebfr = "" if self._ebfr: self._ebfr += char if char == self.end[0]: self._ebfr = char
Python
0.000013
@@ -7,16 +7,36 @@ rse data + (naive & wreckless) from hu @@ -1399,16 +1399,23 @@ f.elem = + %22 %22 + self.st
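The parser above streams a dump character by character, buffering potential start and end markers as it goes. A condensed sketch of the same idea; this Python 3 version trades the per-character buffer bookkeeping of the original for a find-based approach on a growing window, so it is a simplification, not the original algorithm:

# Condensed sketch of extracting <page>...</page> elements from a
# chunked stream; simplified relative to wp_parser's char-by-char scan.
def extract_elements(chunks, start="<page>", end="</page>"):
    window = ""
    for chunk in chunks:
        window += chunk
        while True:
            s = window.find(start)
            e = window.find(end, s)
            if s == -1 or e == -1:
                break  # wait for more data
            yield window[s:e + len(end)]
            window = window[e + len(end):]

stream = ["<page>one</pa", "ge>junk<page>two</page>"]
for elem in extract_elements(stream):
    print(elem)
# <page>one</page>
# <page>two</page>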
6b38f0c872e17ec9869f0c62c886fbbc133a9599
Customize tag rendering
barpyrus.py
barpyrus.py
import sys import contextlib from barpyrus import hlwm from barpyrus import widgets as W from barpyrus.core import Theme from barpyrus import lemonbar from barpyrus import conky @contextlib.contextmanager def maybe_orange(match, predicate='> 90'): with cg.if_('match ${%s} %s' % (match, predicate)): cg.fg('#ffc726') yield cg.fg(None) hc = hlwm.connect() monitor = sys.argv[1] if len(sys.argv) >= 2 else 0 x, y, monitor_w, monitor_h = hc.monitor_rect(monitor) height = 16 width = monitor_w hc(['pad', str(monitor), str(height)]) cg = conky.ConkyGenerator(lemonbar.textpainter()) ## CPU / RAM / df with cg.temp_fg('#9fbc00'): cg.symbol(0xe026) cg.space(5) for cpu in '1234': with maybe_orange('cpu cpu%s' % cpu): cg.var('cpu cpu' + cpu) cg.text('% ') with cg.temp_fg('#9fbc00'): cg.symbol(0xe021) cg.space(5) with maybe_orange('memperc'): cg.var('memperc') cg.text('% ') with cg.temp_fg('#9fbc00'): cg.symbol(0x00e1bb) cg.space(5) with maybe_orange('fs_used_perc /'): cg.var('fs_used_perc /') cg.text('% ') ## Network wifi_icons = [0xe217, 0xe218, 0xe219, 0xe21a] wifi_delta = 100 / len(wifi_icons) with cg.if_('up tun0'): with cg.temp_fg('#ff0000'): cg.symbol(0xe0a6) for iface in ['eth', 'wlan', 'ppp0']: with cg.if_('up %s' % iface), cg.if_('match "${addr %s}" != "No Address"' % iface): with cg.temp_fg('#9fbc00'): if iface == 'wlan': with cg.cases(): for i, icon in enumerate(wifi_icons[:-1]): cg.case('match ${wireless_link_qual_perc wlan} < %d' % ((i+1)*wifi_delta)) cg.symbol(icon) cg.else_() cg.symbol(wifi_icons[-1]) # icon for 100 percent cg.space(5) elif iface == 'eth': cg.symbol(0xe0af) elif iface == 'ppp0': cg.symbol(0xe0f3) else: assert False if iface == 'wlan': cg.var('wireless_essid') if iface != 'ppp0': cg.space(5) cg.var('addr %s' % iface) cg.space(5) with cg.temp_fg('#9fbc00'): cg.symbol(0xe13c) cg.var('downspeedf %s' % iface) cg.text('K ') cg.var('totaldown %s' % iface) cg.space(5) with cg.temp_fg('#9fbc00'): cg.symbol(0xe13b) cg.var('upspeedf %s' % iface) cg.text('K ') cg.var('totalup %s' % iface) cg.space(5) ## Battery # first icon: 0 percent # last icon: 100 percent bat_icons = [ 0xe242, 0xe243, 0xe244, 0xe245, 0xe246, 0xe247, 0xe248, 0xe249, 0xe24a, 0xe24b, ] bat_delta = 100 / len(bat_icons) with cg.if_('existing /sys/class/power_supply/BAT0'): cg.fg('#9fbC00') with cg.if_('match "$battery" != "discharging $battery_percent%"'): cg.symbol(0xe0db) with cg.cases(): for i, icon in enumerate(bat_icons[:-1]): cg.case('match $battery_percent < %d' % ((i+1)*bat_delta)) cg.symbol(icon) cg.else_() cg.symbol(bat_icons[-1]) # icon for 100 percent cg.fg(None) cg.space(5) with maybe_orange('battery_percent', '< 10'): cg.var('battery_percent') cg.text('% ') cg.var('battery_time') cg.space(5) with cg.temp_fg('#9fbc00'): cg.symbol(0xe015) cg.space(5) cg.var('time %d. %B, %H:%M') conky_config = { 'update_interval': '5', } # Widget configuration: bar = lemonbar.Lemonbar(geometry = (x,y,width,height)) bar.widget = W.ListLayout([ W.RawLabel('%{l}'), hlwm.HLWMTags(hc, monitor, tag_renderer = hlwm.underlined_tags), W.RawLabel('%{c}'), hlwm.HLWMWindowTitle(hc), W.RawLabel('%{r}'), conky.ConkyWidget(text=str(cg), config=conky_config), ])
Python
0
@@ -114,16 +114,25 @@ rt Theme +, Painter %0Afrom ba @@ -362,16 +362,768 @@ None)%0A%0A%0A +def underlined_tags(taginfo, painter):%0A if taginfo.empty:%0A return%0A painter.set_flag(painter.underline, True if taginfo.visible else False)%0A painter.fg('#a0a0a0' if taginfo.occupied else '#909090')%0A if taginfo.urgent:%0A painter.ol('#FF7F27')%0A painter.fg('#FF7F27')%0A painter.set_flag(Painter.underline, True)%0A painter.bg('#57000F')%0A elif taginfo.here:%0A painter.fg('#ffffff')%0A painter.ol(taginfo.activecolor if taginfo.focused else '#ffffff')%0A painter.bg(taginfo.emphbg)%0A else:%0A painter.ol('#454545')%0A painter.space(3)%0A painter += taginfo.name%0A painter.space(3)%0A painter.bg()%0A painter.ol()%0A painter.set_flag(painter.underline, False)%0A painter.space(2)%0A%0A%0A hc = hlw @@ -4436,24 +4436,17 @@ renderer - = hlwm. += underlin
daf61b55beea74b516a249db8e963b803621cab8
Version bump (0.3.3)
requestbuilder/__init__.py
requestbuilder/__init__.py
# Copyright (c) 2012-2015, Eucalyptus Systems, Inc. # # Permission to use, copy, modify, and/or distribute this software for # any purpose with or without fee is hereby granted, provided that the # above copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. import argparse import operator import os.path import subprocess __version__ = '0.3.1' if '__file__' in globals(): # Check if this is a git repo; maybe we can get more precise version info try: repo_path = os.path.join(os.path.dirname(__file__), '..') env = {'GIT_DIR': os.path.join(repo_path, '.git')} git = subprocess.Popen(['git', 'describe'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) git.wait() git.stderr.read() if git.returncode == 0: __version__ = git.stdout.read().strip().lstrip('v') if type(__version__).__name__ == 'bytes': __version__ = __version__.decode() except: # Not really a bad thing; we'll just use what we had pass ########## SINGLETONS ########## # Indicates a parameter that should be sent to the server without a value. # Contrast this with empty strings, with are omitted from requests entirely. EMPTY = type('EMPTY', (), {'__repr__': lambda self: "''", '__str__': lambda self: ''})() # Getters used for arg routing PARAMS = operator.attrgetter('params') SESSION = operator.attrgetter('service.session_args') ########## ARG CLASSES ########## class Arg(object): ''' A command line argument. Positional and keyword arguments to __init__ are the same as those to argparse.ArgumentParser.add_argument. The value specified by the 'dest' argument (or the one inferred if none is specified) is used as the name of the parameter to server queries unless send=False is also supplied. ''' def __init__(self, *pargs, **kwargs): if 'route_to' in kwargs: if isinstance(kwargs['route_to'], (list, set, tuple)): self.routes = tuple(kwargs.pop('route_to')) else: self.routes = (kwargs.pop('route_to'),) else: self.routes = None self.pargs = pargs self.kwargs = kwargs def __eq__(self, other): if isinstance(other, Arg): return sorted(self.pargs) == sorted(other.pargs) return False class MutuallyExclusiveArgList(list): ''' Pass Args as positional arguments to __init__ to create a set of command line arguments that are mutually exclusive. If you also call the required() method then the user must specify exactly one of them. The recommended way to do that is via chaining it from __init__. Examples: MutuallyExclusiveArgList(Arg('--spam'), Arg('--eggs')) MutuallyExclusiveArgList(Arg('--spam'), Arg('--eggs')).required() ''' def __init__(self, *args): if len(args) > 0 and isinstance(args[0], bool): self.is_required = args[0] list.__init__(self, args[1:]) else: self.is_required = False list.__init__(self, args) def required(self): self.is_required = True return self class Filter(object): ''' An AWS query API filter. For APIs that support filtering by name/value pairs, adding a Filter to a request's list of filters will allow a user to send an output filter to the server with '--filter name=value' at the command line. 
The value specified by the 'dest' argument (or the 'name' argument, if none is given) is used as the name of a filter in queries. ''' def __init__(self, name, type=str, choices=None, help=None): self.name = name self.type = type self.choices = choices self.help = help def matches_argval(self, argval): return argval.startswith(self.name + '=') def convert(self, argval): ''' Given an argument to --filter of the form "<name>=<value>", convert the value to the appropriate type by calling self.type on it, then return a (name, converted_value) tuple. If the value's type conversion doesn't work then an ArgumentTypeError will result. If the conversion succeeds but does not appear in self.choices when it exists, an ArgumentTypeError will result as well. ''' if '=' not in argval: msg = "filter '{0}' must have format 'NAME=VALUE'".format(argval) raise argparse.ArgumentTypeError(msg) (name, value_str) = argval.split('=', 1) try: value = self.type(value_str) except ValueError: msg = "{0} filter value '{1}' must have type {2}".format( name, value_str, self.type.__name__) raise argparse.ArgumentTypeError(msg) if self.choices and value not in self.choices: msg = "{0} filter value '{1}' must match one of {2}".format( name, value, ', '.join([str(choice) for choice in self.choices])) raise argparse.ArgumentTypeError(msg) if value == '': value = EMPTY return (name, value) class GenericTagFilter(Filter): ''' A filter that accepts "tag:<key>=<value>" values ''' def matches_argval(self, argval): return argval.startswith('tag:') and '=' in argval
Python
0
@@ -852,17 +852,17 @@ = '0.3. -1 +3 '%0A%0A%0Aif '
dab6d148e90ebf99287db870387361a53d4bef14
add magic for 'clone_gist'
lib/plugins/git_completers.py
lib/plugins/git_completers.py
""" git_completers WARNING: most of this stuff fails silently and ipython mangles scope on the hooks you register, so for instance DO NOT attempt to abstract local variables out of your handle functions unless you really know what you're doing. no idea wtf ipython does that or how to work around it. TODO: smarter with ?http://pypi.python.org/pypi/gitinfo/0.0.2 ? depends on GitPython, which is largeish TODO: 'git diff'' should use branch completion AND fs-completion TODO: completion for e.g. 'git push origin XYZ' TODO: 'git push' should complete from the names of the remotes TODO: consider .gitignore ? TODO: 'git add' could be smarter if i could detect rebase state TODO: format-patch isn't completed useful- __IPYTHON__.shell.Completer.custom_completers.regexs """ import os import IPython.ipapi from smashlib.util import report, set_complete from smashlib.smash_plugin import SmashPlugin def uncomitted_files_completer(self, event): """ awkward, but cannot find a better way to do this.. """ lines = os.popen('git status|grep modified').readlines() sys_output = [ x.strip()[2:].split()[-1] for x in lines ] return sys_output def fsc_utfc(self, event): """ filesystem-completer + untracked_files-completer """ return filesystem_completer(self, event) + \ untracked_files_completer(self,event) def untracked_files_completer(self, event): lines = os.popen('git status').readlines() begin = None for line in lines: if 'Untracked files:' in line: begin = lines.index(line) if begin is None: return [] lines = lines[begin:] lines = [line for line in lines if line.startswith('#\t')] lines = [ line.strip().replace('\t','')[1:] for line in lines ] return lines def reset_completer(self, event): options = '--patch --soft --mixed --hard --merge --keep'.split() return options def filesystem_completer(self, event): """ awkward, but cannot find a better way to do this.. """ data = event.line.split()[2:] # e.g. 'git',' mv', './<tab>' # no info: complete from the contents of the wd if not data: return os.listdir(os.getcwd()) # started typing something.. leverage ipython's completion else: data = data[-1] base = __IPYTHON__.complete(data) r = [ x for x in base if os.path.exists(x) ] return r def remote_branches_completer(self, event): all_branches_cmd = 'git branch -a|grep remote' tmp = os.popen(all_branches_cmd).readlines() tmp = [x.split()[0].strip() for x in tmp if x] tmp = [x.replace("remotes/",'') for x in tmp ] return tmp def local_branches(self, event): if event.symbol.startswith('origin'): return remote_branches_completer(self, event) all_branches_cmd = 'git branch -a|grep -v remote' return ['HEAD', 'origin/'] + \ filter(None, map(lambda x: x.replace("*","").strip(), os.popen(all_branches_cmd).readlines())) def subcommands(*args, **kargs): # WOW.. be careful! # this doesn't work if you take GIT_SUBCOMMANDS out of the function. # ipython is somehow, for some reason, mangling scope for the handlers # this is particularly nasty because it seems it fails totally silently GIT_SUBCOMMANDS = ['add', 'bisect', 'branch', 'checkout', 'cherry-pick', 'clone', 'commit', 'diff', 'fetch', 'grep', 'init', 'log', 'merge', 'mv', 'pull', 'push', 'rebase', 'reset', 'rm', 'show', 'status', 'tag'] return GIT_SUBCOMMANDS def fsc2(self, event): """ better file system completer that uses the working directory. the other code should probably use this.. 
""" return __IPYTHON__.Completer.file_matches(event.symbol) class Plugin(SmashPlugin): GIT_ALIASES = [ 'grm git rebase -i origin/master', 'grc git rebase --continue', 'gra git rebase --abort', 'checkout git checkout', 'rebase git rebase -i', 'gs git show --color', 'gc git commit', 'gd git diff --color', 'st git status', 'co git checkout', ('vlog git log --graph --date-order --date=relative --color'),] def install(self): from smashlib import ALIASES as aliases [ aliases.add(x, '__git_plugin__') for x in self.GIT_ALIASES ] aliases.install() report.git_completer('setting prompt to use git vcs') __IPYTHON__._cgb = lambda : os.popen("current_git_branch").read().strip() set_complete(local_branches, 'git checkout [\S]*$') set_complete(fsc2, 'git checkout [\S]* ') set_complete(fsc2, 'git rm') set_complete(subcommands, 'git [\s]*[\S]*$') set_complete(filesystem_completer, 'git mv') set_complete(uncomitted_files_completer, 'git commit') set_complete(uncomitted_files_completer, 'gd') set_complete(uncomitted_files_completer, 'git diff') set_complete(local_branches, 'git merge') set_complete(local_branches, 'git log') set_complete(reset_completer,'git reset') set_complete(local_branches, 'git reset --.* ') set_complete(local_branches, 'git rebase .* ') #set_complete(lambda self, event: git.local_branches, 'git push') # TODO: .. only need file-system if in the middle of rebase .. set_complete(fsc_utfc, 'git add')
Python
0.000002
@@ -869,16 +869,48 @@ on.ipapi +%0Afrom IPython.macro import Macro %0A%0Afrom s @@ -997,16 +997,258 @@ Plugin%0A%0A +def clone_gist(parameter_s=''):%0A cmd = 'git clone git@gist.github.com:%7B0%7D.git %7B1%7D'%0A args = parameter_s.split()%0A assert args%0A if len(args)==1: args+=%5B''%5D%0A cmd = cmd.format(*args)%0A print '%5Ct', cmd%0A __IPYTHON__.system(cmd)%0A %0Adef unc @@ -5906,12 +5906,68 @@ 'git add')%0A + self.contribute_magic('clone_gist', clone_gist)%0A
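The diff above adds a `clone_gist` function and registers it through the plugin's own `contribute_magic` helper. For comparison, a hedged sketch of roughly how the same command could be registered with IPython's standard magics API; this is an alternative technique, not what smashlib does, and it must run inside an active IPython session:

# Hedged sketch using IPython's standard line-magic registration.
from IPython.core.magic import register_line_magic
import subprocess

@register_line_magic
def clone_gist(line):
    """Usage: %clone_gist GIST_ID [TARGET_DIR]"""
    args = line.split()
    assert args, "need at least a gist id"
    if len(args) == 1:
        args += ['']
    cmd = 'git clone git@gist.github.com:{0}.git {1}'.format(*args)
    print('\t', cmd)
    subprocess.call(cmd, shell=True)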
0de57c0c14362d2f9c40975326c8cb1bf792e2a0
make compiled dir
binding.gyp
binding.gyp
{ 'targets': [ { 'target_name': 'chimera', 'sources': [ 'src/top.cc', 'src/cookiejar.cc', 'src/chimera.cc', 'src/browser.cc' ], 'conditions': [ ['OS=="mac"', { 'include_dirs': [ 'qt/include', 'qt/include/QtCore', 'qt/include/QtGui', 'qt/include/QtNetwork', 'qt/include/QtWebkit' ], 'libraries': [ '-framework AppKit', '../qt/lib/libQtGui.a', '../qt/lib/libQtCore.a', '../qt/lib/libQtNetwork.a', '../qt/lib/libQtWebKit.a', '../qt/lib/libjscore.a', '../qt/lib/libwebcore.a', '../qt/lib/libQtXml.a' ], }], ['OS=="linux"', { 'include_dirs': [ 'qt/include', 'qt/include/QtCore', 'qt/include/QtGui', 'qt/include/QtNetwork', 'qt/include/QtWebKit' ], 'libraries': [ '../deps/openssl/linux/lib/libssl.a', '../deps/openssl/linux/lib/libcrypto.a', '../qt/lib/libQtCore.a', '../qt/lib/libQtGui.a', '../qt/lib/libQtXml.a', '../qt/lib/libQtNetwork.a', '../qt/lib/libQtWebKit.a', '../qt/lib/libwebcore.a', '../qt/lib/libjscore.a' ], }] ] } ] }
Python
0
@@ -258,32 +258,41 @@ %0A 'qt +_compiled /include',%0A @@ -293,32 +293,41 @@ %0A 'qt +_compiled /include/QtCore' @@ -335,32 +335,41 @@ %0A 'qt +_compiled /include/QtGui', @@ -376,32 +376,41 @@ %0A 'qt +_compiled /include/QtNetwo @@ -421,32 +421,41 @@ %0A 'qt +_compiled /include/QtWebki @@ -538,32 +538,41 @@ '../qt +_compiled /lib/libQtGui.a' @@ -583,32 +583,41 @@ '../qt +_compiled /lib/libQtCore.a @@ -629,32 +629,41 @@ '../qt +_compiled /lib/libQtNetwor @@ -678,32 +678,41 @@ '../qt +_compiled /lib/libQtWebKit @@ -726,32 +726,41 @@ '../qt +_compiled /lib/libjscore.a @@ -772,32 +772,41 @@ '../qt +_compiled /lib/libwebcore. @@ -819,32 +819,41 @@ '../qt +_compiled /lib/libQtXml.a' @@ -939,32 +939,41 @@ %0A 'qt +_compiled /include',%0A @@ -974,32 +974,41 @@ %0A 'qt +_compiled /include/QtCore' @@ -1016,32 +1016,41 @@ %0A 'qt +_compiled /include/QtGui', @@ -1057,32 +1057,41 @@ %0A 'qt +_compiled /include/QtNetwo @@ -1110,16 +1110,25 @@ 'qt +_compiled /include @@ -1289,32 +1289,41 @@ '../qt +_compiled /lib/libQtCore.a @@ -1335,32 +1335,41 @@ '../qt +_compiled /lib/libQtGui.a' @@ -1380,32 +1380,41 @@ '../qt +_compiled /lib/libQtXml.a' @@ -1425,32 +1425,41 @@ '../qt +_compiled /lib/libQtNetwor @@ -1474,32 +1474,41 @@ '../qt +_compiled /lib/libQtWebKit @@ -1522,32 +1522,41 @@ '../qt +_compiled /lib/libwebcore. @@ -1577,16 +1577,25 @@ '../qt +_compiled /lib/lib
20ee95f56033b5a7d9d1e5f022118850b339ace9
remove old ssl
binding.gyp
binding.gyp
{ 'targets': [ { 'target_name': 'chimera', 'sources': [ 'src/top.cc', 'src/cookiejar.cc', 'src/chimera.cc', 'src/browser.cc' ], 'conditions': [ ['OS=="mac"', { 'include_dirs': [ 'qt_compiled/include', 'qt_compiled/include/QtCore', 'qt_compiled/include/QtGui', 'qt_compiled/include/QtNetwork', 'qt_compiled/include/QtWebkit' ], 'libraries': [ '-framework AppKit', '../qt_compiled/lib/libQtGui.a', '../qt_compiled/lib/libQtCore.a', '../qt_compiled/lib/libQtNetwork.a', '../qt_compiled/lib/libQtWebKit.a', '../qt_compiled/lib/libjscore.a', '../qt_compiled/lib/libwebcore.a', '../qt_compiled/lib/libQtXml.a' ], }], ['OS=="linux"', { 'include_dirs': [ 'qt_compiled/include', 'qt_compiled/include/QtCore', 'qt_compiled/include/QtGui', 'qt_compiled/include/QtNetwork', 'qt_compiled/include/QtWebKit' ], 'libraries': [ '../deps/openssl/linux/lib/libssl.a', '../deps/openssl/linux/lib/libcrypto.a', '../qt_compiled/lib/libQtCore.a', '../qt_compiled/lib/libQtGui.a', '../qt_compiled/lib/libQtXml.a', '../qt_compiled/lib/libQtNetwork.a', '../qt_compiled/lib/libQtWebKit.a', '../qt_compiled/lib/libwebcore.a', '../qt_compiled/lib/libjscore.a' ], }] ] } ] }
Python
0.999082
@@ -1180,111 +1180,8 @@ : %5B%0A - '../deps/openssl/linux/lib/libssl.a',%0A '../deps/openssl/linux/lib/libcrypto.a',%0A
fe290c9f3edc477707e88cb5942ee6c5bd1db568
fix the http backend -- outgoing was still busted
lib/rapidsms/backends/http.py
lib/rapidsms/backends/http.py
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4

import BaseHTTPServer, SocketServer
import select
import random
import re
import urllib

import httphandlers as handlers

import rapidsms
from rapidsms.backends.base import BackendBase


class HttpServer (BaseHTTPServer.HTTPServer, SocketServer.ThreadingMixIn):
    def handle_request (self, timeout=1.0):
        # don't block on handle_request
        reads, writes, errors = (self,), (), ()
        reads, writes, errors = select.select(reads, writes, errors, timeout)
        if reads:
            BaseHTTPServer.HTTPServer.handle_request(self)


class Backend(BackendBase):
    def configure(self, host="localhost", port=8080, handler="HttpHandler", **kwargs):
        #module_name = "httphandlers"
        #module = __import__(module_name, {}, {}, [''])
        component_class = getattr(handlers, handler)

        self.server = HttpServer((host, int(port)), component_class)
        self.type = "HTTP"

        # set this backend in the server instance so it
        # can callback when a message is received
        self.server.backend = self

    def run (self):
        while self.running:
            msg = self.next_message()
            if msg:
                if handlers.msg_store.has_key(msg.connection.identity):
                    handlers.msg_store[msg.connection.identity].append(msg.text)
                else:
                    handlers.msg_store[msg.connection.identity] = []
                    handlers.msg_store[msg.connection.identity].append(msg.text)
            self.server.handle_request()
Python
0.000001
@@ -877,32 +877,71 @@ ndler)%0A %0A + self.handler = component_class%0A self.ser @@ -993,16 +993,16 @@ _class)%0A - @@ -1162,16 +1162,258 @@ = self%0A + # also set it in the handler class so we can callback%0A self.handler.backend = self%0A%0A # set the slug based on the handler, so we can have multiple%0A # http backends%0A self._slug = %22http_%25s%22 %25 handler%0A %0A @@ -1897,8 +1897,76 @@ quest()%0A +%0A def send(self, message):%0A self.handler.outgoing(message)
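The diff above fixes outgoing mail by keeping a reference to the handler class, giving it a backchannel to the backend, deriving a per-handler slug, and delegating `send()` to the handler. A standalone sketch of that delegation pattern; the class names here are illustrative, not rapidsms APIs:

# Illustrative sketch of the handler-delegation pattern in the diff.
class Handler(object):
    backend = None

    @classmethod
    def outgoing(cls, message):
        print("handler sends:", message)

class Backend(object):
    def configure(self, handler=Handler):
        self.handler = handler
        self.handler.backend = self         # callback channel for incoming
        self._slug = "http_%s" % handler.__name__  # multiple http backends

    def send(self, message):
        self.handler.outgoing(message)      # outgoing goes via the handler

b = Backend()
b.configure()
b.send("hello")  # handler sends: hello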
9f27bf690113d30228b9a5a4ef2c729ea0fe361d
Remove blank line
app/timetables/models.py
app/timetables/models.py
from __future__ import unicode_literals from django.contrib.auth.models import User from django.core.exceptions import ValidationError from django.core.validators import MinValueValidator from django.db import models from django.utils.translation import ugettext_lazy as _ from common.mixins import ForceCapitalizeMixin, TimestampMixin class Weekday(ForceCapitalizeMixin, models.Model): """Model representing the day of the week.""" name = models.CharField(max_length=60, unique=True) capitalized_field_names = ('name',) def __str__(self): return self.name class Meal(ForceCapitalizeMixin, models.Model): """ Model representing food occasions. This represents an occasion during the day that food is scheduled to be served. E.g breakfast, lunch, etc. """ name = models.CharField(max_length=60, unique=True) start_time = models.TimeField() end_time = models.TimeField() capitalized_field_names = ('name',) def clean(self): if self.start_time >= self.end_time: raise ValidationError(_('start_time must be less than end_time.')) super().clean() def __str__(self): return self.name class MealOption(ForceCapitalizeMixin, models.Model): """Model representing course/dish combinations to be served during a given meal.""" name = models.CharField(max_length=120, unique=True) capitalized_field_names = ('name',) def __str__(self): return self.name class Course(ForceCapitalizeMixin, models.Model): """Model representing the particular dish served as one of the parts of a meal option.""" name = models.CharField(max_length=150, unique=True) capitalized_field_names = ('name',) def __str__(self): return self.name class Timetable(TimestampMixin): """ Central model of the platform. It represents/encapsulates the entire structure and scheduling of meals, menu-items, dishes, courses, options etc, served at a location, to a team or the entire organisation. """ name = models.CharField(max_length=255, unique=True) code = models.CharField(max_length=60, unique=True) api_key = models.CharField(max_length=255, unique=True) cycle_length = models.PositiveSmallIntegerField( validators=[MinValueValidator(1)] ) current_cycle_day = models.PositiveSmallIntegerField( validators=[MinValueValidator(1)] ) description = models.TextField(blank=True) admins = models.ManyToManyField(User, through='Admin') def clean(self): # Ensure current_cycle_day and cycle_length are not None before compare if self.current_cycle_day and self.cycle_length: if self.current_cycle_day > self.cycle_length: raise ValidationError(_( 'Ensure Current cycle day is not greater than Cycle length.') ) super().clean() def save(self, *args, **kwargs): # Calling full_clean instead of clean to ensure validators are called self.full_clean() return super().save(*args, **kwargs) def __str__(self): return self.name class Dish(TimestampMixin): """ Model representing the actual food served. A dish represents the actual food served as a given course under an option of a meal on a cycle day in a timetable. E.g, Coconut rice garnished with fish stew and chicken or just Ice-cream. 
""" name = models.CharField(max_length=255, unique=True) description = models.TextField(blank=True) def __str__(self): return self.name class Admin(models.Model): """Model representing timetables' administratorship""" user = models.ForeignKey(User, on_delete=models.CASCADE) timetable = models.ForeignKey(Timetable, on_delete=models.CASCADE) is_super = models.BooleanField() def __str__(self): return self.user.username class Meta: unique_together = ('user', 'timetable') class MenuItem(TimestampMixin): """ Model representing a Menu Item. A MenuItem represents the particular meal combination option that is to be served on a given cycle-day of a particular timetable. """ timetable = models.ForeignKey(Timetable) cycle_day = models.PositiveSmallIntegerField( validators=[MinValueValidator(1)] ) meal = models.ForeignKey(Meal) meal_option = models.ForeignKey(MealOption) class Meta: unique_together = ('timetable', 'cycle_day', 'meal', 'meal_option') def __str__(self): return '{0} {1}'.format(self.cycle_day, self.meal)
Python
0.999999
@@ -3993,17 +3993,16 @@ ble')%0A%0A%0A -%0A class Me
47902926f56b3944dbc00f6b2871fd7f77c7c510
Add max_limit arg
saleor/graphql/core/fields.py
saleor/graphql/core/fields.py
import json from functools import partial import graphene from django_measurement.models import MeasurementField from django_prices.models import MoneyField, TaxedMoneyField from graphene.relay import PageInfo from graphene_django.converter import convert_django_field from graphene_django.fields import DjangoConnectionField from graphql.error import GraphQLError from graphql_relay.connection.arrayconnection import connection_from_list_slice from promise import Promise from ..utils.sorting import sort_queryset_for_connection from .connection import connection_from_queryset_slice from .types.common import Weight from .types.money import Money, TaxedMoney def patch_pagination_args(field: DjangoConnectionField): """Add descriptions to pagination arguments in a connection field. By default Graphene's connection fields comes without description for pagination arguments. This functions patches those fields to add the descriptions. """ field.args["first"].description = "Return the first n elements from the list." field.args["last"].description = "Return the last n elements from the list." field.args[ "before" ].description = ( "Return the elements in the list that come before the specified cursor." ) field.args[ "after" ].description = ( "Return the elements in the list that come after the specified cursor." ) class BaseConnectionField(graphene.ConnectionField): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) patch_pagination_args(self) class BaseDjangoConnectionField(DjangoConnectionField): @classmethod def resolve_connection(cls, connection, args, iterable): common_args = { "connection_type": connection, "edge_type": connection.Edge, "pageinfo_type": PageInfo, } if isinstance(iterable, list): common_args["args"] = args _len = len(iterable) connection = connection_from_list_slice( iterable, slice_start=0, list_length=_len, list_slice_length=_len, **common_args, ) else: iterable, sort_by = sort_queryset_for_connection( iterable=iterable, args=args ) args["sort_by"] = sort_by common_args["args"] = args connection = connection_from_queryset_slice(iterable, **common_args) connection.iterable = iterable return connection def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) patch_pagination_args(self) @convert_django_field.register(TaxedMoneyField) def convert_field_taxed_money(*_args): return graphene.Field(TaxedMoney) @convert_django_field.register(MoneyField) def convert_field_money(*_args): return graphene.Field(Money) @convert_django_field.register(MeasurementField) def convert_field_measurements(*_args): return graphene.Field(Weight) class PrefetchingConnectionField(BaseDjangoConnectionField): @classmethod def connection_resolver( cls, resolver, connection, default_manager, queryset_resolver, max_limit, enforce_first_or_last, root, info, **args, ): # Disable `enforce_first_or_last` if not querying for `edges`. 
values = [ field.name.value for field in info.field_asts[0].selection_set.selections ] if "edges" not in values: enforce_first_or_last = False return super().connection_resolver( resolver, connection, default_manager, queryset_resolver, max_limit, enforce_first_or_last, root, info, **args, ) class FilterInputConnectionField(PrefetchingConnectionField): def __init__(self, *args, **kwargs): self.filter_field_name = kwargs.pop("filter_field_name", "filter") self.filter_input = kwargs.get(self.filter_field_name) self.filterset_class = None if self.filter_input: self.filterset_class = self.filter_input.filterset_class super().__init__(*args, **kwargs) @classmethod def connection_resolver( cls, resolver, connection, default_manager, queryset_resolver, max_limit, enforce_first_or_last, filterset_class, filters_name, root, info, **args, ): # Disable `enforce_first_or_last` if not querying for `edges`. values = [ field.name.value for field in info.field_asts[0].selection_set.selections ] if "edges" not in values: enforce_first_or_last = False first = args.get("first") last = args.get("last") if enforce_first_or_last and not (first or last): raise GraphQLError( f"You must provide a `first` or `last` value to properly paginate " f"the `{info.field_name}` connection." ) if max_limit: if first: assert first <= max_limit, ( "Requesting {} records on the `{}` connection exceeds the " "`first` limit of {} records." ).format(first, info.field_name, max_limit) args["first"] = min(first, max_limit) if last: assert last <= max_limit, ( "Requesting {} records on the `{}` connection exceeds the " "`last` limit of {} records." ).format(last, info.field_name, max_limit) args["last"] = min(last, max_limit) iterable = resolver(root, info, **args) if iterable is None: iterable = default_manager # thus the iterable gets refiltered by resolve_queryset # but iterable might be promise iterable = queryset_resolver(connection, iterable, info, args) on_resolve = partial(cls.resolve_connection, connection, args) filter_input = args.get(filters_name) if filter_input and filterset_class: instance = filterset_class( data=dict(filter_input), queryset=iterable, request=info.context ) # Make sure filter input has valid values if not instance.is_valid(): raise GraphQLError(json.dumps(instance.errors.get_json_data())) iterable = instance.qs if Promise.is_thenable(iterable): return Promise.resolve(iterable).then(on_resolve) return on_resolve(iterable) def get_resolver(self, parent_resolver): return partial( super().get_resolver(parent_resolver), self.filterset_class, self.filter_field_name, )
Python
0.999444
@@ -1701,32 +1701,48 @@ , args, iterable +, max_limit=None ):%0A commo @@ -6174,16 +6174,29 @@ partial( +%0A cls.reso @@ -6223,24 +6223,54 @@ ection, args +, max_limit=max_limit%0A )%0A%0A f
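The diff field above uses character-offset hunks with percent-encoded bodies (%0A for a newline), which matches Google's diff-match-patch patch text format. Assuming that format, a minimal sketch that parses such a payload and applies it to the old file contents:

from diff_match_patch import diff_match_patch  # pip install diff-match-patch

def apply_patch_text(old_text, patch_text):
    # Parse the "@@ -a,b +c,d @@" hunks (percent-decoded bodies),
    # then apply them with fuzzy context matching.
    dmp = diff_match_patch()
    patches = dmp.patch_fromText(patch_text)
    new_text, results = dmp.patch_apply(patches, old_text)
    if not all(results):
        raise ValueError("some hunks failed to apply")
    return new_text

Applied to the old contents of a record, this should reproduce the post-commit file; the character offsets keep the hunks compact but make them whitespace-sensitive.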
9f53cdc4ffa2b34aae59cf9f95958aae83964e7f
Update pexels.py
postcards/plugin_pexels/util/pexels.py
postcards/plugin_pexels/util/pexels.py
import random import requests from bs4 import BeautifulSoup import os import urllib.request import sys words_location = os.path.dirname(os.path.realpath(__file__)) + '/words.txt' pexels_search_url = 'https://www.pexels.com/search/' headers = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3', 'Accept-Encoding': 'none', 'Accept-Language': 'en-US,en;q=0.8', 'Connection': 'keep-alive'} def get_random_image(keyword=None): url = get_random_image_url(keyword=keyword) return read_from_url(url) def read_from_url(url): request = urllib.request.Request(url, None, headers) # The assembled request return urllib.request.urlopen(request) def get_random_image_url(keyword=None, number=1, _count=0): words = read_words() if keyword: search_term = keyword else: search_term = random.choice(words) r = requests.get(pexels_search_url + search_term, headers=headers) soup = BeautifulSoup(r.text, 'html.parser') imgs = [] for article in soup.findAll("article", {"class": "photo-item"}): src = article.a.img['src'].split('?')[0] imgs.append(src) if imgs: chosen = [] if number > len(imgs): number = len(imgs) for i in range(0, number): img = random.choice(imgs) imgs.remove(img) chosen.append(img) if number == 1: return chosen[0] else: return chosen elif _count < 10 and not keyword: return get_random_image_url(_count=_count + 1) elif keyword: raise Exception("No image found for keyword: " + keyword) else: raise Exception(f"Something is broken, tried {_count} times but no images") def read_words(): with open(words_location) as f: content = f.readlines() return [x.strip() for x in content] if __name__ == '__main__': keyword = None number = 1 if len(sys.argv) > 1: keyword = sys.argv[1] if len(sys.argv) > 2: number = int(sys.argv[2]) try: imgs = get_random_image_url(keyword=keyword, number=number) if isinstance(imgs, str): print(imgs) else: for url in imgs: print(url) except Exception as e: print(e) # raise e
Python
0
@@ -87,16 +87,396 @@ port sys +%0Aimport base64%0Afrom pypexels import PyPexels%0A%0A#Decode pexels API key from abertschi%0Abase64_message = 'NTYzNDkyYWQ2ZjkxNzAwMDAxMDAwMDAxNmEzNTcyNDA4YmVjNGFmNzc0YTYzYTRjNjdjZGNkMGIK'%0Abase64_bytes = base64_message.encode('ascii')%0Amessage_bytes = base64.b64decode(base64_bytes)%0Aapi_key = message_bytes.decode('ascii')%0A%0A# instantiate PyPexels object%0Apy_pexel = PyPexels(api_key=api_key) %0A%0Awords_ @@ -574,15 +574,14 @@ http -s :// -www +api .pex @@ -592,15 +592,24 @@ com/ +v1/ search -/ +?query= '%0A%0Ah @@ -1447,45 +1447,47 @@ -r = requests.get( +search_results = py_ pexel -s_ +. search -_url + +(query= sear @@ -1499,214 +1499,206 @@ rm, -headers=headers)%0A soup = BeautifulSoup(r.text, 'html.parser')%0A imgs = %5B%5D%0A%0A for article in soup.findAll(%22article%22, %7B%22class%22: %22photo-item%22%7D):%0A src = article.a.img%5B'src'%5D.split('?')%5B0%5D%0A +per_page=40)%0A imgs = %5B%5D%0A while True:%0A for photo in search_results.entries:%0A #print(photo.id, photo.photographer, photo.url)%0A src = photo.src.get('original')%0A imgs @@ -1689,24 +1689,26 @@ ')%0A + imgs.append( @@ -1712,16 +1712,131 @@ nd(src)%0A + if not search_results.has_next:%0A break%0A search_results = search_results.get_next_page()%0A %0A if
6508912a20ef25f0c70b08dabe98e59964e00be7
Add seatsAvailable to database fixture
test/base.py
test/base.py
import unittest import datetime from google.appengine.api import users from google.appengine.api import memcache from google.appengine.ext import ndb from google.appengine.ext import testbed from google.appengine.datastore import datastore_stub_util from models import ( Profile, Conference, Session, ) from utils import getUserId class BaseEndpointAPITestCase(unittest.TestCase): """ Base endpoint API unit tests. """ def setUp(self): # create an instance of the Testbed class. self.testbed = testbed.Testbed() # Then activate the testbed, which prepares the service stubs for use. self.testbed.activate() # Create a consistency policy that will simulate the High Replication consistency model. self.policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(probability=1) # Initialize the datastore stub with this policy. self.testbed.init_datastore_v3_stub(consistency_policy=self.policy) # declare other service stubs self.testbed.init_memcache_stub() self.testbed.init_user_stub() self.testbed.init_taskqueue_stub() self.taskqueue_stub = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME) # Clear ndb's in-context cache between tests. # This prevents data from leaking between tests. # Alternatively, you could disable caching by # using ndb.get_context().set_cache_policy(False) ndb.get_context().clear_cache() def tearDown(self): self.testbed.deactivate() def initDatabase(self): """ Adds database fixtures """ _profiles = [ {'displayName': 'Luiz', 'mainEmail': 'test1@test.com', 'teeShirtSize': '1', 'conferenceKeysToAttend': []}, {'displayName': 'Batman', 'mainEmail': 'test2@test.com', 'teeShirtSize': '2', 'conferenceKeysToAttend': []}, {'displayName': 'Goku', 'mainEmail': 'test3@test.com', 'teeShirtSize': '3', 'conferenceKeysToAttend': []} ] # add profiles to database ndb.put_multi([Profile(key=ndb.Key(Profile, p['mainEmail']),**p) for p in _profiles]) now = datetime.datetime.now() # 3 conferences with `test1@test.com` # 1 conference with `test2@test.com` _conferences = [ { 'name': 'room #1', 'organizerUserId': 'test1@test.com', 'topics': ['programming', 'web design', 'web performance'], 'city': 'London', 'startDate': now, 'endDate': now + datetime.timedelta(days=5), 'maxAttendees': 100, 'sessions': [ {'name': 'PHP', 'speaker': 'superman', 'typeOfSession': 'educational', 'date': (now + datetime.timedelta(days=10)).date(), 'startTime': (now + datetime.timedelta(days=1)).time(), 'duration': 60}, {'name': 'Python', 'speaker': 'flash', 'typeOfSession': 'educational', 'date': (now + datetime.timedelta(days=10)).date(), 'startTime': (now + datetime.timedelta(days=1, hours=1)).time(), 'duration': 60} ] }, { 'name': 'room #2', 'organizerUserId': 'test1@test.com', 'topics': ['web performance'], 'city': 'Baton Rouge', 'startDate': now + datetime.timedelta(days=10), 'endDate': now + datetime.timedelta(days=20), 'maxAttendees': 5, 'sessions': [] }, { 'name': 'room #3', 'organizerUserId': 'test1@test.com', 'topics': ['programming', 'misc'], 'startDate': now + datetime.timedelta(days=8), 'endDate': now + datetime.timedelta(days=10), 'maxAttendees': 6, 'sessions': [] }, { 'name': 'room #4', 'organizerUserId': 'test2@test.com', 'topics': ['misc'], 'startDate': now + datetime.timedelta(days=10), 'endDate': now + datetime.timedelta(days=20), 'maxAttendees': 6, 'sessions': [ {'name': 'Intro to Poker', 'speaker': 'joker', 'typeOfSession': 'fun', 'date': (now + datetime.timedelta(days=10)).date(), 'startTime': (now + datetime.timedelta(days=10)).time(), 'duration': 60}, {'name': 'Google App Engine', 'speaker': 'Bill 
Gates', 'typeOfSession': 'informative', 'date': (now + datetime.timedelta(days=10)).date(), 'startTime': (now + datetime.timedelta(days=10, hours=1)).time(), 'duration': 60} ] } ] # add conferences to database for data in _conferences: p_key = ndb.Key(Profile, data['organizerUserId']) c_id = Conference.allocate_ids(size=1, parent=p_key)[0] data['key'] = ndb.Key(Conference, c_id, parent=p_key) # pop the sessions from `data` and add the conference to the database sessions = data.pop('sessions') conf = Conference(**data) conf.put() # Now that the conference has been added to the database, we can add the sessions that # were previously removed using `pop()` for session in sessions: c_id = Session.allocate_ids(size=1, parent=conf.key)[0] session['key'] = ndb.Key(Session, c_id, parent=conf.key) Session(**session).put() def login(self, email='test1@test.com', is_admin=False): """ Logs in user (using simulation). If no arguments are given, logs in using default user `test1@test.com` """ self.testbed.setup_env( user_email=email, user_is_admin='1' if is_admin else '0', overwrite=True, # support oauth login using `endpoints.get_current_user()` ENDPOINTS_AUTH_EMAIL=email, ENDPOINTS_AUTH_DOMAIN='testing.com' ) def logout(self): """ Logs out user (using simulation) """ self.login('') def getUserId(self): """ Returns current user's id """ user = users.get_current_user() if not user: raise ValueError("User must be logged in to retrieve user id") return getUserId(user) def testLogin(self): """TEST: User login simulation""" assert not users.get_current_user() self.login() assert users.get_current_user().email() == 'test1@test.com' self.login(is_admin=True) assert users.is_current_user_admin() self.logout() assert not users.get_current_user()
Python
0
@@ -2595,16 +2595,55 @@ ays=5),%0A + 'seatsAvailable': 100,%0A @@ -3564,32 +3564,69 @@ delta(days=20),%0A + 'seatsAvailable': 1,%0A @@ -3641,17 +3641,17 @@ ndees': -5 +1 ,%0A @@ -3960,32 +3960,69 @@ delta(days=10),%0A + 'seatsAvailable': 6,%0A @@ -4280,32 +4280,32 @@ delta(days=10),%0A - @@ -4342,32 +4342,69 @@ delta(days=20),%0A + 'seatsAvailable': 6,%0A
7c1e5418198b848b7e35fb53c41f8b6315d2d0c8
fix diagnostics error when default terminal width <= 80
modules/tools/diagnostics/diagnostics.py
modules/tools/diagnostics/diagnostics.py
#!/usr/bin/env python ############################################################################### # Copyright 2017 The Apollo Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################### """ Real Time Plotting of planning and control """ import curses import os import random import threading import traceback import rospy from std_msgs.msg import String from message import Message primitive = (int, str, bool, unicode) class Diagnostics(object): """ Plotter Class """ def __init__(self, stdscr): META_DATA_FILE = os.path.join(os.path.dirname(__file__), 'meta.data') self.stdscr = stdscr # topic self.messages = [] curses.init_pair(1, curses.COLOR_RED, curses.COLOR_BLACK) curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK) self.stdscr.nodelay(1) curses.curs_set(0) self.lock = threading.Lock() with open(META_DATA_FILE) as f: for line in f: line = line.strip() # Skip empty lines, header and comments. if not line or line.startswith('#'): continue module_name, proto_name, topic, period = line.split() self.messages.append(Message(module_name, proto_name, topic, period, self.stdscr, self.lock)) self.selection = 0 self.current_index = 0 self.MENU = True def callback_timer(self, event): """ Update Main Screen """ if self.MENU: with self.lock: self.stdscr.clear() self.stdscr.addstr(0, 0, "Module", curses.A_BOLD) self.stdscr.addstr(0, 15, "Topic", curses.A_BOLD) self.stdscr.addstr(0, 50, "Period", curses.A_BOLD) self.stdscr.addstr(0, 60, "Max", curses.A_BOLD) self.stdscr.addstr(0, 70, "Min", curses.A_BOLD) self.stdscr.addstr(0, 80, "Delay", curses.A_BOLD) for idx in xrange(len(self.messages)): lidx = idx + 1 if idx == self.selection: self.stdscr.addstr(lidx, 0, self.messages[idx].name, curses.A_REVERSE) else: self.stdscr.addstr(lidx, 0, self.messages[idx].name) self.stdscr.addstr( lidx, 15, self.messages[idx].topic, curses.color_pair(2 if self.messages[idx] .msg_received == True else 1)) self.stdscr.addstr( lidx, 50, "{0:.2f}".format(self.messages[idx].msg_interval)) self.stdscr.addstr( lidx, 60, "{0:.2f}".format(self.messages[idx].msg_max)) self.stdscr.addstr( lidx, 70, "{0:.2f}".format(self.messages[idx].msg_min)) self.stdscr.addstr( lidx, 80, "{0:.2f}".format(self.messages[idx].msg_delay)) self.stdscr.refresh() def main(stdscr): """ Main function """ rospy.init_node('adu_diagnostics_' + str(random.random()), anonymous=True) diag = Diagnostics(stdscr) sublist = [msg.subscribe() for msg in diag.messages] maintimercallback = rospy.Timer(rospy.Duration(0.05), diag.callback_timer) while True: with diag.lock: c = stdscr.getch() curses.flushinp() if c == ord('q') or c == 27: maintimercallback.shutdown() for sub in sublist: sub.unregister() break if c == ord('b'): for sub in sublist: sub.unregister() if c == curses.KEY_DOWN: if diag.MENU: diag.selection = min((diag.selection + 1), len(diag.messages) - 1) else: diag.messages[diag.selection].key_down() elif c == curses.KEY_UP: if diag.MENU: diag.selection = 
max((diag.selection - 1), 0) else: diag.messages[diag.selection].key_up() elif c == curses.KEY_RIGHT: currentmsg = diag.messages[diag.selection] if diag.MENU: diag.MENU = False currentmsg.field.show = True currentmsg.field.display_on_screen() else: currentmsg.key_right() elif c == curses.KEY_LEFT: if not diag.MENU: currentmsg = diag.messages[diag.selection] if currentmsg.field.show: currentmsg.field.show = False diag.MENU = True else: currentmsg.key_left() elif c == ord('w'): currentmsg = diag.messages[diag.selection] if not diag.MENU: currentmsg.index_incr() elif c == ord('s'): currentmsg = diag.messages[diag.selection] if not diag.MENU: currentmsg.index_decr() elif c == ord('a'): currentmsg = diag.messages[diag.selection] if not diag.MENU: currentmsg.index_begin() elif c == ord('d'): currentmsg = diag.messages[diag.selection] if not diag.MENU: currentmsg.index_end() rospy.sleep(0.01) if __name__ == '__main__': try: curses.wrapper(main) except Exception as e: print traceback.format_exc()
Python
0.000001
@@ -898,16 +898,30 @@ raceback +%0Aimport signal %0A%0Aimport @@ -1467,16 +1467,493 @@ set(0)%0A%0A + %22%22%22%0A Workaround for issuse #1774, since we know the exactly number of the%0A columns that we used, so if the default terminal width %3C= 80, we just%0A resize the terminal to ensure it is bigger enough for the addstr() call.%0A Otherwise, addstr() may not happy: %22error: addstr() returned ERR%22.%0A %22%22%22%0A maxY, maxX = self.stdscr.getmaxyx()%0A if maxY %3C= 80:%0A curses.resizeterm(maxX, 90) #80+10, 10 for the %22Delay%22 column%0A%0A @@ -4341,24 +4341,75 @@ ion%0A %22%22%22%0A + signal.signal(signal.SIGWINCH, signal.SIG_DFL)%0A rospy.in
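The patch above works around curses' addstr() returning ERR on narrow terminals by resizing the terminal up front. A different defensive pattern for the same failure mode, sketched here as an alternative rather than what the commit does, is to trap curses.error at each write:

import curses

def safe_addstr(win, y, x, text, attr=0):
    # addstr() raises curses.error when the text would run past the
    # window edge; swallowing it lets a narrow terminal degrade
    # gracefully instead of crashing the UI.
    try:
        win.addstr(y, x, text, attr)
    except curses.error:
        pass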
bf336d99484cc3804f469631b513a927940ada30
Add scan_steps wrapper for scan_nd
profile_collection/startup/50-scans.py
profile_collection/startup/50-scans.py
# vim: sw=4 ts=4 sts expandtab smarttab # HXN step-scan configuration import hxntools.scans from bluesky.global_state import get_gs gs = get_gs() hxntools.scans.setup() ct = hxntools.scans.count ascan = hxntools.scans.absolute_scan dscan = hxntools.scans.relative_scan fermat = hxntools.scans.relative_fermat spiral = hxntools.scans.relative_spiral mesh = hxntools.scans.absolute_mesh dmesh = hxntools.scans.relative_mesh d2scan = hxntools.scans.d2scan a2scan = hxntools.scans.a2scan gs.DETS = [zebra, sclr1, merlin1, xspress3, lakeshore2] gs.TABLE_COLS = ['sclr1_ch2','sclr1_ch3', 'sclr1_ch4', 'sclr1_ch5_calc', 'ssx', 'ssy', 'ssz', 't_base', 't_sample', 't_vlens', 't_hlens'] # Plot this by default versus motor position: gs.PLOT_Y = 'Det2_Cr' gs.OVERPLOT = False gs.BASELINE_DEVICES = [smll,vmll, hmll, ssa2, zp]
Python
0.000001
@@ -479,16 +479,55 @@ s.a2scan +%0Ascan_steps = hxntools.scans.scan_steps %0A%0Ags.DET @@ -848,13 +848,44 @@ = %5B +dcm, m1, m2, beamline_status, smll, + vmll
817fbb054f7422ec8adc16f079b8f70ed2534356
remove shebang and executable bit
bin/auth.py
bin/auth.py
#!/usr/bin/env python3 import argparse import pathlib import googleapiclient.discovery import httplib2 import oauth2client.client import oauth2client.file import oauth2client.tools YOUTUBE_API_SERVICE_NAME = 'youtube' YOUTUBE_API_VERSION = 'v3' YOUTUBE_ANALYTICS_API_SERVICE_NAME = 'youtubeAnalytics' YOUTUBE_ANALYTICS_API_VERSION = 'v1' HERE = pathlib.Path(__file__).resolve().parent ROOT = HERE.parent CONF_DIR = ROOT / 'config' CLIENT_SECRETS_FILE = CONF_DIR / 'client_secrets.json' MISSING_CLIENT_SECRETS_MESSAGE = f''' WARNING: Please configure OAuth 2.0 by downloading client_secrets.json from https://console.developers.google.com/apis/credentials?project=YOUR_PROJECT and putting it at {CLIENT_SECRETS_FILE} For more information about the client_secrets.json file format, please visit: https://developers.google.com/api-client-library/python/guide/aaa_client_secrets ''' class ArgumentParser(argparse.ArgumentParser): def __init__(self, **kwargs): parents = list(kwargs.get('parents', [])) parents.append(oauth2client.tools.argparser) kwargs['parents'] = parents super().__init__(**kwargs) # youtube_scopes is a list of OAuth scopes with the # https://www.googleapis.com/auth/ prefix stripped. # # In case only a single scope is needed, a single string is allowed in # place of youtube_scopes, which is automatically understood as a # singleton list. # # Typical scopes: # - youtube # - youtube.readonly # - youtube.upload # - yt-analytics.readonly def get_authenticated_http_client(args, youtube_scopes): if isinstance(youtube_scopes, str): # Singleton youtube_scopes = [youtube_scopes] flow = oauth2client.client.flow_from_clientsecrets( CLIENT_SECRETS_FILE, scope=' '.join(f'https://www.googleapis.com/auth/{scope}' for scope in youtube_scopes), message=MISSING_CLIENT_SECRETS_MESSAGE, ) oauth_credentials_file = CONF_DIR / f'credentials-{",".join(youtube_scopes)}.json' storage = oauth2client.file.Storage(oauth_credentials_file) credentials = storage.get() if credentials is None or credentials.invalid: credentials = oauth2client.tools.run_flow(flow, storage, args) return credentials.authorize(httplib2.Http()) # Typical scopes: # - youtube # - youtube.readonly # - youtube.upload def get_youtube_client(args, scopes): http = get_authenticated_http_client(args, scopes) return googleapiclient.discovery.build( YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, http=http, ) # Typical scopes: # - yt-analytics.readonly def get_youtube_analytics_client(args, scopes): http = get_authenticated_http_client(args, scopes) return googleapiclient.discovery.build( YOUTUBE_ANALYTICS_API_SERVICE_NAME, YOUTUBE_ANALYTICS_API_VERSION, http=http, )
Python
0.000001
@@ -1,28 +1,4 @@ -#!/usr/bin/env python3%0A%0A impo
50c06c3e24807c57654f292eee168c544939d06d
Update ALLOWED_HOSTS
config/settings.py
config/settings.py
from datetime import time from os import getenv from pathlib import Path import dj_database_url from dotenv import load_dotenv import tomlkit load_dotenv() BASE_DIR = Path(__file__).resolve().parent.parent PYPROJECT_PATH = BASE_DIR / "pyproject.toml" PYPROJECT = tomlkit.parse(PYPROJECT_PATH.open().read()) SECRET_KEY = getenv("STARMINDER_DJANGO_SECRET_KEY") ENVIRONMENT = getenv("STARMINDER_ENVIRONMENT") DEBUG = bool(int(getenv("STARMINDER_DEBUG"))) ALLOWED_HOSTS = [] INSTALLED_APPS = [ "django.contrib.admin", "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.sessions", "django.contrib.messages", # "django.contrib.staticfiles", "django.contrib.sites", "allauth", "allauth.account", "allauth.socialaccount", "allauth.socialaccount.providers.github", "starminder.main", ] MIDDLEWARE = [ "django.middleware.security.SecurityMiddleware", "whitenoise.middleware.WhiteNoiseMiddleware", "django.contrib.sessions.middleware.SessionMiddleware", "django.middleware.common.CommonMiddleware", "django.middleware.csrf.CsrfViewMiddleware", "django.contrib.auth.middleware.AuthenticationMiddleware", "django.contrib.messages.middleware.MessageMiddleware", "django.middleware.clickjacking.XFrameOptionsMiddleware", "starminder.main.middleware.EmailRequiredMiddleware", "starminder.main.middleware.FooterStatsMiddleware", ] ROOT_URLCONF = "config.urls" TEMPLATES = [ { "BACKEND": "django.template.backends.django.DjangoTemplates", "DIRS": [], "APP_DIRS": True, "OPTIONS": { "context_processors": [ "django.template.context_processors.debug", "django.template.context_processors.request", "django.contrib.auth.context_processors.auth", "django.contrib.messages.context_processors.messages", ], }, }, ] WSGI_APPLICATION = "config.wsgi.application" ENVIRONMENT_DATABASES = { "test": { "ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:", }, "local": { "ENGINE": "django.db.backends.sqlite3", "NAME": BASE_DIR / "db.sqlite3", }, "prod": dj_database_url.config(), } DATABASES = { "default": ENVIRONMENT_DATABASES[ENVIRONMENT], } AUTH_PASSWORD_VALIDATORS = [ { "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", }, { "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", }, { "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator", }, { "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator", }, ] LANGUAGE_CODE = "en-us" TIME_ZONE = "UTC" USE_I18N = False USE_L10N = False USE_TZ = True STATIC_URL = "/static/" STATICFILES_DIRS = [BASE_DIR / "static"] ################################################## ADMIN_PREFIX = getenv("STARMINDER_ADMIN_PREFIX") ENCRYPTION_KEY = getenv("STARMINDER_ENCRYPTION_KEY") SITE_ID = 1 APPEND_SLASH = True STARMINDER_VERSION = PYPROJECT["tool"]["poetry"]["version"] ################ # django-allauth AUTHENTICATION_BACKENDS = ( "django.contrib.auth.backends.ModelBackend", "allauth.account.auth_backends.AuthenticationBackend", ) ACCOUNT_EMAIL_VERIFICATION = "none" LOGIN_REDIRECT_URL = "dashboard" LOGOUT_REDIRECT_URL = "home" ACCOUNT_LOGOUT_ON_GET = True ################## # Profile defaults DEFAULT_DAY = getenv("STARMINDER_DEFAULT_DAY") DEFAULT_TIME = time.fromisoformat(getenv("STARMINDER_DEFAULT_TIME")) DEFAULT_NUMBER = int(getenv("STARMINDER_DEFAULT_NUMBER")) AWS_ACCESS_KEY_ID = getenv("AWS_ACCESS_KEY_ID") AWS_SECRET_ACCESS_KEY = getenv("AWS_SECRET_ACCESS_KEY")
Python
0.000001
@@ -472,16 +472,96 @@ OSTS = %5B +%0A getenv(%22DO_APP_HOSTNAME%22),%0A %22starminder.xyz%22,%0A %22www.starminder.xyz%22,%0A %5D%0A%0AINSTA
9291d1f6ddb2fe3b89bc4cc64676a8b9e050d80e
Add hashbang
gnetplus.py
gnetplus.py
import collections import serial import struct import sys import time class InvalidMessage(Exception): pass class Message(object): """ Base message class for representing a message """ SOH = 0x01 def __init__(self, address, function, data): self.address = address self.function = function self.data = data def __str__(self): msgstr = struct.pack('BBB', self.address, self.function, len(self.data)) + self.data crc = self.gencrc(msgstr) return chr(self.SOH) + msgstr + struct.pack('>H', crc) def __repr__(self): return ("{name}(address={address}, " "function={function}, " "data={data})").format(name=self.__class__.__name__, address=hex(self.address), function=hex(self.function), data=repr(self.data)) def sendto(self, serial): serial.write(str(self)) @classmethod def readfrom(cls, serial): header = serial.read(4) soh, address, function, length = struct.unpack('BBBB', header) if soh != cls.SOH: raise InvalidMessage("SOH does not match") data = serial.read(length) crc = serial.read(2) msg = cls(address=address, function=function, data=data) if str(msg)[-2:] != crc: raise InvalidMessage("CRC does not match") return msg @staticmethod def gencrc(msgstr): crc = 0xFFFF for char in msgstr: crc ^= ord(char) for i in xrange(8): if (crc & 1) == 1: crc = (crc >> 1) ^ 0xA001 else: crc >>= 1 return crc class QueryMessage(Message): POLLING = 0x00 GET_VERSION = 0x01 SET_SLAVE_ADDR = 0x02 LOGON = 0x03 LOGOFF = 0x04 SET_PASSWORD = 0x05 CLASSNAME = 0x06 SET_DATETIME = 0x07 GET_DATETIME = 0x08 GET_REGISTER = 0x09 SET_REGISTER = 0x0A RECORD_COUNT = 0x0B GET_FIRST_RECORD = 0x0C GET_NEXT_RECORD = 0x0D ERASE_ALL_RECORDS = 0x0E ADD_RECORD = 0x0F RECOVER_ALL_RECORDS = 0x10 DO = 0x11 DI = 0x12 ANALOG_INPUT = 0x13 THERMOMETER = 0x14 GET_NODE = 0x15 GET_SN = 0x16 SILENT_MODE = 0x17 RESERVE = 0x18 ENABLE_AUTO_MODE = 0x19 GET_TIME_ADJUST = 0x1A ECHO = 0x18 SET_TIME_ADJUST = 0x1C DEBUG = 0x1D RESET = 0x1E GO_TO_ISP = 0x1F REQUEST = 0x20 ANTI_COLLISION = 0x21 SELECT_CARD = 0x22 AUTHENTICATE = 0x23 READ_BLOCK = 0x24 WRITE_BLOCk = 0x25 SET_VALUE = 0x26 READ_VALUE = 0x27 CREATE_VALUE_BLOCK = 0x28 ACCESS_CONDITION = 0x29 HALT = 0x2A SAVE_KEY = 0x2B GET_SECOND_SN = 0x2C GET_ACCESS_CONDITION = 0x2D AUTHENTICATE_KEY = 0x2E REQUEST_ALL = 0x2F SET_VALUEEX = 0x32 TRANSFER = 0x33 RESTORE = 0x34 GET_SECTOR = 0x3D RF_POWER_ONOFF = 0x3E AUTO_MODE = 0x3F class GNetPlusError(Exception): pass class ResponseMessage(Message): ACK = 0x06 NAK = 0x15 EVN = 0x12 def to_error(self): if self.function != self.NAK: return None return GNetPlusError("Error: " + repr(self.data)) class Handle(object): def __init__(self, port, baudrate=19200, deviceaddr=0): self.baudrate = baudrate self.port = port self.serial = serial.Serial(port, baudrate=baudrate) self.deviceaddr = deviceaddr def sendmsg(self, function, data=''): QueryMessage(self.deviceaddr, function, data).sendto(self.serial) def readmsg(self, sink_events=False): while True: response = ResponseMessage.readfrom(self.serial) # skip over events. 
spec doesn't say what to do with them if sink_events and response.function == ResponseMessage.EVN: continue break if response.function == ResponseMessage.NAK: raise response.to_error() return response def get_sn(self): self.sendmsg(QueryMessage.REQUEST) self.readmsg(sink_events=True) self.sendmsg(QueryMessage.ANTI_COLLISION) response = self.readmsg(sink_events=True) return struct.unpack('>L', response.data)[0] def get_version(self): self.sendmsg(QueryMessage.GET_VERSION) return self.readmsg().data def set_auto_mode(self, enabled=True): self.sendmsg(QueryMessage.AUTO_MODE, chr(enabled)) self.readmsg(sink_events=True) def wait_for_card(self): self.set_auto_mode() while True: response = self.readmsg() if (response.function == ResponseMessage.EVN and response.data == 'I'): return if __name__ == '__main__': try: port = sys.argv[1] except IndexError: sys.stderr.write("Usage: {0} <serial port>\n".format(sys.argv[0])) sys.exit(1) handle = Handle(port) while True: handle.wait_for_card() try: print "Found card: {0}".format(hex(handle.get_sn())) except GNetPlusError: print "Tap card again."
Python
0.000426
@@ -1,12 +1,31 @@ +#!/usr/bin/python%0A%0A import colle
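Message.gencrc in the record above is the reflected CRC-16 with polynomial 0xA001 and initial value 0xFFFF (the Modbus CRC). The record's script is Python 2; a Python 3 rendering of just the checksum, for reference:

def crc16_modbus(data: bytes) -> int:
    # Reflected CRC-16, poly 0xA001, init 0xFFFF -- the same algorithm
    # as Message.gencrc above, operating on bytes instead of a str.
    crc = 0xFFFF
    for byte in data:
        crc ^= byte
        for _ in range(8):
            crc = (crc >> 1) ^ 0xA001 if crc & 1 else crc >> 1
    return crc

The message class packs this value big-endian ('>H') onto the end of the frame, so the receiver can recompute it over the address/function/length/data bytes and compare.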
d2d8645e1c704fd956df15481fe3dd38e84bd07b
Disable caching for the time being
server.py
server.py
import os import json import logging from datetime import timedelta from operator import itemgetter from itertools import groupby from collections import namedtuple import flask from arrow import Arrow import dateutil.tz as dateutil_tz from arrow import get as get_date from arrow.parser import ParserError from flask import request, redirect, url_for, render_template, abort, jsonify from via_website import get_dates logging.basicConfig(level=logging.INFO) app = flask.Flask(__name__) ONE_DAY = timedelta(days=1) AU_PERTH = dateutil_tz.gettz('Australia/Perth') VisitResult = namedtuple('VisitResult', 'visits,updated') try: auth = json.load(open('auth.json')) access_token = '{app_id}|{app_secret}'.format_map(auth) except FileNotFoundError: access_token = os.environ.get('ACCESS_TOKEN') def get_for(date): date = date.replace(tzinfo='utc') for week in get_dates(access_token): for day, visits in week: if day == date: return VisitResult( visits, # visits for day Arrow.now(AU_PERTH) # when data was retrieved ) return VisitResult([], Arrow.now(AU_PERTH)) def cached_get_for(date): if not hasattr(cached_get_for, '_cache'): cached_get_for._cache = {} if date in cached_get_for._cache: data, timestamp = cached_get_for._cache[date] if (Arrow.now() - timestamp) < timedelta(hours=1): return data cached_get_for._cache[date] = (get_for(date), Arrow.now()) return cached_get_for._cache[date][0] def make_link(date, name='index'): return url_for(name, date=date.isoformat()) def get_date_from_request(): date = request.args.get('date') if date is not None: try: date = get_date(date) except (ValueError, TypeError, ParserError): pass if date == 'today': date = Arrow.now(AU_PERTH) if date is not None: try: date = date.floor('day') except AttributeError: # was invalid and stayed a string date = None return date def get_visits_for_date(date): res = cached_get_for(date) if not res.visits: return VisitResult({}, res.updated) visits, updated = res visits = sorted(res.visits, key=itemgetter(0)) visits = groupby(visits, key=itemgetter(0)) visits = {k: list(map(itemgetter(1), v)) for k, v in visits} return VisitResult(visits, updated) @app.errorhandler(400) def custom400(error): return jsonify({ 'error': error.description, 'status': 1 }) @app.route('/index.json') def index_json(): date = get_date_from_request() if date is None: return abort(400, 'Invalid date provided') visits, updated = get_visits_for_date(date) return jsonify({ 'date': date.isoformat(), 'updated': updated.isoformat(), 'pagination': { "next": make_link(date + ONE_DAY, ".index_json"), "prev": make_link(date - ONE_DAY, ".index_json"), }, 'visits': visits, 'status': 0 }) @app.route('/') def index(): date = get_date_from_request() if date is None: # fill in missing values with defaults return redirect(url_for('.index', date='today')) visits, updated = get_visits_for_date(date) visits = sorted(visits.items()) is_today = date.date() == Arrow.now(AU_PERTH).date() return render_template( 'index.html', date=date, visits=visits, updated=updated, is_today=is_today, next_page=make_link(date + ONE_DAY), prev_page=make_link(date - ONE_DAY) ) @app.route('/about') def about(): return render_template('about.html') if __name__ == '__main__': app.debug = True app.run(host='0.0.0.0', port=int(os.environ.get('PORT', 5000)))
Python
0
@@ -2157,16 +2157,18 @@ te):%0A + # res = c @@ -2183,24 +2183,48 @@ t_for(date)%0A + res = get_for(date)%0A if not r
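The record's cached_get_for is a hand-rolled one-hour memoizer stored on a function attribute (the commit itself simply bypasses it). A common alternative idiom is a time-bucketed functools.lru_cache; a sketch, assuming positional, hashable arguments:

import time
from functools import lru_cache

def ttl_cache(seconds):
    # Round the clock into fixed-width buckets so lru_cache keys
    # expire naturally when the bucket rolls over.
    def decorator(fn):
        @lru_cache(maxsize=128)
        def bucketed(bucket, *args):
            return fn(*args)
        def wrapper(*args):
            return bucketed(round(time.time() / seconds), *args)
        return wrapper
    return decorator

get_for could then be wrapped as @ttl_cache(3600), giving the same one-hour refresh behavior without the explicit timestamp bookkeeping.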
0f518f87e7e2af5d93481704c2430255373a4965
fix a typo.
governor.py
governor.py
#!/usr/bin/env python import logging import os import sys import time import yaml from helpers.api import RestApiServer from helpers.etcd import Etcd from helpers.postgresql import Postgresql from helpers.ha import Ha from helpers.utils import setup_signal_handlers, sleep import helpers.aws import AWSConnection class Governor: def __init__(self, config): self.nap_time = config['loop_wait'] self.etcd = Etcd(config['etcd']) self.aws = AWSConnection(config) self.postgresql = Postgresql(config['postgresql'], self.aws.on_role_change) self.ha = Ha(self.postgresql, self.etcd) host, port = config['restapi']['listen'].split(':') self.api = RestApiServer(self, config['restapi']) self.next_run = time.time() def touch_member(self, ttl=None): connection_string = self.postgresql.connection_string + '?application_name=' + self.api.connection_string return self.etcd.touch_member(self.postgresql.name, connection_string, ttl) def initialize(self): # wait for etcd to be available while not self.touch_member(): logging.info('waiting on etcd') sleep(5) # is data directory empty? if self.postgresql.data_directory_empty(): # racing to initialize if self.etcd.race('/initialize', self.postgresql.name): self.postgresql.initialize() self.etcd.take_leader(self.postgresql.name) self.postgresql.start() self.postgresql.create_replication_user() self.postgresql.create_connection_users() else: while True: leader = self.etcd.current_leader() if leader and self.postgresql.sync_from_leader(leader): self.postgresql.write_recovery_conf(leader) self.postgresql.start() break sleep(5) elif self.postgresql.is_running(): self.postgresql.load_replication_slots() def schedule_next_run(self): self.next_run += self.nap_time current_time = time.time() nap_time = self.next_run - current_time if nap_time <= 0: self.next_run = current_time else: sleep(nap_time) def run(self): self.api.start() self.next_run = time.time() while True: self.touch_member() logging.info(self.ha.run_cycle()) self.schedule_next_run() def main(): logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', level=logging.INFO) setup_signal_handlers() if len(sys.argv) < 2 or not os.path.isfile(sys.argv[1]): print('Usage: {} config.yml'.format(sys.argv[0])) return with open(sys.argv[1], 'r') as f: config = yaml.load(f) governor = Governor(config) try: governor.initialize() governor.run() except KeyboardInterrupt: pass finally: governor.touch_member(300) # schedule member removal governor.postgresql.stop() governor.etcd.delete_leader(governor.postgresql.name) if __name__ == '__main__': main()
Python
0.03285
@@ -267,22 +267,20 @@ , sleep%0A -import +from helpers
b4c73590db66c6731067995881b70dc4883ff65d
The previous fix was wrong
src/tn/plonemailing/message_factory.py
src/tn/plonemailing/message_factory.py
from email import message from email import utils from five import grok from tn.plonemailing import interfaces from zope.component import getMultiAdapter import quopri import email.header import os import random import re import time class BaseMessageFactory(object): def __init__(self, context, request, newsletter, subscriber): self.context = context self.request = request self.newsletter = newsletter self.subscriber = subscriber def __call__(self, content): msg = build_message_root(self.newsletter, self.subscriber) self._configure_message(msg, content) add_message_id(msg) return message class TextMessageFactory(grok.MultiAdapter, BaseMessageFactory): grok.adapts(None, None, interfaces.INewsletter, interfaces.ISubscriber) grok.implements(interfaces.IMessageFactory) grok.name(u'text') def _configure_message(self, msg, content): configure_text_message(self.context, self.request, self.newsletter, msg, content) class MultipartMessageFactory(grok.MultiAdapter, BaseMessageFactory): grok.adapts(None, None, interfaces.INewsletter, interfaces.ISubscriber) grok.implements(interfaces.IMessageFactory) grok.name(u'__multipart__') def _configure_message(self, msg, content): configure_multipart_message(self.context, self.request, self.newsletter, self.subscriber, msg, content) @grok.adapter(None, None, interfaces.INewsletter, interfaces.ISubscriber, name=u'html') @grok.implementer(interfaces.IMessageFactory) def HTMLMessageFactory(context, request, newsletter, subscriber): return getMultiAdapter( (context, request, newsletter, subscriber), interfaces.IMessageFactory, name=u'__multipart__' ) def build_message_root(newsletter, subscriber): msg = message.Message() add_address_header(msg, 'From', newsletter.author_name, newsletter.author_address) msg['To'] = utils.formataddr((subscriber.name, subscriber.email)) msg['Date'] = utils.formatdate() if newsletter.subject: msg['Subject'] = email.header.make_header( [(newsletter.subject, 'utf-8')], header_name='Subject' ) if subscriber.removal_url: msg['List-Unsubscribe'] = '<%s>' % subscriber.removal_url msg['Mime-Version'] = '1.0' if (newsletter.reply_to_address and newsletter.reply_to_address != newsletter.author_address): add_address_header(msg, 'Reply-To', newsletter.reply_to_name, newsletter.reply_to_address) if (newsletter.sender_address and newsletter.sender_address != newsletter.author_address): add_address_header(msg, 'Sender', newsletter.sender_name, newsletter.sender_address) return msg def configure_multipart_message(context, request, newsletter, subscriber, message_root, content): message_root['Content-Type'] = 'multipart/alternative' message_root['Content-Transfer-Encoding'] = '7bit' text_part = make_part(context, request, newsletter, content, format=u'text') extended_part = make_part(context, request, newsletter, content, format=subscriber.format) message_root.attach(text_part) message_root.attach(extended_part) def configure_text_message(context, request, newsletter, msg, content): conversion = getMultiAdapter((context, request, newsletter), interfaces.IContentConversion, name=u'text') set_part_payload(msg, conversion.content_type, conversion.apply(content)) def make_part(context, request, newsletter, content, format): conversion = getMultiAdapter((context, request, newsletter), interfaces.IContentConversion, name=format) part = message.Message() set_part_payload(part, conversion.content_type, conversion.apply(content)) part['Content-Disposition'] = 'inline' return part def set_part_payload(part, content_type, content): 
part.add_header('Content-Type', content_type, charset='utf-8') part['Content-Transfer-Encoding'] = 'quoted-printable' part.set_payload(quopri.encodestring(content.encode('utf-8'))) def add_address_header(part, header_name, name, address): parts = [] if name: parts.append((name, 'utf-8')) parts.append(("<%s>" % address, 'ascii')) part[header_name] = unicode( email.header.make_header(parts, header_name=header_name) ) domain_re = re.compile(r'.*@([^@]+)$') def add_message_id(message): randmax = 0x7fffffff sender_address = extract_sender(message) domain = domain_re.search(sender_address).group(1) message['Message-Id'] = "<%s.%d.%d@%s>" % (time.strftime('%Y%m%d%H%M%S'), os.getpid(), random.randrange(0, randmax), domain) def extract_sender(message): addresses = [] if message['Sender']: addresses.extend(extract_addresses(message['Sender'])) if message['From']: addresses.extend(extract_addresses(message['From'])) if not addresses: raise ValueError('No valid sender found in message.') return addresses[0] def extract_addresses(header): if isinstance(header, email.header.Header): header = unicode(header) parts = [part.strip() for part in unicode(header).split(',')] return [addr for name, addr in utils.getaddresses(parts)]
Python
0.999899
@@ -663,22 +663,18 @@ return m -essage +sg %0A%0A%0Aclass @@ -907,34 +907,38 @@ _message(self, m -sg +essage , content):%0A @@ -1095,34 +1095,38 @@ m -sg +essage ,%0A @@ -1414,18 +1414,22 @@ (self, m -sg +essage , conten @@ -1675,18 +1675,22 @@ m -sg +essage ,%0A
02abfa1d68239b2b2d20420ecae77c2388a96416
use separate variable for type purposes
mythril/laser/smt/independence_solver.py
mythril/laser/smt/independence_solver.py
import z3 from mythril.laser.smt.model import Model from mythril.laser.smt.bool import Bool from typing import Set, Tuple, Dict, List, cast def _get_expr_variables(expression: z3.ExprRef) -> List[z3.ExprRef]: """ Gets the variables that make up the current expression :param expression: :return: """ result = [] if not expression.children() and not isinstance(expression, z3.BitVecNumRef): result.append(expression) for child in expression.children(): c_children = _get_expr_variables(child) result.extend(c_children) return result class DependenceBucket: """ Bucket object to contain a set of conditions that are dependent on each other """ def __init__(self, variables=None, conditions=None): """ Initializes a DependenceBucket object :param variables: Variables contained in the conditions :param conditions: The conditions that are dependent on each other """ self.variables = variables or [] # type: List[z3.ExprRef] self.conditions = conditions or [] # type: List[z3.ExprRef] class DependenceMap: """ DependenceMap object that maintains a set of dependence buckets, used to separate independent smt queries """ def __init__(self): """ Initializes a DependenceMap object """ self.buckets = [] # type: List[DependenceBucket] self.variable_map = {} # type: Dict[str, DependenceBucket] def add_condition(self, condition: z3.BoolRef) -> None: """ Add condition to the dependence map :param condition: The condition that is to be added to the dependence map """ variables = set(_get_expr_variables(condition)) relevant_buckets = set() for variable in variables: try: bucket = self.variable_map[str(variable)] relevant_buckets.add(bucket) except KeyError: continue new_bucket = DependenceBucket(variables, [condition]) self.buckets.append(new_bucket) if relevant_buckets: # Merge buckets, and rewrite variable map accordingly relevant_buckets.add(new_bucket) new_bucket = self._merge_buckets(relevant_buckets) for variable in variables: self.variable_map[str(variable)] = new_bucket def _merge_buckets(self, bucket_list: Set[DependenceBucket]) -> DependenceBucket: """ Merges the buckets in bucket list """ variables = [] # type: List[str] conditions = [] # type: List[z3.BoolRef] for bucket in bucket_list: self.buckets.remove(bucket) variables += bucket.variables conditions += bucket.conditions new_bucket = DependenceBucket(variables, conditions) self.buckets.append(new_bucket) return new_bucket class IndependenceSolver: """An SMT solver object that uses independence optimization""" def __init__(self): """""" self.raw = z3.Solver() self.constraints = [] self.models = [] def set_timeout(self, timeout: int) -> None: """Sets the timeout that will be used by this solver, timeout is in milliseconds. :param timeout: """ self.raw.set(timeout=timeout) def add(self, *constraints: Tuple[Bool]) -> None: """Adds the constraints to this solver. :param constraints: constraints to add """ constraints = [c.raw for c in cast(Tuple[Bool], constraints)] self.constraints.extend(constraints) def append(self, *constraints: Tuple[Bool]) -> None: """Adds the constraints to this solver. :param constraints: constraints to add """ constraints = [c.raw for c in cast(Tuple[Bool], constraints)] self.constraints.extend(constraints) def check(self) -> z3.CheckSatResult: """Returns z3 smt check result. 
""" dependence_map = DependenceMap() for constraint in self.constraints: dependence_map.add_condition(constraint) self.models = [] for bucket in dependence_map.buckets: self.raw.reset() self.raw.append(*bucket.conditions) check_result = self.raw.check() if check_result == z3.sat: self.models.append(self.raw.model()) else: return check_result return z3.sat def model(self) -> Model: """Returns z3 model for a solution. """ return Model(self.models) def reset(self) -> None: """Reset this solver.""" self.constraints = [] def pop(self, num) -> None: """Pop num constraints from this solver. """ self.constraints.pop(num)
Python
0
@@ -3486,32 +3486,36 @@ %22%22%22%0A +raw_ constraints = %5Bc @@ -3505,32 +3505,45 @@ _constraints = %5B +%0A c.raw for c in c @@ -3563,32 +3563,67 @@ l%5D, constraints) +%0A %5D # type: List%5Bz3.BoolRef %5D%0A self.c @@ -3632,32 +3632,36 @@ straints.extend( +raw_ constraints)%0A%0A @@ -3831,16 +3831,20 @@ +raw_ constrai @@ -3850,16 +3850,29 @@ ints = %5B +%0A c.raw fo @@ -3908,16 +3908,51 @@ traints) +%0A %5D # type: List%5Bz3.BoolRef %5D%0A @@ -3977,16 +3977,20 @@ .extend( +raw_ constrai
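The record's DependenceMap exists so that constraints over disjoint variables can be checked as separate, smaller Z3 queries instead of one monolithic one. A toy illustration of the payoff (assumes the z3-solver package):

import z3

# Two constraints over disjoint variables land in different buckets,
# so each can be discharged by its own small solver query.
x, y = z3.BitVec("x", 8), z3.BitVec("y", 8)
s1, s2 = z3.Solver(), z3.Solver()
s1.add(x > 3)   # bucket 1: mentions only x
s2.add(y < 7)   # bucket 2: mentions only y
assert s1.check() == z3.sat and s2.check() == z3.sat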
3453e903a50859c596c0997e87b62fb0f54d6d56
add inchi_lock.cc
binding.gyp
binding.gyp
{ "target_defaults": { "conditions": [ ['OS=="win"', { }, { 'cflags' : [ "-fexceptions" ], 'cflags_cc' : [ "-fexceptions" ] } ] ], "configurations": { "Release": { 'msvs_settings': { 'VCCLCompilerTool': { 'WholeProgramOptimization': 'true', # /GL, whole program optimization, needed for LTCG 'OmitFramePointers': 'true', 'EnableFunctionLevelLinking': 'true', 'EnableIntrinsicFunctions': 'true', 'RuntimeTypeInfo': 'false', 'ExceptionHandling': '1', 'AdditionalOptions': [ '/MP', '/EHsc' ] }, 'VCLibrarianTool': { 'AdditionalOptions': [ '/LTCG', # link time code generation ], }, 'VCLinkerTool': { 'LinkTimeCodeGeneration': 1, # link-time code generation 'OptimizeReferences': 2, # /OPT:REF 'EnableCOMDATFolding': 2, # /OPT:ICF 'LinkIncremental': 1, # disable incremental linking } } } } }, "targets": [ { "target_name": "inchi", "msvs_guid": "F1B917E2-75AB-A243-6D62-3C7938A1EF68", "include_dirs": [ "<!(node -e \"require('nan')\")" ], "dependencies": [ "deps/inchi/inchi.gyp:libINCHIAPI" ], "sources": [ "src/node-inchi.cc", "src/molecule.cc", "src/atom.cc", "src/molecule_wrap.cc", "src/molecule_native.cc", "src/inchi_atom.cc", "src/get_inchi.cc", "src/get_inchi_data.cc", "src/get_struct_from_inchi.cc", "src/get_struct_from_inchi_data.cc" ], "conditions": [ ['OS=="win"', { }, { 'cflags_cc' : [ "-fexceptions" ] } ] ], }, { "target_name": "test", "type": "executable", "sources": [ "src/test/TestMain.cc", "src/test/hello.cc", "src/test/test_molecule.cc", "src/test/test_inchi_atom.cc", "src/test/test_get_struct_from_inchi.cc", "src/molecule_native.cc", "src/get_inchi_data.cc", "src/get_struct_from_inchi_data.cc", "src/inchi_atom.cc" ], "include_dirs": [ ".", "src", "<!(node -e \"require('cppunitlite')\")", "<!(node -e \"require('nan')\")" ], "dependencies": [ "node_modules/cppunitlite/binding.gyp:CppUnitLite", "deps/inchi/inchi.gyp:libINCHIAPI" ], "conditions": [ ['OS=="win"', { }, { 'cflags_cc': [ '-fexceptions' ] } ] ], # sample unit test } ] }
Python
0.000001
@@ -1670,32 +1670,61 @@ from_inchi.cc%22,%0A + %22src/inchi_lock.cc%22,%0A %22src/get @@ -2285,32 +2285,61 @@ inchi_data.cc%22,%0A + %22src/inchi_lock.cc%22,%0A %22src/inc
a292e1fe8ec72355ce2bb3c1f99dd82d6f145438
Add path to homebrew-installed pkgconfig for Mac OS 10.8 (10.9 is symlinked to 10.8) #9
binding.gyp
binding.gyp
{ 'targets': [{ 'target_name': 'sharp', 'sources': ['src/sharp.cc'], 'libraries': [ '<!@(PKG_CONFIG_PATH="/usr/local/lib/pkgconfig" pkg-config --libs vips)', '<!@(PKG_CONFIG_PATH="/usr/lib/pkgconfig" pkg-config --libs vips)' ], 'include_dirs': [ '/usr/local/include/glib-2.0', '/usr/local/lib/glib-2.0/include', '/usr/include/glib-2.0', '/usr/lib/glib-2.0/include', '/usr/lib/x86_64-linux-gnu/glib-2.0/include' ], 'cflags': ['-fexceptions', '-pedantic', '-Wall', '-O3'], 'cflags_cc': ['-fexceptions', '-pedantic', '-Wall', '-O3'] }] }
Python
0.000001
@@ -130,19 +130,27 @@ r/local/ -l +L ib +rary/ENV /pkgconf @@ -155,64 +155,39 @@ nfig -%22 +/10.8:/usr/local/lib/ pkg -- config - --libs vips)',%0A '%3C!@(PKG_CONFIG_PATH=%22 +: /usr
169a60075d37f37b0ac55e7ce26ba62e87baf913
Add --tag options
bin/alloccli/tasks.py
bin/alloccli/tasks.py
"""alloccli subcommand for viewing a list of tasks.""" from alloc import alloc class tasks(alloc): """Print a list of tasks.""" # Setup the options that this cli can accept ops = [] ops.append(('' , 'help ', 'Show this help.')) ops.append(('' , 'csv=[WHEN] ', 'Return the results in CSV format. WHEN can be "auto",\n' '"never" or "always". If WHEN is omitted, assume "always".')) ops.append(('p:', 'project=ID|NAME', 'A project ID, or a fuzzy match for a project name.')) ops.append(('t:', 'task=ID|NAME ', 'A task ID, or a fuzzy match for a task name.')) ops.append(('s:', 'status=NAME ', 'A task\'s status.\n' '(eg: open pending eg: open pending_info. Default: open)')) ops.append(('y:', 'type=NAME ', 'A task\'s type, eg: Task eg: Fault Message')) ops.append(('a:', 'assignee=NAME ', 'A task\'s assignee, username or first and surname.\n' '(eg: "jon" eg: "all" eg: "NULL". Defaults to yourself.)')) ops.append(('m:', 'manager=NAME ', 'A task\'s manager, username or first and surname".')) ops.append(('c:', 'creator=NAME ', 'A task\'s creator, username or first and surname".')) ops.append(('o:', 'order=NAME ', 'The order the Tasks are displayed in.\n' 'Default: "-o Priority -o Type -o _Rate -o status" (underscore means reverse).')) ops.append(('f:', 'fields=LIST ', 'The list of fields you would like printed.\n' '(eg: -f all eg: -f taskID -f Status -f taskStatus -f Proj\\ Pri)')) # Specify some header and footer text for the help text help_text = "Usage: %s [OPTIONS]\n" help_text += __doc__ help_text += "\n\n%s\n\nIf called without arguments this program will display all tasks that are assigned to you." def run(self, command_list): """Execute subcommand.""" # Get the command line arguments into a dictionary o, remainder_ = self.get_args(command_list, self.ops, self.help_text) # Got this far, then authenticate self.authenticate() order = [] if o['order']: order = o['order'] # Get personID, either assignee or logged in user personID = [] if not o['assignee']: personID.append(self.get_my_personID()) elif o['assignee']: personID = self.person_to_personID([0 if x.lower()=='null' else x for x in o['assignee']]) managerID = [] if o['manager']: managerID = self.person_to_personID(o['manager']) creatorID = [] if o['creator']: creatorID = self.person_to_personID(o['creator']) # Setup options for the task search ops = {} ops["personID"] = personID ops["managerID"] = managerID ops["creatorID"] = creatorID if o['project']: ops["projectNameMatches"] = o['project'] ops["taskView"] = "prioritised" ops["showTimes"] = True o["status"] = o["status"] or "open" ops['taskStatus'] = o['status'] if o['type']: ops['taskTypeID'] = o['type'] # Get a taskID either passed via command line, or figured out from a task name if self.is_num(o['task']): ops["taskID"] = o['task'] if 'taskTypeID' in ops: del ops["taskTypeID"] if 'taskStatus' in ops: del ops["taskStatus"] if 'personID' in ops: del ops["personID"] elif o['task']: ops["taskName"] = o["task"] if not o['fields']: if not order: order = ["priorityLabel", "taskTypeID", "_rate", "taskStatusLabel"] fields = ["taskID", "taskTypeID", "taskStatusLabel", "priorityLabel", "timeExpected", "timeLimit", "timeActual", "rate", "projectName", "taskName"] else: fields = o["fields"] if 'timeBest' not in o['fields'] \ and 'timeWorst' not in o['fields'] \ and 'timeExpected' not in o['fields'] \ and 'timeLimit' not in o['fields'] \ and 'timeActual' not in o['fields']: del ops['showTimes'] if 'showTimes' not in ops: if 'timeWorst' in fields: fields.remove('timeWorst') if 'timeExpected' in fields: 
fields.remove('timeExpected') if 'timeLimit' in fields: fields.remove('timeLimit') if 'timeActual' in fields: fields.remove('timeActual') # Get list of tasks r = self.get_list("task", ops) if r: self.print_table("task", r, fields, order)
Python
0
@@ -526,32 +526,81 @@ roject name.'))%0A + ops.append(('g:', 'tag=TEXT ', 'A tag'))%0A ops.append(('t @@ -3100,16 +3100,56 @@ %5B'type'%5D +%0A if o%5B'tag'%5D: ops%5B'tags'%5D = o%5B'tag'%5D %0A%0A # @@ -3775,16 +3775,24 @@ askName%22 +, %22tags%22 %5D%0A el
2553c362d0fe52440a3ee3bc3a5603842c524baa
Modify corresponding bindings
binding.gyp
binding.gyp
{ "targets": [ { "variables": { "variables": { "conditions": [ [ "OS=='linux'", { "gcc":"<!(gcc --version 2>&1 | head -1 | sed -e 's/^.*(.*) \(.*\)\..*$/\\1/')" } , { "gcc":"" } ] ], "juliaBase":"<!(python tools/nj_config.py <(OS) find)", }, "version":"<!(python tools/nj_config.py <(OS) version)", "njLib":"<!(python tools/nj_config.py <(OS) cwd)/lib", "juliaBin":"<(juliaBase)/bin", "conditions": [ [ "gcc=='4.6'", { "std":"c++0x" } , { "std":"c++11" } ], [ "OS=='linux' and juliaBase=='/usr'", { "juliaLib":"<(juliaBase)/lib/x86_64-linux-gnu/julia" , "juliaInclude":"<(juliaBase)/include/julia" }, { "juliaLib":"<(juliaBase)/lib/julia" , "juliaInclude":"<(juliaBase)/include/julia" } ] ] }, "target_name": "nj", "sources": [ "src/Call.cpp", "src/Exception.cpp", "src/Expr.cpp", "src/Convert.cpp", "src/Immediate.cpp", "src/Import.cpp", "src/JMain.cpp", "src/JuliaExecEnv.cpp", "src/JuliaHandle.cpp", "src/Kernel.cpp", "src/NativeArray.cpp", "src/Script.cpp", "src/Trampoline.cpp", "src/Type.cpp", "src/Types.cpp", "src/Value.cpp", "src/Values.cpp", "src/debug.cpp", "src/error.cpp", "src/lvalue.cpp", "src/request.cpp", "src/rvalue.cpp", "src/util.cpp" ], "cflags!": [ "-fno-exceptions" ], "cflags": [ "-std=<(std)", ], "defines": [ '<(OS)', 'NJ_LIB="<(njLib)"', 'JULIA_LIB="<(juliaLib)"' ], "cflags_cc!": [ "-fno-exceptions" ], "include_dirs": [ "<(juliaInclude)" ], "link_settings": { "ldflags": [ "-L<(juliaLib)", "-Wl,-rpath,<(juliaLib)" ], "libraries": [ "-ljulia" ] }, "conditions": [ [ "OS=='mac'", { "xcode_settings": { "MACOSX_DEPLOYMENT_TARGET":"10.7", "GCC_ENABLE_CPP_EXCEPTIONS": "YES", "OTHER_CPLUSPLUSFLAGS": [ "-std=c++11", "-stdlib=libc++", ], "OTHER_LDFLAGS": [ "-stdlib=libc++", "-L<(juliaLib)", "-Wl,-rpath,<(juliaLib)", "-Wl,-flat_namespace" ] } } ], [ "OS=='win'", { "msvs_settings": { "VCCLCompilerTool": { "AdditionalOptions": [ "/EHa" ], "DisableSpecificWarnings": [ 4290, 4200 ] }, "VCLinkerTool": { "AdditionalLibraryDirectories": [ "<(juliaBin)" ] } } } ], [ "version=='0.10.x'", { "sources": [ "src/Callback-v10.cpp", "src/JRef-v10.cpp", "src/ScriptEncapsulated-v10.cpp", "src/dispatch-v10.cpp", "src/nj-v10.cpp" ] } ], [ "version!='0.10.x'", { "sources": [ "src/Callback-v11.cpp", "src/JRef-v11.cpp", "src/ScriptEncapsulated-v11.cpp", "src/dispatch-v11.cpp", "src/nj-v11.cpp" ] } ] ] } ] }
Python
0.000004
@@ -421,21 +421,22 @@ %22 -njLib +NJ_LIB %22:%22%3C!(py @@ -469,16 +469,15 @@ OS) -cwd)/ +nj_ lib +) %22,%0A @@ -1764,13 +1764,14 @@ =%22%3C( -njLib +NJ_LIB )%22',
88021aeb5e7c4d0f3a50333b3f77624ac718c03c
Use `ASM` mode on Linux in non-glibc environments
binding.gyp
binding.gyp
{ 'target_defaults': { 'default_configuration': 'Release', 'configurations': { 'Release': { 'cflags': [ '-O3' ], 'xcode_settings': { 'GCC_OPTIMIZATION_LEVEL': '3', 'GCC_GENERATE_DEBUGGING_SYMBOLS': 'NO', }, 'msvs_settings': { 'VCCLCompilerTool': { 'Optimization': 3, 'FavorSizeOrSpeed': 1, }, }, } }, }, 'targets': [ { 'target_name': 'fibers', 'sources': [ 'src/fibers.cc', 'src/coroutine.cc', 'src/libcoro/coro.c', # Rebuild on header changes 'src/coroutine.h', 'src/libcoro/coro.h', ], 'cflags!': ['-ansi'], 'conditions': [ ['OS == "win"', {'defines': ['CORO_FIBER', 'WINDOWS']}, # else { 'defines': ['USE_CORO', 'CORO_GUARDPAGES=1'], 'ldflags': ['-pthread'], } ], ['OS == "linux" or OS == "solaris" or OS == "sunos" or OS == "freebsd" or OS == "aix"', {'defines': ['CORO_UCONTEXT']}], ['OS == "mac"', {'defines': ['CORO_SJLJ']}], ['OS == "openbsd"', {'defines': ['CORO_ASM']}], ['target_arch == "arm"', { # There's been problems getting real fibers working on arm 'defines': ['CORO_PTHREAD'], 'defines!': ['CORO_UCONTEXT', 'CORO_SJLJ', 'CORO_ASM'], }, ], ], }, ], }
Python
0.000002
@@ -826,20 +826,330 @@ %22linux%22 - or +',%0A%09%09%09%09%09%7B%0A%09%09%09%09%09%09'variables': %7B%0A%09%09%09%09%09%09%09'USE_GLIBC': '%3C!(ldd --version 2%3E&1 %7C head -n 1 %7C grep -i %22glibc%22 %7C wc -l)',%0A%09%09%09%09%09%09%7D,%0A%09%09%09%09%09%09'conditions': %5B%0A%09%09%09%09%09%09%09%5B'%3C(USE_GLIBC) == 1',%0A%09%09%09%09%09%09%09%09%7B'defines': %5B'CORO_UCONTEXT'%5D,%7D,%0A%09%09%09%09%09%09%09%09# no use glibc%0A%09%09%09%09%09%09%09%09%7B'defines': %5B'CORO_ASM'%5D,%7D%0A%09%09%09%09%09%09%09%5D,%0A%09%09%09%09%09%09%5D,%0A%09%09%09%09%09%7D,%0A%09%09%09%09%5D,%0A%09%09%09%09%5B' OS == %22s
e68f66043b2770aa782d2d7d293de281527d1891
Test Travis
binding.gyp
binding.gyp
{ "targets": [ { "target_name": "addon", "sources": [ "src/addon.cc", "src/object.cc", "src/async.cc", "src/engine.cc", "src/results.cc" ], #"cflags": [ "-Werror", "-Wall", "-Wextra", "-Wpedantic", "-Wunused-parameter", "-funroll-loops", "-Ofast" ],#targets all files, c and c++ #"cflags_c": [ "-hello" ],# does this do anything? #"cflags_cc": [ "-Werror", "-Wall", "-Wextra", "-Wpedantic", "-Wunused-parameter", "-funroll-loops", "-Ofast" ],#target c++ only #"cflags": [ "-O2" ], "cflags!": [ "-fno-exceptions", "-Wno-unused-parameter" ], "cflags_cc!": [ "-fno-exceptions", "-Wno-unused-parameter" ], "include_dirs": ["<!@(node -p \"require('node-addon-api').include\")"], "dependencies": ["<!(node -p \"require('node-addon-api').gyp\")"], "defines": [ "NAPI_CPP_EXCEPTIONS", "NODE_ADDON_API_DISABLE_DEPRECATED" ], "conditions": [ ["OS==\"win\"", { "msvs_settings": { "VCCLCompilerTool": { "ExceptionHandling": 1 } } }], ["OS==\"mac\"", { "xcode_settings": { "CLANG_CXX_LANGUAGE_STANDARD": 'c++14', "CLANG_CXX_LIBRARY": "libc++", "GCC_ENABLE_CPP_EXCEPTIONS": "YES", "MACOSX_DEPLOYMENT_TARGET": "10.7", "GCC_ENABLE_CPP_RTTI" : "YES", # options to test: "fast", "3", "2", "1", "0", "", "s" #"GCC_OPTIMIZATION_LEVEL": "1", # only passed to C files "OTHER_CFLAGS" : [], # only passed to C++ files "OTHER_CPLUSPLUSFLAGS": [ "-Werror", "-Wextra", "-Wpedantic", "-Wunused-parameter", #"-Weverything" #"-fdiagnostics-show-hotness", #"-fsave-optimization-record" ], } }] ] } ] }
Python
0.000002
@@ -600,32 +600,39 @@ nused-parameter%22 +, %22-O3%22 %5D,%0A %22cflags
92014a41831b6096e50103724e64a8991b68877e
Update the version to 1.0.0
src/pymage_downloader.py
src/pymage_downloader.py
#!/usr/bin/python3 import glob import logging import os import sys from argparse import ArgumentParser import praw import requests from exceptions.pymage_exceptions import NotAbleToDownloadException from parsers.parser_factory import ParserFactory LOGGER = logging.getLogger(__name__) VERSION = "0.0.1" def main(): args = _parse_args() configure_logging(args.is_debug) prepare_download_folder(args.folder) if args.user: r = praw.Reddit(username=args.user, password=args.password) else: r = praw.Reddit() start_from = args.start_from for page in range(0, args.page_limit): LOGGER.info("Starting getting posts from page %s" % start_from) submissions = get_submissions(r, args, start_from) process_posts(submissions, args) next_page = submissions.params["after"] # We might get the same next_page as the start_from if the next listing # is less than 25, the default posts per pages coming from PRAW if not next_page or next_page is start_from: LOGGER.info("No more posts to fetch.") break start_from = next_page def get_submissions(reddit, args, start_from = None): params = {"after": start_from} if args.user: if args.should_get_upvoted: submissions = reddit.redditor(args.user).upvoted(limit=args.limit, params=params) else: submissions = reddit.redditor(args.user).saved(limit=args.limit, params=params) else: subreddit = reddit.subreddit(args.subreddit, params=params) if args.type == "controversial": submissions = subreddit.controversial(time_filter=args.period, limit=args.limit, params=params) elif args.type == "new": submissions = subreddit.new(limit=args.limit, params=params) elif args.type == "top": submissions = subreddit.top(time_filter=args.period, limit=args.limit, params=params) else: submissions = subreddit.hot(limit=args.limit, params=params) return submissions def process_posts(submissions, args): for post in submissions: if not isinstance(post, praw.models.Submission) or post.is_self: LOGGER.info("Skipping post %s as it is not a submission or is a self post..." % post.id) continue LOGGER.debug("Post domain: %s" % post.domain) pattern_to_search = os.path.join(args.folder, ("reddit_*_%s_*" % post.id)) LOGGER.debug("Pattern to search: %s" % pattern_to_search) if not args.should_overwrite and len(glob.glob(pattern_to_search)) > 0: LOGGER.info("Skipping post %s, we already have its images..." % post.id) continue parser = ParserFactory.get_parser(post.url) if not parser: LOGGER.warning("The domain in %s is not supported..." % post.url) continue try: images = parser.get_images(post) download_images(images, args.folder) except NotAbleToDownloadException as e: LOGGER.error(e) LOGGER.info("The next post ID is: %s" % submissions.params['after']) def download_images(images, folder): for i in images: LOGGER.info('Downloading %s...' % i.url) try: response = requests.get(i.url) except requests.exceptions.ConnectionError as ex: LOGGER.error(ex) raise NotAbleToDownloadException("Couldn't connect to %s, because of %s" % (i.url, str(ex))) if response.status_code == 200: file_name = os.path.join(folder, i.local_file_name) LOGGER.info('Saving %s...' 
% file_name) with open(file_name, 'wb') as fo: for chunk in response.iter_content(4096): fo.write(chunk) response.close() else: response.close() raise NotAbleToDownloadException( "Failed to download, we got an HTTP %i error for %s" % (response.status_code, i.url)) def prepare_download_folder(folder): if not os.path.exists(folder): LOGGER.debug("Creating folder %s" % folder) os.makedirs(folder) def _parse_args(): """Parse args with argparse :returns: args """ parser = ArgumentParser(description="Pymage Downloader %s - Download pics from subreddit posts" % VERSION) parser.add_argument('--subreddit', '-s', default='pics', # nargs='+', #TODO implement functionality for more than one subreddit help="Name of the subreddit.") parser.add_argument('--period', '-p', default='week', choices=['hour', 'day', 'week', 'month', 'year', 'all'], help="[h]our, [d]ay, [w]eek, [m]onth, [y]ear, or [a]ll. Period " "of time from which you want images. Only works for top and controversial") parser.add_argument('--type', '-t', default='hot', choices=['hot', 'top', 'new', 'controversial'], help="[hot], [top], [new], [controversial]. Type of listing of posts " "in a subreddit.") parser.add_argument('--limit', '-l', metavar='N', type=int, default=25, help="Maximum URL limit per subreddit.") parser.add_argument('--destination', '-d', dest='folder', default='reddit_pics', help="Defines a download folder.") parser.add_argument("--overwrite", "-o", dest="should_overwrite", action="store_true", help="Specifies if files should be overwritten if they were already downloaded.") parser.add_argument("--debug", dest="is_debug", action="store_true", help="Activates debug mode.") parser.add_argument("--user", "-u", dest="user", help="Specifies the user name. This overrides the subreddit option.") parser.add_argument("--pass", "-w", dest="password", help="Specifies the user name. Required if '-u' is specified.") parser.add_argument("--upvoted", dest="should_get_upvoted", action="store_true", help="Specifies if the upvoted posts of a user should be fetched. Otherwise, get the saved " "ones.") parser.add_argument('--page-limit', '-pl', dest="page_limit", metavar='N', type=int, default=4, help="Maximum amount of pages to get.") parser.add_argument('--start-from', '-sf', dest="start_from", metavar='ID', help="Post ID from which to get a listing.") args = parser.parse_args() if args.user and not args.password: parser.error("A user was specified but a password was not, please provide complete credentials.") if args.start_from and not args.start_from.startswith("t3_"): args.start_from = "t3_" + args.start_from return args def configure_logging(is_debug=False): log_format = "%(asctime)s [%(name)s] [%(levelname)s] %(message)s" logging.basicConfig(format=log_format, filename='pymage.log', level=logging.DEBUG if is_debug else logging.INFO) console_handler = logging.StreamHandler(sys.stdout) console_handler.setFormatter(logging.Formatter(log_format)) console_handler.setLevel(logging.DEBUG) LOGGER.addHandler(console_handler) LOGGER.info("******* Pymage Downloader *******") LOGGER.debug("Ready to DEBUG!") if __name__ == '__main__': main()
Python
0.000017
@@ -297,13 +297,13 @@ = %22 +1. 0.0 -.1 %22%0A%0A%0A
db1ea244ad057934d63ac0537085a40ee5122299
Remove unused ctypes import
convertbng/util.py
convertbng/util.py
# -*- coding: utf-8 -*- """ util.py Created by Stephan Hügel on 2015-06-22 This file is part of convertbng. The MIT License (MIT) Copyright (c) 2015 Stephan Hügel Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from ctypes import cdll, c_uint32, c_float, c_double, Structure, c_void_p, cast, c_size_t, POINTER from sys import platform from array import array import numpy as np import os if platform == "darwin": ext = "dylib" else: ext = "so" __author__ = u"Stephan Hügel" __version__ = "0.2.2" file_path = os.path.dirname(__file__) lib = cdll.LoadLibrary(os.path.join(file_path, 'liblonlat_bng.' + ext)) class _BNG_FFIArray(Structure): """ Convert sequence of floats to a C-compatible void array """ _fields_ = [("data", c_void_p), ("len", c_size_t)] @classmethod def from_param(cls, seq): """ Allow implicit conversions from a sequence of 32-bit floats.""" return seq if isinstance(seq, cls) else cls(seq) def __init__(self, seq, data_type = c_float): """ Convert sequence of values into array, then ctypes Structure Rather than checking types (bad), we just try to blam seq into a ctypes object using from_buffer. 
If that doesn't work, we try successively more conservative approaches: numpy array -> array.array -> read-only buffer -> CPython iterable """ try: len(seq) except TypeError: # we've got an iterator or a generator, so consume it seq = array('f', seq) array_type = data_type * len(seq) try: raw_seq = array_type.from_buffer(seq.astype(np.float32)) except (TypeError, AttributeError): try: raw_seq = array_type.from_buffer_copy(seq.astype(np.float32)) except (TypeError, AttributeError): # it's a list or a tuple raw_seq = array_type.from_buffer(array('f', seq)) self.data = cast(raw_seq, c_void_p) self.len = len(seq) class _BNG_RESTuple(Structure): """ Container for returned FFI BNG data """ _fields_ = [("e", _BNG_FFIArray), ("n", _BNG_FFIArray)] def _bng_void_array_to_tuple_list(restuple, _func, _args): """ Convert the lon, lat --> BNG FFI result to Python data structures """ eastings = POINTER(c_uint32 * restuple.e.len).from_buffer_copy(restuple.e)[0] northings = POINTER(c_uint32 * restuple.n.len).from_buffer_copy(restuple.n)[0] res_list = [list(eastings), list(northings)] drop_bng_array(restuple.e, restuple.n) return res_list class _LONLAT_FFIArray(Structure): """ convert sequence of ints to a C-compatible void array """ _fields_ = [("data", c_void_p), ("len", c_size_t)] @classmethod def from_param(cls, seq): """ Allow implicit conversions from a sequence of 32-bit unsigned ints """ return seq if isinstance(seq, cls) else cls(seq) def __init__(self, seq, data_type = c_uint32): """ Convert sequence of values into array, then ctypes Structure Rather than checking types (bad), we just try to blam seq into a ctypes object using from_buffer. If that doesn't work, we try successively more conservative approaches: numpy array -> array.array -> read-only buffer -> CPython iterable """ try: len(seq) except TypeError: # we've got an iterator or a generator, so consume it seq = array('f', seq) array_type = data_type * len(seq) try: raw_seq = array_type.from_buffer(seq.astype(np.uint32)) except (TypeError, AttributeError): try: raw_seq = array_type.from_buffer_copy(seq.astype(np.uint32)) except (TypeError, AttributeError): # it's a list or a tuple raw_seq = array_type.from_buffer(array('i', seq)) self.data = cast(raw_seq, c_void_p) self.len = len(seq) class _LONLAT_RESTuple(Structure): """ Container for returned FFI lon, lat data """ _fields_ = [("lon", _LONLAT_FFIArray), ("lat", _LONLAT_FFIArray)] def _lonlat_void_array_to_tuple_list(restuple, _func, _args): """ Convert the BNG --> lon, lat result to Python data structures """ lons = POINTER(c_float * restuple.lon.len).from_buffer_copy(restuple.lon)[0] lats = POINTER(c_float * restuple.lat.len).from_buffer_copy(restuple.lat)[0] res_list = [list(lons), list(lats)] drop_ll_array(restuple.lon, restuple.lat) return res_list # Multi-threaded FFI functions convert_bng = lib.convert_to_bng_threaded convert_bng.argtypes = (_BNG_FFIArray, _BNG_FFIArray) convert_bng.restype = _BNG_RESTuple convert_bng.errcheck = _bng_void_array_to_tuple_list convert_lonlat = lib.convert_to_lonlat_threaded convert_lonlat.argtypes = (_LONLAT_FFIArray, _LONLAT_FFIArray) convert_lonlat.restype = _LONLAT_RESTuple convert_lonlat.errcheck = _lonlat_void_array_to_tuple_list # Free FFI-allocated memory drop_bng_array = lib.drop_int_array drop_bng_array.argtypes = (_BNG_FFIArray, _BNG_FFIArray) drop_bng_array.restype = None drop_ll_array = lib.drop_float_array drop_ll_array.argtypes = (_LONLAT_FFIArray, _LONLAT_FFIArray) drop_ll_array.restype = None # The type checks are not exhaustive. 
I know. def convertbng(lons, lats): """ Multi-threaded lon, lat --> BNG conversion Returns a list of two lists containing Easting and Northing integers (longs), respectively """ if isinstance(lons, float): lons = [lons] lats = [lats] return convert_bng(lons, lats) def convertlonlat(eastings, northings): """ Multi-threaded BNG --> lon, lat conversion Returns a list of two lists containing Longitude and Latitude floats, respectively """ if isinstance(eastings, (int, long)): eastings = [eastings] northings = [northings] return convert_lonlat(eastings, northings)
Python
0
@@ -1236,18 +1236,8 @@ oat, - c_double, Str
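Decoded, the diff drops only ` c_double,` from the long ctypes import line; nothing else in the module references it. One mechanical way to spot such dead imports is a small `ast` walk — a sketch of the idea only, whereas real linters such as pyflakes handle scopes, `__all__`, and re-exports properly:

import ast

def unused_imports(source: str) -> list[str]:
    # Collect every imported name, then every name actually referenced.
    tree = ast.parse(source)
    imported, used = set(), set()
    for node in ast.walk(tree):
        if isinstance(node, ast.ImportFrom):
            imported.update(a.asname or a.name for a in node.names)
        elif isinstance(node, ast.Import):
            imported.update((a.asname or a.name).split(".")[0] for a in node.names)
        elif isinstance(node, ast.Name):
            used.add(node.id)
    return sorted(imported - used)

print(unused_imports("from ctypes import c_float, c_double\nx = c_float(1.0)"))
# -> ['c_double']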
6a6fd6a23b811421e68640d78f655ac2eb16f584
Add DBus lib
binding.gyp
binding.gyp
{ 'variables': { 'rm' : '<!(node -e "require(\'addon-tools-raub\').rm()")', 'cp' : '<!(node -e "require(\'addon-tools-raub\').cp()")', 'mkdir' : '<!(node -e "require(\'addon-tools-raub\').mkdir()")', 'binary' : '<!(node -e "require(\'addon-tools-raub\').bin()")', 'qmlui_include' : '<!(node -e "require(\'deps-qmlui-raub\').include()")', 'qmlui_bin' : '<!(node -e "require(\'deps-qmlui-raub\').bin()")', 'qt_core_bin' : '<!(node -e "require(\'deps-qt-qml-raub\').core.bin()")', 'qt_gui_bin' : '<!(node -e "require(\'deps-qt-qml-raub\').gui.bin()")', 'qt_qml_bin' : '<!(node -e "require(\'deps-qt-qml-raub\').bin()")', }, 'targets': [ { 'target_name' : 'qml', 'sources' : [ 'cpp/bindings.cpp', 'cpp/view.cpp' ], 'libraries' : [ '-lqmlui' ], 'include_dirs' : [ '<!@(node -e "require(\'addon-tools-raub\').include()")', '<(qmlui_include)', ], 'library_dirs' : [ '<(qmlui_bin)' ], 'conditions' : [ [ 'OS=="linux" or OS=="mac"', { 'libraries': [ '-Wl,-rpath <(qmlui_bin)', '-Wl,-rpath <(qt_core_bin)', '-Wl,-rpath <(qt_gui_bin)', '-Wl,-rpath <(qt_qml_bin)', '<(qt_core_bin)/libQt5Core.so.5', '<(qt_core_bin)/libQt5Network.so.5', '<(qt_core_bin)/libicui18n.so.56', '<(qt_core_bin)/libicuuc.so.56', '<(qt_core_bin)/libicudata.so.56', '<(qt_core_bin)/libicuio.so.56', '<(qt_core_bin)/libicule.so.56', '<(qt_core_bin)/libicutu.so.56', '<(qt_gui_bin)/libQt5Gui.so.5', '<(qt_gui_bin)/libQt5OpenGL.so.5', '<(qt_gui_bin)/libQt5Widgets.so.5', '<(qt_qml_bin)/libQt5Qml.so.5', '<(qt_qml_bin)/libQt5Quick.so.5', '<(qt_qml_bin)/libQt5QuickControls2.so.5', '<(qt_qml_bin)/libQt5QuickTemplates2.so.5', '<(qt_qml_bin)/libQt5QuickWidgets.so.5', ], } ], [ 'OS=="win"', { 'msvs_settings' : { 'VCCLCompilerTool' : { 'AdditionalOptions' : [ '/O2','/Oy','/GL','/GF','/Gm-', '/Fm-', '/EHsc','/MT','/GS','/Gy','/GR-','/Gd', ] }, 'VCLinkerTool' : { 'AdditionalOptions' : ['/RELEASE','/OPT:REF','/OPT:ICF','/LTCG'] }, }, }, ], ], }, { 'target_name' : 'make_directory', 'type' : 'none', 'dependencies' : ['qml'], 'actions' : [{ 'action_name' : 'Directory created.', 'inputs' : [], 'outputs' : ['build'], 'action': ['<(mkdir)', '-p', '<(binary)'] }], }, { 'target_name' : 'copy_binary', 'type' : 'none', 'dependencies' : ['make_directory'], 'actions' : [{ 'action_name' : 'Module copied.', 'inputs' : [], 'outputs' : ['binary'], 'action' : ['<(cp)', 'build/Release/qml.node', '<(binary)/qml.node'], }], }, { 'target_name' : 'remove_extras', 'type' : 'none', 'dependencies' : ['copy_binary'], 'actions' : [{ 'action_name' : 'Build intermediates removed.', 'inputs' : [], 'outputs' : ['cpp'], 'conditions' : [ [ 'OS=="linux" or OS=="mac"', { 'action' : [ 'rm', '<(module_root_dir)/build/Release/obj.target/qml/cpp/bindings.o', '<(module_root_dir)/build/Release/obj.target/qml/cpp/view.o', '<(module_root_dir)/build/Release/qml.node' ] } ], [ 'OS=="win"', { 'action' : [ '<(rm)', '<(module_root_dir)/build/Release/qml.*', '<(module_root_dir)/build/Release/obj/qml/*.*' ] } ], ], }], }, ] }
Python
0
@@ -1214,93 +1214,8 @@ )',%0A -%09%09%09%09%09%09%09'%3C(qt_core_bin)/libQt5Core.so.5',%0A%09%09%09%09%09%09%09'%3C(qt_core_bin)/libQt5Network.so.5',%0A %09%09%09%09 @@ -1446,32 +1446,158 @@ ibicutu.so.56',%0A +%09%09%09%09%09%09%09'%3C(qt_core_bin)/libQt5Core.so.5',%0A%09%09%09%09%09%09%09'%3C(qt_core_bin)/libQt5Network.so.5',%0A%09%09%09%09%09%09%09'%3C(qt_core_bin)/libQt5DBus.so.5',%0A %09%09%09%09%09%09%09'%3C(qt_gui
73c8378f91687abfb58a8a964c2e5664b3e4c096
Fix ordinal naming
angr/procedures/win32/dynamic_loading.py
angr/procedures/win32/dynamic_loading.py
import angr import claripy import logging l = logging.getLogger('angr.procedures.win32.dynamic_loading') class LoadLibraryA(angr.SimProcedure): def run(self, lib_ptr): lib = self.state.mem[lib_ptr].string.concrete return self.load(lib) def load(self, lib): if '.' not in lib: lib += '.dll' loaded = self.project.loader.dynamic_load(lib) if loaded is None: return 0 # Add simprocedures for obj in loaded: self.register(obj) l.debug("Loaded %s", lib) return self.project.loader.find_object(lib).mapped_base def register(self, obj): # can be overridden for instrumentation self.project._register_object(obj) class LoadLibraryExW(LoadLibraryA): def run(self, lib_ptr, flag1, flag2): lib = self.state.mem[lib_ptr].wstring.concrete return self.load(lib) # if you subclass LoadLibraryA to provide register, you can implement LoadLibraryExW by making an empty class that just # subclasses your special procedure and LoadLibraryExW class GetProcAddress(angr.SimProcedure): def run(self, lib_handle, name_addr): if lib_handle.symbolic: raise angr.errors.SimValueError("GetProcAddress called with symbolic library handle %s" % lib_handle) lib_handle = self.state.se.eval(lib_handle) for obj in self.project.loader.all_pe_objects: if obj.mapped_base == lib_handle: break else: l.warning("GetProcAddress: invalid library handle %s", lib_handle) return 0 if claripy.is_true(name_addr < 0x10000): # this matches the bogus name specified in the loader... ordinal = self.state.se.eval(name_addr) name = 'ordinal.%d' % ordinal else: name = self.state.mem[name_addr].string.concrete full_name = '%s.%s' % (obj.provides, name) self.procs.add(full_name) sym = obj.get_symbol(name) if sym is None and name.endswith('@'): # There seems to be some mangling parsing being done in the linker? # I don't know what I'm doing for suffix in ['Z', 'XZ']: sym = obj.get_symbol(name + suffix) if sym is not None: name = name + suffix break if sym is None: l.warning("GetProcAddress: object %s does not contain %s", obj.provides, name) return 0 else: name = sym.name # fix ordinal names full_name = '%s.%s' % (obj.provides, name) self.procs.add(full_name) l.debug("Imported %s (%#x) from %s", name, sym.rebased_addr, obj.provides) return sym.rebased_addr KEY = 'dynamically_loaded_procedures' @property def procs(self): try: return self.state.globals[self.KEY] except KeyError: x = set() self.state.globals[self.KEY] = x return x
Python
0.999575
@@ -1798,19 +1798,38 @@ l.%25d +.%25s ' %25 +( ordinal +, obj.provides) %0A @@ -3044,8 +3044,9 @@ return x +%0A
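Decoded, the fix widens the placeholder name for ordinal imports from `'ordinal.%d' % ordinal` to `'ordinal.%d.%s' % (ordinal, obj.provides)`, so the same ordinal exported by two different libraries no longer collapses to one key in `self.procs`. The before/after keys in isolation (values made up for illustration):

ordinal, provides = 7, "kernel32.dll"
old_name = "ordinal.%d" % ordinal                  # 'ordinal.7' -- same for every DLL
new_name = "ordinal.%d.%s" % (ordinal, provides)   # 'ordinal.7.kernel32.dll' -- unique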
16b19a2635a3c1e3ae6ace2204b7a39b94dd76fc
Bump minimum macOS version to 10.15 (latest maintained).
binding.gyp
binding.gyp
{ "variables": { "copy_c_api": "no", "c_api_path": "<(module_root_dir)/qdb", }, "targets": [ { "target_name": "<(module_name)", "sources": [ "src/qdb_api.cpp", "src/entry.hpp", "src/expirable_entry.hpp", "src/blob.cpp", "src/blob.hpp", "src/cluster.cpp", "src/cluster.hpp", "src/error.cpp", "src/error.hpp", "src/integer.cpp", "src/integer.hpp", "src/prefix.cpp", "src/prefix.hpp", "src/query_find.cpp", "src/query_find.hpp", "src/query.cpp", "src/query.hpp", "src/range.cpp", "src/range.hpp", "src/suffix.cpp", "src/suffix.hpp", "src/tag.cpp", "src/tag.hpp", "src/time_series.cpp", "src/time_series.hpp", "src/ts_column.cpp", "src/ts_column.hpp", "src/ts_point.cpp", "src/ts_point.hpp", "src/ts_range.cpp", "src/ts_range.hpp", "src/ts_aggregation.cpp", "src/ts_aggregation.hpp", "src/cluster_data.hpp", "src/utilities.cpp", "src/utilities.hpp", "src/time.cpp", "src/time.hpp", "test/blobTest.js", "test/clusterTest.js", "test/config.js", "test/deamonRunner.js", "test/integerTest.js", "test/prefixTest.js", "test/queryTest.js", "test/rangeTest.js", "test/suffixTest.js", "test/tagTest.js", "test/tsBlobTest.js", "test/tsDoubleTest.js", "test/tsGeneralTest.js", "test/tsInt64Test.js", "test/tsStringTest.js", "test/tsTimestampTest.js", ], "conditions": [ [ "OS=='mac'", { "include_dirs": [ "/usr/local/include", "<(c_api_path)/include" ], "libraries": [ "-L<(c_api_path)/lib", "-lqdb_api", "-Wl,-rpath,@loader_path" ], "xcode_settings": { "OTHER_CFLAGS": [ "-std=c++14", "-stdlib=libc++", "-Wno-strict-aliasing", "-mmacosx-version-min=10.7" ] } } ], [ "OS=='freebsd'", { "include_dirs": [ "/usr/local/include", "<(c_api_path)/include" ], "libraries": [ "-L/usr/local/lib", "-L<(c_api_path)/lib", "-lqdb_api", "-Wl,-rpath=\'$$ORIGIN\'" ], "cflags": [ "-std=c++14", "-stdlib=libc++", "-Wno-strict-aliasing", "-Wno-deprecated-declarations", "-U_LIBCPP_TRIVIAL_PAIR_COPY_CTOR" ] } ], [ "OS=='linux'", { "include_dirs": [ "/usr/local/include", "<(c_api_path)/include" ], "libraries": [ "-L/usr/local/lib", "-L<(c_api_path)/lib", "-lqdb_api", "-Wl,-rpath=\'$$ORIGIN\'", "-static-libgcc", "-static-libstdc++" ], "cflags": [ "-std=c++14", "-Wno-strict-aliasing" ] } ], [ "OS=='win'", { "include_dirs": [ "<(c_api_path)/include" ], "msvs_settings": { "VCCLCompilerTool": { "ExceptionHandling": "2", "DisableSpecificWarnings": [ "4530" ] } }, "link_settings": { "libraries": [ "<(c_api_path)/lib/qdb_api.lib" ] } } ] ] }, { "target_name": "action_after_build", "type": "none", "dependencies": [ "<(module_name)" ], "conditions": [ [ "OS=='mac'", { "copies": [ { "destination": "<(module_path)", "files": [ "<(PRODUCT_DIR)/<(module_name).node" ], "conditions": [ [ "copy_c_api=='yes'", { "files": [ "<(c_api_path)/lib/libc++.1.0.dylib", "<(c_api_path)/lib/libc++.1.dylib", "<(c_api_path)/lib/libc++.LICENSE.TXT", "<(c_api_path)/lib/libc++.dylib", "<(c_api_path)/lib/libc++abi.1.0.dylib", "<(c_api_path)/lib/libc++abi.1.dylib", "<(c_api_path)/lib/libc++abi.LICENSE.TXT", "<(c_api_path)/lib/libc++abi.dylib", "<(c_api_path)/lib/libqdb_api.dylib" ] } ] ] } ] } ], [ "OS=='freebsd' or OS=='linux'", { "copies": [ { "destination": "<(module_path)", "files": [ "<(PRODUCT_DIR)/<(module_name).node" ], "conditions": [ [ "copy_c_api=='yes'", { "files": [ "<(c_api_path)/lib/libqdb_api.so" ] } ] ] } ] } ], [ "OS=='win'", { "copies": [ { "destination": "<(module_path)", "files": [ "<(PRODUCT_DIR)/<(module_name).node" ], "conditions": [ [ "copy_c_api=='yes'", { "files": [ "<(c_api_path)/bin/qdb_api.dll" ] } ] ] } ] } ] ] } ] }
Python
0
@@ -2955,9 +2955,10 @@ =10. -7 +15 %22%0A
b5df30371e7f975311ed4e783e204a9e38f97b0a
Add conditions in binding.gyp to fix build issues
binding.gyp
binding.gyp
{ "targets": [ { "target_name": "addon", "sources": [ "addon.cc", "myobject.cc" ] } ] }
Python
0
@@ -92,16 +92,1398 @@ ct.cc%22 %5D +,%0A %22conditions%22: %5B%0A %5B'OS==%22mac%22', %7B%0A %22cflags%22: %5B %22-m64%22 %5D,%0A %22ldflags%22: %5B %22-m64%22 %5D,%0A %22xcode_settings%22: %7B%0A %22OTHER_CFLAGS%22: %5B%22-ObjC++%22%5D,%0A %22ARCHS%22: %5B %22x86_64%22 %5D%0A %7D,%0A %22link_settings%22: %7B%0A %22libraries%22: %5B%0A %22/usr/local/lib/EmotivXavier-PREMIUM/libedk.dylib%22,%0A %22/usr/local/lib/EmotivXavier-PREMIUM/libedk.1.dylib%22,%0A %22/usr/local/lib/EmotivXavier-PREMIUM/libedk_ultils_mac.dylib%22,%0A %22/usr/local/lib/EmotivXavier-PREMIUM/libiomp5.dylib%22%0A %5D%0A %7D%0A %7D%5D,%0A %5B'OS==%22linux%22', %7B%0A %22cflags%22: %5B %22-m64%22 %5D,%0A %22ldflags%22: %5B %22-m64%22 %5D,%0A %22xcode_settings%22: %7B%0A %22ARCHS%22: %5B %22x86_64%22 %5D%0A %7D,%0A %22link_settings%22: %7B%0A %22libraries%22: %5B%0A %22/usr/local/lib/libedk.so.1%22,%0A %22/usr/local/lib/libhal.so.1%22,%0A %22/usr/local/lib/libedk_utils.so%22,%0A %22/usr/local/lib/libqwt.so.5%22%0A %5D%0A %7D%0A %7D%5D%0A %5D %0A %7D%0A
8efda67b5ca352e92183132dcf78a9292da057a0
Fix loading of .plist files
package_reviewer/checkers/check_resource_files.py
package_reviewer/checkers/check_resource_files.py
import functools import itertools import json import logging import plistlib import xml.etree.ElementTree as ET from ..base import Checker from .. import jsonc l = logging.getLogger(__name__) class CheckResourceFiles(Checker): # Cache results of glob calls @functools.lru_cache() def glob(self, pattern): return list(self.base_path.glob(pattern)) def globs(self, *patterns): return itertools.chain(*(self.glob(ptrn) for ptrn in patterns)) def check(self): for name in dir(self): if name.startswith("check_"): getattr(self, name)() l.debug("CheckResourceFiles.glob cache info: %s", self.glob.cache_info()) def check_plugins_in_root(self): if self.glob("*.py"): return python_files_in_package = self.glob("*/**/*.py") if python_files_in_package: l.debug("Non-plugin Python files: %s", python_files_in_package) if not self.glob("**/*.sublime-build"): self.fail("The package contains {} Python file(s), " "but none of them are in the package root " "and no build system is specified" .format(len(python_files_in_package))) def check_has_resource_files(self): resource_file_globs = { "*.py", "**/*.sublime-build", "**/*.sublime-commands", "**/*.sublime-keymap", "**/*.sublime-macro", # almost useless without other files "**/*.sublime-menu", "**/*.sublime-mousemap", "**/*.sublime-settings", "**/*.sublime-snippet", "**/*.sublime-syntax", "**/*.sublime-theme", "**/*.tmLanguage", "**/*.tmPreferences", "**/*.tmSnippet", "**/*.tmTheme", # hunspell dictionaries "**/*.aff", "**/*.dic", } has_resource_files = any(self.glob(ptrn) for ptrn in resource_file_globs) if not has_resource_files: self.fail("The package does not define any file that interfaces with Sublime Text") def check_jsonc_files(self): # All these files allow comments and trailing commas, # which is why we'll call them "jsonc" (JSON with Comments) jsonc_file_globs = { "**/*.sublime-build", "**/*.sublime-commands", "**/*.sublime-keymap", "**/*.sublime-macro", "**/*.sublime-menu", "**/*.sublime-mousemap", "**/*.sublime-settings", "**/*.sublime-theme", } for file_path in self.globs(*jsonc_file_globs): with file_path.open(encoding='utf-8') as f: try: jsonc.loads(f.read()) except json.JSONDecodeError as e: self.fail("File '{}' is badly formatted JSON (with comments)" .format(self._rel_path(file_path)), exception=e) def check_plist_files(self): plist_file_globs = { "**/*.tmLanguage", "**/*.tmPreferences", "**/*.tmSnippet", "**/*.tmTheme", } for file_path in self.globs(*plist_file_globs): with file_path.open() as f: try: plistlib.load(f) except Exception as e: self.fail("File '{}' is a badly formatted Plist" .format(self._rel_path(file_path)), exception=e) def check_xml_files(self): for file_path in self.glob("**/*.sublime-snippet"): try: ET.parse(str(file_path)) except ET.ParseError as e: self.fail("File '{}' is badly formatted XML" .format(self._rel_path(file_path)), exception=e) def _rel_path(self, path): return path.relative_to(self.base_path)
Python
0
@@ -3359,16 +3359,20 @@ th.open( +'rb' ) as f:%0A @@ -3456,17 +3456,18 @@ ept -Exception +ValueError as
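Decoded, the fix opens plist files with `file_path.open('rb')` and narrows the handler from `Exception` to `ValueError`: `plistlib.load()` reads from a binary stream (a text-mode handle breaks on binary plists), and it signals unparseable input with `plistlib.InvalidFileException`, a `ValueError` subclass. The corrected pattern in isolation, with a hypothetical file name:

import plistlib

# plistlib.load() wants a binary stream; text mode corrupts binary plists.
with open("Example.tmTheme", "rb") as f:
    theme = plistlib.load(f)
print(theme.get("name"))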
dfde913dcd096ed76ab5939e63914ce1e103f76c
Fix for #352
AppiumLibrary/keywords/_android_utils.py
AppiumLibrary/keywords/_android_utils.py
# -*- coding: utf-8 -*- import base64 from .keywordgroup import KeywordGroup from selenium.common.exceptions import TimeoutException from kitchen.text.converters import to_bytes class _AndroidUtilsKeywords(KeywordGroup): # Public def open_notifications(self): """Opens and expands an Android device's notification drawer. Android only. """ driver = self._current_application() driver.open_notifications() def get_network_connection_status(self): """Returns an integer bitmask specifying the network connection type. Android only. See `set network connection status` for more details. """ driver = self._current_application() return driver.network_connection def set_network_connection_status(self, connectionStatus): """Sets the network connection Status. Android only. Possible values: | =Value= | =Alias= | =Data= | =Wifi= | =Airplane Mode= | | 0 | (None) | 0 | 0 | 0 | | 1 | (Airplane Mode) | 0 | 0 | 1 | | 2 | (Wifi only) | 0 | 1 | 0 | | 4 | (Data only) | 1 | 0 | 0 | | 6 | (All network on) | 1 | 1 | 0 | """ driver = self._current_application() return driver.set_network_connection(int(connectionStatus)) def pull_file(self, path, decode=False): """Retrieves the file at `path` and return it's content. Android only. - _path_ - the path to the file on the device - _decode_ - True/False decode the data (base64) before returning it (default=False) """ driver = self._current_application() theFile = driver.pull_file(path) if decode: theFile = base64.b64decode(theFile) return str(theFile) def pull_folder(self, path, decode=False): """Retrieves a folder at `path`. Returns the folder's contents zipped. Android only. - _path_ - the path to the folder on the device - _decode_ - True/False decode the data (base64) before returning it (default=False) """ driver = self._current_application() theFolder = driver.pull_folder(path) if decode: theFolder = base64.b64decode(theFolder) return theFolder def push_file(self, path, data, encode=False): """Puts the data in the file specified as `path`. Android only. - _path_ - the path on the device - _data_ - data to be written to the file - _encode_ - True/False encode the data as base64 before writing it to the file (default=False) """ driver = self._current_application() data = to_bytes(data) if encode: data = base64.b64encode(data) driver.push_file(path, data) def get_activity(self): """Retrieves the current activity on the device. Android only. """ driver = self._current_application() return driver.current_activity def start_activity(self, appPackage, appActivity, **opts): """Opens an arbitrary activity during a test. If the activity belongs to another application, that application is started and the activity is opened. Android only. - _appPackage_ - The package containing the activity to start. - _appActivity_ - The activity to start. - _appWaitPackage_ - Begin automation after this package starts (optional). - _appWaitActivity_ - Begin automation after this activity starts (optional). - _intentAction_ - Intent to start (opt_ional). - _intentCategory_ - Intent category to start (optional). - _intentFlags_ - Flags to send to the intent (optional). - _optionalIntentArguments_ - Optional arguments to the intent (optional). - _dontStopAppOnReset_ - Should the app be stopped on reset (optional)? 
""" # Almost the same code as in appium's start activity, # just to keep the same keyword names as in open application arguments = { 'app_wait_package': 'appWaitPackage', 'app_wait_activity': 'appWaitActivity', 'intent_action': 'intentAction', 'intent_category': 'intentCategory', 'intent_flags': 'intentFlags', 'optional_intent_arguments': 'optionalIntentArguments', 'dont_stop_app_on_reset': 'dontStopAppOnReset' } data = {} for key, value in arguments.items(): if value in opts: data[key] = opts[value] driver = self._current_application() driver.start_activity(app_package=appPackage, app_activity=appActivity, **data) def wait_activity(self, activity, timeout, interval=1): """Wait for an activity: block until target activity presents or time out. Android only. - _activity_ - target activity - _timeout_ - max wait time, in seconds - _interval_ - sleep interval between retries, in seconds """ if not activity.startswith('.'): activity = ".%s" % activity driver = self._current_application() if not driver.wait_activity(activity=activity, timeout=float(timeout), interval=float(interval)): raise TimeoutException(msg="Activity %s never presented, current activity: %s" % (activity, self.get_activity())) def install_app(self, app_path, app_package): """ Install App via Appium Android only. - app_path - path to app - app_package - package of install app to verify """ driver = self._current_application() driver.install_app(app_path) return driver.is_app_installed(app_package) def set_location(self, latitude, longitude, altitude=10): """ Set location - _latitute_ - _longitude_ - _altitude_ = 10 [optional] Android only. New in AppiumLibrary 1.5 """ driver = self._current_application() driver.set_location(latitude,longitude,altitude)
Python
0
@@ -2985,24 +2985,40 @@ encode(data) +.decode('utf-8') %0A dri
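Decoded, the one-line fix appends `.decode('utf-8')` to `base64.b64encode(data)`: on Python 3 `b64encode` returns `bytes`, and the encoded payload presumably has to travel as `str` through the driver's JSON wire protocol. The conversion on its own:

import base64

payload = base64.b64encode(b"hello world")   # bytes: b'aGVsbG8gd29ybGQ='
as_text = payload.decode("utf-8")            # str:  'aGVsbG8gd29ybGQ='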
9b9359c06e44fe5a8f5f16f662fcea2ef3e8f18d
Remove delay load hook
binding.gyp
binding.gyp
{ "targets" : [{ "target_name" : "ofe", "sources" : [ "ofe.cc" ], "include_dirs": [ '<!(node -e "require(\'nan\')")' ] }] }
Python
0.000001
@@ -123,15 +123,50 @@ )%22)'%0A%09%09%5D +,%0A%09%09%22win_delay_load_hook%22 : %22false%22 %0A%09%7D%5D%0A%7D%0A
a30eb4a1eaa3a9677950c37f273e7ac16cae698f
Change init method
DSTC2/basic.py
DSTC2/basic.py
# -*- coding:utf-8 -*- from sklearn.cross_validation import train_test_split from DSTC2.traindev.scripts import myLogger from DSTC2.traindev.scripts.model import bp from traindev.scripts import file_reader from traindev.scripts import initializer from traindev.scripts.initializer import Set __author__ = "JOHNKYON" global logger if __name__ == "__main__": global logger logger = myLogger.myLogger("basic") logger.info("Starting basic") # 选择模式 dataset = file_reader.get_dataset("dstc2_debug") logger.info("token check test begin") raw = initializer.raw_initializer(dataset) # Build token and dictionary token = initializer.token_initializer(raw["input"]) dictionary = initializer.dictionary_initializer(token) # Build input vector one_set = Set(token, dictionary, raw["output"]) # get model model = bp.bp_builder(one_set.dimension * one_set.sentence_dim, len(one_set.act_dict) * one_set.sentence_dim) # train X_train, X_test, y_train, y_test = train_test_split(one_set.input_mtr, one_set.output_mtr, test_size=0.2) model.fit(X_train, y_train, batch_size=2, nb_epoch=5) # test print model.evaluate(X_test, y_test, batch_size=2)
Python
0.000008
@@ -823,24 +823,108 @@ %5B%22output%22%5D)%0A + input_mtr, output_mtr = bp.bp_initialize(one_set.input_mtr, one_set.output_mtr)%0A # get mo @@ -1109,24 +1109,16 @@ t_split( -one_set. input_mt @@ -1116,32 +1116,24 @@ (input_mtr, -one_set. output_mtr,
2f4243b7d7e314834ac3f57877c06b034c138567
Use mcdir for writing the file to the repo. Clean up the name of the file.
backend/mcapi/user/ud.py
backend/mcapi/user/ud.py
from ..mcapp import app from ..decorators import crossdomain, apikey, jsonp from flask import jsonify, g, request, send_from_directory from ..utils import mkdirp import rethinkdb as r import os.path import os from ..args import json_as_format_arg import tempfile from loader.tasks.db import load_data_dir, import_data_dir_to_repo from celery import chain from .. import access from .. import error from .. import dmutil from .. import validate from loader.model import datafile @app.route('/udqueue') @apikey @jsonp def get_udqueue(): user = access.get_user() selection = list(r.table('udqueue').filter({'owner': user}).run(g.conn)) return json_as_format_arg(selection) @app.route('/upload', methods=['POST']) @apikey @crossdomain(origin='*') def upload_file(): user = access.get_user() state_id = request.form['state_id'] mkdirp('/tmp/uploads') tdir = tempfile.mkdtemp(dir='/tmp/uploads') for key in request.files.keys(): datadir = request.form[key + "_datadir"] file = request.files[key] dir = os.path.join(tdir, datadir) mkdirp(dir) filepath = os.path.join(dir, file.filename) file.save(filepath) chain(load_data_dir.si(user, tdir, state_id)\ | import_data_dir_to_repo.si(tdir))() return jsonify({'success': True}) @app.route('/import', methods=['POST']) @apikey @crossdomain(origin='*') def import_file(): user = access.get_user() if 'file' not in request.files: return error.bad_request("No files to import") elif 'project' not in request.form: return error.bad_request('No project specified') elif 'datadir' not in request.form: return error.bad_request('No datadir specified') project = request.form['project'] datadir = request.form['datadir'] mkdirp('/tmp/uploads') file = request.files['file'] proj = validate.project_id_exists(project, user) if proj is None: return error.bad_request("Project doesn't exist %s" % (project)) ddir = validate.datadir_id_exists(datadir, user) if ddir is None: return error.bad_request( "Datadir doesn't exist %s" % (datadir)) if not datadir_in_project(ddir, proj): return error.bad_request( "Datadir %s is not in project %s" % (datadir, project)) if filename_in_datadir(ddir, file.filename): return error.bad_request( "File %s already exists in datadir %s" % (file.filename, datadir)) dfid = make_datafile(datadir, user, file.filename) filepath = os.path.join('/tmp/uploads', dfid) file.save(filepath) #load_data_file.delay(df, project, datadir) return jsonify({'id': dfid}) def datadir_in_project(ddir, proj): filter_by = {'project_id': proj['id'], 'datadir_id': ddir['id']} selection = list(r.table('project2datadir').filter(filter_by).run(g.conn)) if selection: return True return False def filename_in_datadir(ddir, filename): files = list(r.table('datafiles').filter({'name': filename}).run(g.conn)) if not files: return False for file in files: for ddir_id in file['datadirs']: if ddir_id == ddir['id']: return True return False def make_datafile(datadir, user, filename): df = datafile.DataFile(filename, "private", user) df.datadirs.append(datadir) dfid = dmutil.insert_entry_id('datafiles', df.__dict__) ddir = r.table('datadirs').get(datadir).run(g.conn) dfiles = ddir['datafiles'] dfiles.append(dfid) r.table('datadirs').get(datadir).update({'datafiles': dfiles}).run(g.conn) return dfid @app.route('/download/<path:datafile>') #@apikey def download_file(datafile): #user = access.get_user() return send_from_directory('/tmp', 'ReviewQueue.png', as_attachment=True) #df = r.table('datafiles').get(datafile).run(g.conn) #if not checkAccess(user, df): # return error_not_found_response() #return None
Python
0
@@ -437,16 +437,37 @@ alidate%0A +from .. import mcdir%0A from loa @@ -492,16 +492,16 @@ atafile%0A - %0A%0A@app.r @@ -1822,35 +1822,8 @@ r'%5D%0A - mkdirp('/tmp/uploads')%0A @@ -2548,30 +2548,35 @@ th.join( -'/tmp/uploads' +mcdir.for_uid(dfid) , dfid)%0A @@ -3301,24 +3301,41 @@ ataFile( +os.path.basename( filename , %22priva @@ -3326,16 +3326,17 @@ filename +) , %22priva
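Decoded, the diff imports `mcdir`, saves the upload under `mcdir.for_uid(dfid)` instead of the hard-coded `/tmp/uploads`, and wraps the stored name in `os.path.basename()`. The basename call is the generally useful part — it strips any directory components a client smuggles into an upload name:

import os.path

print(os.path.basename("../../etc/passwd"))   # 'passwd'
print(os.path.basename("data/run1.csv"))      # 'run1.csv'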
e36054ab878b10d9e2bc0b21a21d589a16945449
Add -Wno-unused-function to Xcode flags
binding.gyp
binding.gyp
{ "targets": [ { "target_name": "bswap", "sources": [ "src/bswap.cc" ], "include_dirs" : [ "<!(node -e \"require('nan')\")" ], "cflags":[ "-march=native", "-falign-loops=32", # See readme; significant improvement for some cases "-Wno-unused-function", # CPU feature detection only used on Win "-Wno-unused-const-variable", # cpuid regs "-Wno-cast-function-type" # https://github.com/nodejs/nan/issues/807 ], "msvs_settings": { "VCCLCompilerTool": { "EnableEnhancedInstructionSet": 3 # /arch:AVX # 0-not set, 1-sse, 2-sse2, 3-avx, 4-ia32, 5-avx2 } }, "xcode_settings": { "OTHER_CPLUSPLUSFLAGS": [ "-march=native", "-Wno-unused-const-variable" ] } } ] }
Python
0.000001
@@ -760,32 +760,107 @@ -march=native%22,%0A + %22-Wno-unused-function%22, # CPU feature detection only used on Win%0A %22-Wno-
5fade792a45a7434f1b8c03e7835756ef8c2a672
Use pkg-config in node's binding.gyp
binding.gyp
binding.gyp
{ "targets" : [ { 'include_dirs': [ "<!(node -e \"require('nan')\")" ], 'cflags': [ '-std=c++0x', '-Wall', '-pthread', '-pedantic', '-g', '-zdefs' '-Werror' ], 'ldflags': [ '-g' ], 'link_settings': { 'libraries': [ '-lpthread', '-lgrpc', '-lgpr' ] }, "conditions": [ ['OS == "mac"', { 'xcode_settings': { 'MACOSX_DEPLOYMENT_TARGET': '10.9', 'OTHER_CFLAGS': [ '-std=c++11', '-stdlib=libc++' ] } }], ['OS != "mac"', { 'link_settings': { 'libraries': [ '-lrt' ] } }] ], "target_name": "grpc", "sources": [ "ext/byte_buffer.cc", "ext/call.cc", "ext/channel.cc", "ext/completion_queue_async_worker.cc", "ext/credentials.cc", "ext/node_grpc.cc", "ext/server.cc", "ext/server_credentials.cc", "ext/timeval.cc" ] } ] }
Python
0.000001
@@ -280,32 +280,296 @@ '-g'%0A %5D,%0A + %22conditions%22: %5B%0A %5B'OS != %22win%22', %7B%0A 'variables': %7B%0A 'has_pkg_config': '%3C!(command -v pkg-config %3E/dev/null 2%3E&1 && echo true %7C%7C echo false)'%0A %7D,%0A 'conditions': %5B%0A %5B'has_pkg_config == %22true%22', %7B%0A 'link_sett @@ -577,32 +577,40 @@ ngs': %7B%0A + + 'libraries': %5B%0A @@ -600,32 +600,408 @@ 'libraries': %5B%0A + '%3C!@(pkg-config --libs-only-l grpc)'%0A %5D%0A %7D,%0A 'cflags': %5B%0A '%3C!@(pkg-config --cflags grpc)'%0A %5D,%0A 'libraries': %5B%0A '%3C!@(pkg-config --libs-only-L grpc)'%0A %5D%0A %7D, %7B%0A 'link_settings': %7B%0A 'libraries': %5B%0A '-lpth @@ -999,32 +999,42 @@ '-lpthread',%0A + '-lgrp @@ -1043,24 +1043,34 @@ ,%0A + + '-lgpr'%0A @@ -1069,26 +1069,47 @@ pr'%0A + -%5D%0A + %5D,%0A %7D,%0A @@ -1103,33 +1103,43 @@ %7D,%0A -%22 + ' conditions%22: %5B%0A @@ -1124,37 +1124,46 @@ 'conditions -%22: +': %5B%0A + %5B'OS == @@ -1159,17 +1159,17 @@ %5B'OS -= +! = %22mac%22' @@ -1186,14 +1186,23 @@ -'xcode + 'link _set @@ -1203,32 +1203,42 @@ nk_settings': %7B%0A + 'MAC @@ -1238,72 +1238,76 @@ ' -MACOSX_DEPLOYMENT_TARGET': '10.9',%0A 'OTHER_CFLAGS': %5B +libraries': %5B%0A '-lrt'%0A %5D %0A @@ -1321,21 +1321,15 @@ -'-std=c++11', + %7D %0A @@ -1343,25 +1343,19 @@ -'-stdlib=libc++'%0A + %7D%5D%0A @@ -1366,33 +1366,62 @@ %5D%0A -%7D + %7D%0A %5D%0A %5D %0A %7D%5D,%0A @@ -1422,33 +1422,33 @@ %5D,%0A %5B'OS -! += = %22mac%22', %7B%0A @@ -1446,36 +1446,37 @@ ', %7B%0A ' -link +xcode _settings': %7B%0A @@ -1490,41 +1490,130 @@ ' -libraries': %5B%0A '-lrt +MACOSX_DEPLOYMENT_TARGET': '10.9',%0A 'OTHER_CFLAGS': %5B%0A '-std=c++11',%0A '-stdlib=libc++ '%0A
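Decoded, the diff probes for the tool with `command -v pkg-config >/dev/null 2>&1 && echo true || echo false`; when available it takes `--cflags`, `--libs-only-l`, and `--libs-only-L` for grpc, otherwise it keeps the hard-coded `-lpthread -lgrpc -lgpr` list. Roughly the same decision expressed in Python (a sketch of the logic, not project code):

import shutil
import subprocess

def grpc_flags() -> dict:
    if shutil.which("pkg-config") is None:
        # Fallback mirrors the hard-coded library list in the gyp file.
        return {"cflags": [], "libs": ["-lpthread", "-lgrpc", "-lgpr"]}
    def query(flag: str) -> list[str]:
        return subprocess.check_output(["pkg-config", flag, "grpc"], text=True).split()
    return {"cflags": query("--cflags"),
            "libs": query("--libs-only-L") + query("--libs-only-l")}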
5578af9e99c6792e375db143f2423d92c847e001
Update binding.gyp to compile the new feature
binding.gyp
binding.gyp
{ "targets": [ { "target_name": "ons", "sources": [ "src/entry.cpp", "src/log_util.cpp", "src/ons_options.cpp", "src/consumer_ack.cpp", "src/consumer.cpp", "src/producer.cpp", "src/consumer_listener.cpp" ], "include_dirs": [ "src/third_party/include", "<!(node -e \"require('nan')\")" ], "conditions": [ ["OS==\"mac\"", { "cflags!": [ "-fno-exceptions" ], "cflags_cc!": [ "-fno-exceptions", "-pthread", "-Wl,--no-as-needed", "-ldl" ], "cflags_cc": [ "-Wno-ignored-qualifiers" ], "cflags": [ "-std=c++11", "-stdlib=libc++" ], "sources": [ "src/third_party/sole/sole.cpp" ], "include_dirs": [ "/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1", "src/third_party/sole" ], "xcode_settings": { "GCC_ENABLE_CPP_EXCEPTIONS": "YES" } }], ["OS==\"linux\"", { "sources": [ "src/third_party/sole/sole.cpp" ], "include_dirs": [ "src/third_party/sole" ], "libraries": [ "../src/third_party/lib/linux/libonsclient4cpp.a" ], "cflags_cc!": [ "-fno-exceptions", "-pthread", "-Wl,--no-as-needed", "-ldl" ], "cflags_cc": [ "-Wno-ignored-qualifiers" ], "cflags": [ "-std=c++11" ] }], ["OS==\"win\"", { "libraries": [ "../src/third_party/lib/windows/ONSClient4CPP.lib" ], "copies": [ { "destination": "<(module_root_dir)/build/Release/", "files": [ "<(module_root_dir)/src/third_party/lib/windows/ONSClient4CPP.dll" ] } ] }] ] } ] }
Python
0
@@ -227,32 +227,73 @@ /producer.cpp%22,%0A + %22src/real_producer_wrapper.cpp%22,%0A %22src/con @@ -1486,16 +1486,22 @@ d=c++11%22 +, %22-g%22 %5D%0A
d749361a7ee14b96c2235300b15fbba1222a6a9c
Remove comment.
binding.gyp
binding.gyp
{ 'targets': [{ 'target_name': 'bitcoindjs', 'variables': { 'BOOST_INCLUDE': '<!(test -n "$BOOST_INCLUDE"'\ ' && echo "$BOOST_INCLUDE"'\ ' || test -e /usr/include/boost && echo /usr/include/boost' \ ' || echo ./include)', 'LEVELDB_INCLUDE': '<!(test -n "$LEVELDB_INCLUDE"'\ ' && echo "$LEVELDB_INCLUDE"'\ ' || test "$BITCOIN_DIR" && echo "${BITCOIN_DIR}/src/leveldb/include"' \ ' || echo ./include)', 'BITCOIN_DIR': '<!(test -n "$BITCOIN_DIR"'\ ' && echo "$BITCOIN_DIR"'\ ' || echo "${HOME}/bitcoin")', 'LIBBITCOIND': '<!(./platform/os.sh)', }, 'defines': [ 'ENABLE_WALLET=1', ], 'include_dirs' : [ '<(BOOST_INCLUDE)', '<(LEVELDB_INCLUDE)', '<(BITCOIN_DIR)/src', '<!(node -e "require(\'nan\')")', ], 'sources': [ './src/bitcoindjs.cc', ], 'cflags_cc': [ '-fexceptions', '-frtti', '-fpermissive', ], 'libraries': [ '-lboost_system', '-lboost_filesystem', '-lboost_program_options', '-lboost_thread', '-lboost_chrono', # XXX NEW '-lsecp256k1', '<(LIBBITCOIND)', ] }] }
Python
0
@@ -1124,24 +1124,8 @@ o',%0A - # XXX NEW%0A
d85adcabe11d797b5ccf55189150707eb9672f3e
Accept Y/N in addition to y/n
core/roslib/src/roslib/scriptutil.py
core/roslib/src/roslib/scriptutil.py
# Software License Agreement (BSD License) # # Copyright (c) 2008, Willow Garage, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Willow Garage, Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # Revision $Id$ # $Author$ """ Common ros script utilities, such as methods convenience methods for creating master xmlrpc proxies and executing rospack. This library is relatively immature and much of the functionality here will likely be moved elsewhere as the API solidifies. """ import itertools import os import re import string import subprocess import sys import roslib.exceptions import roslib.launcher import roslib.message import roslib.msgs import roslib.names import roslib.network import roslib.packages import roslib.rosenv PRODUCT = 'ros' ## caller ID for master calls where caller ID is not vital _GLOBAL_CALLER_ID = '/script' _is_interactive = False def set_interactive(interactive): """ General API for a script specifying that it is being run in an interactive environment. Many libraries may wish to change their behavior based on being interactive (e.g. disabling signal handlers on Ctrl-C). @param interactive: True if current script is being run in an interactive shell @type interactive: bool """ global _is_interactive _is_interactive = interactive def is_interactive(): """ General API for a script specifying that it is being run in an interactive environment. Many libraries may wish to change their behavior based on being interactive (e.g. disabling signal handlers on Ctrl-C). @return: True if interactive flag has been set @rtype: bool """ return _is_interactive def myargv(argv=None): """ Remove ROS remapping arguments from sys.argv arguments. @return: copy of sys.argv with ROS remapping arguments removed @rtype: [str] """ if argv is None: argv = sys.argv return [a for a in argv if not roslib.names.REMAP in a] def script_resolve_name(script_name, name): """ Name resolver for scripts. Supports ROS_NAMESPACE. Does not support remapping arguments. @param name: name to resolve @type name: str @param script_name: name of script. script_name must not contain a namespace. 
@type script_name: str @return: resolved name @rtype: str """ if not name: #empty string resolves to namespace return roslib.names.get_ros_namespace() #Check for global name: /foo/name resolves to /foo/name if roslib.names.is_global(name): return name #Check for private name: ~name resolves to /caller_id/name elif roslib.names.is_private(name): return ns_join(roslib.names.make_caller_id(script_name), name[1:]) return roslib.names.get_ros_namespace() + name def get_master(): """ Get an XMLRPC handle to the Master. It is recommended to use the `rosgraph.masterapi` library instead, as it provides many conveniences. @return: XML-RPC proxy to ROS master @rtype: xmlrpclib.ServerProxy """ try: import xmlrpc.client as xmlrpcclient #Python 3.x except ImportError: import xmlrpclib as xmlrpcclient #Python 2.x # #1730 validate URL for better error messages uri = roslib.rosenv.get_master_uri() try: roslib.network.parse_http_host_and_port(uri) except ValueError: raise roslib.exceptions.ROSLibException("invalid master URI: %s"%uri) return xmlrpcclient.ServerProxy(uri) def get_param_server(): """ @return: ServerProxy XML-RPC proxy to ROS parameter server @rtype: xmlrpclib.ServerProxy """ return get_master() def is_subscriber(topic, subscriber_id): """ Check whether or not master think subscriber_id subscribes to topic @return: True if still register as a subscriber @rtype: bool @raise roslib.exceptions.ROSLibException: if communication with master fails """ m = get_master() code, msg, state = m.getSystemState(_GLOBAL_CALLER_ID) if code != 1: raise roslib.exceptions.ROSLibException("Unable to retrieve master state: %s"%msg) _, subscribers, _ = state for t, l in subscribers: if t == topic: return subscriber_id in l else: return False def is_publisher(topic, publisher_id): """ Predicate to check whether or not master think publisher_id publishes topic @return: True if still register as a publisher @rtype: bool @raise roslib.exceptions.ROSLibException: if communication with master fails """ m = get_master() code, msg, state = m.getSystemState(_GLOBAL_CALLER_ID) if code != 1: raise roslib.exceptions.ROSLibException("Unable to retrieve master state: %s"%msg) pubs, _, _ = state for t, l in pubs: if t == topic: return publisher_id in l else: return False def ask_and_call(cmds, cwd=None): """ Pretty print cmds, ask if they should be run, and if so, runs them using subprocess.check_call. @param cwd: (optional) set cwd of command that is executed @type cwd: str @return: True if cmds were run. """ # Pretty-print a string version of the commands def quote(s): return '"%s"'%s if ' ' in s else s sys.stdout.write("Okay to execute:\n\n%s\n(y/n)?\n"%('\n'.join([' '.join([quote(s) for s in c]) for c in cmds]))) while 1: input = sys.stdin.readline().strip() if input in ['y', 'n']: break accepted = input == 'y' import subprocess if accepted: for c in cmds: if cwd: subprocess.check_call(c, cwd=cwd) else: subprocess.check_call(c) return accepted
Python
0.000004
@@ -6859,16 +6859,24 @@ .strip() +.lower() %0A @@ -6950,30 +6950,8 @@ 'y'%0A - import subprocess%0A
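Decoded, the patch appends `.lower()` to the stripped stdin line — so 'Y' and 'N' now pass the `['y', 'n']` membership test — and drops a redundant in-function `import subprocess` (the module already imports it at the top). The normalization in isolation:

answer = "  Y \n".strip().lower()   # -> 'y'
accepted = answer == "y"            # True for 'y' and 'Y' alike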
e3d3672b40cffd024c26e4fa441ede9dee3b345c
Add two new attributes to annotator: annotator_schema_version and quote.
annotator/model/couch.py
annotator/model/couch.py
from datetime import datetime import uuid import couchdb import couchdb.design from couchdb.mapping import Document, Mapping from couchdb.mapping import TextField, IntegerField, DateField, DictField from couchdb.mapping import ListField, DateTimeField, BooleanField, ViewField class Metadata(object): SERVER = None DB = None def init_model(config): Metadata.SERVER = couchdb.Server(config['COUCHDB_HOST']) Metadata.DB = setup_db(config['COUCHDB_DATABASE']) def setup_db(dbname): if dbname in Metadata.SERVER: db = Metadata.SERVER[dbname] setup_views(db) return db else: db = Metadata.SERVER.create(dbname) setup_views(db) return db def rebuild_db(dbname): if dbname in Metadata.SERVER: del Metadata.SERVER[dbname] return setup_db(dbname) class DomainObject(Document): def save(self): self.store(Metadata.DB) @classmethod def get(cls, id): return cls.load(Metadata.DB, id) def delete(self): Metadata.DB.delete(self) def to_dict(self): # TODO: use unwrap instead? out = dict(self.items()) out['id'] = self.id return out class Annotation(DomainObject): type = TextField(default='Annotation') uri = TextField() account_id = TextField() user = DictField() text = TextField() created = DateTimeField(default=datetime.now) ranges = ListField(DictField()) permissions = DictField( Mapping.build( read=ListField(TextField()), update=ListField(TextField()), delete=ListField(TextField()), admin=ListField(TextField()) )) def __init__(self, id=None, **values): if 'user' in values and isinstance(values['user'], basestring): values['user'] = { 'id': values['user'] } super(Annotation, self).__init__(id, **values) @property def userid(self): return user['id'] def update_from_dict(self, dict_): if 'id' in dict_: del dict_['id'] if '_id' in dict_: del dict_['_id'] if 'user' in dict_ and isinstance(dict_['user'], basestring): dict_['user'] = { 'id': dict_['user'] } attrnames = self._fields.keys() for k,v in dict_.items(): if k in attrnames: setattr(self, k, v) else: self[k] = v return self @classmethod def from_dict(cls, dict_): if 'id' in dict_: ann = Annotation.get(dict_['id']) else: ann = Annotation() ann.update_from_dict(dict_) return ann @classmethod def search(self, **kwargs): '''Search by arbitrary attributes. WARNING: at the moment use temporary views. ''' non_query_args = ['offset', 'limit', 'all_fields'] offset = int(kwargs.get('offset', 0)) limit = int(kwargs.get('limit', -1)) for k in non_query_args: if k in kwargs: del kwargs[k] terms = kwargs.keys() if terms: couchkey = '[%s]' % ','.join(['doc.' 
+ x for x in terms]) else: couchkey = 'null' map_fun = '''function(doc) { if (doc.type == 'Annotation') emit(%s, null); }''' % couchkey wrapper = lambda x: Annotation.wrap(x['doc']) ourkwargs = dict( map_fun=map_fun, offset=offset, include_docs=True, wrapper=wrapper ) if limit >= 0: ourkwargs['limit'] = limit q = Metadata.DB.query(**ourkwargs) if terms: return q[ list(kwargs.values()) ] else: return q class Account(DomainObject): type = TextField(default='Account') username = TextField() pwdhash = TextField() email = TextField() activated = BooleanField(default=True) created = DateTimeField(default=datetime.now) secret = TextField(default=str(uuid.uuid4())) ttl = IntegerField() by_email = ViewField('account', '''\ function(doc) { if (doc.type=='Account') { emit(doc.email, doc); } }''') @classmethod def get_by_email(cls, email): out = cls.by_email(Metadata.DB, limit=1) return list(out[email]) # Required views # query by document # query by user # query by document and user # query # TODO: general, change from all_fields to include_docs=True ? # Remove offset ....? # limit the same # results format is different: {'total_rows': 3, 'offset':', 'rows': .. # as opposed to {'total': ,'results': ...} # could sort this out with a list function ... def setup_views(db): design_doc = 'annotator' view = couchdb.design.ViewDefinition(design_doc, 'all', ''' function(doc) { emit(doc._id, null); } ''' ) view.get_doc(db) view.sync(db) view = couchdb.design.ViewDefinition(design_doc, 'byuri', ''' function(doc) { if(doc.uri) { emit(doc.uri, null); } } ''' ) view.get_doc(db) view.sync(db) Account.by_email.get_doc(db) Account.by_email.sync(db)
Python
0
@@ -1259,24 +1259,82 @@
 nnotation')
+    annotator_schema_version = TextField(default=u'v1.0')
     uri = Te
@@ -1414,24 +1414,48 @@
 TextField()
+    quote = TextField()
     created
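Decoded, the two hunks add a schema-version field and a quote field to the Annotation document. A sketch of the resulting field block, reconstructed from the old contents plus the diff (surrounding fields abridged; not the verbatim committed file):

from datetime import datetime
from couchdb.mapping import Document, TextField, DateTimeField

class Annotation(Document):
    type = TextField(default='Annotation')
    annotator_schema_version = TextField(default=u'v1.0')  # added by this commit
    uri = TextField()
    text = TextField()
    quote = TextField()  # added by this commit
    created = DateTimeField(default=datetime.now)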
50bfb78e3e384e341021662316c71ccd3ab27905
Delete test_event_str from the tests
core/tests/tests_event.py
core/tests/tests_event.py
#! /usr/bin/env python

__author__ = 'Henri Buyse'

import pytest

from core.models import Event, Tournament


def test_str():
    e = Event.objects.create(name="VolleyBall Day")

    assert print(e) != None
    assert print(e) != ''
    assert print(e) == "VolleyBall Day"


def test_get_all_tournaments_related():
    assert True


def test_get_vbuserprofile():
    vbu = VBUserProfile.objects.create(user=User.objects.create_user(username='jdoe', email='jdoe@jdoe.fr', password='toto'))
    e = Event.objects.create(vbuserprofile=vbu)

    assert e.get_vbuserprofile() != None
    assert e.get_vbuserprofile() != VBUserProfile.objects.create()
    assert e.get_vbuserprofile() == vbu


def get_maps_address():
    assert True


def test_get_all_events():
    assert True


def test_get_name():
    e = Event(name="VolleyBall Day")

    assert e.get_name() != None
    assert e.get_name() != ''
    assert e.get_name() == "VolleyBall Day"


def test_get_nb_terrains():
    e = Event(nb_terrains=6)

    assert e.get_nb_terrains() != None
    assert e.get_nb_terrains() != int()
    assert e.get_nb_terrains() != 9
    assert e.get_nb_terrains() == 6


def test_get_nb_gymnasiums():
    e = Event(nb_gymnasiums=2)

    assert e.get_nb_gymnasiums() != None
    assert e.get_nb_gymnasiums() != int()
    assert e.get_nb_gymnasiums() != 9
    assert e.get_nb_gymnasiums() == 2


def test_get_nb_teams():
    e = Event(nb_teams=36)

    assert e.get_nb_teams() != None
    assert e.get_nb_teams() != int()
    assert e.get_nb_teams() != 9
    assert e.get_nb_teams() == 36


def test_get_night():
    e = Event()

    assert e.get_night() != None
    assert e.get_night() != True
    assert e.get_night() == False


def test_get_surface():
    assert True


def test_get_name_gymnasium():
    assert True


def test_get_nb_in_street():
    assert True


def test_get_street():
    assert True


def test_get_city():
    assert True


def test_get_zip_code():
    assert True


def test_get_region():
    assert True


def test_get_country():
    assert True


def test_get_country_iso():
    assert True


def test_get_description():
    assert True


def test_get_website():
    assert True


def test_get_full():
    e = Event()

    assert e.get_full() != None
    assert e.get_full() != True
    assert e.get_full() == False
Python
0.000007
@@ -117,177 +117,8 @@
 
 
-def test_str():
-    e = Event.objects.create(name="VolleyBall Day")
-
-    assert print(e) != None
-    assert print(e) != ''
-    assert print(e) == "VolleyBall Day"
-
 
 
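The deleted test_str compared the return value of print(), which is always None in Python 3, so its assertions could never pass; removing the test is the minimal fix. Had the intent been to keep it, a working equivalent would compare str(e), as in this sketch (assuming the same Event model; not part of the commit):

def test_str():
    e = Event.objects.create(name="VolleyBall Day")
    assert str(e) == "VolleyBall Day"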
bf799d6651fa43561bc7697f9bc4410aed30bca6
remove dead code
corehq/form_processor/tests/utils.py
corehq/form_processor/tests/utils.py
import functools
from uuid import uuid4

from couchdbkit import ResourceNotFound
from datetime import datetime
from nose.tools import nottest

from casexml.apps.case.models import CommCareCase
from casexml.apps.phone.models import SyncLog
from corehq.form_processor.backends.sql.dbaccessors import CaseAccessorSQL, FormAccessorSQL
from corehq.form_processor.backends.sql.processor import FormProcessorSQL
from corehq.form_processor.interfaces.processor import FormProcessorInterface, ProcessedForms
from corehq.form_processor.parsers.form import process_xform_xml
from couchforms.models import XFormInstance
from dimagi.utils.couch.database import safe_delete
from corehq.util.test_utils import unit_testing_only, run_with_multiple_configs, RunConfig
from corehq.form_processor.models import XFormInstanceSQL, CommCareCaseSQL, CaseTransaction, Attachment
from django.conf import settings


class FormProcessorTestUtils(object):

    @classmethod
    @unit_testing_only
    def delete_all_cases(cls, domain=None):
        assert CommCareCase.get_db().dbname.startswith('test_')
        view_kwargs = {}
        if domain:
            view_kwargs = {
                'startkey': [domain],
                'endkey': [domain, {}],
            }

        cls._delete_all(
            CommCareCase.get_db(),
            'cases_by_server_date/by_server_modified_on',
            **view_kwargs
        )

        def _sql_delete(query, domain_filter):
            if domain is not None:
                query.filter(domain_filter)
            query.all().delete()

        FormProcessorTestUtils.delete_all_sql_cases(domain)

    @staticmethod
    def delete_all_sql_cases(domain=None):
        CaseAccessorSQL.delete_all_cases(domain)

    @classmethod
    @unit_testing_only
    def delete_all_xforms(cls, domain=None, user_id=None):
        view = 'couchforms/all_submissions_by_domain'
        view_kwargs = {}
        if domain and user_id:
            view = 'all_forms/view'
            view_kwargs = {
                'startkey': ['submission user', domain, user_id],
                'endkey': ['submission user', domain, user_id, {}],
            }
        elif domain:
            view_kwargs = {
                'startkey': [domain],
                'endkey': [domain, {}]
            }

        cls._delete_all(
            XFormInstance.get_db(),
            view,
            **view_kwargs
        )

        FormProcessorTestUtils.delete_all_sql_forms(domain, user_id)

    @staticmethod
    def delete_all_sql_forms(domain=None, user_id=None):
        FormAccessorSQL.delete_all_forms(domain, user_id)

    @classmethod
    @unit_testing_only
    def delete_all_sync_logs(cls):
        cls._delete_all(SyncLog.get_db(), 'phone/sync_logs_by_user')

    @staticmethod
    @unit_testing_only
    def _delete_all(db, viewname, **view_kwargs):
        deleted = set()
        for row in db.view(viewname, reduce=False, **view_kwargs):
            doc_id = row['id']
            if doc_id not in deleted:
                try:
                    safe_delete(db, doc_id)
                    deleted.add(doc_id)
                except ResourceNotFound:
                    pass


run_with_all_backends = functools.partial(
    run_with_multiple_configs,
    run_configs=[
        # run with default setting
        RunConfig(
            settings={
                'TESTS_SHOULD_USE_SQL_BACKEND': getattr(settings, 'TESTS_SHOULD_USE_SQL_BACKEND', False),
            },
            post_run=lambda *args, **kwargs: args[0].tearDown()
        ),
        # run with inverse of default setting
        RunConfig(
            settings={
                'TESTS_SHOULD_USE_SQL_BACKEND': not getattr(settings, 'TESTS_SHOULD_USE_SQL_BACKEND', False),
            },
            pre_run=lambda *args, **kwargs: args[0].setUp(),
        ),
    ]
)


@unit_testing_only
def post_xform(instance_xml, attachments=None, domain='test-domain'):
    """
    create a new xform and releases the lock

    this is a testing entry point only and is not to be used in real code
    """
    result = process_xform_xml(domain, instance_xml, attachments=attachments)
    with result.get_locked_forms() as xforms:
        FormProcessorInterface(domain).save_processed_models(xforms)
        return xforms[0]


@nottest
def create_form_for_test(domain, case_id=None, attachments=None, save=True):
    """
    Create the models directly so that these tests aren't dependent on any
    other apps. Not testing form processing here anyway.
    :param case_id: create case with ID if supplied
    :param attachments: additional attachments dict
    :param save: if False return the unsaved form
    :return: form object
    """
    from corehq.form_processor.utils import get_simple_form_xml

    form_id = uuid4().hex
    user_id = 'user1'
    utcnow = datetime.utcnow()

    form_xml = get_simple_form_xml(form_id, case_id)
    form = XFormInstanceSQL(
        form_id=form_id,
        xmlns='http://openrosa.org/formdesigner/form-processor',
        received_on=utcnow,
        user_id=user_id,
        domain=domain
    )

    attachments = attachments or {}
    attachment_tuples = map(
        lambda a: Attachment(name=a[0], raw_content=a[1], content_type=a[1].content_type),
        attachments.items()
    )
    attachment_tuples.append(Attachment('form.xml', form_xml, 'text/xml'))

    FormProcessorSQL.store_attachments(form, attachment_tuples)

    cases = []
    if case_id:
        case = CommCareCaseSQL(
            case_id=case_id,
            domain=domain,
            type='',
            owner_id=user_id,
            opened_on=utcnow,
            modified_on=utcnow,
            modified_by=user_id,
            server_modified_on=utcnow,
        )
        case.track_create(CaseTransaction.form_transaction(case, form))
        cases = [case]

    if save:
        FormProcessorSQL.save_processed_models(ProcessedForms(form, None), cases)
    return form
Python
0.999454
@@ -1394,168 +1394,8 @@
 )
 
-    def _sql_delete(query, domain_filter):
-        if domain is not None:
-            query.filter(domain_filter)
-        query.all().delete()
-
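The removed _sql_delete was a nested function that delete_all_cases defined but never called, so deleting it cannot change behavior. A minimal self-contained illustration of that kind of dead code:

def outer():
    def _helper():          # defined but never invoked anywhere in outer()
        return 'unused'
    return 'done'

assert outer() == 'done'    # identical result with _helper deleted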
539f575832244e426d768b0901113a1e45b25f3f
modify python ext setup script
src/python_zfor/setup.py
src/python_zfor/setup.py
#!/usr/bin/env python

from distutils.core import setup, Extension

zformod = Extension('zfor',
                    sources = ['src/zfor.c'],
                    library_dirs = ['/usr/local/lib'],
                    libraries = ['zfor']
                    )

setup(name = 'zfor',
      version = '0.1',
      description = 'Python zfor binding',
      author = ['Chris Goffinet'],
      author_email = ['cg@chrisgoffinet.com'],
      packages = [
          'zfor',
      ],
      package_dir = {'zfor' : 'src'},
      ext_modules = [zformod],
)
Python
0
@@ -80,23 +80,16 @@
 tension(
-'zfor',
 
@@ -85,34 +85,40 @@
 on(
-
+'zfor',
 sour
@@ -138,16 +138,70 @@
 for.c'],
+
+    include_dirs = ['../libzfor'],
 librar
@@ -230,31 +230,24 @@
 lib'
-]
 ,
-libraries = ['
+'../lib
 zfor']
+,
@@ -247,38 +247,53 @@
 '],
-
+libraries = ['zfor']
 )

 setup
@@ -293,16 +293,25 @@
 

 setup(
+
 name = '
@@ -323,16 +323,18 @@
 ,
+
 version
@@ -342,16 +342,18 @@
 '0.1',
+
 de
@@ -389,24 +389,26 @@
 ing',
+
 author = ['C
@@ -426,24 +426,26 @@
 et'],
+
 author_email
@@ -475,24 +475,26 @@
 om'],
+
 packages = [
@@ -498,32 +498,36 @@
 = [
+
 'zfor',
 ],
@@ -510,24 +510,30 @@
 'zfor',
+
 ],
@@ -529,16 +529,18 @@
 ],
+
 pa
@@ -575,16 +575,18 @@
 ,
+
 ext_modu
@@ -602,16 +602,45 @@
 ormod],
- )
+)
+
+# vim:ft=python ts=4 sw=4 et
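Because the hunks are dominated by re-indentation, the character diff is hard to read. Applying it to the old setup.py yields roughly this Extension block, a best-effort reconstruction (exact whitespace in the committed file may differ):

zformod = Extension(
    'zfor',
    sources = ['src/zfor.c'],
    include_dirs = ['../libzfor'],
    library_dirs = ['/usr/local/lib', '../libzfor'],
    libraries = ['zfor']
)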
e346f70eb34a029642410a92e449915801d9f78f
use relative import
antimarkdown/__init__.py
antimarkdown/__init__.py
# -*- coding: utf-8 -*-
"""antimarkdown -- convert HTML to Markdown.
"""
from lxml import html
from lxml.builder import E

import handlers

default_safe_tags = set('p blockquote i em strong b u a h1 h2 h3 h4 h5 h6 hr pre code div br img ul ol li span'.split())
default_safe_attrs = set('href src alt style title'.split())


def to_markdown(html_string, safe_tags=None, safe_attrs=None):
    """Convert the given HTML text fragment to Markdown.
    """
    # out = StringIO()
    # for f in parse_fragments(html_string, safe_tags=None, safe_attrs=None):
    #     handlers.process_tag_events(f, out)
    # return normalize(out.getvalue())
    return handlers.render(*parse_fragments(html_string, safe_tags))


def parse_fragments(html_string, safe_tags=None, safe_attrs=None):
    """Parse HTML fragments from the given HTML fragment string.
    """
    for f in html.fragments_fromstring(html_string):
        cf = clean_fragment(f, safe_tags=safe_tags, safe_attrs=safe_attrs)
        if cf is not None:
            yield cf


def clean_fragment(subtree, safe_tags=None, safe_attrs=None):
    """Clean an HTML fragment subtree of unsafe tags and attrs.
    """
    if isinstance(subtree, str):
        return E('p', subtree)

    if safe_tags is None:
        safe_tags = default_safe_tags
    if safe_attrs is None:
        safe_attrs = default_safe_attrs

    if subtree.tag not in safe_tags:
        if callable(subtree.tag):
            # A comment...
            return None
        p = html.Element('p')
        p.append(subtree)
        subtree = p

    for el in list(subtree.iter()):
        if el.tag not in safe_tags:
            el.drop_tag()
        else:
            for attr in list(el.attrib.keys()):
                if attr not in safe_attrs:
                    el.attrib.pop(attr)

    return subtree
Python
0
@@ -116,16 +116,23 @@
 port E
 
+from . 
 import h
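The one-line change matters because implicit relative imports were removed in Python 3: inside the antimarkdown package, a bare import is treated as absolute and no longer finds the sibling module. Sketch of the before and after:

# before: works on Python 2 only, as an implicit relative import
# import handlers

# after: explicit relative import, valid on Python 2.6+ and Python 3
from . import handlers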
c5a156921b7c50c1cf6dc0344b92b661e73c5e32
Build Results object from a dictionary
rnacentral/nhmmer/utils.py
rnacentral/nhmmer/utils.py
""" Copyright [2009-2014] EMBL-European Bioinformatics Institute Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import datetime from django.conf import settings import django_rq from rq import get_current_job from nhmmer_search import NhmmerSearch from nhmmer_parse import NhmmerResultsParser from models import Results, Query from settings import EXPIRATION, MAX_RUN_TIME def save_results(filename, job_id): """ Parse nhmmer results file and save the data in the database. """ results = [] query = Query.objects.get(id=job_id) for record in NhmmerResultsParser(filename=filename)(): results.append(Results(query_id=query, result_id=record['result_id'], rnacentral_id=record['rnacentral_id'], description=record['description'], score=record['score'], bias=record['bias'], e_value=record['e_value'], query_start=record['query_start'], query_end=record['query_end'], target_length=record['target_length'], target_start=record['target_start'], target_end=record['target_end'], alignment=record['alignment'])) Results.objects.bulk_create(results, 999) def save_query(sequence, job_id): """ Create query object in the main database. """ query = Query(id=job_id, query=sequence, length=len(sequence)) query.save() def nhmmer_search(sequence): """ RQ worker function. """ job = get_current_job() filename = NhmmerSearch(sequence=sequence, job_id=job.id)() save_query(sequence, job.id) save_results(filename, job.id) def enqueue_job(query): """ Submit job to the queue and return job id. """ queue = django_rq.get_queue() job = queue.enqueue_call(func=nhmmer_search, args=(query,), timeout=MAX_RUN_TIME, result_ttl=EXPIRATION) job.meta['query'] = query job.meta['expiration'] = datetime.datetime.now() + \ datetime.timedelta(seconds=EXPIRATION) job.save() return job.id def get_job(job_id): """ Get job from local or remote queues. Return a tuple (job, remote_server), where * `job` - job object * `remote_server` - server where the job was run (None for localhost) """ rq_queues = getattr(settings, 'RQ_QUEUES', []) for queue_id, params in rq_queues.iteritems(): queue = django_rq.get_queue(queue_id) job = queue.fetch_job(job_id) if job: return (job, params['REMOTE_SERVER']) return (None, None)
Python
0.000055
@@ -1104,797 +1104,72 @@
 re
-sults.append(Results(query_id=query,
-                               result_id=record['result_id'],
-                               rnacentral_id=record['rnacentral_id'],
-                               description=record['description'],
-                               score=record['score'],
-                               bias=record['bias'],
-                               e_value=record['e_value'],
-                               query_start=record['query_start'],
-                               query_end=record['query_end'],
-                               target_length=record['target_length'],
-                               target_start=record['target_start'],
-                               target_end=record['target_end'],
-                               alignment=record['alignment']
+cord['query_id'] = query
+        results.append(Results(**record
 ))
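The patch trades thirteen explicit keyword arguments for dict unpacking, which relies on the parser's record keys matching the Results model's field names. A minimal self-contained illustration of the pattern (the Results stand-in here is hypothetical, not the Django model):

class Results(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

record = {'result_id': 7, 'score': 42.5}   # keys mirror field names
record['query_id'] = 'query-obj'           # the one key the parser lacks
r = Results(**record)
assert r.score == 42.5 and r.query_id == 'query-obj'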
1a0828bd9dda4b17f6121174bd826ace1fe2eefb
Fix remove_item (#112)
Core/automation/lib/python/core/items.py
Core/automation/lib/python/core/items.py
# NOTE: Requires JythonItemProvider component
from core.jsr223 import scope
scope.scriptExtension.importPreset(None)

import core
from core import osgi, JythonItemProvider
from core.log import logging, LOG_PREFIX
from core.links import remove_all_links

ItemBuilderFactory = osgi.get_service(
    "org.openhab.core.items.ItemBuilderFactory"
) or osgi.get_service(
    "org.eclipse.smarthome.core.items.ItemBuilderFactory"
)

ManagedItemProvider = osgi.get_service(
    "org.openhab.core.items.ManagedItemProvider"
) or osgi.get_service(
    "org.eclipse.smarthome.core.items.ManagedItemProvider"
)

log = logging.getLogger(LOG_PREFIX + ".core.items")

__all__ = ["add_item", "remove_item"]


def add_item(item_or_item_name, item_type=None, category=None, groups=None,
             label=None, tags=[], gi_base_type=None, group_function=None):
    try:
        if not isinstance(item_or_item_name, basestring) and not hasattr(item_or_item_name, 'name'):
            raise Exception("\"{}\" is not a string or Item".format(item_or_item_name))
        item = item_or_item_name
        if isinstance(item_or_item_name, basestring):
            item_name = item_or_item_name
            if item_type is None:
                raise Exception("Must provide item_type when creating an Item by name")
            base_item = None if item_type != "Group" or gi_base_type is None \
                else ItemBuilderFactory.newItemBuilder(gi_base_type, item_name + "_baseItem").build()
            group_function = None if item_type != "Group" else group_function
            item = ItemBuilderFactory.newItemBuilder(item_type, item_name)\
                .withCategory(category)\
                .withGroups(groups)\
                .withLabel(label)\
                .withBaseItem(base_item)\
                .withGroupFunction(group_function)\
                .withTags(set(tags))\
                .build()

        JythonItemProvider.add(item)
        ManagedItemProvider.add(item)
        log.debug("Item added: [{}]".format(item))
    except:
        import traceback
        log.error(traceback.format_exc())
        return None
    else:
        return item


def remove_item(item_or_item_name):
    try:
        item = item_or_item_name
        if isinstance(item, basestring):
            if scope.itemRegistry.getItems(item) == []:
                raise Exception("\"{}\" is not in the ItemRegistry".format(item))
            else:
                item = scope.ir.getItem(item_or_item_name)
        elif not hasattr(item_or_item_name, 'name'):
            raise Exception("\"{}\" is not a string or Item".format(item_or_item_name))

        if scope.itemRegistry.getItems(item.name) == []:
            raise Exception("\"{}\" is not in the ItemRegistry".format(item.name))

        remove_all_links(item)
        JythonItemProvider.remove(item)
        ManagedItemProvider.remove(item)
        log.debug("Item removed: [{}]".format(item))
    except:
        import traceback
        log.error(traceback.format_exc())
Python
0
@@ -3126,32 +3126,37 @@
 ider.remove(item
+.name
 )
         log.de
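Per the diff, the bug was handing the Item object to a provider whose remove() is keyed by item name. Sketch of the fixed tail of remove_item(), assuming the decoded hunk above:

remove_all_links(item)
JythonItemProvider.remove(item)        # this provider takes the Item itself
ManagedItemProvider.remove(item.name)  # the fix: remove() is keyed by name
log.debug("Item removed: [{}]".format(item))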
08869ed4f1ac6190e5a3f6ec3439ce4cc7e010ba
add migration messenger
migrate2v2_dump.py
migrate2v2_dump.py
#!/usr/bin/env python
import os
from json import dump

from bitfield import BitHandler, BitField
from django import setup
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models import ImageField


class BatchSaveStreamList(list):
    def __init__(self, model_class, model_name, except_fields=None, list_map=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._model_class = model_class
        self._model_name = model_name
        self._except_fields = (except_fields or []) + ['id']
        self._list_map = list_map or {}

    def _fields(self, obj):
        return {ob.name: self._list_map_fn(obj, ob)
                for ob in obj._meta.concrete_fields
                if ob.name not in self._except_fields}

    def __iter__(self):
        for d in self._model_class.objects.all().iterator():
            yield {
                "model": self._model_name,
                "pk": d.pk,
                "fields": self._fields(d)
            }

    def _list_map_fn(self, obj, field):
        if field.is_relation:
            # fl = getattr(obj, field.name)
            val = getattr(obj, field.attname)
            return val
        elif field.name in self._list_map.keys():
            val = getattr(obj, field.name)
            return self._list_map.get(val)
        elif isinstance(field, BitField):
            val = getattr(obj, field.name)
            # val is instance of BitHandler
            return int(val)
        elif isinstance(field, ImageField):
            val = getattr(obj, field.name)
            if val._file:
                return val.url
        else:
            return getattr(obj, field.name) or None

    def __len__(self):
        return 1


def batch_save(fname, *args, **kwargs):
    sa = BatchSaveStreamList(*args, **kwargs)
    with open(fname, 'w') as f:
        dump(sa, f, ensure_ascii=False, indent=2, cls=DjangoJSONEncoder)


# ---------------------


def dump_groups():
    from group_app.models import Group
    batch_save("groups.json", Group, 'groupapp.group')


def dump_accounts():
    from accounts_app.models import UserProfile, BaseAccount, UserProfileLog
    batch_save('accounts_baseaccount.json', BaseAccount, 'profiles.baseaccount')
    batch_save('accounts_userprofile.json', UserProfile, 'profiles.userprofile')
    do_type_map = {
        'cusr': 1, 'dusr': 2,
        'cdev': 3, 'ddev': 4,
        'cnas': 5, 'dnas': 6,
        'csrv': 7, 'dsrv': 8
    }
    batch_save('accounts_userprofilelog.json', UserProfileLog, 'profiles.userprofilelog',
               except_fields=['meta_info'],
               list_map={'do_type': do_type_map})


if __name__ == '__main__':
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djing.settings')
    setup()
    dump_accounts()
Python
0.000001
@@ -72,20 +72,8 @@
 port
- BitHandler,
 Bit
@@ -2687,16 +2687,440 @@
 })
 
 
+def dump_messenger():
+    from messenger.models import Messenger, ViberMessenger, ViberMessage, ViberSubscriber
+    batch_save("messenger.json", Messenger, 'messenger.messenger')
+    batch_save("ViberMessenger.json", ViberMessenger, 'messenger.vibermessenger')
+    batch_save("ViberMessage.json", ViberMessage, 'messenger.vibermessage')
+    batch_save("ViberSubscriber.json", ViberSubscriber, 'messenger.vibersubscriber')
+
+
 if __nam
@@ -3229,19 +3229,20 @@
 dump_
-accounts
+messenger
 ()
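Note that besides adding dump_messenger(), the final hunk repoints the entry block, so running the script now performs the messenger dump instead of, not in addition to, the accounts dump. The resulting tail per the decoded hunks:

if __name__ == '__main__':
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djing.settings')
    setup()
    dump_messenger()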
d00235a494b8dc5db18e0d0b7fa1135826ae49d3
Set template_debug to False.
restfulgrok/fancyhtmlview.py
restfulgrok/fancyhtmlview.py
from jinja2 import Template
from pkg_resources import resource_string

from view import GrokRestViewMixin
from contenttype import ContentType, ContentTypesRegistry
from contenttype import JsonContentType


class HtmlContentType(ContentType):
    """
    XHTML content type.

    Provides a dumps-method that uses a jinja2-template to generate a
    bootstrap-styled HTML-document which is suitable as a default view for
    a REST API.
    """
    mimetype = 'text/html'
    extension = 'html'
    description = 'Formatted HTML view with help for the REST API.'

    #: If ``True``, reload template on each request. If ``False``, cache the
    #: template data in the class after first read.
    template_debug = True

    #: The :func:`pkg_resources.resource_string` args for the template file.
    template_path = (__name__, 'fancyhtmltemplate.jinja.html')

    #: The :func:`pkg_resources.resource_string` args for the css file.
    css_path = (__name__, 'bootstrap.min.css')

    #: Variable forwarded to the template as ``pagetitle``.
    html_pagetitle = 'REST API'

    @classmethod
    def get_cached_file(cls, cacheattr, resource_string_path):
        """
        Get file contents using :func:`pkg_resources.resource_string`.

        If :obj:`.template_debug` is ``False``, cache the data in the class
        attribute ``cacheattr`` and use the cache on subsequent calls.

        :param cacheattr: Attribute to use a cache of the file contents.
        :param resource_string_path:
            :func:`pkg_resources.resource_string` path to the file.
        """
        if cls.template_debug:
            return resource_string(*resource_string_path)
        else:
            if not hasattr(cls, cacheattr):
                source = resource_string(*resource_string_path)
                setattr(cls, cacheattr, source)
            return getattr(cls, cacheattr)

    @classmethod
    def get_template_source(cls):
        """
        Use :meth:`.get_cached_file` to get :obj:`.template_path`.
        """
        return cls.get_cached_file('cache_template_source', cls.template_path)

    @classmethod
    def get_css_source(cls):
        """
        Use :meth:`.get_cached_file` to get :obj:`.css_path`.
        """
        return cls.get_cached_file('cache_css_source', cls.css_path)

    @classmethod
    def get_template_data(cls, pydata, view):
        """
        Get the template data.

        :return: Template data.
        :rtype: dict
        """
        jsondata = JsonContentType.dumps(pydata)
        return dict(jsondata=jsondata,
                    css=cls.get_css_source(),
                    content_types=view.content_types,
                    pagetitle=cls.html_pagetitle)

    @classmethod
    def dumps(cls, pydata, view):
        template = Template(cls.get_template_source())
        return template.render(**cls.get_template_data(pydata, view)).encode('utf-8')


class GrokRestViewWithFancyHtmlMixin(GrokRestViewMixin):
    """
    Adds :class:`HtmlContentType` to ``content_types``.
    """
    content_types = GrokRestViewMixin.content_types + ContentTypesRegistry(HtmlContentType)
Python
0
@@ -700,19 +700,20 @@
 debug = 
-Tru
+Fals
 e
 
     #
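Flipping template_debug to False routes get_cached_file() onto its caching branch: each resource is read from disk once and memoized on the class. The relevant logic, condensed from the file above:

if cls.template_debug:              # now False, so the early return is skipped
    return resource_string(*resource_string_path)
if not hasattr(cls, cacheattr):     # first call: read once and memoize
    setattr(cls, cacheattr, resource_string(*resource_string_path))
return getattr(cls, cacheattr)      # later calls: serve the cached bytes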
d450cbd8563fd1a6cb8654d7ccdbe045fd1bf6cb
Fix theme tests
src/wirecloud/platform/tests/themes.py
src/wirecloud/platform/tests/themes.py
# -*- coding: utf-8 -*-

# Copyright (c) 2016 CoNWeT Lab., Universidad Politécnica de Madrid

# This file is part of Wirecloud.

# Wirecloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# Wirecloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with Wirecloud. If not, see <http://www.gnu.org/licenses/>.

from __future__ import unicode_literals

import errno
import unittest

from django.conf import settings
from django.template import TemplateDoesNotExist
from django.test.utils import override_settings
from mock import MagicMock, Mock, patch

from wirecloud.platform.themes import DEFAULT_THEME, get_active_theme_name, get_theme_chain, TemplateLoader


class Theme(object):

    def __init__(self, id, **kwargs):
        self.id = id
        self.__file__ = "/%s/__init__.py" % id.replace('.', '/')
        if 'parent' in kwargs:
            self.parent = kwargs['parent']

    def __repr__(self):
        return self.id


DEFAULT_THEME_MODULE = Theme(DEFAULT_THEME, parent=None)
CUSTOM_THEME_MODULE = Theme("customtheme")
CUSTOMMOD_THEME_MODULE = Theme("custommodtheme", parent="customtheme")
CUSTOM_ROOT_THEME_MODULE = Theme("customroottheme", parent=None)


def import_module_tester(module):
    if module == DEFAULT_THEME:
        return DEFAULT_THEME_MODULE
    elif module == "customtheme":
        return CUSTOM_THEME_MODULE
    elif module == "custommodtheme":
        return CUSTOMMOD_THEME_MODULE
    elif module == "customroottheme":
        return CUSTOM_ROOT_THEME_MODULE
    else:
        raise ImportError


@patch('wirecloud.platform.themes.import_module', new=import_module_tester)
class ThemesTestCase(unittest.TestCase):

    tags = ('wirecloud-noselenium', 'wirecloud-themes')

    @override_settings(THEME_ACTIVE=None)
    def test_get_active_theme_name_default(self):
        del settings.THEME_ACTIVE
        self.assertEqual(get_active_theme_name(), "wirecloud.defaulttheme")

    @override_settings(THEME_ACTIVE="customtheme")
    def test_get_active_theme_name_custom_value(self):
        self.assertEqual(get_active_theme_name(), "customtheme")

    @override_settings(THEME_ACTIVE=DEFAULT_THEME)
    def test_get_theme_chain_default(self):
        self.assertEqual(get_theme_chain(), [DEFAULT_THEME_MODULE])

    @override_settings(THEME_ACTIVE="customtheme")
    def test_get_theme_chain_basic_default_theme_modification(self):
        self.assertEqual(get_theme_chain(), [CUSTOM_THEME_MODULE, DEFAULT_THEME_MODULE])

    @override_settings(THEME_ACTIVE="customroottheme")
    def test_get_theme_chain_basic_custom_root_theme(self):
        self.assertEqual(get_theme_chain(), [CUSTOM_ROOT_THEME_MODULE])

    @override_settings(THEME_ACTIVE="invalidtheme")
    def test_get_theme_chain_basic_import_error(self):
        self.assertRaises(ValueError, get_theme_chain)

    @override_settings(THEME_ACTIVE="customtheme")
    def test_get_template_sources_basic(self):
        loader = TemplateLoader(Mock())
        expected_paths = ['/customtheme/templates/a.html', '/wirecloud/defaulttheme/templates/a.html']
        self.assertEqual([origin.name for origin in loader.get_template_sources("a.html")], expected_paths)

    @override_settings(THEME_ACTIVE="customtheme")
    def test_get_template_sources_abs_template_name(self):
        loader = TemplateLoader(Mock())
        expected_paths = ['/customtheme/templates/a.html']
        self.assertEqual([origin.name for origin in loader.get_template_sources("/customtheme/templates/a.html")], expected_paths)

    @override_settings(THEME_ACTIVE="customtheme")
    @patch('wirecloud.platform.themes.io.open')
    def test_get_contents(self, open_mock):
        open_mock().__enter__().read.return_value = 'contents'
        loader = TemplateLoader(Mock())
        self.assertEqual(loader.get_contents(Mock()), 'contents')

    @override_settings(THEME_ACTIVE="customtheme")
    @patch('wirecloud.platform.themes.io.open')
    def test_get_contents_ENOENT(self, open_mock):
        open_mock.side_effect = IOError(errno.ENOENT, 'No such file or directory')
        loader = TemplateLoader(Mock())
        self.assertRaises(TemplateDoesNotExist, loader.get_contents, Mock())

    @override_settings(THEME_ACTIVE="customtheme")
    @patch('wirecloud.platform.themes.io.open')
    def test_get_contents_generic_IOError(self, open_mock):
        open_mock.side_effect = IOError(errno.EIO, 'I/O error')
        loader = TemplateLoader(Mock())
        self.assertRaises(IOError, loader.get_contents, Mock())

    @override_settings(THEME_ACTIVE="customtheme")
    @patch('wirecloud.platform.themes.io.open')
    def test_load_template_source_first_source_path(self, open_mock):
        open_mock().__enter__().read.return_value = 'file_content'
        loader = TemplateLoader(Mock())
        self.assertEqual(loader.load_template_source("a.html"), ('file_content', '/customtheme/templates/a.html'))

    @override_settings(THEME_ACTIVE="customtheme")
    @patch('wirecloud.platform.themes.io.open')
    def test_load_template_source_second_source_path(self, open_mock):
        file_mock = MagicMock()
        file_mock.__enter__().read.return_value = "file_content"
        open_mock.side_effect = (IOError(errno.ENOENT, 'No such file or directory'), file_mock)
        loader = TemplateLoader(Mock())
        self.assertEqual(loader.load_template_source("a.html"), ('file_content', '/wirecloud/defaulttheme/templates/a.html'))

    @override_settings(THEME_ACTIVE="customtheme")
    @patch('wirecloud.platform.themes.io.open')
    def test_load_template_source_not_found(self, open_mock):
        open_mock.side_effect = IOError(errno.ENOENT, 'No such file or directory')
        loader = TemplateLoader(Mock())
        self.assertRaises(TemplateDoesNotExist, loader.load_template_source, "a.html")
Python
0.000001
@@ -1989,22 +1989,150 @@
 Error
 
 
-@patch
+get_available_themes_mock = Mock(return_value = (DEFAULT_THEME, "customtheme", "custommodtheme", "customroottheme"))
+
+
+@patch.multiple
 ('wirecl
@@ -2150,17 +2150,19 @@
 m.themes
-.
+', 
 import_m
@@ -2170,14 +2170,8 @@
 dule
-', new
 =imp
@@ -2187,16 +2187,64 @@
 e_tester
+, get_available_themes=get_available_themes_mock
 )
 class
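Reconstructed from the hunks, the class decorator becomes a patch.multiple() call that mocks both import_module and a get_available_themes helper (line breaks here are approximate; Mock is already imported in the file):

get_available_themes_mock = Mock(return_value=(DEFAULT_THEME, "customtheme",
                                               "custommodtheme", "customroottheme"))


@patch.multiple('wirecloud.platform.themes',
                import_module=import_module_tester,
                get_available_themes=get_available_themes_mock)
class ThemesTestCase(unittest.TestCase):
    ...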
a25b720d46ba4d8322e0a3d87b526c95de071913
update cryo manager
pychron/hardware/lakeshore/model330.py
pychron/hardware/lakeshore/model330.py
# ===============================================================================
# Copyright 2018 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from traitsui.api import Item, UItem, HGroup, VGroup, Spring

from pychron.core.ui.lcd_editor import LCDEditor
from pychron.hardware.lakeshore.base_controller import BaseLakeShoreController
from pychron.hardware import get_float

import time


class Model330TemperatureController(BaseLakeShoreController):

    def set_setpoint(self, v, output=1, retries=3):
        self.set_range(v)
        for i in range(retries):
            self.tell('SETP {}'.format(v))
            time.sleep(2)
            sp = self.read_setpoint(output, verbose=True)
            self.debug('setpoint set to={} target={}'.format(sp, v))
            if sp == v:
                break
            time.sleep(1)
        else:
            self.warning_dialog('Failed setting setpoint to {}. Got={}'.format(v, sp))

    def set_range(self, v):
        # if v <= 10:
        #     self.tell('RANGE {},{}'.format(output, 1))
        # elif 10 < v <= 30:
        #     self.tell('RANGE {},{}'.format(output, 2))
        # else:
        #     self.tell('RANGE {},{}'.format(output, 3))
        for r in self.range_tests:
            ra = r.test(v)
            if ra:
                self.tell('RANG {}'.format(ra))
                break
        time.sleep(1)

    @get_float(default=0)
    def read_setpoint(self, output, verbose=False):
        if output is not None:
            return self.ask('SETP?', verbose=verbose)

    def get_control_group(self):
        grp = VGroup(Spring(height=10, springy=False),
                     HGroup(Item('input_a', style='readonly',
                                 editor=LCDEditor(width=120, ndigits=6, height=30)),
                            Item('setpoint1'),
                            UItem('setpoint1_readback',
                                  editor=LCDEditor(width=120, height=30),
                                  style='readonly'),
                            Spring(width=10, springy=False)),
                     HGroup(Item('input_b', style='readonly',
                                 editor=LCDEditor(width=120, ndigits=6, height=30)),
                            Spring(width=10, springy=False)))
        return grp

# ============= EOF =============================================
Python
0
@@ -964,16 +964,17 @@
 t time
 
+
 class Mo
@@ -1027,24 +1027,26 @@
 oller):
 
+# 
 def set_set
@@ -1082,20 +1082,27 @@
 ies=3):
+# 
+# 
 sel
@@ -1115,24 +1115,26 @@
 range(v)
+# 
 for i i
@@ -1150,24 +1150,26 @@
 etries):
+# 
 sel
@@ -1195,24 +1195,26 @@
 rmat(v))
+# 
 tim
@@ -1223,24 +1223,26 @@
 sleep(2)
+# 
 sp
@@ -1283,24 +1283,26 @@
 se=True)
+# 
 sel
@@ -1354,24 +1354,26 @@
 (sp, v))
+# 
 if
@@ -1381,24 +1381,25 @@
 p == v:
+#
@@ -1394,24 +1394,25 @@
+ 
 break
@@ -1404,24 +1404,26 @@
 break
+# 
 tim
@@ -1433,20 +1433,27 @@
 leep(1)
+# 
+# 
 els
@@ -1454,24 +1454,26 @@
 else:
+# 
 sel
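Taken together, the hunks shown (the row appears to end mid-diff) insert a '# ' prefix line by line, commenting out the Model330 override of set_setpoint() so calls fall through to BaseLakeShoreController. A sketch of the class after the patch, assuming the decoded hunks:

class Model330TemperatureController(BaseLakeShoreController):

    # def set_setpoint(self, v, output=1, retries=3):
    #     self.set_range(v)
    #     for i in range(retries):
    #         ...
    #     else:
    #         ...

    def set_range(self, v):
        ...  # unchanged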