Dataset schema:

  column        type            min  max
  commit        stringlengths   40   40
  subject       stringlengths   1    1.49k
  old_file      stringlengths   4    311
  new_file      stringlengths   4    311
  new_contents  stringlengths   1    29.8k
  old_contents  stringlengths   0    9.9k
  lang          stringclasses   3 values
  proba         float64         0    1
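A minimal sketch (my addition) of how rows with this schema could be consumed; the dataset identifier is hypothetical:

from datasets import load_dataset  # Hugging Face `datasets` library

ds = load_dataset("user/commit-dataset", split="train")  # hypothetical dataset id
for row in ds:
    if row["lang"] == "Python" and row["proba"] < 0.5:
        print(row["commit"][:8], row["subject"])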
98b85a9fd8d5082e40996dfba0359b0ec32a9267
Add solution for "Cakes" kata https://www.codewars.com/kata/525c65e51bf619685c000059
codewars/cakes.py
codewars/cakes.py
# Cakes
# https://www.codewars.com/kata/525c65e51bf619685c000059

import math
import unittest
from typing import Dict


def cakes_1(recipe, available):
    # type: (Dict[str, int], Dict[str, int]) -> int
    lowest_available = math.inf
    for i, a in recipe.items():
        if i in available.keys():
            av = available.get(i) / a
            if lowest_available > av:
                lowest_available = av
        else:
            lowest_available = 0
            break
    return int(lowest_available)


def cakes(recipe, available):
    # type: (Dict[str, int], Dict[str, int]) -> int
    return min(available.get(i, 0) // recipe[i] for i in recipe)


class TestCakes(unittest.TestCase):
    def test(self):
        recipe = {"flour": 500, "sugar": 200, "eggs": 1}
        available = {"flour": 1200, "sugar": 1200, "eggs": 5, "milk": 200}
        self.assertEqual(cakes(recipe, available), 2, 'Wrong result for example #1')

        recipe = {"apples": 3, "flour": 300, "sugar": 150, "milk": 100, "oil": 100}
        available = {"sugar": 500, "flour": 2000, "milk": 2000}
        self.assertEqual(cakes(recipe, available), 0, 'Wrong result for example #2')
Python
0.000191
5b97e6fc0446912d5b9b8da65e60d06165ed1b8b
Add profile tests
budgetsupervisor/users/tests/test_models.py
budgetsupervisor/users/tests/test_models.py
from users.models import Profile


def test_profile_is_created_when_user_is_created(user_foo):
    assert len(Profile.objects.all()) == 1
    assert hasattr(user_foo, "profile")


def test_profile_is_not_created_when_user_is_updated(user_foo):
    assert len(Profile.objects.all()) == 1
    user_foo.username = "abc"
    user_foo.save()
    assert len(Profile.objects.all()) == 1


def test_profile_str(user_foo):
    assert str(user_foo.profile) == str(user_foo)
Python
0.000001
275adbea5477bbc6938e59edab23e1df182435ea
Create split-array-with-equal-sum.py
Python/split-array-with-equal-sum.py
Python/split-array-with-equal-sum.py
# Time:  O(n^2)
# Space: O(n)

class Solution(object):
    def splitArray(self, nums):
        """
        :type nums: List[int]
        :rtype: bool
        """
        if len(nums) < 7:
            return False

        accumulated_sum = [0] * len(nums)
        accumulated_sum[0] = nums[0]
        for i in xrange(1, len(nums)):
            accumulated_sum[i] = accumulated_sum[i-1] + nums[i]

        for j in xrange(3, len(nums)-3):
            lookup = set()
            for i in xrange(1, j-1):
                if accumulated_sum[i-1] == accumulated_sum[j-1] - accumulated_sum[i]:
                    lookup.add(accumulated_sum[i-1])
            for k in xrange(j+2, len(nums)-1):
                if accumulated_sum[-1] - accumulated_sum[k] == accumulated_sum[k-1] - accumulated_sum[j] and \
                   accumulated_sum[k-1] - accumulated_sum[j] in lookup:
                    return True
        return False
Python
0.009008
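A quick usage check (my addition, not part of the commit; the file targets Python 2, where xrange is available):

# [1,2,1,2,1,2,1] splits at i=1, j=3, k=5 into four parts that each sum to 1
print(Solution().splitArray([1, 2, 1, 2, 1, 2, 1]))  # True
print(Solution().splitArray([1, 2, 3, 4]))           # False (fewer than 7 elements)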
5e427315f46c026dd3b72b49349d3dcdbf04d138
add financial_insights
financial_insights.py
financial_insights.py
'''
Created on Apr, 2017

@author: hugo
'''
import numpy as np


def calc_ranks(x):
    """Given a list of items, return a list (in ndarray type) of ranks.
    """
    n = len(x)
    index = list(zip(*sorted(list(enumerate(x)), key=lambda d: d[1], reverse=True))[0])
    rank = np.zeros(n)
    rank[index] = range(1, n + 1)
    return rank


def rank_bank_topic(bank_doc_map, doc_topic_dist):
    """Rank topics for banks
    """
    bank_topic_ranks = {}
    for each_bank in bank_doc_map:
        rank = []
        for each_doc in bank_doc_map[each_bank]:
            rank.append(calc_ranks(doc_topic_dist[each_doc]))
        rank = np.r_[rank]
        # compute ranking score
        bank_topic_ranks[each_bank] = np.sum(1. / rank, axis=0)
    return bank_topic_ranks


if __name__ == '__main__':
    n = 10
    bank_doc_map = {'bank_0': ['doc_0', 'doc_1'], 'bank_1': ['doc_2', 'doc_3', 'doc_4']}
    doc_topic_dist = dict([('doc_%s' % i, np.random.randn(n)) for i in range(5)])
    rank = rank_bank_topic(bank_doc_map, doc_topic_dist)
Python
0.000242
6c133d4de6a79eab6bfc2da9ff9a0045e0a0994d
add problem hackerrank 009
hackerrank/009_sherlock_and_the_beast.py
hackerrank/009_sherlock_and_the_beast.py
#!/bin/python3
"""
https://www.hackerrank.com/challenges/sherlock-and-the-beast?h_r=next-challenge&h_v=zen

Sherlock Holmes suspects his archenemy, Professor Moriarty, is once again
plotting something diabolical. Sherlock's companion, Dr. Watson, suggests
Moriarty may be responsible for MI6's recent issues with their supercomputer,
The Beast. Shortly after resolving to investigate, Sherlock receives a note
from Moriarty boasting about infecting The Beast with a virus; however, he
also gives him a clue -- a number, N. Sherlock determines the key to removing
the virus is to find the largest Decent Number having N digits.

A Decent Number has the following properties:
    Its digits can only be 3's and/or 5's.
    The number of 3's it contains is divisible by 5.
    The number of 5's it contains is divisible by 3.
    If there are more than one such number, we pick the largest one.

Moriarty's virus shows a clock counting down to The Beast's destruction, and
time is running out fast. Your task is to help Sherlock find the key before
The Beast is destroyed!

Constraints
    1<=T<=20
    1<=N<=100000

Input Format
    The first line is an integer, T, denoting the number of test cases.
    The T subsequent lines each contain an integer, N, detailing the number
    of digits in the number.

Output Format
    Print the largest Decent Number having N digits; if no such number
    exists, tell Sherlock by printing -1.

Sample Input
    4
    1
    3
    5
    11

Sample Output
    -1
    555
    33333
    55555533333

Explanation
    For N = 1, there is no decent number having 1 digit (so we print -1).
    For N = 3, 555 is the only possible number. The number 5 appears three
    times in this number, so our count of 5's is evenly divisible by 3
    (Decent Number Property 3).
    For N = 5, 33333 is the only possible number. The number 3 appears five
    times in this number, so our count of 3's is evenly divisible by 5
    (Decent Number Property 2).
    For N = 11, 55555533333 and all permutations of these digits are valid
    numbers; among them, the given number is the largest one.
"""
import sys

t = int(input().strip())
for a0 in range(t):
    n = int(input().strip())
Python
0.999803
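The committed file stops after reading N; a minimal sketch of the missing logic (my addition, not part of the commit) follows. The largest decent number puts as many 5's as possible in front, so we look for the largest count of 5's that is a multiple of 3 and leaves a remainder divisible by 5:

def largest_decent_number(n):
    # f fives (f % 3 == 0) followed by (n - f) threes ((n - f) % 5 == 0);
    # maximizing f yields the largest number.
    for fives in range(n, -1, -1):
        if fives % 3 == 0 and (n - fives) % 5 == 0:
            return '5' * fives + '3' * (n - fives)
    return '-1'

# e.g. largest_decent_number(11) == '55555533333', largest_decent_number(1) == '-1'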
ef1fa03d753f5d8a0b32831320a1b3e076ace363
Add a test runner for our jqplot demo too
moksha/apps/demo/MokshaJQPlotDemo/run_tests.py
moksha/apps/demo/MokshaJQPlotDemo/run_tests.py
#!/usr/bin/env python
"""
nose runner script.
"""
__requires__ = 'moksha'
import pkg_resources
import nose

if __name__ == '__main__':
    nose.main()
Python
0
bd87286dccc22c6e7ac1470882550c2515df4882
Add support for eve-central
modules/evecentral.py
modules/evecentral.py
from core.Uusipuu import UusipuuModule
from twisted.internet import defer
from twisted.internet.defer import inlineCallbacks
from twisted.web import client
from xml.dom import minidom
import locale

class Module(UusipuuModule):

    def startup(self):
        self.db_open('evedb', 'data/db/evedb.sqlite')

    def shutdown(self):
        self.db_close('evedb')

    def findtype(self, key):
        if key.isdigit():
            sql = 'SELECT typeName, typeID, groupID FROM invTypes' + \
                ' WHERE typeID = ? COLLATE NOCASE'
        else:
            sql = 'SELECT typeName, typeID, groupID FROM invTypes' + \
                ' WHERE typeName = ? COLLATE NOCASE'
        return self.db.runQuery(sql, (key,))

    def findregion(self, key):
        sql = 'SELECT regionID, regionName FROM mapRegions' + \
            ' WHERE regionName LIKE ? COLLATE NOCASE'
        param = '%' + key + '%'
        return self.db.runQuery(sql, (param,))

    @inlineCallbacks
    def cmd_market(self, user, replyto, params):
        if not len(params.strip()):
            self.bot.msg(replyto, 'Usage: !market item, region')
            return

        pieces = params.strip().split(',')
        item, region = pieces[0], None
        if len(pieces) > 1:
            region = pieces[1].strip()

        type_id = yield self.findtype(item)
        if not len(type_id):
            self.bot.msg(replyto, 'Unknown item :(')
            self.log('Item not found [%s]' % (item,))
            return
        type_id = type_id[0]['typeID']
        type_id = int(type_id)

        url = 'http://api.eve-central.com/api/marketstat?typeid=%d' % \
            (type_id,)

        if region:
            result = yield self.findregion(region)
            if not len(result):
                self.bot.msg(replyto, 'Unknown region :(')
                self.log('Region not found [%s]' % (region,))
                return
            region = str(result[0]['regionName'])
            region_id = result[0]['regionID']
            region_id = int(region_id)
            url += '&regionlimit=%d' % (region_id,)

        result = yield client.getPage(url)

        print result
        #print 'Consider searching market for %d' % (type_id,)

        try:
            dom = minidom.parseString(result)
        except:
            self.bot.msg(replyto, 'XML parse failed :(')
            self.log('XML parse failed (%s)' % item)
            return

        if not dom.getElementsByTagName('marketstat'):
            self.bot.msg(replyto, 'No market data found in the result :(')
            self.log('No market data found in the result (%s)' % item)
            return

        buy_volume = sell_volume = 0
        buy_price = sell_price = 0

        buy = dom.getElementsByTagName('buy')[0]
        buy_volume = buy.getElementsByTagName('volume')
        buy_volume = buy_volume[0].childNodes[0].nodeValue
        buy_volume = int(buy_volume)
        buy_price = buy.getElementsByTagName('max')
        buy_price = buy_price[0].childNodes[0].nodeValue
        buy_price = float(buy_price)

        sell = dom.getElementsByTagName('sell')[0]
        sell_volume = sell.getElementsByTagName('volume')
        sell_volume = sell_volume[0].childNodes[0].nodeValue
        sell_volume = int(sell_volume)
        sell_price = sell.getElementsByTagName('min')
        sell_price = sell_price[0].childNodes[0].nodeValue
        sell_price = float(sell_price)

        locale.setlocale(locale.LC_ALL, '.'.join(locale.getdefaultlocale()))

        if not region:
            region = 'Global'

        msg = '%s (%s): Buy %s, Sell %s' % \
            (item, region,
             locale.format('%.*f', (2, buy_price), True),
             locale.format('%.*f', (2, sell_price), True))

        self.bot.msg(replyto, msg)
        self.log('%s' % msg)

        dom.unlink()

# vim: set et sw=4:
Python
0
3623b058670f19dc7a6aa3372a04b61bc96a8678
Create SenderNode.py
examples/SenderNode.py
examples/SenderNode.py
'''
This is the Sender Node.
It sends the start spike train and then simply receives and sends spikes
using Brian NeuronGroups. At the end of the simulation the data collected
is plotted.
'''
from brian import *
import argparse

from brian_multiprocess_udp import BrianConnectUDP


def main_NeuronGroup(input_Neuron_Group, simulation_clock):
    print "main_NeuronGroup!"  # DEBUG!
    simclock = simulation_clock

    Nr = NeuronGroup(Number_of_Neurons, model='v:1', reset=0, threshold=0.5,
                     clock=simclock)
    Nr.v = 1

    # SYNAPSES BETWEEN REAL NEURON NETWORK AND THE INPUT
    Syn_iNG_Nr = Synapses(input_Neuron_Group, Nr, model='w:1', pre='v+=w',
                          clock=simclock)
    Syn_iNG_Nr[:, :] = 'i==j'  # It is a one-to-one connection configuration
    Syn_iNG_Nr.w = 1  # So it spikes every time it receives an spike (threshold is lower than one).

    MExt = SpikeMonitor(Nr)  # Spikes sent by UDP
    Mdummy = SpikeMonitor(input_Neuron_Group)  # Spikes received by UDP

    return ([Nr], [Syn_iNG_Nr], [MExt, Mdummy])


def post_simulation_function(input_NG, simulation_NG, simulation_SYN, simulation_MN):
    """
    input_NG: the neuron group that receives the input spikes
    simulation_NG: the neuron groups list passed to the system by the user function (main_NeuronGroup)
    simulation_SYN: the synapses list passed to the system by the user function (main_NeuronGroup)
    simulation_MN: the monitors list passed to the system by the user function (main_NeuronGroup)

    This way it is possible to plot, save or do whatever you want with these
    objects after the end of the simulation!
    """
    figure()
    ylim(-1, Number_of_Neurons)
    raster_plot(simulation_MN[0])
    raster_plot(simulation_MN[1])
    # plot(simulation_MN[0][0],'b.')
    # plot(simulation_MN[1][0],'g.')
    title("Spikes \"start to be\" Sent(blue)/Received(green) by UDP")
    show(block=True)
    # savefig('output.pdf')


if __name__ == "__main__":
    # Process the information received from the command line arguments.
    parser = argparse.ArgumentParser(description="Sets up and launch the SENDER node.")
    parser.add_argument("--neurons", help="The total number of neurons (INPUT/OUTPUT are the same size).", type=int, required=True)
    parser.add_argument("--output_port", help="Output port (integer).", type=int, required=True)
    parser.add_argument("--input_port", help="Input port (integer).", type=int, required=True)
    parser.add_argument("--output_ip", help="Output addresses(string).", type=str, required=True)
    parser.add_argument("--sim_clock", help="Simulation clock (float, milliseconds).", type=float, required=True)
    parser.add_argument("--input_clock", help="Input clock (float, milliseconds).", type=float, required=True)
    parser.add_argument("--brian_addr", help="A number from 0 to 255 used to identify the node.", type=int, default=0)
    parser.add_argument("--ttime", help="Total time of the simulation.", type=int, required=True)

    args = parser.parse_args()

    Number_of_Neurons = args.neurons

    my_simulation = BrianConnectUDP(main_NeuronGroup=main_NeuronGroup,
                                    post_simulation_function=post_simulation_function,
                                    NumOfNeuronsOutput=Number_of_Neurons,
                                    NumOfNeuronsInput=Number_of_Neurons,
                                    simclock_dt=args.sim_clock,
                                    inputclock_dt=args.input_clock,
                                    input_addresses=[('localhost', args.input_port, Number_of_Neurons)],
                                    output_addresses=[(args.output_ip, args.output_port)],
                                    TotalSimulationTime=args.ttime,
                                    brian_address=args.brian_addr)
Python
0
b261704bc0ada9cfae773eaf1e40b18dc49d6ceb
add outline of background job processor and task interface
portality/background.py
portality/background.py
from portality import models
from portality.core import app


class BackgroundApi(object):
    @classmethod
    def execute(self, background_task):
        job = background_task.background_job
        ctx = None
        if job.user is not None:
            ctx = app.test_request_context("/")
            ctx.push()

        try:
            background_task.run()
        except:
            background_task.log()

        try:
            background_task.cleanup()
        except:
            background_task.log()

        background_task.report()
        job.save()

        if ctx is not None:
            ctx.pop()


class BackgroundTask(object):
    """
    All background tasks should extend from this object and override at least
    the following methods:

    - run
    - cleanup
    - report
    - log
    - prepare (class method)
    """

    def __init__(self, background_job):
        self.background_job = background_job

    def run(self):
        """
        Execute the task as specified by the background_job
        :return:
        """
        raise NotImplementedError()

    def cleanup(self):
        """
        Cleanup after a successful OR failed run of the task
        :return:
        """
        raise NotImplementedError()

    def report(self):
        """
        Augment the background_job with information about the task run
        :return:
        """
        raise NotImplementedError()

    def log(self):
        """
        Log any exceptions or other errors in running the task
        :return:
        """
        raise NotImplementedError()

    @classmethod
    def prepare(cls, **kwargs):
        """
        Take an arbitrary set of keyword arguments and return an instance of
        a BackgroundJob, or fail with a suitable exception

        :param kwargs: arbitrary keyword arguments pertaining to this task type
        :return: a BackgroundJob instance representing this task
        """
        raise NotImplementedError()

    @classmethod
    def submit(cls, background_job):
        """
        Submit the specified BackgroundJob to the background queue

        :param background_job: the BackgroundJob instance
        :return:
        """
        pass
Python
0
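A minimal sketch (my addition, not part of the commit) of a concrete task built on this interface; the BackgroundJob model name is an assumption:

class ExampleTask(BackgroundTask):
    """Hypothetical task illustrating the BackgroundTask interface."""

    def run(self):
        pass  # do the actual work described by self.background_job here

    def cleanup(self):
        pass  # nothing to tidy up in this toy example

    def report(self):
        pass  # e.g. attach counters/output to self.background_job

    def log(self):
        pass  # e.g. record the current exception on the job

    @classmethod
    def prepare(cls, **kwargs):
        job = models.BackgroundJob()  # assumes such a model exists in portality.models
        return cls(job)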
c8ec0689950a5fea0aff98afe54b172bd84e2ce9
Add example using Tom's registration code in scipy.
examples/coregister.py
examples/coregister.py
"""Example using Tom's registration code from scipy. """ from os import path from glob import glob import scipy.ndimage._registration as reg # Data files basedir = '/Users/cburns/data/twaite' anatfile = path.join(basedir, 'ANAT1_V0001.img') funcdir = path.join(basedir, 'fMRIData') fileglob = path.join(funcdir, 'FUNC1_V000?.img') # Get first 10 images if __name__ == '__main__': print 'Coregister anatomical:\n', anatfile print '\nWith these functional images:' funclist = glob(fileglob) for func in funclist: print func measures, imageF_anat, fmri_series = \ reg.demo_MRI_coregistration(anatfile, funclist[0:4])
Python
0
114f8012a7faec4fe107c1d68c2ead10cdd88fbe
update zero.1flow.io settings for sparks 2.x.
oneflow/settings/zero_1flow_io.py
oneflow/settings/zero_1flow_io.py
# -*- coding: utf-8 -*-
# Settings for zero.1flow.io, a master clone used to validate migrations.

from sparks.django.settings import include_snippets

include_snippets(
    (
        '000_nobother',
        '00_production',
        '1flow_io',
        'common',
        'db_common',
        'db_production',
        'cache_common',
        'cache_production',
        'mail_production',
        'raven_development',
        'common_production',
    ),
    __file__, globals()
)

# Override real production settings, to be able to distinguish.
SITE_DOMAIN = 'zero.1flow.io'

ALLOWED_HOSTS += ['localhost', SITE_DOMAIN]
# -*- coding: utf-8 -*-
# Settings for zero.1flow.io, a master clone used to validate migrations.

import os

from sparks.django.settings import include_snippets

include_snippets(
    os.path.dirname(__file__),
    (
        '000_nobother',
        '00_production',
        '1flow_io',
        'common',
        'db_common',
        'db_production',
        'cache_common',
        'cache_production',
        'mail_production',
        'raven_development',
        'common_production',
    ),
    globals()
)

# Override real production settings, to be able to distinguish.
SITE_DOMAIN = 'zero.1flow.io'

ALLOWED_HOSTS += ['localhost', SITE_DOMAIN]
Python
0
db60219a1446bb75dd98bfbb12ee6ec4eda6d6bb
add structure for the pcaptotal API
web/api.py
web/api.py
from flask import jsonify, abort, make_response
from flask.ext.httpauth import HTTPBasicAuth

auth = HTTPBasicAuth()

from app import app

tasks = [
    {
        'id': 1,
        'title': u'Buy groceries',
        'description': u'Milk, Cheese, Pizza, Fruit, Tylenol',
        'done': False
    },
    {
        'id': 2,
        'title': u'Learn Python',
        'description': u'Need to find a good Python tutorial on the web',
        'done': False
    }
]


@app.route('/todo/api/v1.0/tasks', methods=['GET'])
#@auth.login_required
def get_tasks():
    return jsonify({'tasks': tasks})


@app.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET'])
#@auth.login_required
def get_task(task_id):
    task = [task for task in tasks if task['id'] == task_id]
    if len(task) == 0:
        abort(404)
    return jsonify({'task': task[0]})


@app.errorhandler(404)
def not_found(error):
    return make_response(jsonify({'error': 'Not found'}), 404)


@auth.get_password
def get_password(username):
    if username == 'ask3m':
        return 'ask3m'
    return None


@auth.error_handler
def unauthorized():
    return make_response(jsonify({'error': 'Unauthorized access'}), 401)
Python
0
2a386106dfaa90fc09ab5478711d203249c79691
Add script scripts/make_package.py to generate a file with extension brython.js that can be included in a page and add importable modules.
scripts/make_package.py
scripts/make_package.py
import json
import os
import re
import ast

import python_minifier


class Visitor(ast.NodeVisitor):
    """Used to list all the modules imported by a script."""

    def __init__(self, lib_path, package):
        self.imports = set()
        self.lib_path = lib_path
        self.package = package

    def visit_Import(self, node):
        for alias in node.names:
            self.imports.add(alias.name)

    def visit_ImportFrom(self, node):
        if node.level > 0:
            package = self.package[:]
            level = node.level - 1
            while level:
                package.pop()
                level -= 1
            module = ".".join(package)
            if node.module:
                module += "." + node.module
        else:
            module = node.module
        self.imports.add(module)
        for alias in node.names:
            if alias.name == "*":
                continue
            else:
                # Only keep "from X import Y" if X.Y is a module, not if Y
                # is a variable defined in X
                path = os.path.join(self.lib_path, *module.split("."),
                                    alias.name + ".py")
                if os.path.exists(path):
                    self.imports.add(module + "." + alias.name)


def make(package_name, package_path, exclude_dirs=None):
    print("Generating package {}".format(package_name))
    VFS = {}
    nb = 0
    if exclude_dirs is None:
        exclude_dirs = []
    for dirpath, dirnames, filenames in os.walk(package_path):
        flag = False
        root_elts = dirpath.split(os.sep)
        for exclude in exclude_dirs:
            if exclude in root_elts:
                flag = True
                continue
        if flag:
            continue  # skip these modules
        if '__pycache__' in dirnames:
            dirnames.remove("__pycache__")
        if dirpath == package_path:
            package = []
        else:
            package = dirpath[len(package_path) + 1:].split(os.sep)
        for filename in filenames:
            ext = os.path.splitext(filename)[1]
            if ext not in ('.js', '.py'):
                continue
            if filename.endswith(".brython.js"):
                continue
            nb += 1
            absname = os.path.join(dirpath, filename)
            with open(absname, encoding='utf-8') as f:
                data = f.read()
            if ext == '.py':
                data = python_minifier.minify(data, preserve_lines=True)
            path_elts = package[:]
            if os.path.basename(filename) != "__init__.py":
                path_elts.append(os.path.basename(filename)[:-3])
            fqname = ".".join(path_elts)
            with open(absname, encoding="utf-8") as f:
                tree = ast.parse(f.read())
                visitor = Visitor(package_path, package)
                visitor.visit(tree)
                imports = sorted(list(visitor.imports))
            mod_name = filename.replace(os.sep, '.')
            mod_name = package_name + "." + mod_name
            mod_name, ext = os.path.splitext(mod_name)
            is_package = mod_name.endswith('__init__')
            if ext == ".py":
                if is_package:
                    mod_name = mod_name[:-9]
                    VFS[mod_name] = [ext, data, imports, 1]
                else:
                    VFS[mod_name] = [ext, data, imports]
            else:
                VFS[mod_name] = [ext, data]
            print("adding {}".format(mod_name))

    print('{} files'.format(nb))
    with open(os.path.join(package_path, package_name + ".brython.js"),
              "w", encoding="utf-8") as out:
        out.write('__BRYTHON__.use_VFS = true;\n')
        out.write('var scripts = {}\n'.format(json.dumps(VFS)))
        out.write('__BRYTHON__.update_VFS(scripts)\n')


if __name__ == "__main__":
    src_dir = os.path.join(os.path.dirname(os.getcwd()), "www", "src",
                           "Lib", "browser", "widgets")
    make("widgets", src_dir)
Python
0
62b01c3c1614d5719cc69be951b2f6c660e40faa
Add generic function for iterating arrays.
pyldap/libldap/tools.py
pyldap/libldap/tools.py
def iterate_array(arr, f=None):
    i = 0
    while True:
        if not arr[i]:
            break
        yield arr[i] if f is None else f(arr[i])
        i += 1
Python
0
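A quick usage sketch (my addition): the generator walks any indexable, sentinel-terminated sequence, which is how NULL-terminated C arrays from libldap look through ctypes; here a plain list with a None sentinel stands in for one:

values = [b"cn=admin", b"cn=guest", None]  # None plays the role of the C NULL terminator
print(list(iterate_array(values)))                          # [b'cn=admin', b'cn=guest']
print(list(iterate_array(values, f=lambda v: v.decode())))  # ['cn=admin', 'cn=guest']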
d8cd42940df8c1d2fc9ae28e9c5caa21995ca68c
Add word-counter.py
python3/word-counter.py
python3/word-counter.py
#!/usr/bin/env python3

from collections import Counter
import argparse
import re
from itertools import islice
import operator

parser = argparse.ArgumentParser()
parser.add_argument('--numWords', type=int, default=10)
parser.add_argument('--maxTuples', type=int, default=4)
parser.add_argument('--minWordLength', type=int, default=5)
parser.add_argument('file', type=str)
args = parser.parse_args()

# Inspired by http://stackoverflow.com/questions/6822725
def window(seq, n):
    it = iter(seq)
    result = tuple(islice(it, n))
    if len(result) == n:
        yield result
    for elem in it:
        result = result[1:] + (elem,)
        containsShortWord = False
        for i in result:
            if len(i) < args.minWordLength:
                containsShortWord = True
                break
        if not containsShortWord:
            yield result

with open(args.file, 'r') as f:
    content = f.read().replace('\n', ' ')

words = re.findall(r'\S+', content)

for i in range(1, args.maxTuples + 1):
    print("\n=== Sliding Window: {} ===".format(i))
    for tup in Counter(window(words, i)).most_common(args.numWords):
        print("    {}: '{}'".format(tup[1], " ".join(tup[0])))
Python
0.002063
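A usage sketch (my addition; the input file name is hypothetical, and window() reads args so the script must be driven from its CLI):

# Run from a shell, e.g.:
#   python3 word-counter.py --numWords 5 --maxTuples 2 --minWordLength 4 sample.txt
# prints the five most common single words and word pairs (counting only
# runs whose words all have at least four characters) found in sample.txt.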
377a8be7c0b1e77f0e9c2dfd55f603e199727907
make pairs
files/v8/make_pairs.py
files/v8/make_pairs.py
import fileinput

pops = []

for line in fileinput.input():
    pops.append(line[:-1])

for i in range(len(pops)-1):
    for j in range(i+1, len(pops)):
        print pops[i]+","+pops[j]
Python
0.999365
9c0ddb1c4fff5fb3f44ad77192a7f435bc7a22fe
Create AdafruitMotorHat4Pi.py
service/AdafruitMotorHat4Pi.py
service/AdafruitMotorHat4Pi.py
# Start the services needed
raspi = Runtime.start("raspi","RasPi")
hat = Runtime.start("hat","AdafruitMotorHat4Pi")
m1 = Runtime.start("m1","MotorHat4Pi")
# Attach the HAT to i2c bus 1 and address 0x60
hat.attach("raspi","1","0x60")
# Use the M1 motor port and attach the motor to the hat
m1.setMotor("M1")
m1.attach("hat")
# Now everything is wired up and we run a few tests

# Full speed forward
m1.move(1)
sleep(3)

# half speed forward
m1.move(.5)
sleep(3)

# Move backward at 60% speed
m1.move(-.6)
sleep(3)

# Stop
m1.move(0)

# Now you should be able to use the GUI or a script to control the motor
Python
0
b376b0dca7ec73451ff36ebae1718fa11ec159f0
Add utils.py for general purpose functions
manyfaced/common/utils.py
manyfaced/common/utils.py
import time
import pickle

from socket import error as socket_error

from common.status import CLIENT_TIMEOUT


def dump_file(data):
    try:
        with file('temp.db') as f:
            string_file = f.read()
            db = pickle.loads(string_file)
    except:
        db = list()
    db.append(data)
    with open('temp.db', "w") as f:
        f.write(str(pickle.dumps(db)))


def recv_timeout(the_socket, timeout=CLIENT_TIMEOUT):
    # make socket non blocking
    the_socket.setblocking(0)

    # total data partwise in an array
    total_data = []

    # beginning time
    begin = time.time()
    while True:
        # if you got some data, then break after timeout
        if total_data and time.time() - begin > timeout:
            break

        # if you got no data at all, wait a little longer, twice the timeout
        elif time.time() - begin > timeout * 2:
            break

        # recv something
        try:
            data = the_socket.recv(8192)
            if data:
                total_data.append(data)
                # change the beginning time for measurement
                begin = time.time()
            else:
                # sleep for sometime to indicate a gap
                time.sleep(0.1)
        except socket_error:
            pass

    # join all parts to make final string
    return ''.join(total_data)
Python
0.000003
a3cbfeca2828ac3d210921524d850091dc775db7
Revert "Revert "New VMware Module to support configuring a VMware vmkernel IP…"
lib/ansible/modules/extras/cloud/vmware/vmware_vmkernel_ip_config.py
lib/ansible/modules/extras/cloud/vmware/vmware_vmkernel_ip_config.py
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: vmware_vmkernel_ip_config
short_description: Configure the VMkernel IP Address
description:
    - Configure the VMkernel IP Address
version_added: 2.0
author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)"
notes:
    - Tested on vSphere 5.5
requirements:
    - "python >= 2.6"
    - PyVmomi
options:
    hostname:
        description:
            - The hostname or IP address of the ESXi server
        required: True
    username:
        description:
            - The username of the ESXi server
        required: True
        aliases: ['user', 'admin']
    password:
        description:
            - The password of the ESXi server
        required: True
        aliases: ['pass', 'pwd']
    vmk_name:
        description:
            - VMkernel interface name
        required: True
    ip_address:
        description:
            - IP address to assign to VMkernel interface
        required: True
    subnet_mask:
        description:
            - Subnet Mask to assign to VMkernel interface
        required: True
'''

EXAMPLES = '''
# Example command from Ansible Playbook
- name: Configure IP address on ESX host
  local_action:
      module: vmware_vmkernel_ip_config
      hostname: esxi_hostname
      username: esxi_username
      password: esxi_password
      vmk_name: vmk0
      ip_address: 10.0.0.10
      subnet_mask: 255.255.255.0
'''

try:
    from pyVmomi import vim, vmodl
    HAS_PYVMOMI = True
except ImportError:
    HAS_PYVMOMI = False


def configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask):
    host_config_manager = host_system.configManager
    host_network_system = host_config_manager.networkSystem

    for vnic in host_network_system.networkConfig.vnic:
        if vnic.device == vmk_name:
            spec = vnic.spec
            if spec.ip.ipAddress != ip_address:
                spec.ip.dhcp = False
                spec.ip.ipAddress = ip_address
                spec.ip.subnetMask = subnet_mask
                host_network_system.UpdateVirtualNic(vmk_name, spec)
                return True
    return False


def main():
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(vmk_name=dict(required=True, type='str'),
                              ip_address=dict(required=True, type='str'),
                              subnet_mask=dict(required=True, type='str')))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    vmk_name = module.params['vmk_name']
    ip_address = module.params['ip_address']
    subnet_mask = module.params['subnet_mask']

    try:
        content = connect_to_api(module, False)
        host = get_all_objs(content, [vim.HostSystem])
        if not host:
            module.fail_json(msg="Unable to locate Physical Host.")
        host_system = host.keys()[0]
        changed = configure_vmkernel_ip_address(host_system, vmk_name,
                                                ip_address, subnet_mask)
        module.exit_json(changed=changed)
    except vmodl.RuntimeFault as runtime_fault:
        module.fail_json(msg=runtime_fault.msg)
    except vmodl.MethodFault as method_fault:
        module.fail_json(msg=method_fault.msg)
    except Exception as e:
        module.fail_json(msg=str(e))


from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
Python
0
b9759f60c9f107c3d2c319f53ed2985ee58dc319
Write test for mock_pose generator.
src/tests/test_mock_pose.py
src/tests/test_mock_pose.py
try:
    from unittest.mock import patch, MagicMock
except ImportError:
    from mock import patch, MagicMock

import pytest
import rospy

MockTf2 = MagicMock()
modules = {"tf2_ros": MockTf2}
patcher = patch.dict("sys.modules", modules)
patcher.start()

try:
    rospy.init_node("pytest", anonymous=True)
except rospy.exceptions.ROSException:
    pass


@pytest.fixture(scope="module")
def teardown_module():
    def fin():
        patcher.stop()


class TestPoseGenerator(object):
    def test_tf_and_pose_same(self):
        from mock_pose import PoseGenerator

        pose = PoseGenerator.generate_pose()
        transform = PoseGenerator.pose_to_tf(pose)

        assert transform.transform.translation.x == pose.pose.position.x
        assert transform.transform.translation.y == pose.pose.position.y
        assert transform.transform.translation.z == pose.pose.position.z
        assert transform.transform.rotation.x == pose.pose.orientation.x
        assert transform.transform.rotation.y == pose.pose.orientation.y
        assert transform.transform.rotation.z == pose.pose.orientation.z
        assert transform.transform.rotation.w == pose.pose.orientation.w
Python
0
1ce12ab6eb2a3b5578eba253929275bb3b394b76
Create line_follow.py
line_follow.py
line_follow.py
from Myro import *

init("/dev/tty.scribbler")

# To stop the Scribbler, wave your hand/something in front of the fluke
while getObstacle('center') < 6300:
    # Get the reading from the line sensors on the bottom of Scribbler
    left, right = getLine()
    # If both left and right sensors are on track
    if left == 1 and right == 1:
        motors(-.1, -.1)
    # If just the right is on track, turn left
    elif right == 1:
        motors(.1, -.1)
    # If just the left is on track, turn right
    elif left == 1:
        motors(-.1, .1)
    # If both are off track, go backwards in a random direction.
    # randomNumber returns a number between 0 and 1, so I scale that to go slower
    elif left == 0 and right == 0:
        motors(.1*randomNumber(), .1*randomNumber())

# When it's done, stop and beep happily
stop()
beep(.1, 600)
beep(.1, 650)
beep(.1, 700)
beep(.1, 750)
beep(.1, 800)
beep(.1, 850)
Python
0.000005
5fa39bb65f88fa3596cc3831890cd258cc5768e1
Add the module file
lantz/drivers/ni/__init__.py
lantz/drivers/ni/__init__.py
# -*- coding: utf-8 -*-
"""
    lantz.drivers.ni
    ~~~~~~~~~~~~~~~~

    :company: National Instruments
    :description:
    :website: http://www.ni.com/

    ----

    :copyright: 2012 by Lantz Authors, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""

from .daqe import NI6052E

__all__ = ['NI6052E', ]
Python
0.000002
b3dcbe95d766d902d22a0c4c171cbbe5ce207571
Add test for longevity testing: both LEDs ON at the same time for extended periods of time
python/tests/stress_test.py
python/tests/stress_test.py
#!/usr/bin/env python
import time
import sys
import os
from random import randint

# Hack to import from a parent dir
# http://stackoverflow.com/a/11158224/401554
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parentdir)

from octo import Octo

octo = Octo('/dev/ttyACM0')
octo.reset()

# Test that the LEDs don't burn out or short or whatnot during continuous active state
while True:
    time.sleep(1)
    octo.led0(randint(0,255), randint(0,255), randint(0,255))
    time.sleep(1)
    octo.led1(randint(0,255), randint(0,255), randint(0,255))
    time.sleep(randint(60,120))
    octo.led1(randint(0,255), randint(0,255), randint(0,255))
    time.sleep(1)
    octo.led0(randint(0,255), randint(0,255), randint(0,255))
    time.sleep(randint(60,120))
Python
0.000001
772d05f25f3ba6ffe1ea4341401df90803b63715
add generic payload executor
pilot/control/payloads/generic.py
pilot/control/payloads/generic.py
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Mario Lassnig, mario.lassnig@cern.ch, 2016-2017
# - Daniel Drizhuk, d.drizhuk@gmail.com, 2017
# - Tobias Wegner, tobias.wegner@cern.ch, 2017
# - Paul Nilsson, paul.nilsson@cern.ch, 2017
# - Wen Guan, wen.guan@cern.ch, 2018

import subprocess
import time

from pilot.control.job import send_state

import logging
logger = logging.getLogger(__name__)


class Executor(object):
    def __init__(self, args, job, out, err):
        self.__args = args
        self.__job = job
        self.__out = out
        self.__err = err

    def setup_payload(self, job, out, err):
        """
        (add description)

        :param job:
        :param out:
        :param err:
        :return:
        """
        # log = logger.getChild(str(job['PandaID']))

        # try:
        #     # create symbolic link for sqlite200 and geomDB in job dir
        #     for db_name in ['sqlite200', 'geomDB']:
        #         src = '/cvmfs/atlas.cern.ch/repo/sw/database/DBRelease/current/%s' % db_name
        #         link_name = 'job-%s/%s' % (job['PandaID'], db_name)
        #         os.symlink(src, link_name)
        # except Exception as e:
        #     log.error('could not create symbolic links to database files: %s' % e)
        #     return False

        return True

    def run_payload(self, job, out, err):
        """
        (add description)

        :param job:
        :param out:
        :param err:
        :return:
        """
        log = logger.getChild(str(job['PandaID']))

        # get the payload command from the user specific code
        # cmd = get_payload_command(job, queuedata)
        athena_version = job['homepackage'].split('/')[1]
        asetup = 'source $ATLAS_LOCAL_ROOT_BASE/user/atlasLocalSetup.sh --quiet; '\
                 'source $AtlasSetup/scripts/asetup.sh %s,here; ' % athena_version
        cmd = job['transformation'] + ' ' + job['jobPars']

        log.debug('executable=%s' % asetup + cmd)

        try:
            proc = subprocess.Popen(asetup + cmd,
                                    bufsize=-1,
                                    stdout=out,
                                    stderr=err,
                                    cwd=job['working_dir'],
                                    shell=True)
        except Exception as e:
            log.error('could not execute: %s' % str(e))
            return None

        log.info('started -- pid=%s executable=%s' % (proc.pid, asetup + cmd))

        return proc

    def wait_graceful(self, args, proc, job):
        """
        (add description)

        :param args:
        :param proc:
        :param job:
        :return:
        """
        log = logger.getChild(str(job['PandaID']))

        breaker = False
        exit_code = None
        while True:
            for i in xrange(100):
                if args.graceful_stop.is_set():
                    breaker = True
                    log.debug('breaking -- sending SIGTERM pid=%s' % proc.pid)
                    proc.terminate()
                    break
                time.sleep(0.1)
            if breaker:
                log.debug('breaking -- sleep 3s before sending SIGKILL pid=%s' % proc.pid)
                time.sleep(3)
                proc.kill()
                break

            exit_code = proc.poll()
            log.info('running: pid=%s exit_code=%s' % (proc.pid, exit_code))
            if exit_code is not None:
                break
            else:
                send_state(job, args, 'running')
                continue

        return exit_code

    def run(self):
        """
        (add description)

        :return:
        """
        log = logger.getChild(str(self.__job['PandaID']))

        exit_code = 1
        if self.setup_payload(self.__job, self.__out, self.__err):
            log.debug('running payload')
            send_state(self.__job, self.__args, 'running')
            proc = self.run_payload(self.__job, self.__out, self.__err)
            if proc is not None:
                exit_code = self.wait_graceful(self.__args, proc, self.__job)
                log.info('finished pid=%s exit_code=%s' % (proc.pid, exit_code))

        return exit_code
Python
0.000006
d2a9f0cfd34f6a58c647d32b9a32fc517a210fd5
Create tests for ChartBasedAnnotationsAccumulator
projects/DensePose/tests/test_chart_based_annotations_accumulator.py
projects/DensePose/tests/test_chart_based_annotations_accumulator.py
# Copyright (c) Facebook, Inc. and its affiliates.

import unittest
import torch

from detectron2.structures import Boxes, BoxMode, Instances

from densepose.modeling.losses.utils import ChartBasedAnnotationsAccumulator
from densepose.structures import DensePoseDataRelative, DensePoseList

image_shape = (100, 100)
instances = Instances(image_shape)
n_instances = 3
instances.proposal_boxes = Boxes(torch.rand(n_instances, 4))
instances.gt_boxes = Boxes(torch.rand(n_instances, 4))
# instances.gt_densepose = None cannot happen because instances attributes need a length


class TestChartBasedAnnotationsAccumulator(unittest.TestCase):
    def test_chart_based_annotations_accumulator_no_gt_densepose(self):
        accumulator = ChartBasedAnnotationsAccumulator()
        accumulator.accumulate(instances)
        expected_values = {"nxt_bbox_with_dp_index": 0, "nxt_bbox_index": n_instances}
        for key in accumulator.__dict__:
            self.assertEqual(getattr(accumulator, key), expected_values.get(key, []))

    def test_chart_based_annotations_accumulator_gt_densepose_none(self):
        instances.gt_densepose = [None] * n_instances
        accumulator = ChartBasedAnnotationsAccumulator()
        accumulator.accumulate(instances)
        expected_values = {"nxt_bbox_with_dp_index": 0, "nxt_bbox_index": n_instances}
        for key in accumulator.__dict__:
            self.assertEqual(getattr(accumulator, key), expected_values.get(key, []))

    def test_chart_based_annotations_accumulator_gt_densepose(self):
        data_relative_keys = [
            DensePoseDataRelative.X_KEY,
            DensePoseDataRelative.Y_KEY,
            DensePoseDataRelative.I_KEY,
            DensePoseDataRelative.U_KEY,
            DensePoseDataRelative.V_KEY,
            DensePoseDataRelative.S_KEY,
        ]
        annotations = [DensePoseDataRelative({k: [0] for k in data_relative_keys})] * n_instances
        instances.gt_densepose = DensePoseList(annotations, instances.gt_boxes, image_shape)
        accumulator = ChartBasedAnnotationsAccumulator()
        accumulator.accumulate(instances)
        bbox_xywh_est = BoxMode.convert(
            instances.proposal_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS
        )
        bbox_xywh_gt = BoxMode.convert(
            instances.gt_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS
        )
        expected_values = {
            "s_gt": [
                torch.zeros((3, DensePoseDataRelative.MASK_SIZE, DensePoseDataRelative.MASK_SIZE))
            ]
            * n_instances,
            "bbox_xywh_est": bbox_xywh_est.split(1),
            "bbox_xywh_gt": bbox_xywh_gt.split(1),
            "point_bbox_with_dp_indices": [torch.tensor([i]) for i in range(n_instances)],
            "point_bbox_indices": [torch.tensor([i]) for i in range(n_instances)],
            "bbox_indices": list(range(n_instances)),
            "nxt_bbox_with_dp_index": n_instances,
            "nxt_bbox_index": n_instances,
        }
        default_value = [torch.tensor([0])] * 3
        for key in accumulator.__dict__:
            to_test = getattr(accumulator, key)
            gt_value = expected_values.get(key, default_value)
            if key in ["nxt_bbox_with_dp_index", "nxt_bbox_index"]:
                self.assertEqual(to_test, gt_value)
            elif key == "bbox_indices":
                self.assertListEqual(to_test, gt_value)
            else:
                self.assertTrue(torch.allclose(torch.stack(to_test), torch.stack(gt_value)))
Python
0
ddfbdb9f5eea0cac448add25c61c5de3bb583a4d
Add the "github_add_upstream" script
github_add_upstream.py
github_add_upstream.py
#!/usr/bin/env python
"""
Add the GitHub "parent" repository to the current local repository as the
"upstream" remote.

The "parent" GitHub repository is the repository from which the GitHub repo
has been forked from.

E.g. if your local repository contains this:

    # Output from `git remote -v`
    origin  git@github.com:sitaktif/foo (fetch)
    origin  git@github.com:sitaktif/foo (push)

...then this script will make it look like this:

    # Output from `git remote -v`
    origin  git@github.com:sitaktif/foo (fetch)
    origin  git@github.com:sitaktif/foo (push)
    upstream  https://github.com/SirVer/foo (fetch)
    upstream  https://github.com/SirVer/foo (push)

Amend the `USER` and `GITHUB_TOKEN` constants accordingly before use.
"""

from __future__ import print_function

import re
import subprocess

import requests

# Mandatory parameters
USER = 'your_user'  # Change this to your user
GITHUB_TOKEN = 'your_api_key'  # Generate a key on GitHub

# Other parameters
ORIGIN_REMOTE = 'origin'
UPSTREAM_REMOTE = 'upstream'

# To check whether the user seems to own the 'origin' repo
# Warning: it cannot really check whether it is a real Github repo or not
# (because it could be an arbitrary GitHub Enterprise url) and will fail later
# if it's not.
RE_USER_OWNS_ORIGIN = re.compile(r'^(?P<remote>%s)\s+'  # ORIGIN_REMOTE
                                 r'(?P<github_proto>\S+)(?:://|@)'  # https:// or git@
                                 r'(?P<github_url>\S+)[:/]'
                                 r'(?P<user>%s)/'  # USER
                                 r'(?P<repo>[^/]+).git\s+'
                                 r'\(fetch\)\s*(?:$|\n)'
                                 r'' % (ORIGIN_REMOTE, USER))

# Does the repo have 'upstream' in its remotes
RE_REMOTE_HAS_UPSTREAM = re.compile(r'(^|\n)%s\s+' % UPSTREAM_REMOTE)

# Get information about the 'origin' repository
RE_ORIGIN_REMOTE_INFO = re.compile(r'^(?P<remote>%s)\s+'  # ORIGIN_REMOTE
                                   r'(?P<github_proto>\S+)(?:://|@)'  # https:// or git@
                                   r'(?P<github_url>\S+)[:/]'
                                   r'(?P<user>%s)/'  # USER
                                   r'(?P<repo>[^/]+).git\s+'
                                   r'\(fetch\)\s*(?:$|\n)'
                                   r'' % (ORIGIN_REMOTE, USER))


class GitHub(object):
    """
    GitHub object to abstract calls to the GH API (public and enterprise).
    """

    ACCEPT_HEADER = {'Accept': 'application/vnd.github.v3+json'}

    @classmethod
    def _api_url(cls, github_url):
        if github_url == 'github.com':
            return 'https://api.github.com'
        else:
            return 'https://%s/api/v3' % github_url

    def _additional_headers(self):
        headers = self.ACCEPT_HEADER
        headers.update({'Authorization': 'token %s' % self.token})
        return headers

    def __init__(self, url, user, token):
        """
        Create a GH object (either GitHub or GitHub Enterprise)
        """
        self.url = url
        self.api_url = self._api_url(self.url)
        self.user = user
        self.token = token

    def _get_foo(self, foo):
        url = '%s/%s' % (self.api_url, foo)
        print("Request: %s" % url)
        return requests.get(url, headers=self._additional_headers())

    def get_repo(self, repo, user=None):
        if user is None:
            user = self.user
        return self._get_foo('repos/%s/%s' % (user, repo))


def main():
    remote_out = subprocess.check_output('git remote -v', shell=True,
                                         universal_newlines=True)

    if RE_USER_OWNS_ORIGIN.match(remote_out) is None:
        print("Current repository's '%s' remote does not seem to be owned "
              "by user '%s'. Skipping." % (UPSTREAM_REMOTE, USER))
        exit(0)

    if RE_REMOTE_HAS_UPSTREAM.search(remote_out):
        print("Repo already has a '%s' remote. Skipping." % UPSTREAM_REMOTE)
        exit(0)

    match = RE_ORIGIN_REMOTE_INFO.search(remote_out)
    if match is None:
        print("Could not parse the output of the repo's remote command:")
        print(remote_out)
        exit(1)

    # Get info about the current repo
    grps = match.groupdict()
    github_url, user, repo = grps['github_url'], grps['user'], grps['repo']
    ghub = GitHub(github_url, user, GITHUB_TOKEN)

    # Extract the parent url and add the remote
    repo_res = ghub.get_repo(repo)
    repo_json = repo_res.json()
    parent_repo_url = repo_json['parent']['html_url']
    subprocess.check_call('git remote add %s %s' % (
        UPSTREAM_REMOTE, parent_repo_url), shell=True)


if __name__ == '__main__':
    main()
Python
0.000073
9bc154d662464a0073b8b7cd3bcf39312a4ac1d7
add ifttt notification
ifttt_notification.py
ifttt_notification.py
import requests


def Air_alert():
    report = {}
    report["value1"] = "test"
    report["value2"] = "second"
    report["value3"] = "third"
    requests.post(
        "https://maker.ifttt.com/trigger/Air_Test/with/key/{user_key}".format(user_key=""),
        data=report)


if __name__ == "__main__":
    Air_alert()
Python
0
407c08899eccea60a2ae534ab0c1b000c58708ab
Implement some tests for AgentAPI
tests/test_agent_api.py
tests/test_agent_api.py
# No shebang line, this module is meant to be imported
#
# Copyright 2013 Oliver Palmer
# Copyright 2013 Ambient Entertainment GmbH & Co. KG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

from utcore import ModelTestCase
from pyfarm.master.utility import dumps

try:
    from json import loads
except ImportError:
    from simplejson import loads


class TestAgentAPI(ModelTestCase):
    def test_agents_schema(self):
        response = self.client.get("/api/v1/agents/schema")
        self.assert200(response)
        self.assertEquals(response.json,
                          {"ram": "INTEGER",
                           "free_ram": "INTEGER",
                           "use_address": "INTEGER",
                           "ip": "IPv4Address",
                           "hostname": "VARCHAR(255)",
                           "cpus": "INTEGER",
                           "port": "INTEGER",
                           "state": "INTEGER",
                           "ram_allocation": "FLOAT",
                           "cpu_allocation": "FLOAT",
                           "id": "INTEGER",
                           "remote_ip": "IPv4Address"})

    def test_agent_read_write(self):
        response1 = self.client.post("/api/v1/agents",
                                     content_type="application/json",
                                     data=dumps({"cpu_allocation": 1.0,
                                                 "cpus": 16,
                                                 "free_ram": 133,
                                                 "hostname": "testagent1",
                                                 "ip": "10.0.200.1",
                                                 "port": 64994,
                                                 "ram": 2048,
                                                 "ram_allocation": 0.8,
                                                 "state": 8}))
        self.assertStatus(response1, 201)
        id = loads(response1.data)['id']

        response2 = self.client.get("/api/v1/agents/%d" % id)
        self.assert200(response2)
        agent_data = loads(response2.data)
        assert len(agent_data) == 12
        assert response2.json == {
            "ram": 2048,
            "cpu_allocation": 1.0,
            "use_address": 22,
            "ip": "10.0.200.1",
            "hostname": "testagent1",
            "cpus": 16,
            "ram_allocation": 0.8,
            "port": 64994,
            "state": 8,
            "free_ram": 133,
            "id": id,
            "remote_ip": None
        }

    # TODO Test updating an agent
Python
0.000002
369676cfacd35c7b3321edaef97bf64f063e7d50
Add nephrectomy model
radar/radar/models/nephrectomy.py
radar/radar/models/nephrectomy.py
from collections import OrderedDict

from sqlalchemy import Column, Integer, ForeignKey, Date, String, Index
from sqlalchemy.orm import relationship

from radar.database import db
from radar.models.common import MetaModelMixin, IntegerLookupTable

NEPHRECTOMY_SIDES = OrderedDict([
    ('LEFT', 'Left'),
    ('RIGHT', 'Right'),
    ('BILATERAL', 'Bilateral'),
])

NEPHRECTOMY_KIDNEY_TYPES = OrderedDict([
    ('TRANSPLANT', 'Transplant'),
    ('NATURAL', 'Natural'),
])

NEPHRECTOMY_ENTRY_TYPES = OrderedDict([
    ('O', 'Open'),
    ('HA', 'Hand Assisted'),
    ('TPL', 'Transperitoneal Laparoscopic'),
    ('RPL', 'Retroperitoneal Laparoscopic'),
])


class Nephrectomy(db.Model, MetaModelMixin):
    __tablename__ = 'nephrectomy'

    id = Column(Integer, primary_key=True)

    patient_id = Column(Integer, ForeignKey('patients.id'), nullable=False)
    patient = relationship('Patient')

    data_source_id = Column(Integer, ForeignKey('data_sources.id'), nullable=False)
    data_source = relationship('DataSource')

    date = Column(Date, nullable=False)
    kidney_side = Column(String, nullable=False)
    kidney_type = Column(String, nullable=False)
    entry_type = Column(String, nullable=False)

Index('nephrectomy_patient_id_idx', Nephrectomy.patient_id)
Python
0.000007
0378a225c5519ad39fee6a132c455e1848151a44
Create run_test.py
recipes/django-braces/run_test.py
recipes/django-braces/run_test.py
import django
from django.conf import settings

settings.configure(INSTALLED_APPS=['braces',
                                   'django.contrib.contenttypes',
                                   'django.contrib.auth'])
django.setup()

import braces
Python
0.000004
7e9c90c179df8666a75eef1610dbda764add1408
Create elec_temp_join.py
elec_temp_join.py
elec_temp_join.py
import numpy as np
import pandas as pd
import geopandas as gpd
from geopandas import tools

utility = '/home/akagi/Desktop/electricity_data/Electric_Retail_Service_Ter.shp'
util = gpd.read_file(utility)

urbarea = '/home/akagi/GIS/census/cb_2013_us_ua10_500k/cb_2013_us_ua10_500k.shp'
ua = gpd.read_file(urbarea)
ua = ua.to_crs(util.crs)

j = tools.sjoin(util, ua)

grid = '/home/akagi/gridcells.shp'
g = gpd.read_file(grid)

coords = g.centroid.apply(lambda x: x.coords[0])
coordstr = coords.apply(lambda x: 'data_%s_%s' % (x[1], x[0]))
g['coordstr'] = coordstr

ua_g = tools.sjoin(ua, g)
ua_g['grid_geom'] = ua_g['index_right'].map(g['geometry'])
# distance from each urban-area centroid to its candidate grid-cell centroid
ua_g['dist'] = ua_g.apply(lambda x: (x['geometry'].centroid).distance(x['grid_geom'].centroid), axis=1)
ua_g = ua_g.reset_index().loc[ua_g.reset_index().groupby('index').idxmin('dist')['FID'].values].set_index('index')

j['grid_cell'] = j['index_right'].map(ua_g['coordstr'])
Python
0.000001
e9135583af7a862bd426b4a068743765c4604da3
add test for dials.convert_to_cbf (only works on dls computers)
test/command_line/test_convert_to_cbf.py
test/command_line/test_convert_to_cbf.py
from __future__ import absolute_import, division, print_function

import glob
import os

import pytest
import procrunner

pytestmark = pytest.mark.skipif(
    not os.access("/dls/i04/data/2019/cm23004-1/20190109/Eiger", os.R_OK),
    reason="Test images not available",
)


@pytest.mark.parametrize(
    "master_h5",
    [
        "/dls/i04/data/2019/cm23004-1/20190109/Eiger/gw/Thaum/Thau_4/Thau_4_1_master.h5",
        "/dls/i04/data/2019/cm23004-1/20190109/Eiger/gw/Thaum/Thau_4/Thau_4_1.nxs",
    ],
)
def test_convert_to_cbf(master_h5):
    result = procrunner.run(["dials.convert_to_cbf", master_h5])
    assert result["exitcode"] == 0
    assert result["stderr"] == ""

    g = glob.glob("as_cbf_*.cbf")
    assert len(g) == 900  # need a smaller test set!
Python
0
03e9a75d69538649df3e05d40a1c8e7d870fd807
Add gaussian_elimination.py for solving linear systems (#1448)
arithmetic_analysis/gaussian_elimination.py
arithmetic_analysis/gaussian_elimination.py
""" Gaussian elimination method for solving a system of linear equations. Gaussian elimination - https://en.wikipedia.org/wiki/Gaussian_elimination """ import numpy as np def retroactive_resolution(coefficients: np.matrix, vector: np.array) -> np.array: """ This function performs a retroactive linear system resolution for triangular matrix Examples: 2x1 + 2x2 - 1x3 = 5 2x1 + 2x2 = -1 0x1 - 2x2 - 1x3 = -7 0x1 - 2x2 = -1 0x1 + 0x2 + 5x3 = 15 >>> gaussian_elimination([[2, 2, -1], [0, -2, -1], [0, 0, 5]], [[5], [-7], [15]]) array([[2.], [2.], [3.]]) >>> gaussian_elimination([[2, 2], [0, -2]], [[-1], [-1]]) array([[-1. ], [ 0.5]]) """ rows, columns = np.shape(coefficients) x = np.zeros((rows, 1), dtype=float) for row in reversed(range(rows)): sum = 0 for col in range(row + 1, columns): sum += coefficients[row, col] * x[col] x[row, 0] = (vector[row] - sum) / coefficients[row, row] return x def gaussian_elimination(coefficients: np.matrix, vector: np.array) -> np.array: """ This function performs Gaussian elimination method Examples: 1x1 - 4x2 - 2x3 = -2 1x1 + 2x2 = 5 5x1 + 2x2 - 2x3 = -3 5x1 + 2x2 = 5 1x1 - 1x2 + 0x3 = 4 >>> gaussian_elimination([[1, -4, -2], [5, 2, -2], [1, -1, 0]], [[-2], [-3], [4]]) array([[ 2.3 ], [-1.7 ], [ 5.55]]) >>> gaussian_elimination([[1, 2], [5, 2]], [[5], [5]]) array([[0. ], [2.5]]) """ # coefficients must to be a square matrix so we need to check first rows, columns = np.shape(coefficients) if rows != columns: return [] # augmented matrix augmented_mat = np.concatenate((coefficients, vector), axis=1) augmented_mat = augmented_mat.astype("float64") # scale the matrix leaving it triangular for row in range(rows - 1): pivot = augmented_mat[row, row] for col in range(row + 1, columns): factor = augmented_mat[col, row] / pivot augmented_mat[col, :] -= factor * augmented_mat[row, :] x = retroactive_resolution( augmented_mat[:, 0:columns], augmented_mat[:, columns : columns + 1] ) return x if __name__ == "__main__": import doctest doctest.testmod()
Python
0.000001
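A quick cross-check (my addition, not part of the commit) against numpy's own solver, assuming gaussian_elimination is importable from the file above:

import numpy as np

A = np.array([[1, -4, -2], [5, 2, -2], [1, -1, 0]])
b = np.array([[-2], [-3], [4]])
x = gaussian_elimination(A, b)
print(np.allclose(x, np.linalg.solve(A, b)))  # True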
12731a74be889eff48e0e505de666ef3180794fe
add missing file
rmake/plugins/plugin.py
rmake/plugins/plugin.py
#
# Copyright (c) 2006 rPath, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/cpl.php.
#
# This program is distributed in the hope that it will be useful, but
# without any warranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#
"""
Definition of plugins available for rmake plugins.

Plugin writers should derive from one of these classes.

The plugin will be called with the hooks described here, if the correct
program is being run.  For example, when running rmake-server, the server
hooks will be run.
"""
from rmake.lib.pluginlib import Plugin

TYPE_CLIENT = 0
TYPE_SERVER = 1
TYPE_SUBSCRIBER = 2


class ClientPlugin(Plugin):

    types = [TYPE_CLIENT]

    def client_preInit(self, main):
        """
        Called right after plugins have been loaded.
        """
        pass

    def client_preCommand(self, main, client):
        """
        Called after the command-line client has instantiated,
        but before the command has been executed.
        """
        pass


class ServerPlugin(Plugin):

    types = [TYPE_SERVER]

    def server_preConfig(self, main):
        """
        Called before the configuration file has been read in.
        """
        pass

    def server_preInit(self, main, argv):
        """
        Called before the server has been instantiated.
        """
        pass

    def server_postInit(self, server):
        """
        Called after the server has been instantiated but before
        serving is done.
        """
        pass

    def server_pidDied(self, pid, status):
        """
        Called when the server collects a child process that has died.
        """
        pass

    def server_loop(self, server):
        """
        Called once per server loop, between requests.
        """
        pass

    def server_builderInit(self, server, builder):
        """
        Called when the server instantiates a builder for a job.
        """
        pass

    def server_shutDown(self, server):
        """
        Called when the server is halting.
        """
        pass


class SubscriberPlugin(Plugin):

    types = [TYPE_SUBSCRIBER]
    protocol = None

    def subscriber_get(self, uri, name):
        """
        Should return a child of the StatusSubscriber class.
        """
        pass
Python
0.000003
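A minimal sketch (my addition, not part of the commit) of a plugin built against this interface; the hook bodies are illustrative only:

class LoggingServerPlugin(ServerPlugin):
    """Hypothetical plugin that reports server lifecycle events."""

    def server_postInit(self, server):
        print("rmake server initialized: %r" % server)

    def server_shutDown(self, server):
        print("rmake server shutting down")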
3e0e898d0d3ab494edc5dbc65ccde4020f427be8
Create quiz-eliecer.py
laboratorios/quiz-eliecer.py
laboratorios/quiz-eliecer.py
base = 5
altura = 7

perimetro = 2*5 + 2*7
print("my perimeter is " + str(perimetro))

area = 5*7
print("my area is " + str(area))

metrop = perimetro/100  # centimeters to meters
print("my perimeter in meters is " + str(metrop))

pulgadap = perimetro/2.54  # centimeters to inches
print("my perimeter in inches is " + str(pulgadap))

metroa = area/10000  # square centimeters to square meters (100**2)
print("my area in square meters is " + str(metroa))

pulgadaa = area/6.4516  # square centimeters to square inches (2.54**2)
print("my area in square inches is " + str(pulgadaa))
Python
0.000001
ee554d89d1c822537345ce4d03d2bff8783d7f1b
Disable nacl_integration due to #261724.
chrome/test/nacl_test_injection/buildbot_nacl_integration.py
chrome/test/nacl_test_injection/buildbot_nacl_integration.py
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import os
import subprocess
import sys


def Main(args):
  if sys.platform == 'darwin':
    print >> sys.stderr, "SKIPPING NACL INTEGRATION DUE TO BUG #261724."
    return 0

  pwd = os.environ.get('PWD', '')
  is_integration_bot = 'nacl-chrome' in pwd

  # This environment variable check mimics what
  # buildbot_chrome_nacl_stage.py does.
  is_win64 = (sys.platform in ('win32', 'cygwin') and
              ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
               '64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')))

  # On the main Chrome waterfall, we may need to control where the tests are
  # run.
  # If there is serious skew in the PPAPI interface that causes all of
  # the NaCl integration tests to fail, you can uncomment the
  # following block.  (Make sure you comment it out when the issues
  # are resolved.)  *However*, it is much preferred to add tests to
  # the 'tests_to_disable' list below.
  #if not is_integration_bot:
  #  return

  tests_to_disable = []

  # In general, you should disable tests inside this conditional.  This turns
  # them off on the main Chrome waterfall, but not on NaCl's integration bots.
  # This makes it easier to see when things have been fixed NaCl side.
  if not is_integration_bot:
    # http://code.google.com/p/nativeclient/issues/detail?id=2511
    tests_to_disable.append('run_ppapi_ppb_image_data_browser_test')

  if sys.platform == 'darwin':
    # TODO(mseaborn) fix
    # http://code.google.com/p/nativeclient/issues/detail?id=1835
    tests_to_disable.append('run_ppapi_crash_browser_test')

  if sys.platform in ('win32', 'cygwin'):
    # This one is only failing for nacl_glibc on x64 Windows
    # but it is not clear how to disable only that limited case.
    # See http://crbug.com/132395
    tests_to_disable.append('run_inbrowser_test_runner')

  script_dir = os.path.dirname(os.path.abspath(__file__))
  nacl_integration_script = os.path.join(script_dir,
                                         'buildbot_chrome_nacl_stage.py')
  cmd = [sys.executable,
         nacl_integration_script,
         # TODO(ncbray) re-enable.
         # https://code.google.com/p/chromium/issues/detail?id=133568
         '--disable_glibc',
         '--disable_tests=%s' % ','.join(tests_to_disable)]
  cmd += args
  sys.stdout.write('Running %s\n' % ' '.join(cmd))
  sys.stdout.flush()
  return subprocess.call(cmd)


if __name__ == '__main__':
  sys.exit(Main(sys.argv[1:]))
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import os
import subprocess
import sys


def Main(args):
  pwd = os.environ.get('PWD', '')
  is_integration_bot = 'nacl-chrome' in pwd

  # This environment variable check mimics what
  # buildbot_chrome_nacl_stage.py does.
  is_win64 = (sys.platform in ('win32', 'cygwin') and
              ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
               '64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')))

  # On the main Chrome waterfall, we may need to control where the tests are
  # run.
  # If there is serious skew in the PPAPI interface that causes all of
  # the NaCl integration tests to fail, you can uncomment the
  # following block.  (Make sure you comment it out when the issues
  # are resolved.)  *However*, it is much preferred to add tests to
  # the 'tests_to_disable' list below.
  #if not is_integration_bot:
  #  return

  tests_to_disable = []

  # In general, you should disable tests inside this conditional.  This turns
  # them off on the main Chrome waterfall, but not on NaCl's integration bots.
  # This makes it easier to see when things have been fixed NaCl side.
  if not is_integration_bot:
    # http://code.google.com/p/nativeclient/issues/detail?id=2511
    tests_to_disable.append('run_ppapi_ppb_image_data_browser_test')

  if sys.platform == 'darwin':
    # TODO(mseaborn) fix
    # http://code.google.com/p/nativeclient/issues/detail?id=1835
    tests_to_disable.append('run_ppapi_crash_browser_test')

  if sys.platform in ('win32', 'cygwin'):
    # This one is only failing for nacl_glibc on x64 Windows
    # but it is not clear how to disable only that limited case.
    # See http://crbug.com/132395
    tests_to_disable.append('run_inbrowser_test_runner')

  script_dir = os.path.dirname(os.path.abspath(__file__))
  nacl_integration_script = os.path.join(script_dir,
                                         'buildbot_chrome_nacl_stage.py')
  cmd = [sys.executable,
         nacl_integration_script,
         # TODO(ncbray) re-enable.
         # https://code.google.com/p/chromium/issues/detail?id=133568
         '--disable_glibc',
         '--disable_tests=%s' % ','.join(tests_to_disable)]
  cmd += args
  sys.stdout.write('Running %s\n' % ' '.join(cmd))
  sys.stdout.flush()
  return subprocess.call(cmd)


if __name__ == '__main__':
  sys.exit(Main(sys.argv[1:]))
Python
0.000002
33448340d278da7e0653701d78cbab317893279d
Add a simple analysis tool to get some structural properties about an AG's specfile.
AG/datasets/analyze.py
AG/datasets/analyze.py
#!/usr/bin/python

import os
import sys
import math

from lxml import etree


class StatsCounter(object):
    prefixes = {}
    cur_tag = None

    def start(self, tag, attrib):
        self.cur_tag = tag

    def end(self, tag):
        pass
        # self.cur_tag = None

    def data(self, _data):
        if self.cur_tag != "File" and self.cur_tag != "Dir":
            return

        data = _data.rstrip("/")
        if data == "":
            return

        dir_name = os.path.dirname(data)
        if dir_name == "":
            return

        if not self.prefixes.has_key(dir_name):
            self.prefixes[dir_name] = 0

        self.prefixes[dir_name] += 1

    def close(self):
        return "closed!"


if __name__ == "__main__":
    counter = StatsCounter()
    parser = etree.XMLParser(target=counter)

    fd = open(sys.argv[1], "r")

    while True:
        buf = fd.read(32768)
        if len(buf) == 0:
            break

        parser.feed(buf)

    result = parser.close()

    order = counter.prefixes.keys()
    order.sort()

    size_bins = {}

    for path in order:
        count = counter.prefixes[path]
        print "% 15s %s" % (count, path)

        size_bin = int(math.log(count, 10))
        if not size_bins.has_key(size_bin):
            size_bins[size_bin] = 1
        else:
            size_bins[size_bin] += 1

    print ""
    print "sizes"

    if size_bins:
        max_bin = max(size_bins.keys())
        # pad to the digit width of the largest exponent; len(str(...)) is
        # safe when max_bin is 0, where math.log would raise
        bin_fmt = "1e%0" + str(len(str(max_bin))) + "s"
        for size in xrange(0, max_bin + 1):
            binsize = 0
            if size_bins.has_key(size):
                binsize = size_bins[size]

            bin_str = bin_fmt % size
            print "%s %s" % (bin_str, binsize)
Python
0
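A minimal, self-contained sketch of the lxml target-parser pattern the analyzer above is built on: feed() the document in chunks, and close() returns whatever the target's close() returns. The XML snippet and tag names below are invented for illustration.

from lxml import etree

class CountTags(object):
    def __init__(self):
        self.counts = {}
    def start(self, tag, attrib):
        # called once per opening tag
        self.counts[tag] = self.counts.get(tag, 0) + 1
    def end(self, tag):
        pass
    def data(self, text):
        pass
    def close(self):
        return self.counts

parser = etree.XMLParser(target=CountTags())
parser.feed('<Map><File>/a/b</File><File>/a/c</File></Map>')
print(parser.close())   # {'Map': 1, 'File': 2}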
684779d818f27aa28d6068fa7998e65807fe7ac6
add a class that will group images on a file system by day, month or year
photomanip/grouper.py
photomanip/grouper.py
from collections import defaultdict, OrderedDict from datetime import datetime from pathlib import Path from photomanip import PAD, CROP from photomanip.metadata import ImageExif DATETIME_FMT = "%Y:%m:%d %H:%M:%S" DAILY_DATETIME_FMT = "%Y%m%d" MONTHLY_DATETIME_FMT = "%Y%m" YEARLY_DATETIME_FMT = "%Y" class Grouper: def __init__(self, *args, **kwargs): self.metadata_list = None def _date_extractor(self, keyword_grouper=None): raise NotImplementedError() def _height_width_extractor(self, metadata): raise NotImplementedError() def get_photo_list(self): raise NotImplementedError() def group_by_day(self): raise NotImplementedError() def group_by_month(self): raise NotImplementedError() def group_by_year(self): raise NotImplementedError() def get_common_dimension(self, combination_method): raise NotImplementedError() class FileSystemGrouper(Grouper): def __init__(self, image_directory, grouping_tag=None, grouping_fmt=DAILY_DATETIME_FMT, *args, **kwargs): super().__init__(*args, **kwargs) self.image_folder_path = Path(image_directory) self.exif_reader = ImageExif() self.exif_datetime_key = self.exif_reader.metadata_map['date_created'] self.exif_keywords_key = self.exif_reader.metadata_map['keywords'] self.exif_height_key = self.exif_reader.metadata_map['image_height'] self.exif_width_key = self.exif_reader.metadata_map['image_width'] self.photo_list = self.get_photo_list() self.metadata_list = \ self.exif_reader.get_metadata_batch(self.photo_list) self.datetime_dict = self.build_datetime_dict(grouping_tag, grouping_fmt) def _date_extractor(self, metadata, keyword_grouper=None, grouping_fmt=None): if keyword_grouper: # does the image have keywords? if self.exif_keywords_key in metadata: # is there a match for the grouper in the keywords? matches = self.exif_reader.get_tags_containing( metadata[self.exif_keywords_key], keyword_grouper ) if isinstance(matches, str): # convert to datetime matches = matches.replace(keyword_grouper, '') return datetime.strptime(matches, grouping_fmt) # get the date created from exif and gooooo exif_datetime = metadata[self.exif_datetime_key] return datetime.strptime(exif_datetime, DATETIME_FMT) def _height_width_extractor(self, metadata_list): image_heights = [item[self.exif_height_key] for item in metadata_list] image_widths = [item[self.exif_width_key] for item in metadata_list] return image_heights, image_widths def build_datetime_dict(self, grouping_tag, grouping_fmt): datetime_dict = defaultdict(list) for metadata in self.metadata_list: this_date = self._date_extractor(metadata, keyword_grouper=grouping_tag, grouping_fmt=grouping_fmt) datetime_dict[this_date].append(metadata) datetime_dict = OrderedDict(sorted(datetime_dict.items())) return datetime_dict def get_photo_list(self, extension='.jpg'): """ Gets a list of files with extension `extension` (default '.jpg') in a folder. 
""" onlyfiles = \ sorted(list(self.image_folder_path.glob(f'**/*{extension}'))) return onlyfiles def group_by_day(self): grouped = defaultdict(list) for date, meta in self.datetime_dict.items(): new_key = datetime(date.year, date.month, date.day) if len(meta) > 1: grouped[new_key].extend(meta) else: grouped[new_key].append(meta[0]) return grouped def group_by_month(self): grouped = defaultdict(list) for date, meta in self.datetime_dict.items(): new_key = datetime(date.year, date.month, 1) if len(meta) > 1: grouped[new_key].extend(meta) else: grouped[new_key].append(meta[0]) return grouped def group_by_year(self): grouped = defaultdict(list) for date, meta in self.datetime_dict.items(): new_key = datetime(date.year, 1, 1) if len(meta) > 1: grouped[new_key].extend(meta) else: grouped[new_key].append(meta[0]) return grouped def get_common_dimension(self, comb_method, metadata_list): """Computes the dimensions of the final output image based on specified combination method and lists of images widths and heights.""" image_heights, image_widths = \ self._height_width_extractor(metadata_list) if comb_method == PAD: max_w = max(image_widths) max_h = max(image_heights) expand_to = max(max_w, max_h) if (expand_to % 2) == 1: expand_to -= 1 return expand_to elif comb_method == CROP: min_w = min(image_widths) min_h = min(image_heights) crop_to = min(min_w, min_h) if (crop_to % 2) == 1: crop_to -= 1 return crop_to else: raise ValueError('invalid value for combination_method') class FlickrGrouper(Grouper): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) raise NotImplementedError()
Python
0.000001
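A standalone sketch of the day-bucketing idea FileSystemGrouper implements above, without the exif/metadata dependency; the dates and filenames are invented.

from collections import defaultdict
from datetime import datetime

metadata = [
    {'date': datetime(2020, 1, 1, 9, 30), 'file': 'a.jpg'},
    {'date': datetime(2020, 1, 1, 17, 0), 'file': 'b.jpg'},
    {'date': datetime(2020, 1, 2, 8, 15), 'file': 'c.jpg'},
]

by_day = defaultdict(list)
for meta in metadata:
    d = meta['date']
    # floor each timestamp to midnight so all shots from one day share a key
    by_day[datetime(d.year, d.month, d.day)].append(meta['file'])

print(dict(by_day))   # two buckets: Jan 1 with two files, Jan 2 with one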
cc8e38b46ee79b08c76b31d173301549722693b9
add examples of deployment object
examples/deployment_examples.py
examples/deployment_examples.py
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from kubernetes import client, config

DEPLOYMENT_NAME = "nginx-deployment"


def create_deployment_object():
    # Instantiate an empty deployment object
    deployment = client.ExtensionsV1beta1Deployment()
    # Fill in the required Deployment fields (apiVersion, kind and metadata)
    deployment.api_version = "extensions/v1beta1"
    deployment.kind = "Deployment"
    deployment.metadata = client.V1ObjectMeta(name=DEPLOYMENT_NAME)

    # Create and configure a spec section
    spec = client.ExtensionsV1beta1DeploymentSpec()
    spec.replicas = 3
    spec.template = client.V1PodTemplateSpec()
    spec.template.metadata = client.V1ObjectMeta(labels={"app": "nginx"})
    spec.template.spec = client.V1PodSpec()

    # Configure the Pod template container
    container = client.V1Container()
    container.name = "nginx"
    container.image = "nginx:1.7.9"
    container.ports = [client.V1ContainerPort(container_port=80)]
    spec.template.spec.containers = [container]

    # Assign the spec section to deployment.spec
    deployment.spec = spec

    return deployment


def create_deployment(api_instance, deployment):
    # Create the deployment
    api_response = api_instance.create_namespaced_deployment(
        body=deployment,
        namespace="default")
    print("Deployment created. status='%s'" % str(api_response.status))


def update_deployment(api_instance, deployment):
    # Update the container image
    deployment.spec.template.spec.containers[0].image = "nginx:1.9.1"
    # Update the deployment
    api_response = api_instance.replace_namespaced_deployment(
        name=DEPLOYMENT_NAME,
        namespace="default",
        body=deployment)
    print("Deployment updated. status='%s'" % str(api_response.status))


def roll_back_deployment(api_instance):
    # Instantiate an empty DeploymentRollback object
    rollback = client.ExtensionsV1beta1DeploymentRollback()
    # Fill in the required DeploymentRollback fields
    rollback.api_version = "extensions/v1beta1"
    rollback.kind = "DeploymentRollback"
    rollback.name = DEPLOYMENT_NAME
    # Configure the rollback
    rollback.rollback_to = client.ExtensionsV1beta1RollbackConfig()
    rollback.rollback_to.revision = 0
    # Execute the rollback
    api_response = api_instance.create_namespaced_deployment_rollback(
        name=DEPLOYMENT_NAME,
        namespace="default",
        body=rollback)
    print("Deployment rolled back. status='%s'" % str(api_response.status))


def delete_deployment(api_instance):
    # Delete the deployment
    api_response = api_instance.delete_namespaced_deployment(
        name=DEPLOYMENT_NAME,
        namespace="default",
        body=client.V1DeleteOptions(propagation_policy='Foreground',
                                    grace_period_seconds=5))
    print("Deployment deleted. status='%s'" % str(api_response.status))


def main():
    # Configs can be set in the Configuration class directly or using a helper
    # utility. If no argument is provided, the config will be loaded from the
    # default location.
    config.load_kube_config()
    extensions_v1beta1 = client.ExtensionsV1beta1Api()

    # Create a deployment object with the client-python API. The deployment we
    # create is the same as `nginx-deployment.yaml` in the /examples folder.
    deployment = create_deployment_object()

    create_deployment(extensions_v1beta1, deployment)

    update_deployment(extensions_v1beta1, deployment)

    roll_back_deployment(extensions_v1beta1)

    delete_deployment(extensions_v1beta1)


if __name__ == '__main__':
    main()
Python
0
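For orientation, a sketch of the same Deployment written out as a plain dict manifest; the field names follow the extensions/v1beta1 schema used above. Whether you pass typed client objects or a dict body to create_namespaced_deployment depends on client version and taste — treat this as a reference shape, not the library's canonical form.

deployment_manifest = {
    'apiVersion': 'extensions/v1beta1',
    'kind': 'Deployment',
    'metadata': {'name': 'nginx-deployment'},
    'spec': {
        'replicas': 3,
        'template': {
            'metadata': {'labels': {'app': 'nginx'}},
            'spec': {
                'containers': [{
                    'name': 'nginx',
                    'image': 'nginx:1.7.9',
                    'ports': [{'containerPort': 80}],
                }],
            },
        },
    },
}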
16165a9387807d54b91873e95a677bfe5d251aba
Add healthcheck module to contrib
kitnirc/contrib/healthcheck.py
kitnirc/contrib/healthcheck.py
import logging
import sys
import threading
import time

from kitnirc.modular import Module


_log = logging.getLogger(__name__)


class HealthcheckModule(Module):
    """A KitnIRC module which checks connection health.

    By default, this module will request a PONG response from the server
    if it hasn't seen any traffic in the past minute, and will assume the
    connection has dropped and exit the process if it doesn't see any
    traffic for 90 seconds.

    These delays can be changed by setting "delay" and "timeout" under
    the [healthcheck] configuration section.
    """

    def __init__(self, *args, **kwargs):
        super(HealthcheckModule, self).__init__(*args, **kwargs)
        config = self.controller.config

        if config.has_option("healthcheck", "delay"):
            self.delay = config.getint("healthcheck", "delay")
        else:
            self.delay = 60

        if config.has_option("healthcheck", "timeout"):
            self.timeout = config.getint("healthcheck", "timeout")
        else:
            self.timeout = 90

        assert self.timeout > self.delay

        # Wall-clock time via time.time(); time.clock() measures CPU time
        # on POSIX systems and would never advance while the bot is idle.
        self.last_activity = time.time()
        self._stop = False
        self.thread = threading.Thread(target=self.loop, name='healthcheck')
        self.thread.daemon = True

    def start(self, *args, **kwargs):
        super(HealthcheckModule, self).start(*args, **kwargs)
        self._stop = False
        self.thread.start()

    def stop(self, *args, **kwargs):
        super(HealthcheckModule, self).stop(*args, **kwargs)
        self._stop = True
        # In any normal circumstances, the healthcheck thread should finish
        # in about a second or less. We'll give it a little extra buffer.
        self.thread.join(2.0)
        if self.thread.is_alive():
            _log.warning("Healthcheck thread alive 2s after shutdown request.")

    def loop(self):
        _log.info("Healthcheck running: delay=%d timeout=%d",
                  self.delay, self.timeout)
        while not self._stop:
            elapsed = time.time() - self.last_activity
            if elapsed > self.timeout:
                _log.fatal("No incoming traffic in last %d seconds - exiting.",
                           elapsed)
                logging.shutdown()
                sys.exit(1)
            elif elapsed > self.delay:
                self.controller.client.send("PING")
            time.sleep(1)

    @Module.handle("LINE")
    def activity(self, line):
        self.last_activity = time.time()


module = HealthcheckModule

# vim: set ts=4 sts=4 sw=4 et:
Python
0
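A standalone sketch of the [healthcheck] options described in the docstring above; the section and option names come from the module, while the config text and values are invented.

from configparser import ConfigParser  # Python 3 spelling of the stdlib module

cfg = ConfigParser()
cfg.read_string("[healthcheck]\ndelay = 30\ntimeout = 45\n")
print(cfg.getint("healthcheck", "delay"))    # 30
print(cfg.getint("healthcheck", "timeout"))  # 45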
a533cf1eeea7f0126df86b84f54208154685ab32
Implement color brightness and contrast ratio calculations in preparation for improving label handling of text colors
kivymd/theming_dynamic_text.py
kivymd/theming_dynamic_text.py
# -*- coding: utf-8 -*-

# Two implementations.

# The first is based on color brightness obtained from:-
# https://www.w3.org/TR/AERT#color-contrast

# The second is based on relative luminance calculation for sRGB obtained from:-
# https://www.w3.org/TR/2008/REC-WCAG20-20081211/#relativeluminancedef
# and contrast ratio calculation obtained from:-
# https://www.w3.org/TR/2008/REC-WCAG20-20081211/#contrast-ratiodef
#
# Preliminary testing suggests color brightness more closely matches the
# Material Design spec suggested text colors, but the alternative implementation
# is both newer and the current 'correct' recommendation, so is included here
# as an option.

''' Implementation of color brightness method '''


def _color_brightness(c):
    # Weighted RGB sum per the W3C color brightness formula
    return c[0] * 299 + c[1] * 587 + c[2] * 114


def _black_or_white_by_color_brightness(color):
    if _color_brightness(color) >= 500:
        return 'black'
    else:
        return 'white'


''' Implementation of contrast ratio and relative luminance method '''


def _normalized_channel(c):
    # Per the sRGB relative luminance definition
    if c <= 0.03928:
        return c / 12.92
    else:
        return ((c + 0.055) / 1.055) ** 2.4


def _luminance(color):
    rg = _normalized_channel(color[0])
    gg = _normalized_channel(color[1])
    bg = _normalized_channel(color[2])
    return 0.2126 * rg + 0.7152 * gg + 0.0722 * bg


def _black_or_white_by_contrast_ratio(color):
    l_color = _luminance(color)
    l_black = 0.0
    l_white = 1.0
    b_contrast = (l_color + 0.05) / (l_black + 0.05)
    w_contrast = (l_white + 0.05) / (l_color + 0.05)
    return 'white' if w_contrast >= b_contrast else 'black'


def get_contrast_text_color(color, use_color_brightness=True):
    if use_color_brightness:
        c = _black_or_white_by_color_brightness(color)
    else:
        c = _black_or_white_by_contrast_ratio(color)
    if c == 'white':
        return (1, 1, 1, 1)
    else:
        return (0, 0, 0, 1)


if __name__ == '__main__':
    from kivy.utils import get_color_from_hex
    from kivymd.color_definitions import colors, text_colors

    for c in ['Red', 'Pink', 'Purple', 'DeepPurple', 'Indigo', 'Blue',
              'LightBlue', 'Cyan', 'Teal', 'Green', 'LightGreen', 'Lime',
              'Yellow', 'Amber', 'Orange', 'DeepOrange', 'Brown', 'Grey',
              'BlueGrey']:
        print("For the {} color palette:".format(c))
        for h in ['50', '100', '200', '300', '400', '500', '600', '700',
                  '800', '900', 'A100', 'A200', 'A400', 'A700']:
            hex_color = colors[c].get(h)
            if hex_color:
                col = get_color_from_hex(hex_color)
                col_bri = get_contrast_text_color(col)
                con_rat = get_contrast_text_color(col, use_color_brightness=False)
                text_color = text_colors[c][h]
                print("  The {} hue gives {} using color brightness, {} using "
                      "contrast ratio, and {} from the MD spec"
                      .format(h, col_bri, con_rat, text_color))
Python
0
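A quick standalone check of the color-brightness rule above, using Material "Blue 500" (#2196F3) as an invented test value; channels are 0-1 floats as in Kivy.

color = (0x21 / 255.0, 0x96 / 255.0, 0xF3 / 255.0, 1)
brightness = color[0] * 299 + color[1] * 587 + color[2] * 114
print(brightness)                                  # ~492.6, just under the 500 cutoff
print('black' if brightness >= 500 else 'white')   # -> 'white' text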
cad7db237c68139d3f4f7dd691205b207edb0b79
Refactor plugin api code into its own module
confluent/pluginapi.py
confluent/pluginapi.py
# The concept here is that mappings from the resource tree and arguments go to
# specific python class signatures.  The intent is to require
# plugin authors to come here if they *really* think they need new 'commands'
# and hopefully curtail deviation by each plugin author

# have to specify a standard place for cfg selection of *which* plugin
# as well as a standard to map api requests to python functions

# e.g. <nodeelement>/power/state maps to some plugin HardwareManager.get_power/set_power
# selected by hardwaremanagement.method

# plugins can advertise a set of names if there is a desire for readable things
# exceptions to handle os images

# endpoints point to a class... usually, the class should have:
# -create
# -retrieve
# -update
# -delete
# functions.  Console is special and just gets passed through
# see API.txt

import os
import sys

pluginmap = {}


def load_plugins():
    # To know our plugins directory, we get the parent path of 'bin'
    path = os.path.dirname(os.path.realpath(__file__))
    plugindir = os.path.realpath(os.path.join(path, '..', 'plugins'))
    sys.path.append(plugindir)
    plugins = set()
    # two passes, to avoid adding both py and pyc files
    for plugin in os.listdir(plugindir):
        plugin = os.path.splitext(plugin)[0]
        plugins.add(plugin)
    for plugin in plugins:
        if plugin.startswith('.'):
            continue
        tmpmod = __import__(plugin)
        if 'plugin_names' in tmpmod.__dict__:
            for name in tmpmod.plugin_names:
                pluginmap[name] = tmpmod
        else:
            pluginmap[plugin] = tmpmod

nodetree = {
    '/': ['power/', 'boot/', 'console/', 'attributes/'],
    '/power/': ['state'],
    '/boot/': ['device'],
    '/console/': ['session', 'logging'],
}

# _ elements are for internal use (e.g. special console scheme)
nodeelements = {
    '_console/session': {
        'pluginattrs': ['console.method', 'hardwaremanagement.method'],
    },
    'console/session': {
        'pluginattrs': ['console.method', 'hardwaremanagement.method'],
    },
    'power/state': {
        'pluginattrs': ['hardwaremanagement.method'],
    },
    'boot/device': {
        'pluginattrs': ['hardwaremanagement.method'],
    },
}


def handle_path(path, operation, configmanager):
    '''Given a full path request, return an object.

    The plugins should generally return some sort of iterator.
    An exception is made for console/session, which should return
    a class with read(), write(bytes), and close()
    '''
    if (path.startswith("/node/") or path.startswith("/system/") or
            # single node requests
            path.startswith("/vm/")):
        nodeidx = path.find("/", 1) + 1
        node = path[nodeidx:]
        # partition the remainder, not the full path, so that for
        # "/node/n1/power/state" we get node="n1" and element="power/state"
        node, _, element = node.partition("/")
        if element not in nodeelements:
            raise Exception("Invalid element requested")
        plugroute = nodeelements[element]
        if 'pluginattrs' in plugroute:
            nodeattr = configmanager.get_node_attributes(
                [node], plugroute['pluginattrs'])
            for attrname in plugroute['pluginattrs']:
                if attrname in nodeattr:
                    return pluginmap[nodeattr[attrname]].__dict__[operation](
                        node=(node), operation=operation,
                        configmanager=configmanager)
Python
0
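A minimal, self-contained version of the directory-scan discovery that load_plugins() performs above; discover() is a hypothetical helper name, and plugindir is whatever directory you point it at.

import importlib
import os
import sys

def discover(plugindir):
    sys.path.append(plugindir)
    # one name per module, dropping extensions so .py/.pyc collapse together
    names = set(os.path.splitext(f)[0] for f in os.listdir(plugindir)
                if not f.startswith('.'))
    return dict((name, importlib.import_module(name)) for name in names)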
2cef2ff2eb9c415f374de0a2f8a7174fddca8836
add missing session.py file
lib/exabgp/configuration/bgp/session.py
lib/exabgp/configuration/bgp/session.py
# encoding: utf-8
"""
session.py

Created by Thomas Mangin on 2014-06-22.
Copyright (c) 2014-2014 Exa Networks. All rights reserved.
"""

from exabgp.configuration.engine.registry import Raised
from exabgp.configuration.engine.section import Section
from exabgp.configuration.engine.parser import asn
from exabgp.configuration.engine.parser import ip
from exabgp.configuration.engine.parser import holdtime

from exabgp.configuration.bgp.capability import syntax_capability
from exabgp.configuration.bgp.capability import SectionCapability


# ============================================================== syntax_session

syntax_session = """\
session <name> {
%s
}
""" % (
    '\n\t'.join((_.replace(' <name>','') for _ in syntax_capability.split('\n')))
)


# =============================================================== RaisedSession

class RaisedSession (Raised):
    syntax = syntax_session


# ============================================================== SectionSession
#

class SectionSession (Section):
    syntax = syntax_session
    name = 'session'

    def exit (self,tokeniser):
        capability = self.get_unamed(tokeniser,'capability')
        if capability:
            if 'capability' in self.content:
                raise RaisedSession(tokeniser,'can not have both an unnamed and a named capability in a session')
            self.content['capability'] = capability

        if 'capability' not in self.content:
            raise RaisedSession(tokeniser,'section is missing a capability section')

        if 'router-id' not in self.content:
            # 0.0.0.0 is now an invalid router-id so it will be replaced by the bind ip
            self.content['router-id'] = ip(lambda:'0.0.0.0')

        if 'hold-time' not in self.content:
            self.content['hold-time'] = holdtime(lambda:'180')

        if 'asn-local' not in self.content:
            raise RaisedSession(tokeniser,'section is missing a local asn')

        if 'asn-peer' not in self.content:
            raise RaisedSession(tokeniser,'section is missing a peer asn')

    def router_id (self,tokeniser):
        try:
            self.content['router-id'] = ip(tokeniser)
        except ValueError,e:
            raise RaisedSession(tokeniser,'could not parse router-id, %s' % str(e))

    def hold_time (self,tokeniser):
        try:
            self.content['hold-time'] = holdtime(tokeniser)
        except ValueError,e:
            raise RaisedSession(tokeniser,'could not parse hold-time, %s' % str(e))

    def local_asn (self,tokeniser):
        try:
            self.content['asn-local'] = asn(tokeniser)
        except ValueError,e:
            raise RaisedSession(tokeniser,'could not parse local asn, %s' % str(e))

    def peer_asn (self,tokeniser):
        try:
            self.content['asn-peer'] = asn(tokeniser)
        except ValueError,e:
            raise RaisedSession(tokeniser,'could not parse peer asn, %s' % str(e))

    def capability (self,tokeniser):
        section = self.get_section(SectionCapability.name,tokeniser)
        if section:
            self.content['capability'] = section
        else:
            return False

    @classmethod
    def register (cls,registry,location):
        registry.register_class(cls)

        registry.register(SectionCapability,location+['capability'])
        registry.register_hook(cls,'action',location+['capability'],'capability')

        registry.register_hook(cls,'enter',location,'enter')
        registry.register_hook(cls,'exit',location,'exit')

        registry.register_hook(cls,'action',location+['router-id'],'router_id')
        registry.register_hook(cls,'action',location+['hold-time'],'hold_time')

        asn = location + ['asn']

        registry.register_hook(cls,'enter',asn,'enter_unamed_section')
        registry.register_hook(cls,'action',asn+['local'],'local_asn')
        registry.register_hook(cls,'action',asn+['peer'],'peer_asn')
        registry.register_hook(cls,'exit', asn,'exit_unamed_section')
Python
0.000001
5c8caad82cd43044152171973e2386d655a1fa3a
Add tests for the way "between" works if no limits are set up
tests/test_recurrences_without_limits.py
tests/test_recurrences_without_limits.py
from datetime import datetime from recurrence import Recurrence, Rule import recurrence RULE = Rule( recurrence.DAILY ) PATTERN = Recurrence( rrules=[RULE] ) def test_between_without_dtend_and_dtstart(): occurrences = [ instance for instance in PATTERN.between( datetime(2014, 1, 1, 0, 0, 0), datetime(2014, 1, 4, 0, 0, 0) ) ] # We get back nothing, since dtstart and dtend will have defaulted # to the current time, and January 2014 is in the past. assert occurrences == [] def test_between_with_dtend_and_dtstart_dtend_lower_than_end(): occurrences = [ instance for instance in PATTERN.between( datetime(2014, 1, 1, 0, 0, 0), datetime(2014, 1, 6, 0, 0, 0), dtstart=datetime(2014, 1, 1, 0, 0, 0), dtend=datetime(2014, 1, 4, 0, 0, 0), ) ] assert occurrences == [ datetime(2014, 1, 2, 0, 0, 0), datetime(2014, 1, 3, 0, 0, 0), datetime(2014, 1, 4, 0, 0, 0), ] def test_between_with_dtend_and_dtstart_dtend_higher_than_end(): occurrences = [ instance for instance in PATTERN.between( datetime(2014, 1, 1, 0, 0, 0), datetime(2014, 1, 6, 0, 0, 0), dtstart=datetime(2014, 1, 1, 0, 0, 0), dtend=datetime(2014, 1, 8, 0, 0, 0), ) ] assert occurrences == [ datetime(2014, 1, 2, 0, 0, 0), datetime(2014, 1, 3, 0, 0, 0), datetime(2014, 1, 4, 0, 0, 0), datetime(2014, 1, 5, 0, 0, 0), ] def test_between_with_dtend_and_dtstart_limits_equal_exclusive(): occurrences = [ instance for instance in PATTERN.between( datetime(2014, 1, 1, 0, 0, 0), datetime(2014, 1, 6, 0, 0, 0), dtstart=datetime(2014, 1, 2, 0, 0, 0), dtend=datetime(2014, 1, 6, 0, 0, 0), ) ] assert occurrences == [ datetime(2014, 1, 2, 0, 0, 0), datetime(2014, 1, 3, 0, 0, 0), datetime(2014, 1, 4, 0, 0, 0), datetime(2014, 1, 5, 0, 0, 0), ] def test_between_with_dtend_and_dtstart_limits_equal_inclusive(): occurrences = [ instance for instance in PATTERN.between( datetime(2014, 1, 1, 0, 0, 0), datetime(2014, 1, 6, 0, 0, 0), dtstart=datetime(2014, 1, 1, 0, 0, 0), dtend=datetime(2014, 1, 6, 0, 0, 0), inc=True ) ] assert occurrences == [ datetime(2014, 1, 1, 0, 0, 0), datetime(2014, 1, 2, 0, 0, 0), datetime(2014, 1, 3, 0, 0, 0), datetime(2014, 1, 4, 0, 0, 0), datetime(2014, 1, 5, 0, 0, 0), datetime(2014, 1, 6, 0, 0, 0), ] def test_between_with_dtend_and_dtstart_dtstart_lower_than_start(): occurrences = [ instance for instance in PATTERN.between( datetime(2014, 1, 2, 0, 0, 0), datetime(2014, 1, 6, 0, 0, 0), dtstart=datetime(2014, 1, 1, 0, 0, 0), dtend=datetime(2014, 1, 6, 0, 0, 0), ) ] assert occurrences == [ datetime(2014, 1, 3, 0, 0, 0), datetime(2014, 1, 4, 0, 0, 0), datetime(2014, 1, 5, 0, 0, 0), ] def test_between_with_dtend_and_dtstart_dtstart_higher_than_start(): occurrences = [ instance for instance in PATTERN.between( datetime(2014, 1, 1, 0, 0, 0), datetime(2014, 1, 6, 0, 0, 0), dtstart=datetime(2014, 1, 2, 0, 0, 0), dtend=datetime(2014, 1, 6, 0, 0, 0), ) ] assert occurrences == [ datetime(2014, 1, 2, 0, 0, 0), datetime(2014, 1, 3, 0, 0, 0), datetime(2014, 1, 4, 0, 0, 0), datetime(2014, 1, 5, 0, 0, 0), ]
Python
0.000004
ad7cdd6bdd0e364b1c270d26e102e791b6ea1f4b
Add tests for new dimensionality finder
pymatgen/analysis/tests/test_dimensionality.py
pymatgen/analysis/tests/test_dimensionality.py
from pymatgen.core.structure import Structure from pymatgen.analysis.local_env import CrystalNN from pymatgen.analysis.dimensionality import ( get_dimensionality_gorai, get_dimensionality_cheon, get_dimensionality_larsen, calculate_dimensionality_of_site, get_structure_component_info) from pymatgen.util.testing import PymatgenTest class LarsenDimensionalityTest(PymatgenTest): def setUp(self): cnn = CrystalNN() self.lifepo = cnn.get_bonded_structure(self.get_structure('LiFePO4')) self.graphite = cnn.get_bonded_structure(self.get_structure('Graphite')) self.cscl = cnn.get_bonded_structure(self.get_structure('CsCl')) tricky_structure = Structure( [5.79, 0., 0., 0, 5.79, 0., 0., 0., 5.79], ['B', 'C', 'C', 'C', 'C', 'N', 'N', 'N', 'N', 'Ag'], [[0.0, 0.0, 0.0], [0.842, 0.842, 0.842], [0.158, 0.842, 0.158], [0.158, 0.158, 0.842], [0.842, 0.158, 0.158], [0.726, 0.726, 0.726], [0.274, 0.726, 0.274], [0.274, 0.274, 0.726], [0.726, 0.274, 0.274], [0.5, 0.5, 0.5]]) self.tricky_structure = cnn.get_bonded_structure(tricky_structure) def test_get_dimensionality(self): self.assertEqual(get_dimensionality_larsen(self.lifepo), 3) self.assertEqual(get_dimensionality_larsen(self.graphite), 2) self.assertEqual(get_dimensionality_larsen(self.cscl), 3) def test_tricky_structure(self): """ Test for a tricky structure that other dimensionality finders say is 2D but is actually an interpenetrated 3D structure. """ self.assertEqual(get_dimensionality_larsen(self.tricky_structure), 3) def test_get_structure_component_info(self): # test components are returned correctly with the right keys components = get_structure_component_info(self.tricky_structure) self.assertEqual(len(components), 1) self.assertEqual(components[0]['dimensionality'], 3) self.assertTrue(isinstance(components[0]['structure'], Structure)) self.assertEqual(components[0]['structure'].num_sites, 10) # test 2D structure and get orientation information components = get_structure_component_info( self.graphite, inc_orientation=True) self.assertEqual(len(components), 2) self.assertEqual(components[0]['dimensionality'], 2) self.assertTrue(isinstance(components[0]['structure'], Structure)) self.assertEqual(components[0]['structure'].num_sites, 2) self.assertEqual(components[0]['orientation'], (0, 0, 1)) def test_calculate_dimensionality_of_site(self): dimen = calculate_dimensionality_of_site(self.tricky_structure, 0) self.assertEqual(dimen, 3) # test vertices returned correctly dimen, vertices = calculate_dimensionality_of_site( self.cscl, 0, inc_vertices=True) self.assertEqual(dimen, 3) self.assertEqual(len(vertices), 4) self.assertEqual(vertices[0], (-1, 1, 0)) class CheonDimensionalityTest(PymatgenTest): def test_get_dimensionality(self): s = self.get_structure('LiFePO4') self.assertEqual(get_dimensionality_cheon(s), 'intercalated ion') s = self.get_structure('Graphite') self.assertEqual(get_dimensionality_cheon(s), '2D') def test_get_dimensionality_with_bonds(self): s = self.get_structure('CsCl') self.assertEqual(get_dimensionality_cheon(s), 'intercalated ion') self.assertEqual( get_dimensionality_cheon(s, ldict={"Cs": 3.7, "Cl": 3}), '3D') def test_tricky_structure(self): tricky_structure = Structure( [5.79, 0., 0., 0, 5.79, 0., 0., 0., 5.79], ['B', 'C', 'C', 'C', 'C', 'N', 'N', 'N', 'N', 'Ag'], [[0.0, 0.0, 0.0], [0.842, 0.842, 0.842], [0.158, 0.842, 0.158], [0.158, 0.158, 0.842], [0.842, 0.158, 0.158], [0.726, 0.726, 0.726], [0.274, 0.726, 0.274], [0.274, 0.274, 0.726], [0.726, 0.274, 0.274], [0.5, 0.5, 0.5]]) # cheon dimensionality gets wrong structure 
self.assertEqual(get_dimensionality_cheon(tricky_structure), '2D') class GoraiDimensionalityTest(PymatgenTest): def test_get_dimensionality(self): s = self.get_structure('LiFePO4') self.assertEqual(get_dimensionality_gorai(s), 3) s = self.get_structure('Graphite') self.assertEqual(get_dimensionality_gorai(s), 2) def test_get_dimensionality_with_bonds(self): s = self.get_structure('CsCl') self.assertEqual(get_dimensionality_gorai(s), 1) self.assertEqual(get_dimensionality_gorai(s, bonds={("Cs", "Cl"): 3.7}), 3)
Python
0
a9ca7f2f22551256213ecd32047022048c72db5c
Add Python 3 Script for Converting Image Types
scripts/convert_svgs.py
scripts/convert_svgs.py
import cairosvg import os # MUST RUN IN PYTHON 3 and pip install cairosvg file_dir = '../data/hough_test/Test_Set_1/' svgs = os.listdir(os.path.join(file_dir, 'SVGs')) for svg in svgs: name = svg.split('.svg')[0] cairosvg.svg2png(url=os.path.join(file_dir, 'SVGs', svg), write_to=os.path.join(file_dir, 'PNGs', '{0}.png'.format(name)), dpi=600) # cairosvg.svg2pdf(url=os.path.join(file_dir, 'SVGs', svg), # write_to=os.path.join(file_dir, 'PDFs', '{0}.pdf'.format(name)), dpi=600)
Python
0
392be3310efc812686c0d43f7ca884d9c730a879
Add stop-by-time script to end simulation after a specified amount of simulated time
scripts/stop-by-time.py
scripts/stop-by-time.py
# End ROI after x nanoseconds # Usage: -s stop-by-time:1000000 # End after 1 ms of simulated time import sim class StopByTime: def setup(self, args): args = dict(enumerate((args or '').split(':'))) self.time = long(args.get(0, 1e6)) self.done = False sim.util.Every(self.time * sim.util.Time.NS, self.periodic, roi_only = True) def periodic(self, time, time_delta): if self.done: return elif time >= self.time: print '[STOPBYTIME] Ending ROI after %.0f nanoseconds' % (time / 1e6) sim.control.set_roi(False) self.done = True sim.control.abort() sim.util.register(StopByTime())
Python
0
df7155dad442d079d792b69206a9760f980454fd
Add tests for prob.continuous
metabench/tests/test_prob_continuous.py
metabench/tests/test_prob_continuous.py
import pytest import metabench as mb def _perform_continuous_problem_test(cp): s = cp.generate_solution() cp.evaluate(s) for n, m in cp.get_neighbors(s, 0.5): cp.evaluate(n, m) def test_ackleys(): cp = mb.prob.Ackleys(10) _perform_continuous_problem_test(cp) def test_bukin6(): cp = mb.prob.Bukin6() _perform_continuous_problem_test(cp) def test_crossintray(): cp = mb.prob.CrossInTray() _perform_continuous_problem_test(cp) def test_drop_wave(): cp = mb.prob.DropWave() _perform_continuous_problem_test(cp) def test_eggholder(): cp = mb.prob.Eggholder() _perform_continuous_problem_test(cp) def test_gramacy_lee(): cp = mb.prob.GramacyLee() _perform_continuous_problem_test(cp) def test_griewank(): cp = mb.prob.Griewank(10) _perform_continuous_problem_test(cp) def test_holder_table(): cp = mb.prob.HolderTable() _perform_continuous_problem_test(cp) def test_langermann(): cp = mb.prob.Langermann() _perform_continuous_problem_test(cp) def test_levy(): cp = mb.prob.Levy(10) _perform_continuous_problem_test(cp) def test_levy13(): cp = mb.prob.Levy13() _perform_continuous_problem_test(cp) def test_rastrigin(): cp = mb.prob.Rastrigin(10) _perform_continuous_problem_test(cp) def test_schaffer2(): cp = mb.prob.Schaffer2() _perform_continuous_problem_test(cp) def test_schaffer4(): cp = mb.prob.Schaffer4() _perform_continuous_problem_test(cp) def test_schwefel(): cp = mb.prob.Schwefel(10) _perform_continuous_problem_test(cp) def test_shubert(): cp = mb.prob.Shubert() _perform_continuous_problem_test(cp) def test_bohachevsky(): cp = mb.prob.Bohachevsky(1) _perform_continuous_problem_test(cp) cp = mb.prob.Bohachevsky(2) _perform_continuous_problem_test(cp) cp = mb.prob.Bohachevsky(3) _perform_continuous_problem_test(cp) with pytest.raises(ValueError): mb.prob.Bohachevsky(4) def test_perm0(): cp = mb.prob.Perm0(10, 0.7) _perform_continuous_problem_test(cp) cp = mb.prob.Perm0(10, -23.) 
_perform_continuous_problem_test(cp) def test_rotated_hyper_ellipsoid(): cp = mb.prob.RotatedHyperEllipsoid(10) _perform_continuous_problem_test(cp) def test_sphere(): cp = mb.prob.Sphere(10) _perform_continuous_problem_test(cp) def test_sum_diff_power(): cp = mb.prob.SumDiffPower(10) _perform_continuous_problem_test(cp) def test_sum_square(): cp = mb.prob.SumSquare(10) _perform_continuous_problem_test(cp) def test_trid(): cp = mb.prob.Trid(10) _perform_continuous_problem_test(cp) def test_booth(): cp = mb.prob.Booth() _perform_continuous_problem_test(cp) def test_matyas(): cp = mb.prob.Matyas() _perform_continuous_problem_test(cp) def test_mc_cormick(): cp = mb.prob.McCormick() _perform_continuous_problem_test(cp) def test_power_sum(): cp = mb.prob.PowerSum() _perform_continuous_problem_test(cp) def test_zahkarov(): cp = mb.prob.Zakharov(10) _perform_continuous_problem_test(cp) def test_three_hump_camel(): cp = mb.prob.ThreeHumpCamel() _perform_continuous_problem_test(cp) def test_six_hump_camel(): cp = mb.prob.SixHumpCamel() _perform_continuous_problem_test(cp) def test_dixon_price(): cp = mb.prob.DixonPrice(10) _perform_continuous_problem_test(cp) def test_rosenbrock(): cp = mb.prob.Rosenbrock(10) _perform_continuous_problem_test(cp) def test_dejong5(): cp = mb.prob.DeJong5() _perform_continuous_problem_test(cp) def test_easom(): cp = mb.prob.Easom() _perform_continuous_problem_test(cp) def test_michalewicz(): cp = mb.prob.Michalewicz(10) _perform_continuous_problem_test(cp) def test_beale(): cp = mb.prob.Beale() _perform_continuous_problem_test(cp) def test_branin(): cp = mb.prob.Branin() _perform_continuous_problem_test(cp) def test_colville(): cp = mb.prob.Colville() _perform_continuous_problem_test(cp) def test_forrester(): cp = mb.prob.Forrester() _perform_continuous_problem_test(cp) def test_goldstein_price(): cp = mb.prob.GoldsteinPrice(False) _perform_continuous_problem_test(cp) cp = mb.prob.GoldsteinPrice(True) _perform_continuous_problem_test(cp) def test_hartmann3d(): cp = mb.prob.Hartmann3D() _perform_continuous_problem_test(cp) def test_hartmann6d(): cp = mb.prob.Hartmann6D(False) _perform_continuous_problem_test(cp) cp = mb.prob.Hartmann6D(True) _perform_continuous_problem_test(cp) def test_perm(): cp = mb.prob.Perm(10, 0.7) _perform_continuous_problem_test(cp)
Python
0.000004
d195d67fe3e9c3e12bb978bfaa98276e8f9f7140
allow loading ctx from expression
script_runner/ctx_server.py
script_runner/ctx_server.py
######### # Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # * See the License for the specific language governing permissions and # * limitations under the License. import argparse import os import sys import importlib import re from ctx_proxy import UnixCtxProxy class CtxProxyServer(object): def __init__(self, ctx, socket_path=None): self.ctx = ctx self.proxy = UnixCtxProxy(ctx, socket_path) self.stopped = False def close(self): self.proxy.close() def stop(self): self.stopped = True def serve(self): while not self.stopped: try: self.proxy.poll_and_process(timeout=0.1) except RuntimeError, e: print 'ignoring: {}'.format(e) def parse_args(args=None): parser = argparse.ArgumentParser() parser.add_argument('-s', '--socket-path', default=None) parser.add_argument('-e', '--expression', default=None) parser.add_argument('-p', '--module-path', default=None) return parser.parse_args(args) def load_ctx_from_module_path(module_path): module_dir = os.path.dirname(module_path) if module_dir not in sys.path: sys.path.append(module_dir) ctx_module = importlib.import_module( os.path.basename(os.path.splitext(module_path)[0])) ctx_module = reload(ctx_module) return getattr(ctx_module, 'ctx') # impl taken from 'pythonpy' package def load_ctx_from_expression(expression, prefix=''): regex = r"({}[a-zA-Z_][a-zA-Z0-9_]*)\.?".format(prefix) matches = set(re.findall(regex, expression)) for module_name in matches: try: module = importlib.import_module(module_name) globals()[module_name] = module load_ctx_from_expression(expression, prefix='{}.'.format(module_name)) except ImportError as e: pass if not prefix: return eval(expression) def load_ctx(load_ctx_function, **kwargs): ctx = load_ctx_function() if callable(ctx): ctx = ctx(**kwargs) return ctx def admin_function(ctx_server, load_ctx_function): def admin(action, **kwargs): if action == 'load': ctx = load_ctx(load_ctx_function, **kwargs) ctx._admin_ = admin_function(ctx_server, load_ctx_function) ctx_server.proxy.ctx = ctx elif action == 'stop': ctx_server.stop() else: raise RuntimeError('unknown action: {}'.format(action)) return admin def main(): args = parse_args() if (args.module_path and args.expression) or not \ (args.module_path or args.expression): sys.exit('ctx-server: error: use either --module-path or --expression') if args.module_path: def load_ctx_function(): return load_ctx_from_module_path(args.module_path) else: def load_ctx_function(): return load_ctx_from_expression(args.expression) ctx = load_ctx(load_ctx_function) server = CtxProxyServer(ctx, args.socket_path) ctx._admin_ = admin_function(server, load_ctx_function) print server.proxy.socket_url server.serve() if __name__ == '__main__': main()
######### # Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # * See the License for the specific language governing permissions and # * limitations under the License. import argparse import os import sys import importlib from ctx_proxy import UnixCtxProxy class CtxProxyServer(object): def __init__(self, ctx, socket_path=None): self.ctx = ctx self.proxy = UnixCtxProxy(ctx, socket_path) self.stopped = False def close(self): self.proxy.close() def stop(self): self.stopped = True def serve(self): while not self.stopped: try: self.proxy.poll_and_process(timeout=0.1) except RuntimeError, e: print 'ignoring: {}'.format(e) def parse_args(args=None): parser = argparse.ArgumentParser() parser.add_argument('-s', '--socket-path', default=None) parser.add_argument('module_path') return parser.parse_args(args) def load_ctx(module_path, **kwargs): module_dir = os.path.dirname(module_path) if module_dir not in sys.path: sys.path.append(module_dir) ctx_module = importlib.import_module( os.path.basename(os.path.splitext(module_path)[0])) ctx_module = reload(ctx_module) ctx = getattr(ctx_module, 'ctx') if callable(ctx): ctx = ctx(**kwargs) return ctx def admin_function(ctx_server, module_path): def admin(action, **kwargs): if action == 'load': ctx = load_ctx(module_path, **kwargs) ctx._admin_ = admin_function(ctx_server, module_path) ctx_server.proxy.ctx = ctx elif action == 'stop': ctx_server.stop() else: raise RuntimeError('unknown action: {}'.format(action)) return admin def main(): args = parse_args() ctx = load_ctx(args.module_path) server = CtxProxyServer(ctx, args.socket_path) ctx._admin_ = admin_function(server, args.module_path) print server.proxy.socket_url server.serve() if __name__ == '__main__': main()
Python
0
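A standalone illustration of the auto-import trick this commit adds: bare module names referenced in the expression are imported on the fly before eval. The helper name eval_with_auto_imports is hypothetical — a simplified cousin of load_ctx_from_expression above.

import importlib
import re

def eval_with_auto_imports(expression):
    # import any name that is used like a module (i.e. followed by a dot)
    for name in set(re.findall(r"([a-zA-Z_][a-zA-Z0-9_]*)\.", expression)):
        try:
            globals()[name] = importlib.import_module(name)
        except ImportError:
            pass
    return eval(expression)

print(eval_with_auto_imports("collections.OrderedDict([('a', 1)])"))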
30a0b17d028f279a9877150ac4eb60b1ce135fa2
Add check script for MultiplyHueAndSaturation
checks/check_multiply_hue_and_saturation.py
checks/check_multiply_hue_and_saturation.py
from __future__ import print_function, division import numpy as np import imgaug as ia from imgaug import augmenters as iaa def main(): image = ia.quokka_square((128, 128)) images_aug = [] for mul in np.linspace(0.0, 2.0, 10): aug = iaa.MultiplyHueAndSaturation(mul) image_aug = aug.augment_image(image) images_aug.append(image_aug) for mul_hue in np.linspace(0.0, 5.0, 10): aug = iaa.MultiplyHueAndSaturation(mul_hue=mul_hue) image_aug = aug.augment_image(image) images_aug.append(image_aug) for mul_saturation in np.linspace(0.0, 5.0, 10): aug = iaa.MultiplyHueAndSaturation(mul_saturation=mul_saturation) image_aug = aug.augment_image(image) images_aug.append(image_aug) ia.imshow(ia.draw_grid(images_aug, rows=3)) images_aug = [] images_aug.extend(iaa.MultiplyHue().augment_images([image] * 10)) images_aug.extend(iaa.MultiplySaturation().augment_images([image] * 10)) ia.imshow(ia.draw_grid(images_aug, rows=2)) if __name__ == "__main__": main()
Python
0
45ec6d5451062c8e3310332c0246eebb231233ef
Add script
scripts/migrate_duplicate_external_accounts.py
scripts/migrate_duplicate_external_accounts.py
from __future__ import absolute_import import logging import sys from dropbox.rest import ErrorResponse from dropbox.client import DropboxClient from framework.mongo import database as db from framework.transactions.context import TokuTransaction from website.app import init_app from website.addons.github.api import GitHubClient from website.addons.github.exceptions import GitHubError from website.oauth.models import ExternalAccount from scripts import utils as script_utils logger = logging.getLogger(__name__) def creds_are_valid(ea_id): ea = ExternalAccount.load(ea_id) if ea.provider == 'github': try: GitHubClient(external_account=ea).user() except (GitHubError, IndexError): logger.info('Invalid creds: {}'.format(ea_id)) return False elif ea.provider == 'dropbox': try: DropboxClient(ea.oauth_key).account_info() except (ValueError, IndexError, ErrorResponse): logger.info('Invalid creds: {}'.format(ea_id)) return False else: raise Exception('Unexpected provider: {}'.format(ea.provider)) logger.info('Valid creds: {}'.format(ea_id)) return True def swap_references_and_rm(to_keep, to_swap, provider): logger.info('Swapping {} references to {} with {}'.format(provider, to_swap, to_keep)) db['{}nodesettings'.format(provider)].find_and_modify( {'external_account': to_swap}, {'$set': { 'external_account': to_keep }} ) us_map = {us['_id']: us['external_accounts'] for us in db['{}usersettings'.format(provider)].find({'external_accounts': to_swap})} for usid in us_map.keys(): ealist = us_map[usid] ealist.remove(to_swap) ealist.append(to_keep) db['{}usersettings'.format(provider)].find_and_modify( {'_id': usid}, {'$set': { 'external_accounts': ealist }} ) u_map = {u['_id']: u['external_accounts'] for u in db['user'].find({'external_accounts': to_swap})} for uid in u_map.keys(): ealist = u_map[uid] ealist.remove(to_swap) ealist.append(to_keep) db['user'].find_and_modify( {'_id': uid}, {'$set': { 'external_accounts': ealist }} ) logger.info('Removing EA {}'.format(to_swap)) db.externalaccount.remove({'_id': to_swap}) def migrate(): possible_collisions = db.externalaccount.aggregate([{'$match': {'provider_id': {'$type': 16}}}])['result'] pc_map = {'dropbox': [], 'github': [], 'figshare': []} for pc in possible_collisions: pc_map[pc['provider']].append(pc) collisions = [] for provider in pc_map: for pc in pc_map[provider]: if db.externalaccount.find({'provider': provider, 'provider_id': str(pc['provider_id'])}).count(): collisions.append([provider, pc['_id'], db.externalaccount.find({'provider': provider, 'provider_id': str(pc['provider_id'])})[0]['_id']]) ns_map = {'github': db.githubnodesettings, 'dropbox': db.dropboxnodesettings} eas_no_ns = [] problem_ids = [] for cols in collisions: provider = cols[0] int_ea = cols[1] str_ea = cols[2] if ns_map[provider].find({'external_account': int_ea}).count() == 0: eas_no_ns.append(int_ea) swap_references_and_rm(str_ea, int_ea, provider) elif ns_map[provider].find({'external_account': str_ea}).count() == 0: eas_no_ns.append(str_ea) swap_references_and_rm(int_ea, str_ea, provider) else: problem_ids.append([int_ea, str_ea]) if creds_are_valid(int_ea) and not creds_are_valid(str_ea): swap_references_and_rm(int_ea, str_ea, provider) else: swap_references_and_rm(str_ea, int_ea, provider) def main(): dry = '--dry' in sys.argv script_utils.add_file_logger(logger, __file__) init_app(set_backends=True, routes=False) with TokuTransaction(): migrate() if dry: raise RuntimeError('Dry run -- Transaction rolled back') if __name__ == '__main__': main()
Python
0.000002
fb18da6685948de4e66c310af8b850739034c1e9
Add move.py
move.py
move.py
import Tkinter as tk import os class Application(tk.Frame): """ The Application class manages the GUI. The create_widgets() method creates the widgets to be inserted in the frame. A button triggers an event that calls the perform_move() method. This function calls the recursive method move_directories(). """ def __init__(self, master): tk.Frame.__init__(self, master) self.grid() self.create_widgets() def create_widgets(self): self.intro = tk.Label(self, text = "Do you want to move all files in one folder to another folder ?") self.intro.grid(row = 0, column = 0, columnspan = 3, sticky = tk.W) self.src_label = tk.Label(self, text = "Enter the complete path of the source (with single backslash b/w directories) ") self.src_label.grid(row = 2, column = 0, columnspan = 3, sticky = tk.W) self.src_entry = tk.Entry(self, width = 50) self.src_entry.grid(row = 3, column = 1, sticky = tk.W) self.dest_label = tk.Label(self, text = "Enter the complete path of the (already existing, but empty) destination (with single backslash b/w directories) ") self.dest_label.grid(row = 5, column = 0, columnspan = 3, sticky = tk.W) self.dest_entry = tk.Entry(self, width = 50) self.dest_entry.grid(row = 6, column = 1, sticky = tk.W) self.move_button = tk.Button(self, text = "MOVE IT !", command = self.perform_move) self.move_button.grid(row = 8, column = 1, sticky = tk.W) self.text = tk.Text(self, width = 50, height = 10, wrap = tk.WORD) self.text.grid(row = 12, column = 0, columnspan = 2, sticky = tk.W) def perform_move(self): msg = "" try: src = self.src_entry.get() dest = self.dest_entry.get() walker = os.walk(src) root, dirs, files = next(walker) for filename in files: old = os.path.join(root, filename) new = os.path.join(dest, filename) os.rename(old, new) for directory in dirs: self.move_directories(src, directory, dest) msg = "Done !!" except WindowsError: msg = "Give an existing empty directory for the destination." except StopIteration: msg = "Give a valid directory for the source." finally: self.text.delete(0.0, tk.END) self.text.insert(0.0, msg) def move_directories(self, src, directory, dest): src = os.path.join(src, directory) dest = os.path.join(dest, directory) os.mkdir(dest) walker = os.walk(src) root, dirs, files = next(walker) for filename in files: old = os.path.join(src, filename) new = os.path.join(dest, filename) os.rename(old, new) for directory in dirs: self.move_directories(src, directory, dest) os.rmdir(src) # To create the frame and create the Application class object def main(): root = tk.Tk() root.title("Packers & Movers") root.geometry("700x500") app = Application(root) root.mainloop() if __name__ == '__main__': main()
Python
0.000003
769036ffd7a21477a9133c58b352711d85c7a7a0
add regression test for monkey patching of Queue
test/test_queue_monkeypatch.py
test/test_queue_monkeypatch.py
from __future__ import absolute_import import unittest import urllib3 from urllib3.exceptions import EmptyPoolError import Queue class BadError(Exception): """ This should not be raised. """ pass Queue.Empty = BadError class TestConnectionPool(unittest.TestCase): """ """ def test_queue_monkeypatching(self): http = urllib3.HTTPConnectionPool(host="localhost", block=True) first_conn = http._get_conn(timeout=1) with self.assertRaises(EmptyPoolError): second_conn = http._get_conn(timeout=1) if __name__ == '__main__': unittest.main()
Python
0
338559737e34ca395cec895ac8e822fc3147c7aa
Add basic code
tule.py
tule.py
def calcLength(x, y, z=0):
    return (x**2 + y**2 + z**2)**0.5

#lengths are in feet
L = 92; W = 68; H = 22
MidXY = (W/2, L/2)

def feetToYards(inFeet):
    return inFeet/3.0

def yardsToFeet(inYards):
    return inYards * 3.0

#widthOfStrand is how wide the tule piece is (in feet)
def findTotal(widthOfStrand, z=0, printTotal=False):
    '''
    Find total in yards.
    Input: widthOfStrand (number of feet, width of tule)
           z=0 (how many feet it will "drape" down linearly)
           printTotal=False (Friendly print)
    Output: tuple -> The length needed (in yards), list of strand lengths (in yards)
    '''
    #Length of each break point
    strandLengths = []
    #Total length
    total = 0
    i = 0
    #find along width
    alongWidth = 0
    while alongWidth <= W:
        newX, newY = (MidXY[0] - alongWidth, MidXY[1] - L)
        total += calcLength(newX, newY, z)
        alongWidth += widthOfStrand
        # print "width on %d: %f %f" % (i, alongWidth, total); i += 1
    #find along length, around gym
    alongLength = 0; i = 0
    while alongLength <= L:
        newX, newY = (MidXY[0] - W, MidXY[1] - alongLength)
        # Length of strand needed (in yards)
        strandLength = calcLength(newX, newY, z)
        # Add break point length
        strandLengths.append(strandLength)
        # Total length
        total += strandLength
        alongLength += widthOfStrand
        # print "length on %d: %f %f" % (i, alongLength, total); i += 1
    #convert to yards
    total = feetToYards(total)
    strandLengths = map(feetToYards, strandLengths)
    #all the strand lengths (doubled for both halves of the room)
    strandLengths *= 2
    if printTotal:
        print '\nTotal Length For Room: %.2f yards' % (2*total)
    # Return total length in yards and a list of strand lengths needed
    return (2*total, strandLengths)

def totalCost(costPerYard, widthOfStrandInFeet, drapingInFeet, printTotal=False):
    # findTotal returns a (total, strandLengths) tuple; unpack it before
    # doing arithmetic with the total
    total, strandLengths = findTotal(widthOfStrandInFeet, drapingInFeet, printTotal)
    cost = total * costPerYard
    print "Total length %.2f yards for $%.2f (@ $%.2f per yard)" % (total, cost, costPerYard)
    return cost

print "Imported 'tule.py'"
Python
0
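A quick usage sketch for the module above, assuming tule.py is on the Python path; the price, strand width, and draping depth are invented for illustration.

from tule import findTotal, totalCost

total_yards, strands = findTotal(widthOfStrand=4, z=6)
print "Longest strand: %.2f yards" % max(strands)
totalCost(costPerYard=1.25, widthOfStrandInFeet=4, drapingInFeet=6, printTotal=True)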
242479ace03928b20dc86806f7592ec1148b615b
Add integration test for DraftService.
service/test/integration/test_draft_service.py
service/test/integration/test_draft_service.py
# # Copyright (c) 2014 ThoughtWorks, Inc. # # Pixelated is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Pixelated is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with Pixelated. If not, see <http://www.gnu.org/licenses/>. from twisted.internet import defer from test.support.integration import SoledadTestBase, MailBuilder class DraftServiceTest(SoledadTestBase): @defer.inlineCallbacks def test_store_and_load_draft(self): input_mail = MailBuilder().with_body('some test text').build_input_mail() stored_draft = yield self.draft_service.create_draft(input_mail) draft = yield self.mail_store.get_mail(stored_draft.ident, include_body=True) self.assertEqual('some test text', draft.body)
Python
0
c58be8c77fdad5ec8b6c9da9ba6cfc45ae0f6d07
fix typo in fuzz_addresses.py
fuzz-addresses.py
fuzz-addresses.py
import sys
import csv
from dateutil.parser import parse

#Take a csv with datetime stamps and addresses split across the remaining cells and output
#a fuzzed csv that contains two columns: a datetime stamp of the first day of the year
#in which the location occurred, and the country.

#call it with python fuzz-addresses.py inputfile.csv outputfile.csv

#open the CSV input file
inputfile = open(sys.argv[1], "r")
reader = csv.reader(inputfile)

#open the CSV output file
outputfile = open(sys.argv[2], "wb")
writer = csv.writer(outputfile)

for row in reader:
    #for the date, which is the first column, strip it down to the year
    #and make everything January 1; if you don't care about fuzzing the date,
    #change the line below to when=row[0]
    when = parse(str(parse(row[0]).year) + '-1-1')
    #for the address, keep only the country, which is the last column that has an entry
    country = ''
    while country == '':
        country = row.pop(-1)
    where = country
    #write the new row
    writer.writerow([when, where])

#close the files
inputfile.close()
outputfile.close()
Python
0.998582
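A standalone check of the year-flooring trick used above; the timestamp is invented.

from dateutil.parser import parse

when = parse(str(parse("2013-07-19 14:22").year) + '-1-1')
print(when)   # 2013-01-01 00:00:00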
0ff705b6bbe2d2844d6b947ca2aa8fc9cc9ead66
Create PedidoDeletar.py
backend/Models/Grau/PedidoDeletar.py
backend/Models/Grau/PedidoDeletar.py
from Framework.Pedido import Pedido from Framework.ErroNoHTTP import ErroNoHTTP class PedidoDeletar(Pedido): def __init__(self,variaveis_do_ambiente): super(PedidoDeletar, self).__init__(variaveis_do_ambiente) try: self.id = self.corpo['id'] except: raise ErroNoHTTP(400) def getId(self): return self.id
Python
0
d85442d5961602ae91c385a65e9503c409316b3f
Scrub stale data from redis
bin/scrub_stale_lists.py
bin/scrub_stale_lists.py
#!/usr/bin/env python import sys import os import time import redis import requests import logging from urlparse import urlparse from datetime import timedelta def main(rds): pf = "coalesce.v1." tasks_removed = 0 lists_removed = 0 list_keys = rds.smembers(pf + "list_keys") for key in list_keys: logging.debug("Inspecting list: " + pf + key) coalesce_list = rds.lrange(pf + "lists." + key, start=0, end=-1) for taskId in coalesce_list: logging.debug(" - inspecting task: " + taskId) if not is_pending(taskId): logging.debug("Removing stale task: " + taskId) rds.lrem(pf + 'lists.' + key, taskId, num=0) tasks_removed += 1 if not rds.llen(pf + "lists." + key): logging.debug("Removing stale list key: " + key) rds.srem(pf + "list_keys", key) lists_removed += 1 return tasks_removed, lists_removed def is_pending(taskId): url = 'https://queue.taskcluster.net/v1/task/%s/status' % (taskId) try: r = requests.get(url, timeout=3) if r.status_code == 404: logging.debug("Queue service returned 404 for task: " + taskId) return False if not r.json()['status']['state'] == 'pending': return False except: logging.debug("Failed to get status") return True if __name__ == '__main__': logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.DEBUG) try: redis_url = urlparse(os.environ['REDIS_URL']) except KeyError: logging.exception("Missing REDIS_URL env variable") sys.exit(1) rds = redis.Redis(host=redis_url.hostname, port=redis_url.port, password=redis_url.password) try: start = time.time() logging.info("Starting scrub task") tasks_removed, lists_removed = main(rds) elapsed = time.time() - start logging.info("Completed scrub task in %s" % (str(timedelta(seconds=elapsed)))) logging.info("Removed %s lists and %s tasks" % (tasks_removed, lists_removed)) except Exception: logging.exception("Fatal error in main loop")
Python
0.000001
b2b81ac5222a5be7a310c8ef7774f9e2887b412e
optimise lags implemented
optimiseLDA.py
optimiseLDA.py
import cPickle, string, numpy, getopt, sys, random, time, re, pprint
import numpy as np
import glob
from gensim import corpora, models
import pickle
import nltk
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import wordnet as wn
import logging
import matplotlib.pyplot as plt
import copy
from matplotlib.backends.backend_pdf import PdfPages
import os

#Andrew O'Harney 17/03/14
#Script to perform a grid search of vocab size and number of topics on a corpus
#Will save each generated gensim topic model and the perplexity matrix
#Metric used for model performance is perplexity on a 90/10 train/test split
#Full credit for the difficult part of this (LDA classification) goes to Radim Řehůřek for
#the excellent gensim toolkit http://radimrehurek.com/gensim/

#Turn on gensim logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

#NOTE: fill in these save paths before running; the empty strings are
#placeholders only, not working defaults
dictionaryName = ''  #Save path for dictionary (gensim encoded)
vocabName = ''  #Save name for complete vocab (plain text of vocab)
corpName = ''  #Save path for corpus (gensim encoded)
documentsName = ''  #Save path for corpus (plain text)
stopsPath = ''  #Path to list of stop words
#######################################################

#Useless stop words to be removed (contain no topic information)
stops = [re.findall(r"[\w']+", word.lower()) for word in open(stopsPath).readlines()]
stemmer = WordNetLemmatizer()
fnames = []

#Train LDA algorithm from vocab of SUN dataset
all_obj = '/home/andy/workspace/ecog/Data/cm/sun.db/object_labels.txt'
documents = [re.findall(r"[\w']+", word) for word in open(all_obj).readlines()]  #Documents/images

#Stem words
documents = [list(set([stemmer.lemmatize(word, 'n') for word in document if [word] not in stops])) for document in documents]

#Get frequency counts of each word
freqCount = {}
for document in documents:
    for word in document:
        freqCount[word] = freqCount.get(word, 0) + 1

random.shuffle(documents)

print 'Creating Vocab'
dictionary = corpora.Dictionary(documents)
print 'Number of unique objects %d' % len(dictionary)
dictionary.save(fname=dictionaryName)  # store the dictionary, for future reference

print 'Creating training corpus'
print 'Total number of images: %d' % len(documents)
pickle.dump(documents, open(documentsName, 'w'))
corp = [dictionary.doc2bow(doc) for doc in documents]
corpora.MmCorpus.serialize(corpName, corp)

######################################
#Train and test
corp = list(corp)
perplexityName = ''  #Save path for perplexity matrix (placeholder, fill in)
nwords = np.arange(200, 900, 100)  #Range of vocab lengths for the grid search
ntopics = np.arange(10, 32, 2)  #Range of topic counts for the grid search
perplexity = np.zeros([len(ntopics), len(nwords)])
trainSize = 0.9  #fraction of total documents to train on
resultsRoot = ''  #Output folder path for each of the lda classifiers (placeholder, fill in)

for i, num_words in enumerate(nwords):
    #Use only num_words words in the documents
    print 'Creating training corpus'
    t_dict = copy.copy(dictionary)
    #Remove low-occurrence words (keep at most num_words, drop words in fewer than 0.1% of documents)
    t_dict.filter_extremes(no_below=0.001*len(documents), no_above=1, keep_n=num_words)
    #Remove any words no longer accounted for, and empty documents
    t_docs = filter(None, [[word for word in doc if word in t_dict.values()] for doc in documents])
    t_corp = [t_dict.doc2bow(doc) for doc in t_docs]  #Create training corpus

    #Training and test sets
    s = int(trainSize*len(t_corp))
    corpTrain = t_corp[:s]
    corpTest = t_corp[s:]

    for j, num_topics in enumerate(ntopics):
        name = resultsRoot+'vocab_'+str(len(t_dict))+'_topics_'+str(num_topics)
        os.mkdir(name)
        print '# Words: %d \t # Topics: %d' % (len(t_dict), num_topics)

        #Train model
        lda = models.ldamodel.LdaModel(corpus=t_corp, id2word=t_dict, num_topics=num_topics,
                                       update_every=0, chunksize=len(t_corp), passes=50, alpha='auto')

        #Save data for this model
        lda.save(fname=name+'/model')
        t_dict.save(fname=name+'/dictionary')  # store the dictionary, for future reference
        pickle.dump(t_docs, open(name+'/documents', 'w'))
        corpora.MmCorpus.serialize(name+'/corp', t_corp)
        #Normalized test perplexity
        perplexity[j, i] = np.exp2(-lda.bound(corpTest) / sum(cnt for document in corpTest for _, cnt in document))

        #Show word distributions for the top 100 words of each topic
        pp = PdfPages(name+'/wordDistribution.pdf')
        for tn in np.arange(num_topics):
            plt.figure()
            plt.title('Topic %d' % (tn+1))

            ldaOut = [term.split('*') for term in lda.print_topic(tn, 100).split('+')]
            word_probs = [float(a) for (a, b) in ldaOut]
            words = [b for (a, b) in ldaOut]

            plt.plot(np.arange(len(ldaOut)), word_probs)
            plt.xticks(np.arange(len(ldaOut)), words, rotation=45, size=2)
            pp.savefig()
            plt.close()
        pp.close()

np.save(open(perplexityName, 'w'), perplexity)
Python
0
28100da7b41d8617ef240179acfce7b25f698efc
add selenium web driver
snippets/create-Selenium-python-wd-unittest.py
snippets/create-Selenium-python-wd-unittest.py
# -*- coding: utf-8 -*-
import unittest

from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait


def every_downloads_chrome(driver):
    if not driver.current_url.startswith("chrome://downloads"):
        driver.get("chrome://downloads/")
    return driver.execute_script("""
        var elements = document.querySelector('downloads-manager')
            .shadowRoot.querySelector('#downloadsList')
            .items
        if (elements.every(e => e.state === 'COMPLETE'))
            return elements.map(e => e.filePath || e.file_path || e.fileUrl || e.file_url);
        """)


# class Create(unittest.TestCase):
#     def setUp(self):
#         chrome_options = webdriver.ChromeOptions()
#         prefs = {'download.default_directory': '/Users/chenxinlu/Downloads/tron-wallet/'}
#         chrome_options.add_experimental_option('prefs', prefs)
#         self.driver = webdriver.Chrome(options=chrome_options, executable_path="/usr/local/bin/chromedriver")
#         self.driver.implicitly_wait(10)
#         self.verificationErrors = []
#         self.accept_next_alert = True
#
#     def test_create(self):
#         driver = self.driver
#         # Label: Test
#         driver.get("https://tronscan.org/#/walletwizard")
#         driver.find_element_by_name("password_input").click()
#         driver.find_element_by_name("password_input").clear()
#         driver.find_element_by_name("password_input").send_keys(
#             "innocentkitfaithpaperhorsearmysaydistancekingdomboardtouristmeat")
#         driver.find_element_by_css_selector("button.btn.btn-dark.btn-lg.ml-auto > span").click()
#         driver.find_element_by_css_selector("button.btn.btn-lg.btn-block > span").click()
#         # waits for all the files to be completed and returns the paths
#         paths = WebDriverWait(driver, 120, 1).until(every_downloads_chrome)
#         print(paths)
#
#     def is_element_present(self, how, what):
#         try:
#             self.driver.find_element(by=how, value=what)
#         except NoSuchElementException as e:
#             return False
#         return True
#
#     def is_alert_present(self):
#         try:
#             self.driver.switch_to_alert()
#         except NoAlertPresentException as e:
#             return False
#         return True
#
#     def close_alert_and_get_its_text(self):
#         try:
#             alert = self.driver.switch_to_alert()
#             alert_text = alert.text
#             if self.accept_next_alert:
#                 alert.accept()
#             else:
#                 alert.dismiss()
#             return alert_text
#         finally:
#             self.accept_next_alert = True
#
#     def tearDown(self):
#         # self.driver.quit()
#         # self.driver.close()
#         self.assertEqual([], self.verificationErrors)


class Create(object):
    def __init__(self):
        chrome_options = webdriver.ChromeOptions()
        prefs = {'download.default_directory': '/Users/chenxinlu/Downloads/tron-wallet/'}
        chrome_options.add_experimental_option('prefs', prefs)
        self.driver = webdriver.Chrome(options=chrome_options, executable_path="/usr/local/bin/chromedriver")
        self.driver.implicitly_wait(10)
        # self.accept_next_alert = True
        # self.verificationErrors = []

    def test_create(self):
        driver = self.driver
        # Label: Test
        driver.get("https://tronscan.org/#/walletwizard")
        driver.find_element_by_name("password_input").click()
        driver.find_element_by_name("password_input").clear()
        driver.find_element_by_name("password_input").send_keys(
            "innocentkitfaithpaperhorsearmysaydistancekingdomboardtouristmeat")
        driver.find_element_by_css_selector("button.btn.btn-dark.btn-lg.ml-auto > span").click()
        driver.find_element_by_css_selector("button.btn.btn-lg.btn-block > span").click()
        # waits for all the files to be completed and returns the paths
        paths = WebDriverWait(driver, 120, 1).until(every_downloads_chrome)
        print(paths[0])

    def close(self):
        self.driver.quit()

    # def is_element_present(self, how, what):
    #     try:
    #         self.driver.find_element(by=how, value=what)
    #     except NoSuchElementException as e:
    #         return False
    #     return True
    #
    # def is_alert_present(self):
    #     try:
    #         self.driver.switch_to_alert()
    #     except NoAlertPresentException as e:
    #         return False
    #     return True
    #
    # def close_alert_and_get_its_text(self):
    #     try:
    #         alert = self.driver.switch_to_alert()
    #         alert_text = alert.text
    #         if self.accept_next_alert:
    #             alert.accept()
    #         else:
    #             alert.dismiss()
    #         return alert_text
    #     finally:
    #         self.accept_next_alert = True
    #
    # def tearDown(self):
    #     # self.driver.quit()
    #     # self.driver.close()
    #     self.assertEqual([], self.verificationErrors)


if __name__ == "__main__":
    create_wallet = Create()
    for i in range(0, 100):
        create_wallet.test_create()
    create_wallet.close()
Python
0.000001
9870fdd4b0996254216ff85a4dc0f9706843ca50
Add test for nested while with exc and break.
tests/basics/while_nest_exc.py
tests/basics/while_nest_exc.py
# test nested whiles within a try-except

while 1:
    print(1)
    try:
        print(2)
        while 1:
            print(3)
            break
    except:
        print(4)
    print(5)
    break
Python
0
6a541b8d5b7c2c742420bbbe758866daef804e90
Add a unit test to verify controller objects do not persist across classes. (#483)
tests/mobly/test_suite_test.py
tests/mobly/test_suite_test.py
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import mock
import shutil
import tempfile

from future.tests.base import unittest

from mobly import base_test
from mobly import config_parser
from mobly import records
from mobly import test_runner

from tests.lib import mock_controller
from tests.lib import utils


class TestSuiteTest(unittest.TestCase):
    """Tests for use cases of creating Mobly test suites.

    Tests here target a combination of test_runner and base_test code.
    """

    def setUp(self):
        self.tmp_dir = tempfile.mkdtemp()
        self.mock_test_cls_configs = config_parser.TestRunConfig()
        self.summary_file = os.path.join(self.tmp_dir, 'summary.yaml')
        self.mock_test_cls_configs.summary_writer = records.TestSummaryWriter(
            self.summary_file)
        self.mock_test_cls_configs.log_path = self.tmp_dir
        self.mock_test_cls_configs.user_params = {"some_param": "hahaha"}
        self.mock_test_cls_configs.reporter = mock.MagicMock()
        self.base_mock_test_config = config_parser.TestRunConfig()
        self.base_mock_test_config.test_bed_name = 'SampleTestBed'
        self.base_mock_test_config.controller_configs = {}
        self.base_mock_test_config.user_params = {
            'icecream': 42,
            'extra_param': 'haha'
        }
        self.base_mock_test_config.log_path = self.tmp_dir

    def tearDown(self):
        shutil.rmtree(self.tmp_dir)

    def test_controller_object_not_persistent_across_classes_in_the_same_run(
            self):
        self.foo_test_controller_obj_id = None
        self.bar_test_controller_obj_id = None
        test_run_config = self.base_mock_test_config.copy()
        test_run_config.controller_configs = {'MagicDevice': [{'serial': 1}]}

        class FooTest(base_test.BaseTestClass):
            def setup_class(cls):
                cls.controller = cls.register_controller(mock_controller)[0]
                self.foo_test_controller_obj_id = id(cls.controller)

        class BarTest(base_test.BaseTestClass):
            def setup_class(cls):
                cls.controller = cls.register_controller(mock_controller)[0]
                self.bar_test_controller_obj_id = id(cls.controller)

        tr = test_runner.TestRunner(self.tmp_dir,
                                    test_run_config.test_bed_name)
        tr.add_test_class(test_run_config, FooTest)
        tr.add_test_class(test_run_config, BarTest)
        tr.run()
        self.assertNotEqual(self.foo_test_controller_obj_id,
                            self.bar_test_controller_obj_id)


if __name__ == "__main__":
    unittest.main()
Python
0
56cb3c6c046c403e186a5191e30b8b982900c57c
Add cli project tests
tests/test_cli/test_project.py
tests/test_cli/test_project.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function

from mock import patch

from tests.test_cli.utils import BaseCommandTestCase

from polyaxon_cli.cli.project import project


class TestProject(BaseCommandTestCase):
    @patch('polyaxon_client.api.project.ProjectApi.create_project')
    def test_create_project(self, create_project):
        self.runner.invoke(project, 'create')
        assert create_project.call_count == 0
        self.runner.invoke(project, ['create', '--name=foo'])
        assert create_project.call_count == 1

    @patch('polyaxon_client.api.project.ProjectApi.list_projects')
    def test_list_projects(self, list_projects):
        self.runner.invoke(project, ['list'])
        assert list_projects.call_count == 1

    @patch('polyaxon_client.api.project.ProjectApi.get_project')
    def test_get_project(self, get_project):
        self.runner.invoke(project, ['-p admin/foo', 'get'])
        assert get_project.call_count == 1

    @patch('polyaxon_client.api.project.ProjectApi.update_project')
    def test_update_project(self, update_project):
        self.runner.invoke(project, ['update'])
        assert update_project.call_count == 0
        self.runner.invoke(project, ['-p admin/foo', 'update', '--description=foo'])
        assert update_project.call_count == 1

    @patch('polyaxon_client.api.project.ProjectApi.list_experiment_groups')
    def test_project_groups(self, list_experiment_groups):
        self.runner.invoke(project, ['-p admin/foo', 'groups'])
        assert list_experiment_groups.call_count == 1

    @patch('polyaxon_client.api.project.ProjectApi.list_jobs')
    def test_project_jobs(self, list_jobs):
        self.runner.invoke(project, ['-p admin/foo', 'jobs'])
        assert list_jobs.call_count == 1

    @patch('polyaxon_client.api.project.ProjectApi.list_experiments')
    def test_project_experiment(self, list_experiments):
        self.runner.invoke(project, ['-p admin/foo', 'experiments'])
        assert list_experiments.call_count == 1

    @patch('polyaxon_client.api.project.ProjectApi.list_builds')
    def test_project_builds(self, list_builds):
        self.runner.invoke(project, ['-p admin/foo', 'builds'])
        assert list_builds.call_count == 1

    @patch('polyaxon_client.api.project.ProjectApi.list_tensorboards')
    def test_project_tensorboards(self, list_tensorboards):
        self.runner.invoke(project, ['-p admin/foo', 'tensorboards'])
        assert list_tensorboards.call_count == 1

    @patch('polyaxon_client.api.project.ProjectApi.download_repo')
    def test_project_download_repo(self, download_repo):
        self.runner.invoke(project, ['-p admin/foo', 'download'])
        assert download_repo.call_count == 1

    @patch('polyaxon_client.api.project.ProjectApi.bookmark')
    def test_project_bookmark(self, bookmark):
        self.runner.invoke(project, ['-p admin/foo', 'bookmark'])
        assert bookmark.call_count == 1

    @patch('polyaxon_client.api.project.ProjectApi.unbookmark')
    def test_project_unbookmark(self, unbookmark):
        self.runner.invoke(project, ['-p admin/foo', 'unbookmark'])
        assert unbookmark.call_count == 1
Python
0
bf28aa8fbe9aa735d017f935aefb89c5ed48f836
Add bubble sort implementation
aids/sorting_and_searching/bubble_sort.py
aids/sorting_and_searching/bubble_sort.py
'''
In this module, we implement bubble sort

Time complexity: O(n ^ 2)
'''

def bubble_sort(arr):
    '''
    Sort array using bubble sort
    '''
    for index_x in xrange(len(arr)):
        for index_y in xrange(len(arr) - 1, index_x, -1):
            if arr[index_y] < arr[index_y - 1]:
                arr[index_y], arr[index_y - 1] = arr[index_y - 1], arr[index_y]
Python
0
73cab4c1e0a591504176011b53b9774c8782238e
test kalman
tests/tsdb/test_tsdb_kalman.py
tests/tsdb/test_tsdb_kalman.py
from tsdb import TSDBClient, TSDB_REST_Client
import timeseries as ts
import numpy as np

import subprocess
import unittest
import asyncio
import asynctest
import time


class Test_TSDB_Kalman(asynctest.TestCase):

    def setUp(self):
        #############
        ### SETUP ###
        #############
        # We'll use a subprocess to run our server script, according to:
        # http://stackoverflow.com/questions/3781851/run-a-python-script-from-another-python-script-passing-in-args
        # We need this log file for some reason, it throws exceptions without it
        self.server_log_file = open('.tsdb_server.log.test', 'w')
        self.server_proc = subprocess.Popen(['python', 'go_server.py'],
                                            stdout=self.server_log_file,
                                            stderr=subprocess.STDOUT)
        time.sleep(1)

    # This needs to be separate in case the test
    # fails and then the server will never be shut down
    def tearDown(self):
        ################
        ### SHUTDOWN ###
        ################
        # Shuts down the server
        self.server_proc.terminate()
        self.server_log_file.close()
        time.sleep(1)

    async def test_simple_run(self):
        client = TSDBClient()

        await client.add_trigger('KalmanFilter', 'insert_ts',
                                 ['sig_epsilon_estimate', 'sig_eta_estimate'], None)  # ['mean', 'std'], None)

        sigeta_para = 1
        sigeps_para = 10

        sigeta = np.random.normal(0, sigeta_para, 2000)
        sigeps = np.random.normal(0, sigeps_para, 2000)

        mus = np.cumsum(sigeta) + 20
        y = mus + sigeps
        ats = ts.TimeSeries(y, np.arange(2000))

        await client.insert_ts(1, ats)
        await client.upsert_meta(1, {'order': 1})

        status, payload = await client.select({'order': {'==': 1}},
                                              ['sig_epsilon_estimate', 'sig_eta_estimate'], None)

        assert(np.isclose(payload['1']['sig_epsilon_estimate'], sigeps_para, rtol=0.1))
        assert(np.isclose(payload['1']['sig_eta_estimate'], sigeta_para, rtol=0.1))
Python
0.000006
763581798b89b5105e55b692a0c4f1fd0890e459
Add unit tests to ensure a valid provider
tests/unit/utils/gitfs_test.py
tests/unit/utils/gitfs_test.py
# -*- coding: utf-8 -*-
'''
These only test the provider selection and verification logic, they do not
init any remotes.
'''

# Import python libs
from __future__ import absolute_import

# Import Salt Testing libs
from salttesting import skipIf, TestCase
from salttesting.mock import MagicMock, patch, NO_MOCK, NO_MOCK_REASON
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')

# Import salt libs
import salt.utils.gitfs
from salt.exceptions import FileserverConfigError

# GLOBALS
OPTS = {'cachedir': '/tmp/gitfs-test-cache'}


@skipIf(NO_MOCK, NO_MOCK_REASON)
class TestGitFSProvider(TestCase):
    def test_provider_case_insensitive(self):
        '''
        Ensure that both lowercase and non-lowercase values are supported
        '''
        provider = 'GitPython'
        for role_name, role_class in (
                ('gitfs', salt.utils.gitfs.GitFS),
                ('git_pillar', salt.utils.gitfs.GitPillar),
                ('winrepo', salt.utils.gitfs.WinRepo)):

            key = '{0}_provider'.format(role_name)
            with patch.object(role_class, 'verify_gitpython',
                              MagicMock(return_value=True)):
                with patch.object(role_class, 'verify_pygit2',
                                  MagicMock(return_value=False)):
                    with patch.object(role_class, 'verify_dulwich',
                                      MagicMock(return_value=False)):
                        args = [OPTS]
                        if role_name == 'winrepo':
                            args.append('/tmp/winrepo-dir')
                        with patch.dict(OPTS, {key: provider}):
                            # Try to create an instance with uppercase letters in
                            # provider name. If it fails then a
                            # FileserverConfigError will be raised, so no assert is
                            # necessary.
                            role_class(*args)
                        # Now try to instantiate an instance with all lowercase
                        # letters. Again, no need for an assert here.
                        role_class(*args)

    def test_valid_provider(self):
        '''
        Ensure that an invalid provider is not accepted, raising a
        FileserverConfigError.
        '''
        def _get_mock(verify, provider):
            '''
            Return a MagicMock with the desired return value
            '''
            return MagicMock(return_value=verify.endswith(provider))

        for role_name, role_class in (
                ('gitfs', salt.utils.gitfs.GitFS),
                ('git_pillar', salt.utils.gitfs.GitPillar),
                ('winrepo', salt.utils.gitfs.WinRepo)):
            key = '{0}_provider'.format(role_name)
            for provider in salt.utils.gitfs.VALID_PROVIDERS:
                verify = 'verify_gitpython'
                mock1 = _get_mock(verify, provider)
                with patch.object(role_class, verify, mock1):
                    verify = 'verify_pygit2'
                    mock2 = _get_mock(verify, provider)
                    with patch.object(role_class, verify, mock2):
                        verify = 'verify_dulwich'
                        mock3 = _get_mock(verify, provider)
                        with patch.object(role_class, verify, mock3):
                            args = [OPTS]
                            if role_name == 'winrepo':
                                args.append('/tmp/winrepo-dir')

                            with patch.dict(OPTS, {key: provider}):
                                if role_name == 'gitfs' \
                                        or (role_name != 'gitfs'
                                            and provider != 'dulwich'):
                                    # This is a valid provider, so this should
                                    # pass without raising an exception.
                                    role_class(*args)
                                else:
                                    # Dulwich is not supported for git_pillar nor
                                    # winrepo, so trying to use it should raise an
                                    # exception.
                                    self.assertRaises(
                                        FileserverConfigError,
                                        role_class,
                                        *args
                                    )

                            with patch.dict(OPTS, {key: 'foo'}):
                                # Set the provider name to a known invalid provider
                                # and make sure it raises an exception.
                                self.assertRaises(
                                    FileserverConfigError,
                                    role_class,
                                    *args
                                )


if __name__ == '__main__':
    from integration import run_tests
    run_tests(TestGitFSProvider, needs_daemon=False)
Python
0
faef1804e1781365fc027ecf08d61fbab56a4679
Add migrations I forgot to commit out of saltiness
magic/migrations/0003_auto_20170929_0229.py
magic/migrations/0003_auto_20170929_0229.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-29 05:29
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('magic', '0002_auto_20170929_0159'),
    ]

    operations = [
        migrations.AlterField(
            model_name='card',
            name='power',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='power'),
        ),
        migrations.AlterField(
            model_name='card',
            name='toughness',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='toughness'),
        ),
    ]
Python
0
e61dcb055fb4767e6e662648c89cbdfda4422c97
Update expected error message in this test
docs/source/examples/test_no_depends_fails.py
docs/source/examples/test_no_depends_fails.py
from pych.extern import Chapel

@Chapel(sfile="users.onlyonce.chpl")
def useTwoModules(x=int, y=int):
    return int

if __name__ == "__main__":
    print(useTwoModules(2, 4))


import testcase # contains the general testing method, which allows us to gather output
import os.path

def test_using_multiple_modules():
    out = testcase.runpy(os.path.realpath(__file__))
    # Ensure that when a used module is nowhere near the exported function, we
    # get an error message to that effect.
    assert "error: Cannot find module or enum \'M1\'" in out
from pych.extern import Chapel

@Chapel(sfile="users.onlyonce.chpl")
def useTwoModules(x=int, y=int):
    return int

if __name__ == "__main__":
    print(useTwoModules(2, 4))


import testcase # contains the general testing method, which allows us to gather output
import os.path

def test_using_multiple_modules():
    out = testcase.runpy(os.path.realpath(__file__))
    # Ensure that when a used module is nowhere near the exported function, we
    # get an error message to that effect.
    assert "error: Cannot find module \'M1\'" in out
Python
0
2e7048d8feae5ed2c244e617077235b5b771f326
Add deleted selenium runner
test/selenium/src/run_selenium.py
test/selenium/src/run_selenium.py
#!/usr/bin/env python2.7
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com

"""
Basic selenium test runner

This script is used for running all selenium tests against the server defined
in the configuration yaml file. The script will wait a defined time for the
server to start before running the test. If the server fails to start before
its grace time is up, the script will return with an error code of 3. Error
codes 1 and 2 are reserved by pytest and status 0 is returned only if all the
tests pass.
"""

import logging
import os
import sys
import time
import urllib

import pytest

from lib import constants
from lib import file_ops
from lib import log
from lib import environment

PROJECT_ROOT_PATH = os.path.dirname(os.path.abspath(__file__)) + "/../"

logger = logging.getLogger("selenium.webdriver.remote.remote_connection")


def wait_for_server():
  """ Wait for the server to return a 200 response """
  sys.stdout.write("Waiting on server: ")
  for _ in xrange(environment.SERVER_WAIT_TIME):
    try:
      if urllib.urlopen(environment.APP_URL).getcode() == 200:
        print "[Done]"
        return True
    except IOError:
      sys.stdout.write(".")
      sys.stdout.flush()
      time.sleep(1)
  print "[Failed]"
  return False


if __name__ == "__main__":
  if not wait_for_server():
    sys.exit(3)
  file_ops.create_directory(environment.LOG_PATH)
  file_ops.delete_directory_contents(environment.LOG_PATH)
  log.set_default_file_handler(
      logger,
      PROJECT_ROOT_PATH + constants.path.LOGS_DIR + constants.path.TEST_RUNNER
  )
  logger.setLevel(environment.LOGGING_LEVEL)
  sys.exit(pytest.main())
Python
0
3bb65466d40c1b59faebe0db40eced260ff60010
Create SampleFunction.py
src/Python/ImplicitFunctions/SampleFunction.py
src/Python/ImplicitFunctions/SampleFunction.py
#!/usr/bin/env python

import vtk


def main():
    value = 2.0

    colors = vtk.vtkNamedColors()

    implicitFunction = vtk.vtkSuperquadric()
    implicitFunction.SetPhiRoundness(2.5)
    implicitFunction.SetThetaRoundness(.5)

    # Sample the function.
    sample = vtk.vtkSampleFunction()
    sample.SetSampleDimensions(50, 50, 50)
    sample.SetImplicitFunction(implicitFunction)
    xmin, xmax, ymin, ymax, zmin, zmax = -value, value, -value, value, -value, value
    sample.SetModelBounds(xmin, xmax, ymin, ymax, zmin, zmax)

    # Create the 0 isosurface.
    contours = vtk.vtkContourFilter()
    contours.SetInputConnection(sample.GetOutputPort())
    contours.GenerateValues(1, 2.0, 2.0)

    # Map the contours to graphical primitives.
    contourMapper = vtk.vtkPolyDataMapper()
    contourMapper.SetInputConnection(contours.GetOutputPort())
    contourMapper.SetScalarRange(0.0, 1.2)

    # Create an actor for the contours.
    contourActor = vtk.vtkActor()
    contourActor.SetMapper(contourMapper)

    # Create a box around the function to indicate the sampling volume.
    # Create outline.
    outline = vtk.vtkOutlineFilter()
    outline.SetInputConnection(sample.GetOutputPort())

    # Map it to graphics primitives.
    outlineMapper = vtk.vtkPolyDataMapper()
    outlineMapper.SetInputConnection(outline.GetOutputPort())

    # Create an actor.
    outlineActor = vtk.vtkActor()
    outlineActor.SetMapper(outlineMapper)
    outlineActor.GetProperty().SetColor(0, 0, 0)

    # Visualize.
    renderer = vtk.vtkRenderer()
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.AddRenderer(renderer)
    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(renderWindow)

    renderer.AddActor(contourActor)
    renderer.AddActor(outlineActor)
    renderer.SetBackground(colors.GetColor3d("Tan"))

    # Enable user interface interactor
    renderWindow.Render()
    interactor.Start()


if __name__ == '__main__':
    main()
Python
0.000001
95a890b988435fb019083026572ddb7042dc1379
Create generate.py
PrettyBits/generate.py
PrettyBits/generate.py
# coding=UTF-8
#
# generate a list of all 8 bit binary strings,
# sort them by how "pretty" they will be in
# Core Rope Memory
#
# mmcgaley@gmail.com
# 2015.04.19
#

WORDLEN = 7

import math

def addLeadingZeros(binaryString):
    return ((WORDLEN - len(binaryString)) * "0") + binaryString

def maxZeros(binaryString):
    zeroCount = 0
    maxZeroCount = 0
    for bit in list(binaryString):
        if bit == "0":
            zeroCount += 1
        else:
            if (zeroCount > maxZeroCount):
                maxZeroCount = zeroCount
            zeroCount = 0
    if (zeroCount > maxZeroCount):
        maxZeroCount = zeroCount
    return maxZeroCount

def totalZeros(binaryString):
    return len([x for x in binaryString if x == '0'])

def zeroBalance(binaryString):
    zeroCount = 0
    symmetricalCentre = int(math.ceil(len(binaryString)/2.0))
    firstHalf = binaryString[:len(binaryString)/2]
    secondHalf = binaryString[symmetricalCentre:]
    return abs(totalZeros(firstHalf) - totalZeros(secondHalf)) # absolute value

def charSortKey(tableEntry):
    (c, _) = tableEntry
    if c.islower():
        return 0 + ord(c)
    elif c.isupper():
        return 1000 + ord(c)
    elif c.isspace():
        return 2000 + ord(c)
    elif c.isdigit():
        return 3000 + ord(c)
    else:
        # Cf. https://docs.python.org/2/library/stdtypes.html#str.decode
        u = c.decode('utf-8')
        return 4000 + ord(u)

## main :)
binaryStringsList = []
#for b in range(1, 256): starting at 1 because 0000000 is a special character
for b in range(1, pow(2, WORDLEN)):
    binaryStringsList.append(addLeadingZeros(bin(b)[2:])) # strip "0b" from beginning

binaryStringsList.sort(key=totalZeros)
binaryStringsList.sort(key=maxZeros)
binaryStringsList.sort(key=zeroBalance)

# character ordering based on frequency_table.py in this directory
characterList = ['e', 't', 'a', 'o', 'n', 'i', 's', 'r', 'h', 'l', 'd', 'c',
                 'u', 'm', 'f', 'p', 'g', 'y', 'w', 'b', 'v', 'k', ' ', 'T',
                 'S', 'A', 'M', 'C', 'I', 'N', 'B', 'R', 'P', 'E', 'D', 'H',
                 'x', 'W', 'L', 'O', 'F', 'Y', 'G', 'J', 'z', 'j', 'U', 'q',
                 'K', 'V', 'Q', 'X', 'Z', 'á', 'é', 'í', 'ó', 'ú', 'Á', 'É',
                 'Í', 'Ó', 'Ú', ',', '.', '0', '1', '5', '2', '"', '9', '-',
                 "'", '4', '3', '8', '6', '7', ':', ')', '(', '$', ';', '*',
                 '?', '/', '&', '!', '%', '+', '>', '<', '=', '#', '@']

characterBinaryTable = zip(characterList, binaryStringsList)
characterBinaryTable.sort(key=charSortKey)

for mapping in characterBinaryTable:
    print "\'%s\' => \'%s\'," % mapping # for PHP array creation
Python
0.000002
524f47a4d4e0db5b76dfb7ebf9447b6199e48b6d
Add data utils tests.
tests/test_data_utils_filetree.py
tests/test_data_utils_filetree.py
from uuid import uuid1
import json

import pytest

from flask_jsondash.data_utils import filetree


def test_path_hierarchy(tmpdir):
    uid = uuid1()
    tmpfile = tmpdir.mkdir('{}'.format(uid))
    data = filetree.path_hierarchy(tmpfile.strpath)
    assert json.dumps(data)
    for key in ['type', 'name', 'path']:
        assert key in data


def test_path_hierarchy_invalid_path(tmpdir):
    with pytest.raises(OSError):
        filetree.path_hierarchy('invalid-path')
Python
0
8d6959a6d950e243ebd2c930ae176a3debf4cc9c
Add package-updates-metric.py
plugins/system/package-updates-metric.py
plugins/system/package-updates-metric.py
#!/usr/bin/env python
#coding=utf-8

import apt
import apt_pkg
import json
import os
import subprocess
import sys

"""
security-updates-metric is used to check available package updates
for Debian or Ubuntu system.

The program is inspired by /usr/lib/update-notifier/apt_check.py
"""

SYNAPTIC_PINFILE = "/var/lib/synaptic/preferences"
DISTRO = subprocess.check_output(["lsb_release", "-c", "-s"],
                                 universal_newlines=True).strip()
# The packages in BLACKLIST WON'T be checked.
BLACKLIST = ['linux-virtual', 'linux-image-virtual', 'linux-headers-virtual',]


def clean(cache, depcache):
    """ unmark (clean) all changes from the given depcache """
    # mvo: looping is too inefficient with the new auto-mark code
    # for pkg in cache.Packages:
    #     depcache.MarkKeep(pkg)
    depcache.init()


def saveDistUpgrade(cache, depcache):
    """ this function mimics an upgrade but will never remove anything """
    depcache.upgrade(True)
    if depcache.del_count > 0:
        clean(cache, depcache)
    depcache.upgrade()


def isSecurityUpgrade(ver):
    """ check if the given version is a security update (or masks one) """
    security_pockets = [("Ubuntu", "%s-security" % DISTRO),
                        ("gNewSense", "%s-security" % DISTRO),
                        ("Debian", "%s-updates" % DISTRO)]
    for (file, index) in ver.file_list:
        for origin, archive in security_pockets:
            if (file.archive == archive and file.origin == origin):
                return True
    return False


def get_update_packages():
    """
    Return a list of dict about package updates
    """
    pkgs = []

    apt_pkg.init()
    # force apt to build its caches in memory for now to make sure
    # that there is no race when the pkgcache file gets re-generated
    apt_pkg.config.set("Dir::Cache::pkgcache", "")

    try:
        cache = apt_pkg.Cache(apt.progress.base.OpProgress())
    except SystemError as e:
        sys.stderr.write("Error: Opening the cache (%s)" % e)
        sys.exit(-1)

    depcache = apt_pkg.DepCache(cache)
    # read the pin files
    depcache.read_pinfile()
    # read the synaptic pins too
    if os.path.exists(SYNAPTIC_PINFILE):
        depcache.read_pinfile(SYNAPTIC_PINFILE)
    # init the depcache
    depcache.init()

    try:
        saveDistUpgrade(cache, depcache)
    except SystemError as e:
        sys.stderr.write("Error: Marking the upgrade (%s)" % e)
        sys.exit(-1)

    for pkg in cache.packages:
        if not (depcache.marked_install(pkg) or depcache.marked_upgrade(pkg)):
            continue
        inst_ver = pkg.current_ver
        cand_ver = depcache.get_candidate_ver(pkg)
        if cand_ver == inst_ver:
            # Package does not have available update
            continue
        if not inst_ver or not cand_ver:
            # Some packages are not installed(i.e. linux-headers-3.2.0-77)
            # skip these updates
            continue
        if pkg.name in BLACKLIST:
            # skip the package in blacklist
            continue
        record = {"name": pkg.name,
                  "security": isSecurityUpgrade(cand_ver),
                  "current_version": inst_ver.ver_str,
                  "candidate_version": cand_ver.ver_str}
        pkgs.append(record)

    return pkgs


def package_check_metric():
    """
    Return output and exit status as Sensu required.

    OK       0: no updates
    WARNING  1: available normal updates
    CRITICAL 2: available security updates
    UNKNOWN  3: exceptions or errors
    """
    try:
        pkgs = get_update_packages()
        security_pkgs = filter(lambda p: p.get('security'), pkgs)
    except Exception as e:
        # Catch all unknown exceptions
        print str(e)
        sys.exit(3)

    if not pkgs:
        # No available update
        print json.dumps(pkgs)
        sys.exit(0)
    elif not security_pkgs:
        # Has available updates
        print json.dumps(pkgs)
        sys.exit(1)
    else:
        # Has available security updates
        print json.dumps(pkgs)
        sys.exit(2)


if __name__ == '__main__':
    package_check_metric()
Python
0.000001
37d0843c76b558d6d7a1892963a30e9a56d73f24
Document typical Stylesheet attributes
praw/models/stylesheet.py
praw/models/stylesheet.py
"""Provide the Stylesheet class.""" from .base import PRAWBase class Stylesheet(PRAWBase): """Represent a stylesheet. **Typical Attributes** This table describes attributes that typically belong to objects of this class. Since attributes are dynamically provided (see :ref:`determine-available-attributes-of-an-object`), there is not a guarantee that these attributes will always be present, nor is this list necessarily comprehensive. ======================= =================================================== Attribute Description ======================= =================================================== ``images`` A ``list`` of images used by the stylesheet. ``stylesheet`` The contents of the stylesheet, as CSS. ======================= =================================================== """
"""Provide the Stylesheet class.""" from .base import PRAWBase class Stylesheet(PRAWBase): """Represent a stylesheet."""
Python
0
4cde1f2fcc21ff83daabdb5221c462f44991c73f
Create remove-boxes.py
Python/remove-boxes.py
Python/remove-boxes.py
# Time:  O(n^3) ~ O(n^4)
# Space: O(n^3)

# Given several boxes with different colors represented by different positive numbers.
# You may experience several rounds to remove boxes until there is no box left.
# Each time you can choose some continuous boxes with the same color (composed of k boxes, k >= 1),
# remove them and get k*k points.
# Find the maximum points you can get.
#
# Example 1:
# Input:
#
# [1, 3, 2, 2, 2, 3, 4, 3, 1]
# Output:
# 23
# Explanation:
# [1, 3, 2, 2, 2, 3, 4, 3, 1]
# ----> [1, 3, 3, 4, 3, 1] (3*3=9 points)
# ----> [1, 3, 3, 3, 1] (1*1=1 points)
# ----> [1, 1] (3*3=9 points)
# ----> [] (2*2=4 points)
# Note: The number of boxes n would not exceed 100.


class Solution(object):
    def removeBoxes(self, boxes):
        """
        :type boxes: List[int]
        :rtype: int
        """
        def dfs(boxes, l, r, k, lookup):
            if l > r:
                return 0
            if lookup[l][r][k]:
                return lookup[l][r][k]

            ll, kk = l, k
            while l < r and boxes[l+1] == boxes[l]:
                l += 1
                k += 1
            result = dfs(boxes, l+1, r, 0, lookup) + (k+1) ** 2
            for i in xrange(l+1, r+1):
                if boxes[i] == boxes[l]:
                    result = max(result, dfs(boxes, l+1, i-1, 0, lookup) + dfs(boxes, i, r, k+1, lookup))
            lookup[ll][r][kk] = result
            return result

        lookup = [[[0]*len(boxes) for _ in xrange(len(boxes))] for _ in xrange(len(boxes))]
        return dfs(boxes, 0, len(boxes)-1, 0, lookup)
Python
0.000002
06fb2c0371b9cfb5980351d45665d41fdcfae3b5
Add MemoryMetric to Memory measurement
tools/perf/measurements/memory.py
tools/perf/measurements/memory.py
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from metrics import histogram
from metrics import memory
from telemetry.page import page_measurement

MEMORY_HISTOGRAMS = [
    {'name': 'V8.MemoryExternalFragmentationTotal', 'units': 'percent'},
    {'name': 'V8.MemoryHeapSampleTotalCommitted', 'units': 'kb'},
    {'name': 'V8.MemoryHeapSampleTotalUsed', 'units': 'kb'},
    {'name': 'Memory.RendererUsed', 'units': 'kb'}]

BROWSER_MEMORY_HISTOGRAMS = [
    {'name': 'Memory.BrowserUsed', 'units': 'kb'}]


class Memory(page_measurement.PageMeasurement):
  def __init__(self):
    super(Memory, self).__init__('stress_memory')
    self.histograms = (
        [histogram.HistogramMetric(
            h, histogram.RENDERER_HISTOGRAM)
         for h in MEMORY_HISTOGRAMS] +
        [histogram.HistogramMetric(
            h, histogram.BROWSER_HISTOGRAM)
         for h in BROWSER_MEMORY_HISTOGRAMS])
    self._memory_metric = None

  def DidStartBrowser(self, browser):
    self._memory_metric = memory.MemoryMetric(browser)
    self._memory_metric.Start()

  def DidNavigateToPage(self, page, tab):
    for h in self.histograms:
      h.Start(page, tab)

  def CustomizeBrowserOptions(self, options):
    options.AppendExtraBrowserArg('--enable-stats-collection-bindings')
    options.AppendExtraBrowserArg('--enable-memory-benchmarking')
    # For a hard-coded set of Google pages (such as GMail), we produce custom
    # memory histograms (V8.Something_gmail) instead of the generic histograms
    # (V8.Something), if we detect that a renderer is only rendering this page
    # and no other pages. For this test, we need to disable histogram
    # customizing, so that we get the same generic histograms produced for all
    # pages.
    options.AppendExtraBrowserArg('--disable-histogram-customizer')
    options.AppendExtraBrowserArg('--memory-metrics')

    # Old commandline flags used for reference builds.
    options.AppendExtraBrowserArg('--dom-automation')
    options.AppendExtraBrowserArg(
        '--reduce-security-for-dom-automation-tests')

  def CanRunForPage(self, page):
    return hasattr(page, 'stress_memory')

  def MeasurePage(self, page, tab, results):
    for h in self.histograms:
      h.GetValue(page, tab, results)

    if tab.browser.is_profiler_active('tcmalloc-heap'):
      # The tcmalloc_heap_profiler dumps files at regular
      # intervals (~20 secs).
      # This is a minor optimization to ensure it'll dump the last file when
      # the test completes.
      tab.ExecuteJavaScript("""
        if (chrome && chrome.memoryBenchmarking) {
          chrome.memoryBenchmarking.heapProfilerDump('final', 'renderer');
          chrome.memoryBenchmarking.heapProfilerDump('final', 'browser');
        }
      """)

  def DidRunTest(self, tab, results):
    self._memory_metric.Stop()
    self._memory_metric.AddResults(tab, results)
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from metrics import histogram
from telemetry.page import page_measurement

MEMORY_HISTOGRAMS = [
    {'name': 'V8.MemoryExternalFragmentationTotal', 'units': 'percent'},
    {'name': 'V8.MemoryHeapSampleTotalCommitted', 'units': 'kb'},
    {'name': 'V8.MemoryHeapSampleTotalUsed', 'units': 'kb'},
    {'name': 'Memory.RendererUsed', 'units': 'kb'}]

BROWSER_MEMORY_HISTOGRAMS = [
    {'name': 'Memory.BrowserUsed', 'units': 'kb'}]


class Memory(page_measurement.PageMeasurement):
  def __init__(self):
    super(Memory, self).__init__('stress_memory')
    self.histograms = (
        [histogram.HistogramMetric(
            h, histogram.RENDERER_HISTOGRAM)
         for h in MEMORY_HISTOGRAMS] +
        [histogram.HistogramMetric(
            h, histogram.BROWSER_HISTOGRAM)
         for h in BROWSER_MEMORY_HISTOGRAMS])

  def DidNavigateToPage(self, page, tab):
    for h in self.histograms:
      h.Start(page, tab)

  def CustomizeBrowserOptions(self, options):
    options.AppendExtraBrowserArg('--enable-stats-collection-bindings')
    options.AppendExtraBrowserArg('--enable-memory-benchmarking')
    # For a hard-coded set of Google pages (such as GMail), we produce custom
    # memory histograms (V8.Something_gmail) instead of the generic histograms
    # (V8.Something), if we detect that a renderer is only rendering this page
    # and no other pages. For this test, we need to disable histogram
    # customizing, so that we get the same generic histograms produced for all
    # pages.
    options.AppendExtraBrowserArg('--disable-histogram-customizer')
    options.AppendExtraBrowserArg('--memory-metrics')

    # Old commandline flags used for reference builds.
    options.AppendExtraBrowserArg('--dom-automation')
    options.AppendExtraBrowserArg(
        '--reduce-security-for-dom-automation-tests')

  def CanRunForPage(self, page):
    return hasattr(page, 'stress_memory')

  def MeasurePage(self, page, tab, results):
    for h in self.histograms:
      h.GetValue(page, tab, results)

    if tab.browser.is_profiler_active('tcmalloc-heap'):
      # The tcmalloc_heap_profiler dumps files at regular
      # intervals (~20 secs).
      # This is a minor optimization to ensure it'll dump the last file when
      # the test completes.
      tab.ExecuteJavaScript("""
        if (chrome && chrome.memoryBenchmarking) {
          chrome.memoryBenchmarking.heapProfilerDump('final', 'renderer');
          chrome.memoryBenchmarking.heapProfilerDump('final', 'browser');
        }
      """)
Python
0.000001
9a5bfb7f5bf114bb4bcf2dd4c88ddd8924a97ed9
add menu function to menu.py
menu.py
menu.py
#!/usr/bin/env python
# Text-based menu for use in pyWype.py


def menu():
    """
    Menu prompt for user to select program option
    """
    while True:
        print 'I'
        print 'II'
        print 'III'
        print 'IV'
        print 'V'
        choice = raw_input('Select an option (I, II, III, IV, V): ')
        if choice in ('I', 'II', 'III', 'IV', 'V'):
            return choice

menu()
Python
0.000003
fb61398b6a0cdd4f40d16729ab2ff0ca47730526
Add the main file
relay_api/__main__.py
relay_api/__main__.py
from relay_api.api.server import server
from relay_api.conf.config import relays
import relay_api.api.server as api


@server.route("/relay-api/relays", methods=["GET"])
def get_relays():
    return api.get_relays(relays)


@server.route("/relay-api/relays/<int:relay_id>", methods=["GET"])
def get_relay(relay_id):
    return api.get_relay(relays, relay_id)
Python
0.000004
72a0d635e497f0f4c6c58d84f7001ec04063ea90
Add mfnd.py that prints today's date
mfnd.py
mfnd.py
#!/usr/bin/env python3
"""
MFND - A simple to-do list application
"""

import datetime

today = datetime.date.today()
print(today.strftime('MFND - %B %d, %Y'))
Python
0
d3469fd3ab39eeee381457588931636bf0987ea9
Create impossible_bet.py
impossible_bet.py
impossible_bet.py
import random


def play_bet(participants=100, times=1000, checks=50):
    """Simulate the bet x times with x participants."""
    wins = 0
    losses = 0
    for time in range(times):
        boxes = list(range(1, participants + 1))
        random.shuffle(boxes)
        for participant in range(1, participants + 1):
            found = False
            count = 0
            to_open = participant
            while found == False and count < checks:
                if boxes[to_open - 1] == participant:
                    found = True
                else:
                    to_open = boxes[to_open - 1]
                count += 1
            if found == False:
                losses += 1
                break
            elif found == True and participant == participants:
                wins += 1
    return (wins, losses)


def results(wins, losses):
    total = wins + losses
    win_percentage = (wins / total) * 100
    lose_percentage = (losses / total) * 100
    return win_percentage, lose_percentage


if __name__ == '__main__':
    participants = int(input('participants: '))
    times = int(input('times: '))
    checks = int(input('checks: '))
    print(results(*play_bet(participants=participants, times=times, checks=checks)))
Python
0.000821
aeabe6bb89a359e644c5adcb4c6456fd3428f6de
Stop using intersphinx
doc/source/conf.py
doc/source/conf.py
# -*- coding: utf-8 -*-
#
# -- General configuration ----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.viewcode',
              'oslosphinx',
              ]

# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'python-ironicclient'
copyright = u'OpenStack Foundation'

# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['ironicclient.']

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme_path = ["."]
#html_theme = '_theme'
#html_static_path = ['_static']

# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    (
        'index',
        '%s.tex' % project,
        u'%s Documentation' % project,
        u'OpenStack LLC',
        'manual'
    ),
]
# -*- coding: utf-8 -*-
#
# -- General configuration ----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.intersphinx',
              'sphinx.ext.viewcode',
              'oslosphinx',
              ]

# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'python-ironicclient'
copyright = u'OpenStack Foundation'

# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['ironicclient.']

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme_path = ["."]
#html_theme = '_theme'
#html_static_path = ['_static']

# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    (
        'index',
        '%s.tex' % project,
        u'%s Documentation' % project,
        u'OpenStack LLC',
        'manual'
    ),
]

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
Python
0.000001
e8c2406cbcff96196d2404e9df167cc96f468779
add sources api
mcp/interface/sources.py
mcp/interface/sources.py
import json

from mcp import sources
from mcp.interface import common


class SourcesHandler(common.AuthorizedHandler):
    def forbidden(self):
        return True

    def do_get(self):
        return 200, json.dumps(list(iter(sources.source_db)))


class SourceHandler(common.AuthorizedHandler):
    def __init__(self, request, response, groups):
        common.AuthorizedHandler.__init__(self, request, response, groups)
        self.source = sources.get(self.groups[0])


class SourceInfoHandler(SourceHandler):
    def do_get(self):
        return 200, json.dumps({'name': self.source.source, 'url': self.source.url, 'revision': self.source.revision})


sources_base = '/sources/'
source_base = sources_base + '(' + sources.sources_allowed + ')'

routes = {sources_base: SourcesHandler, source_base: SourceInfoHandler}
Python
0
5dabba3941f870f3f365e186fdf852e834649595
Move config to docs
homeassistant/components/sensor/eliqonline.py
homeassistant/components/sensor/eliqonline.py
""" homeassistant.components.sensor.eliqonline ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Monitors home energy use for the eliq online service. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/sensor.eliqonline/ """ import logging from homeassistant.helpers.entity import Entity from homeassistant.const import (STATE_UNKNOWN, CONF_ACCESS_TOKEN, CONF_NAME) _LOGGER = logging.getLogger(__name__) REQUIREMENTS = ['eliqonline==1.0.11'] DEFAULT_NAME = "ELIQ Energy Usage" def setup_platform(hass, config, add_devices, discovery_info=None): """ Set up the Eliq sensor. """ import eliqonline access_token = config.get(CONF_ACCESS_TOKEN) name = config.get(CONF_NAME, DEFAULT_NAME) channel_id = config.get("channel_id") if access_token is None: _LOGGER.error( "Configuration Error: " "Please make sure you have configured your access token " "that can be aquired from https://my.eliq.se/user/settings/api") return False api = eliqonline.API(access_token) add_devices([EliqSensor(api, channel_id, name)]) class EliqSensor(Entity): """ Implements a Eliq sensor. """ def __init__(self, api, channel_id, name): self._name = name self._unit_of_measurement = "W" self._state = STATE_UNKNOWN self.api = api self.channel_id = channel_id self.update() @property def name(self): """ Returns the name. """ return self._name @property def unit_of_measurement(self): """ Unit of measurement of this entity, if any. """ return self._unit_of_measurement @property def state(self): """ Returns the state of the device. """ return self._state def update(self): """ Gets the latest data. """ response = self.api.get_data_now(channelid=self.channel_id) self._state = int(response.power)
""" homeassistant.components.sensor.eliqonline ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ monitors home energy use for the eliq online service api documentation: https://my.eliq.se/knowledge/sv-SE/49-eliq-online/299-eliq-online-api access to api access token: https://my.eliq.se/user/settings/api current energy use: https://my.eliq.se/api/datanow?accesstoken=<token> history: https://my.eliq.se/api/data?startdate=2015-12-14&intervaltype=6min&accesstoken=<token> """ import logging from homeassistant.helpers.entity import Entity from homeassistant.const import (STATE_UNKNOWN, CONF_ACCESS_TOKEN, CONF_NAME) _LOGGER = logging.getLogger(__name__) REQUIREMENTS = ['eliqonline==1.0.11'] DEFAULT_NAME = "ELIQ Energy Usage" def setup_platform(hass, config, add_devices, discovery_info=None): """ Set up the sensors """ import eliqonline access_token = config.get(CONF_ACCESS_TOKEN) name = config.get(CONF_NAME, DEFAULT_NAME) channel_id = config.get("channel_id") if access_token is None: _LOGGER.error( "Configuration Error: " "Please make sure you have configured your access token " "that can be aquired from https://my.eliq.se/user/settings/api") return False api = eliqonline.API(access_token) add_devices([EliqSensor(api, channel_id, name)]) class EliqSensor(Entity): """ Implements a Eliq sensor. """ def __init__(self, api, channel_id, name): self._name = name self._unit_of_measurement = "W" self._state = STATE_UNKNOWN self.api = api self.channel_id = channel_id self.update() @property def name(self): """ Returns the name. """ return self._name @property def unit_of_measurement(self): """ Unit of measurement of this entity, if any. """ return self._unit_of_measurement @property def state(self): """ Returns the state of the device. """ return self._state def update(self): """ Gets the latest data """ response = self.api.get_data_now(channelid=self.channel_id) self._state = int(response.power)
Python
0.000001
b2aa91648fe3ae915381e68cac95e5c3f6e5a182
add zhihu_login.py
spider/login/zhihu_login.py
spider/login/zhihu_login.py
# coding:utf-8
# Simulate account login
Python
0
c505927ae756fe1740e8603aadf23dae0ad12ff5
Create 01.CenturiesToMinutes.py
TechnologiesFundamentals/ProgrammingFundamentals/DataTypesAndVariables-Lab/01.CenturiesToMinutes.py
TechnologiesFundamentals/ProgrammingFundamentals/DataTypesAndVariables-Lab/01.CenturiesToMinutes.py
centuries = int(input())
years = centuries * 100
days = int(years * 365.2422)
hours = days * 24
minutes = hours * 60

print("%d centuries = %d years = %d days = %d hours = %d minutes"
      % (centuries, years, days, hours, minutes))
Python
0
dca52ab05af01b96610e5044c45bd7c0655c17ec
introduce an Exporter API
pynodegl-utils/pynodegl_utils/export.py
pynodegl-utils/pynodegl_utils/export.py
import os
import platform
import subprocess

import pynodegl as ngl

from PyQt5 import QtGui, QtCore


class _PipeThread(QtCore.QThread):

    def __init__(self, fd, unused_fd, w, h, fps):
        super(_PipeThread, self).__init__()
        self.fd = fd
        self.unused_fd = unused_fd
        self.w, self.h, self.fps = w, h, fps

    def run(self):
        try:
            ret = self.run_with_except()
        except:
            raise
        finally:
            # very important in order to prevent deadlock if one thread fails
            # one way or another
            os.close(self.fd)


class _ReaderThread(_PipeThread):

    def __init__(self, fd, unused_fd, w, h, fps, filename):
        super(_ReaderThread, self).__init__(fd, unused_fd, w, h, fps)
        self._filename = filename

    def run_with_except(self):
        cmd = ['ffmpeg', '-r', str(self.fps),
               '-nostats', '-nostdin',
               '-f', 'rawvideo',
               '-video_size', '%dx%d' % (self.w, self.h),
               '-pixel_format', 'rgba',
               '-i', 'pipe:%d' % self.fd,
               '-vf', 'vflip',
               '-y', self._filename]
        #print 'Executing: ' + ' '.join(cmd)

        # Closing the unused file descriptor of the pipe is mandatory between
        # the fork() and the exec() of ffmpeg in order to prevent deadlocks
        return subprocess.call(cmd, preexec_fn=lambda: os.close(self.unused_fd))


class Exporter(QtCore.QObject):

    progressed = QtCore.pyqtSignal(int)

    def export(self, scene, filename, w, h, duration, fps):
        fd_r, fd_w = os.pipe()

        from pynodegl import Pipe, Scale
        scene = Pipe(scene, fd_w, w, h)

        reader = _ReaderThread(fd_r, fd_w, w, h, fps, filename)
        reader.start()

        # Surface Format
        gl_format = QtGui.QSurfaceFormat()
        gl_format.setVersion(3, 3)
        gl_format.setProfile(QtGui.QSurfaceFormat.CoreProfile)
        gl_format.setDepthBufferSize(24);
        gl_format.setStencilBufferSize(8);
        gl_format.setAlphaBufferSize(8);

        # GL context
        glctx = QtGui.QOpenGLContext()
        glctx.setFormat(gl_format)
        assert glctx.create() == True
        assert glctx.isValid() == True

        # Offscreen Surface
        surface = QtGui.QOffscreenSurface()
        surface.setFormat(gl_format)
        surface.create()
        assert surface.isValid() == True

        glctx.makeCurrent(surface)

        # Framebuffer
        fbo = QtGui.QOpenGLFramebufferObject(w, h)
        fbo.setAttachment(QtGui.QOpenGLFramebufferObject.CombinedDepthStencil)
        assert fbo.isValid() == True
        fbo.bind()

        # node.gl context
        ngl_viewer = ngl.Viewer()
        ngl_viewer.set_scene(scene)
        if platform.system() == 'Linux':
            ngl_viewer.set_window(ngl.GLPLATFORM_GLX, ngl.GLAPI_OPENGL3)
        elif platform.system() == 'Darwin':
            ngl_viewer.set_window(ngl.GLPLATFORM_CGL, ngl.GLAPI_OPENGL3)
        ngl_viewer.set_viewport(0, 0, w, h)

        # Draw every frame
        nb_frame = int(fps * duration)
        for i in range(nb_frame):
            time = i / float(fps)
            ngl_viewer.draw(time)
            self.progressed.emit(i*100 / nb_frame)
            glctx.swapBuffers(surface)
        self.progressed.emit(100)

        os.close(fd_w)
        fbo.release()
        glctx.doneCurrent()

        reader.wait()


def test_export():
    import sys

    def _get_scene(duration):
        from examples import misc

        class DummyCfg: pass
        cfg = DummyCfg()
        cfg.duration = duration

        return misc.triangle(cfg)

    def print_progress(progress):
        sys.stdout.write('\r%d%%' % progress)
        if progress == 100:
            sys.stdout.write('\n')

    if len(sys.argv) != 2:
        print 'Usage: %s <outfile>' % sys.argv[0]
        sys.exit(0)

    filename = sys.argv[1]
    duration = 5
    scene = _get_scene(duration)

    app = QtGui.QGuiApplication(sys.argv)

    exporter = Exporter()
    exporter.progressed.connect(print_progress)
    exporter.export(scene, filename, 320, 240, duration, 60)


if __name__ == '__main__':
    test_export()
Python
0
ab9800183b3ab229782016aa3f88e6825467d01b
Add forgot username tests
api/radar_api/tests/test_forgot_username.py
api/radar_api/tests/test_forgot_username.py
def test_forgot_username(app):
    client = app.test_client()

    response = client.post('/forgot-username', data={
        'email': 'foo@example.org'
    })

    assert response.status_code == 200


def test_email_missing(app):
    client = app.test_client()

    response = client.post('/forgot-username', data={})

    assert response.status_code == 422


def test_user_not_found(app):
    client = app.test_client()

    response = client.post('/forgot-username', data={
        'email': '404@example.org'
    })

    assert response.status_code == 422
Python
0
9b9fb4df30a8183c4de9f157200c5ff225d11d67
Add the plot script
src/scripts/prepare_gnuplot.py
src/scripts/prepare_gnuplot.py
#!/usr/bin/python

import sys
import argparse
import csv
from string import Template

parser = argparse.ArgumentParser(description='Prepare gnuplot script from the supplied data files.')
parser.add_argument('files', nargs='+', help='The data files.')

MIN_Y_RANGE = 0.000001

GNUPLOT_SCRIPT_TEMPLATE = Template("""
reset
set terminal lua tikz latex
set output "plot.tex"
#set title "tau_m"
set style data lines
set key left top
set logscale y
#set tics axis
#shrink = 0.1
set xrange[0:100]
set yrange[${lower_y_range}:${upper_y_range}]
#set xtics shrink/2
#set ytics shrink/2
#set size square
set xlabel "\\\\% of queries"
set ylabel "time in seconds"

plot ${plot_cmd}
${data}
pause -1
""")

if __name__ == "__main__":
    args = parser.parse_args()

    plot_cmd = ""
    data_string = ""
    min_data = sys.float_info.max
    max_data = sys.float_info.min
    for data_file in args.files:
        plot_cmd += """'-' title "%s", """ % data_file

        with open(data_file) as fd:
            reader = csv.reader(fd, delimiter=',', quotechar='"')
            header = reader.next()
            # print header
            # time_index = header.index('time')
            time_index = 0
            for h in header:
                if h.find('time') >= 0:
                    break
                time_index += 1

            data = []
            for line in reader:
                data.append(float(line[time_index])/1000)
            data.sort()

            if data[0] < min_data:
                min_data = data[0]
            if data[len(data) - 1] > max_data:
                max_data = data[len(data) - 1]

            step = 100.0/len(data)
            x = step
            for d in data:
                data_string += "%f\t%f\n" % (x, d)
                x += step
            data_string += "e\n"
        pass

    min_data = max(min_data, MIN_Y_RANGE)

    print GNUPLOT_SCRIPT_TEMPLATE.substitute(plot_cmd=plot_cmd,
                                             data=data_string,
                                             lower_y_range=min_data,
                                             upper_y_range=max_data)
    pass
Python
0.999689
6f1729a96e1fe0f4e8d00813b6c2829519cdc15f
Handle the case where we encounter a snapshot correctly.
nova/tests/test_image_utils.py
nova/tests/test_image_utils.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova import test
from nova import utils
from nova.virt import images


class ImageUtilsTestCase(test.TestCase):
    def test_qemu_info(self):
        path = "disk.config"
        example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
cluster_size: 65536
disk size: 96K
blah BLAH: bb
"""
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('env', 'LC_ALL=C', 'LANG=C',
                      'qemu-img', 'info', path).AndReturn((example_output, ''))
        self.mox.ReplayAll()
        image_info = images.qemu_img_info(path)
        self.assertEquals('disk.config', image_info['image'])
        self.assertEquals('raw', image_info['file format'])
        self.assertEquals('64M (67108864 bytes)', image_info['virtual size'])
        self.assertEquals('96K', image_info['disk size'])
        self.assertEquals('bb', image_info['blah blah'])
        self.assertEquals("65536", image_info['cluster_size'])

    def test_qemu_info_snap(self):
        path = "disk.config"
        example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
cluster_size: 65536
disk size: 96K
Snapshot list:
ID TAG VM SIZE DATE VM CLOCK
1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
"""
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('env', 'LC_ALL=C', 'LANG=C',
                      'qemu-img', 'info', path).AndReturn((example_output, ''))
        self.mox.ReplayAll()
        image_info = images.qemu_img_info(path)
        self.assertEquals('disk.config', image_info['image'])
        self.assertEquals('raw', image_info['file format'])
        self.assertEquals('64M (67108864 bytes)', image_info['virtual size'])
        self.assertEquals('96K', image_info['disk size'])
        self.assertEquals("65536", image_info['cluster_size'])
        # This would be triggered if the split encountered this section
        self.assertNotIn('snapshot list', image_info)
        bad_cap = '1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10'
        self.assertNotIn(bad_cap, image_info)
Python
0.000006
d08f9cd114329a3ea66f84421b5abbfcf73c1f69
Add timeout test
odo/backends/tests/test_url.py
odo/backends/tests/test_url.py
from __future__ import print_function

import pytest

from functools import partial
import codecs
import os

from odo import odo, resource, URL, discover, CSV, TextFile, convert
from odo.backends.url import sample
from odo.temp import _Temp, Temp
from odo.utils import tmpfile, raises

import datashape

try:
    from urllib2 import urlopen
    from urllib2 import HTTPError, URLError
except ImportError:
    from urllib.request import urlopen
    from urllib.error import HTTPError, URLError


pytestmark = pytest.mark.skipif(raises(URLError,
                                       partial(urlopen, "http://google.com")),
                                reason='unable to connect to google.com')


iris_url = ('https://raw.githubusercontent.com/'
            'blaze/blaze/master/blaze/examples/data/iris.csv')
ftp_url = "ftp://athena-dist.mit.edu/pub/XNeXT/README.txt"


def test_url_resource():
    csv = resource(iris_url)
    assert isinstance(csv, URL(CSV))


def test_sample_different_line_counts():
    with sample(resource(iris_url), lines=10) as fn:
        with open(fn, 'r') as f:
            assert len(list(f)) == 10

    with sample(resource(iris_url), lines=5) as fn:
        with open(fn, 'r') as f:
            assert len(list(f)) == 5


def test_sample_different_encoding():
    encoding = 'latin-1'
    lines = 10
    with sample(resource(iris_url), lines=lines, encoding=encoding) as fn:
        with codecs.open(fn, 'r', encoding=encoding) as f:
            assert len(list(f)) == lines


@pytest.mark.xfail(raises=HTTPError)
def test_failed_url():
    failed_url = "http://foo.com/myfile.csv"
    with tmpfile('.csv') as fn:
        odo(failed_url, fn)


def test_url_discover():
    csv = resource(iris_url)
    assert isinstance(discover(csv), datashape.DataShape)


def test_url_to_local_csv():
    with tmpfile('.csv') as fn:
        csv = odo(iris_url, fn)
        path = os.path.abspath(csv.path)
        assert os.path.exists(path)


def test_url_txt_resource():
    txt = resource(ftp_url)
    assert isinstance(txt, URL(TextFile))


@pytest.mark.xfail(
    raises=URLError,
    reason='MIT Athena FTP is down as of October 23, 2015'
)
def test_ftp_to_local_txt():
    with tmpfile('.txt') as fn:
        txt = odo(ftp_url, fn, timeout=5)
        path = os.path.abspath(txt.path)
        assert os.path.exists(path)


def test_convert():
    url_csv = resource(iris_url)
    t_csv = convert(Temp(CSV), url_csv)
    assert discover(url_csv) == discover(t_csv)
    assert isinstance(t_csv, _Temp)


@pytest.mark.skipif(os.environ.get('HDFS_TEST_HOST') is None,
                    reason='No HDFS_TEST_HOST envar defined')
def test_url_to_hdfs():
    from .test_hdfs import tmpfile_hdfs, hdfs, HDFS

    with tmpfile_hdfs() as target:
        # build temp csv for assertion check
        url_csv = resource(iris_url)
        csv = convert(Temp(CSV), url_csv)

        # test against url
        scsv = HDFS(CSV)(target, hdfs=hdfs)
        odo(iris_url, scsv)
        assert discover(scsv) == discover(csv)
from __future__ import print_function

import pytest

from functools import partial
import codecs
import os

from odo import odo, resource, URL, discover, CSV, TextFile, convert
from odo.backends.url import sample
from odo.temp import _Temp, Temp
from odo.utils import tmpfile, raises

import datashape

try:
    from urllib2 import urlopen
    from urllib2 import HTTPError, URLError
except ImportError:
    from urllib.request import urlopen
    from urllib.error import HTTPError, URLError


pytestmark = pytest.mark.skipif(raises(URLError,
                                       partial(urlopen, "http://google.com")),
                                reason='unable to connect to google.com')


iris_url = ('https://raw.githubusercontent.com/'
            'blaze/blaze/master/blaze/examples/data/iris.csv')
ftp_url = "ftp://athena-dist.mit.edu/pub/XNeXT/README.txt"


def test_url_resource():
    csv = resource(iris_url)
    assert isinstance(csv, URL(CSV))


def test_sample_different_line_counts():
    with sample(resource(iris_url), lines=10) as fn:
        with open(fn, 'r') as f:
            assert len(list(f)) == 10

    with sample(resource(iris_url), lines=5) as fn:
        with open(fn, 'r') as f:
            assert len(list(f)) == 5


def test_sample_different_encoding():
    encoding = 'latin-1'
    lines = 10
    with sample(resource(iris_url), lines=lines, encoding=encoding) as fn:
        with codecs.open(fn, 'r', encoding=encoding) as f:
            assert len(list(f)) == lines


@pytest.mark.xfail(raises=HTTPError)
def test_failed_url():
    failed_url = "http://foo.com/myfile.csv"
    with tmpfile('.csv') as fn:
        odo(failed_url, fn)


def test_url_discover():
    csv = resource(iris_url)
    assert isinstance(discover(csv), datashape.DataShape)


def test_url_to_local_csv():
    with tmpfile('.csv') as fn:
        csv = odo(iris_url, fn)
        path = os.path.abspath(csv.path)
        assert os.path.exists(path)


def test_url_txt_resource():
    txt = resource(ftp_url)
    assert isinstance(txt, URL(TextFile))


@pytest.mark.xfail(
    raises=URLError,
    reason='MIT Athena FTP is down as of October 23, 2015'
)
def test_ftp_to_local_txt():
    with tmpfile('.txt') as fn:
        txt = odo(ftp_url, fn)
        path = os.path.abspath(txt.path)
        assert os.path.exists(path)


def test_convert():
    url_csv = resource(iris_url)
    t_csv = convert(Temp(CSV), url_csv)
    assert discover(url_csv) == discover(t_csv)
    assert isinstance(t_csv, _Temp)


@pytest.mark.skipif(os.environ.get('HDFS_TEST_HOST') is None,
                    reason='No HDFS_TEST_HOST envar defined')
def test_url_to_hdfs():
    from .test_hdfs import tmpfile_hdfs, hdfs, HDFS

    with tmpfile_hdfs() as target:
        # build temp csv for assertion check
        url_csv = resource(iris_url)
        csv = convert(Temp(CSV), url_csv)

        # test against url
        scsv = HDFS(CSV)(target, hdfs=hdfs)
        odo(iris_url, scsv)
        assert discover(scsv) == discover(csv)
Python
0.000004
27a3ca8d746890c7404845d18b8031763ec6b6a7
add netcat-nonblock.py
python/netcat-nonblock.py
python/netcat-nonblock.py
#!/usr/bin/python

import errno
import fcntl
import os
import select
import socket
import sys


def setNonBlocking(fd):
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)


def nonBlockingWrite(fd, data):
    try:
        nw = os.write(fd, data)
        return nw
    except OSError as e:
        if e.errno == errno.EWOULDBLOCK:
            return -1
        raise  # don't silently swallow unexpected errors


def relay(sock):
    socketEvents = select.POLLIN
    poll = select.poll()
    poll.register(sock, socketEvents)
    poll.register(sys.stdin, select.POLLIN)
    setNonBlocking(sock)
    # setNonBlocking(sys.stdin)
    # setNonBlocking(sys.stdout)

    done = False
    stdoutOutputBuffer = ''
    socketOutputBuffer = ''
    while not done:
        events = poll.poll(10000)  # 10 seconds
        for fileno, event in events:
            if event & select.POLLIN:
                if fileno == sock.fileno():
                    data = sock.recv(8192)
                    if data:
                        nw = sys.stdout.write(data)  # stdout does support non-blocking write, though
                    else:
                        done = True
                else:
                    assert fileno == sys.stdin.fileno()
                    data = os.read(fileno, 8192)
                    if data:
                        assert len(socketOutputBuffer) == 0
                        nw = nonBlockingWrite(sock.fileno(), data)
                        if nw < len(data):
                            if nw < 0:
                                nw = 0
                            socketOutputBuffer = data[nw:]
                            socketEvents |= select.POLLOUT
                            poll.register(sock, socketEvents)
                            poll.unregister(sys.stdin)
                    else:
                        sock.shutdown(socket.SHUT_WR)
                        poll.unregister(sys.stdin)
            if event & select.POLLOUT:
                if fileno == sock.fileno():
                    assert len(socketOutputBuffer) > 0
                    # flush the buffered data, not the last chunk read from stdin
                    nw = nonBlockingWrite(sock.fileno(), socketOutputBuffer)
                    if nw < len(socketOutputBuffer):
                        assert nw > 0
                        socketOutputBuffer = socketOutputBuffer[nw:]
                    else:
                        socketOutputBuffer = ''
                        socketEvents &= ~select.POLLOUT
                        poll.register(sock, socketEvents)
                        poll.register(sys.stdin, select.POLLIN)


def main(argv):
    if len(argv) < 3:
        binary = argv[0]
        print "Usage:\n %s -l port\n %s host port" % (binary, binary)
        return
    port = int(argv[2])
    if argv[1] == "-l":
        # server
        server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server_socket.bind(('', port))
        server_socket.listen(5)
        (client_socket, client_address) = server_socket.accept()
        server_socket.close()
        relay(client_socket)
    else:
        # client
        sock = socket.create_connection((argv[1], port))
        relay(sock)


if __name__ == "__main__":
    main(sys.argv)
Python
0.000001
34fd215d73d87c017cdae299aebd6484e6541991
Revert 176254
> Android: upgrade sandbox_linux_unitests to a stable test
>
>
> BUG=166704
> NOTRY=true
>
> Review URL: https://chromiumcodereview.appspot.com/11783106
build/android/pylib/gtest/gtest_config.py
build/android/pylib/gtest/gtest_config.py
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Configuration file for android gtest suites."""

# Add new suites here before upgrading them to the stable list below.
EXPERIMENTAL_TEST_SUITES = [
    'sandbox_linux_unittests',
]

# Do not modify this list without approval of an android owner.
# This list determines which suites are run by default, both for local
# testing and on android trybots running on commit-queue.
STABLE_TEST_SUITES = [
    'base_unittests',
    'cc_unittests',
    'content_unittests',
    'gpu_unittests',
    'ipc_tests',
    'media_unittests',
    'net_unittests',
    'sql_unittests',
    'sync_unit_tests',
    'ui_unittests',
    'unit_tests',
    'webkit_compositor_bindings_unittests',
    'android_webview_unittests',
]
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Configuration file for android gtest suites."""

# Add new suites here before upgrading them to the stable list below.
EXPERIMENTAL_TEST_SUITES = [
]

# Do not modify this list without approval of an android owner.
# This list determines which suites are run by default, both for local
# testing and on android trybots running on commit-queue.
STABLE_TEST_SUITES = [
    'base_unittests',
    'cc_unittests',
    'content_unittests',
    'gpu_unittests',
    'ipc_tests',
    'media_unittests',
    'net_unittests',
    'sandbox_linux_unittests',
    'sql_unittests',
    'sync_unit_tests',
    'ui_unittests',
    'unit_tests',
    'webkit_compositor_bindings_unittests',
    'android_webview_unittests',
]
Python
0
4783f68cb2fc3eb25b611b4662d1ca3a2fddd7b3
Fix issue #51: added throttled_ftpd.py script in the demo directory.
demo/throttled_ftpd.py
demo/throttled_ftpd.py
#!/usr/bin/env python
# throttled_ftpd.py

"""ftpd supporting bandwidth throttling capabilities for data transfer.
"""

import os
import time
import asyncore

from pyftpdlib import ftpserver


class ThrottledDTPHandler(ftpserver.DTPHandler):
    """A DTPHandler which wraps sending and receiving in a data counter
    and sleep loop so that you burst to no more than x Kb/sec average.
    """

    # maximum number of bytes to transmit in a second (0 == no limit)
    read_limit = 0
    write_limit = 0

    # smaller the buffers, the less bursty and smoother the throughput
    ac_in_buffer_size = 2048
    ac_out_buffer_size = 2048

    def __init__(self, sock_obj, cmd_channel):
        ftpserver.DTPHandler.__init__(self, sock_obj, cmd_channel)
        self.timenext = 0
        self.datacount = 0
        self.sleep = None

    # --- overridden asyncore methods

    def readable(self):
        return self.receive and not self.sleeping()

    def writable(self):
        return (self.producer_fifo or (not self.connected)) and not \
            self.sleeping()

    def recv(self, buffer_size):
        chunk = asyncore.dispatcher.recv(self, buffer_size)
        if self.read_limit:
            self.throttle_bandwidth(len(chunk), self.read_limit)
        return chunk

    def send(self, data):
        num_sent = asyncore.dispatcher.send(self, data)
        if self.write_limit:
            self.throttle_bandwidth(num_sent, self.write_limit)
        return num_sent

    # --- new methods

    def sleeping(self):
        """Return True if the channel is temporarily blocked."""
        if self.sleep:
            if time.time() >= self.sleep:
                self.sleep = None
            else:
                return True
        return False

    def throttle_bandwidth(self, len_chunk, max_speed):
        """A method which counts data transmitted so that you burst to
        no more than x Kb/sec average."""
        self.datacount += len_chunk
        if self.datacount >= max_speed:
            self.datacount = 0
            now = time.time()
            sleepfor = self.timenext - now
            if sleepfor > 0:
                # we've passed bandwidth limits
                self.sleep = now + (sleepfor * 2)
            self.timenext = now + 1


if __name__ == '__main__':
    authorizer = ftpserver.DummyAuthorizer()
    authorizer.add_user('user', '12345', os.getcwd(), perm=('r', 'w'))

    # use the modified DTPHandler class; set a speed
    # limit for both sending and receiving
    dtp_handler = ThrottledDTPHandler
    dtp_handler.read_limit = 30720   # 30 Kb/sec (30 * 1024)
    dtp_handler.write_limit = 30720  # 30 Kb/sec (30 * 1024)

    ftp_handler = ftpserver.FTPHandler
    ftp_handler.authorizer = authorizer

    # have the ftp handler use the different dtp handler
    ftp_handler.dtp_handler = dtp_handler

    ftpd = ftpserver.FTPServer(('127.0.0.1', 21), ftp_handler)
    ftpd.serve_forever()
Python
0
732fd24d06f49570c24016b7adfb3ad511e2e6af
Add test for ValidationResultIdentifier.to_tuple()
tests/data_context/test_data_context_resource_identifiers.py
tests/data_context/test_data_context_resource_identifiers.py
from great_expectations.data_context.types.resource_identifiers import (
    ValidationResultIdentifier
)


def test_ValidationResultIdentifier_to_tuple(expectation_suite_identifier):
    validation_result_identifier = ValidationResultIdentifier(
        expectation_suite_identifier,
        "my_run_id",
        "my_batch_identifier"
    )
    assert validation_result_identifier.to_tuple() == (
        "my", "expectation", "suite", "name",
        "my_run_id",
        "my_batch_identifier"
    )
    assert validation_result_identifier.to_fixed_length_tuple() == (
        "my.expectation.suite.name",
        "my_run_id",
        "my_batch_identifier"
    )

    validation_result_identifier_no_run_id = ValidationResultIdentifier(
        expectation_suite_identifier,
        None,
        "my_batch_identifier"
    )
    assert validation_result_identifier_no_run_id.to_tuple() == (
        "my", "expectation", "suite", "name",
        "__none__",
        "my_batch_identifier"
    )
    assert validation_result_identifier_no_run_id.to_fixed_length_tuple() == (
        "my.expectation.suite.name",
        "__none__",
        "my_batch_identifier"
    )

    validation_result_identifier_no_batch_identifier = ValidationResultIdentifier(
        expectation_suite_identifier,
        "my_run_id",
        None
    )
    assert validation_result_identifier_no_batch_identifier.to_tuple() == (
        "my", "expectation", "suite", "name",
        "my_run_id",
        "__none__"
    )
    assert validation_result_identifier_no_batch_identifier.to_fixed_length_tuple() == (
        "my.expectation.suite.name",
        "my_run_id",
        "__none__"
    )

    validation_result_identifier_no_run_id_no_batch_identifier = ValidationResultIdentifier(
        expectation_suite_identifier,
        None,
        None
    )
    assert validation_result_identifier_no_run_id_no_batch_identifier.to_tuple() == (
        "my", "expectation", "suite", "name",
        "__none__",
        "__none__"
    )
    assert validation_result_identifier_no_run_id_no_batch_identifier.to_fixed_length_tuple() == (
        "my.expectation.suite.name",
        "__none__",
        "__none__"
    )
Python
0.000004
b3ef51e93b090451718ed4c1240b63b8e99cd085
rename example
miepython/02_glass.py
miepython/02_glass.py
#!/usr/bin/env python3

"""
Plot the scattering efficiency as a function of wavelength
for 4 micron glass spheres
"""

import numpy as np
import matplotlib.pyplot as plt
import miepython

num = 100
radius = 2                        # in microns
lam = np.linspace(0.2, 1.2, num)  # also in microns
x = 2*np.pi*radius/lam

# from https://refractiveindex.info/?shelf=glass&book=BK7&page=SCHOTT
m = np.sqrt(1 +
            1.03961212/(1 - 0.00600069867/lam**2) +
            0.231792344/(1 - 0.0200179144/lam**2) +
            1.01046945/(1 - 103.560653/lam**2))

qqsca = np.zeros(num)
for i in range(num):
    qext, qsca, qabs, qback, g = miepython.mie(m[i], x[i])
    qqsca[i] = qsca

plt.plot(lam*1000, qqsca)
plt.title("BK7 glass spheres 4 micron diameter")
plt.xlabel("Wavelength (nm)")
plt.ylabel("Scattering Efficiency (-)")
plt.show()
Python
0.000515
d1fe5a06f5e082fd8196f510e2eba7daa3468ef8
Add duplicate_nodes.py file
duplicate_nodes.py
duplicate_nodes.py
from shutil import copytree, ignore_patterns
import glob
import os
import sys

if __name__ == '__main__':
    data_dir = './parsedData/'
    use_symlink = True

    # collect the original node directories (names starting with '1')
    orig_nodes = os.listdir(data_dir)
    orig_nodes = [os.path.basename(i) for i in glob.glob(os.path.join(data_dir, '1*'))]

    # create 100 duplicates of every node, either as symlinks or full copies
    for dup_cnt in range(100):
        for orig_node in orig_nodes:
            src = os.path.join(data_dir, orig_node)
            dst = os.path.join(data_dir, 'd%s_%04d' % (orig_node, dup_cnt))
            if use_symlink:
                # make the link relative so the data dir stays relocatable
                src = os.path.relpath(src, data_dir)
                os.symlink(src, dst)
            else:
                copytree(src, dst)
Python
0.000003
77f812f76966b90c27131fd65968f548afcdcace
Add loader for basic csv layers without geoms
svir/dialogs/load_basic_csv_as_layer_dialog.py
svir/dialogs/load_basic_csv_as_layer_dialog.py
# -*- coding: utf-8 -*-
# /***************************************************************************
# Irmt
#                                 A QGIS plugin
# OpenQuake Integrated Risk Modelling Toolkit
#                              -------------------
#        begin                : 2013-10-24
#        copyright            : (C) 2018 by GEM Foundation
#        email                : devops@openquake.org
# ***************************************************************************/
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.

import os
import tempfile
from svir.utilities.utils import import_layer_from_csv
from svir.utilities.shared import OQ_BASIC_CSV_TO_LAYER_TYPES
from svir.dialogs.load_output_as_layer_dialog import LoadOutputAsLayerDialog


class LoadBasicCsvAsLayerDialog(LoadOutputAsLayerDialog):
    """
    Modal dialog to load as layer a basic csv with no geometries, to be
    browsed through its attribute table
    """

    def __init__(self, iface, viewer_dock, session, hostname, calc_id,
                 output_type, path=None, mode=None):
        assert output_type in OQ_BASIC_CSV_TO_LAYER_TYPES, output_type
        LoadOutputAsLayerDialog.__init__(
            self, iface, viewer_dock, session, hostname, calc_id,
            output_type, path, mode)
        self.create_file_size_indicator()
        self.setWindowTitle('Load %s from CSV, as layer' % output_type)
        self.populate_out_dep_widgets()
        self.adjustSize()
        self.set_ok_button()

    def set_ok_button(self):
        self.ok_button.setEnabled(bool(self.path))

    def populate_out_dep_widgets(self):
        self.show_file_size()

    def load_from_csv(self):
        if self.mode == 'testing':
            dest_shp = tempfile.mkstemp(suffix='.shp')[1]
        else:
            dest_shp = None  # the destination file will be selected via GUI
        csv_path = self.path_le.text()
        # extract the name of the csv file and remove the extension
        layer_name = os.path.splitext(os.path.basename(csv_path))[0]
        self.layer = import_layer_from_csv(
            self, csv_path, layer_name, self.iface,
            save_as_shp=False, dest_shp=dest_shp,
            zoom_to_layer=False, has_geom=False)
Python
0
f2a359664bf69a6c8e883d460a49c986b511b80e
add file
eptools/gspread.py
eptools/gspread.py
""" Functions to access the data in google drive spreadsheets """ import pandas as pd from docstamp.gdrive import (get_spreadsheet, worksheet_to_dict) def get_ws_data(api_key_file, doc_key, ws_tab_idx, header=None, start_row=1): """ Return the content of the spreadsheet in the ws_tab_idx tab of the spreadsheet with doc_key as a pandas DataFrame. Parameters ---------- api_key_file: str Path to the Google API key json file. doc_key: str ws_tab_idx: int Index of the worksheet within the spreadsheet. header: List[str] List of values to assign to the header of the result. start_row: int Row index from where to start collecting the data. Returns ------- content: pandas.DataFrame """ spread = get_spreadsheet(api_key_file, doc_key) ws = spread.get_worksheet(ws_tab_idx) ws_dict = worksheet_to_dict(ws, header=header, start_row=start_row) return pd.DataFrame(ws_dict) def find_one_row(substr, df, col_name): """ Return one row from `df`. The returned row has in `col_name` column a value with a sub-string as `substr. Raise KeyError if no row is found. """ for name in df[col_name]: if substr.lower() in name.lower(): return df[df[col_name] == name] raise KeyError('Could not find {} in the ' 'pandas dataframe.'.format(substr))
Python
0
e8d05226f2a8cabf0f38bae6c2e218bd81efa6a1
Add a utility script for encoding packet traces
util/encode_packet_trace.py
util/encode_packet_trace.py
#!/usr/bin/env python

# Copyright (c) 2013 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
#
# This script is used to migrate ASCII packet traces to the protobuf
# format currently used in gem5. It assumes that protoc has been
# executed and already generated the Python package for the packet
# messages. This can be done manually using:
# protoc --python_out=. --proto_path=src/proto src/proto/packet.proto
#
# The ASCII trace format uses one line per request on the format cmd,
# addr, size, tick. For example:
# r,128,64,4000
# w,232123,64,500000
# This trace reads 64 bytes from decimal address 128 at tick 4000,
# then writes 64 bytes to address 232123 at tick 500000.
#
# This script can of course also be used as a template to convert
# other trace formats into the gem5 protobuf format

import struct
import sys

import packet_pb2


def EncodeVarint(out_file, value):
    """
    The encoding of the Varint32 is copied from
    google.protobuf.internal.encoder and is only repeated here to
    avoid depending on the internal functions in the library.
    """
    bits = value & 0x7f
    value >>= 7
    while value:
        out_file.write(struct.pack('<B', 0x80|bits))
        bits = value & 0x7f
        value >>= 7
    out_file.write(struct.pack('<B', bits))


def encodeMessage(out_file, message):
    """
    Encode a message with the length prepended as a 32-bit varint.
    """
    out = message.SerializeToString()
    EncodeVarint(out_file, len(out))
    out_file.write(out)


def main():
    if len(sys.argv) != 3:
        print "Usage: ", sys.argv[0], " <ASCII input> <protobuf output>"
        exit(-1)

    try:
        ascii_in = open(sys.argv[1], 'r')
    except IOError:
        print "Failed to open ", sys.argv[1], " for reading"
        exit(-1)

    try:
        proto_out = open(sys.argv[2], 'wb')
    except IOError:
        print "Failed to open ", sys.argv[2], " for writing"
        exit(-1)

    # Write the magic number in 4-byte Little Endian, similar to what
    # is done in src/proto/protoio.cc
    proto_out.write("gem5")

    # Add the packet header
    header = packet_pb2.PacketHeader()
    header.obj_id = "Converted ASCII trace " + sys.argv[1]

    # Assume the default tick rate
    header.tick_freq = 1000000000

    encodeMessage(proto_out, header)

    # For each line in the ASCII trace, create a packet message and
    # write it to the encoded output
    for line in ascii_in:
        cmd, addr, size, tick = line.split(',')
        packet = packet_pb2.Packet()
        packet.tick = long(tick)
        # ReadReq is 1 and WriteReq is 4 in src/mem/packet.hh Command enum
        packet.cmd = 1 if cmd == 'r' else 4
        packet.addr = long(addr)
        packet.size = int(size)
        encodeMessage(proto_out, packet)

    # We're done
    ascii_in.close()
    proto_out.close()

if __name__ == "__main__":
    main()
Python
0
04876b4bea96f983c722cb9bf7845c7cc3b0ecef
add oauth2 example
examples/oauth2.py
examples/oauth2.py
from imap_tools import MailBox

# Authenticate to account using OAuth 2.0 mechanism
with MailBox('imap.my.ru').xoauth2('user', 'token123', 'INBOX') as mailbox:
    for msg in mailbox.fetch():
        print(msg.date_str, msg.subject)
Python
0