commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
458cf526a4ebb72b4fad84e8cd2b665e0f093c1b | Add functional test for cluster check recover | senlin/tests/functional/test_cluster_health.py | senlin/tests/functional/test_cluster_health.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from senlin.tests.functional import api as test_api
from senlin.tests.functional import base
from senlin.tests.functional.utils import test_utils
class TestClusterHealth(base.SenlinFunctionalTest):
    """Functional tests for cluster health operations (check / recover)."""

    def setUp(self):
        super(TestClusterHealth, self).setUp()
        # Create profile
        self.profile = test_api.create_profile(
            self.client, test_utils.random_name('profile'),
            test_utils.spec_nova_server)

    def tearDown(self):
        # Delete profile
        test_api.delete_profile(self.client, self.profile['id'])
        super(TestClusterHealth, self).tearDown()

    def test_cluster_check_recover(self):
        """End to end: create a cluster, check its health, recover it twice
        (default operation, then explicit REBUILD), and delete it."""
        # Create cluster
        desired_capacity = 3
        min_size = 2
        max_size = 5
        cluster = test_api.create_cluster(self.client,
                                          test_utils.random_name('cluster'),
                                          self.profile['id'], desired_capacity,
                                          min_size, max_size)
        cluster = test_utils.wait_for_status(test_api.get_cluster, self.client,
                                             cluster['id'], 'ACTIVE')

        # Check cluster health status
        action_id = test_api.action_cluster(self.client, cluster['id'],
                                            'check')
        test_utils.wait_for_status(test_api.get_action, self.client,
                                   action_id, 'SUCCEEDED')
        cluster = test_api.get_cluster(self.client, cluster['id'])
        self.assertEqual('ACTIVE', cluster['status'])

        # Perform cluster recovering operation (default recovery first)
        action_id = test_api.action_cluster(self.client, cluster['id'],
                                            'recover')
        test_utils.wait_for_status(test_api.get_action, self.client,
                                   action_id, 'SUCCEEDED')
        # Recover again with an explicit REBUILD operation.
        action_id = test_api.action_cluster(self.client, cluster['id'],
                                            'recover',
                                            {'operation': 'REBUILD'})
        test_utils.wait_for_status(test_api.get_action, self.client,
                                   action_id, 'SUCCEEDED')

        # Delete cluster
        test_api.delete_cluster(self.client, cluster['id'])
        cluster = test_utils.wait_for_delete(test_api.get_cluster, self.client,
                                             cluster['id'])
| Python | 0.000009 | |
48c008b4ac08114e30f4bee7a208d5d3fb925296 | Add partial simple greedy algorithm (baseline). | problem1/steiner-simplegreedy.py | problem1/steiner-simplegreedy.py | import networkx as nx
from sys import argv
def main():
    """Simple greedy Steiner-tree baseline (partial implementation).

    Reads a GML graph, collects terminal vertices (nodes with attribute
    T == 1) and greedily grows an edge set connecting them by repeatedly
    picking the cheapest candidate edge (cost attribute 'c').
    Returns the (possibly still disconnected) terminal graph, or None when
    the candidate edges run out before the terminals are connected.
    """
    # G = nx.read_gml(argv[1])
    G = nx.read_gml("steiner-small.gml")
    T = []  # terminals
    for v, d in G.nodes_iter(data=True):
        if d['T'] == 1:
            T.append(v)
    U = T[:]  # Steiner tree vertices
    F = []    # Steiner tree edges
    D = []    # candidate edge set: edges incident to any terminal
    for u in T:
        u_incident = G.edges(u)
        for i in u_incident:
            D.append(i)
    UF = nx.Graph()
    UF.add_nodes_from(T)
    while not nx.is_connected(UF):
        if len(D) == 0:
            print("Not sufficiently connected")
            return None
        # Find the cheapest remaining candidate edge.
        min_f = float("inf")
        for f_i in D:
            f_cost = G.edge[f_i[0]][f_i[1]]['c']
            if f_cost < min_f:
                min_f = f_cost
                f = f_i
        UF_f = UF.copy()
        UF_f.add_edge(f[0], f[1])
        # NOTE(review): networkx has no `has_no_cycles`; this call raises
        # AttributeError. An acyclicity test such as `nx.is_forest(UF_f)`
        # is presumably intended -- confirm before use. The update steps
        # below are still commented out (baseline is incomplete).
        if nx.has_no_cycles(UF_f):
            pass
            #F.append(f)
            #U.append(f[0])
            #U.append(f[1])
            #D.append(f.incident)
            #D.remove(f)
    return UF
if __name__ == '__main__':
    # Build the (partial) Steiner tree and report its vertices and edges.
    UF = main()
    print("UF nodes:", UF.nodes())
    print("UF edges:", UF.edges())
| Python | 0.000001 | |
fca390e7dd0d806cd87fa3570ce23ad132d8c852 | add new example | examples/lineWithFocusChart.py | examples/lineWithFocusChart.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Examples for Python-nvd3 is a Python wrapper for NVD3 graph library.
NVD3 is an attempt to build re-usable charts and chart components
for d3.js without taking away the power that d3.js gives you.
Project location : https://github.com/areski/python-nvd3
"""
from nvd3 import lineWithFocusChart
import random
import datetime
import time
# Epoch milliseconds for 2012-06-01; the chart is rendered with date=True.
start_time = int(time.mktime(datetime.datetime(2012, 6, 1).timetuple()) * 1000)
nb_element = 100

#Open File for test
output_file = open('test_lineWithFocusChart.html', 'w')

#---------------------------------------
type = "lineWithFocusChart"
chart = lineWithFocusChart(name=type, color_category='category20b', date=True)
chart.set_containerheader("\n\n<h2>" + type + "</h2>\n\n")

# X axis: timestamps spaced 1e9 ms apart; Y: a jittered ramp plus multiples.
# NOTE: Python 2 style -- map() is relied on to return lists.
xdata = range(nb_element)
xdata = map(lambda x: start_time + x * 1000000000, xdata)
ydata = [i + random.randint(-10, 10) for i in range(nb_element)]
ydata2 = map(lambda x: x * 2, ydata)
ydata3 = map(lambda x: x * 3, ydata)
ydata4 = map(lambda x: x * 4, ydata)

# Shared tooltip formatting for every series.
extra_serie = {"tooltip": {"y_start": "There is ", "y_end": " calls"}}
#extra_serie = None
chart.add_serie(name="serie 1", y=ydata, x=xdata, extra=extra_serie)
chart.add_serie(name="serie 2", y=ydata2, x=xdata, extra=extra_serie)
chart.add_serie(name="serie 3", y=ydata3, x=xdata, extra=extra_serie)
chart.add_serie(name="serie 4", y=ydata4, x=xdata, extra=extra_serie)
chart.buildhtml()
output_file.write(chart.htmlcontent)

#close Html file
output_file.close()
| Python | 0.000002 | |
0114173d508298d6e9f72fd7f344d9123e4a7e59 | Create wtospark.py | sparkgw/wtospark.py | sparkgw/wtospark.py | from flask import Flask, request, abort
import json
import urllib2
app = Flask(__name__)
#Secret provided by
# fbabottemp99
# MmQ3YTA0MGUtNGI1Zi00MTI3LTlmZTMtMjQxNGJhYmRjMTI0MzI2ZDFlYWYtYzhh
# curl -X POST -H "X-Device-Secret: 12345" http://localhost:8080/report?temp=32
YOUR_DEVICE_SECRET = "12345"
YOUR_BOT_TOKEN = ""
YOUR_ROOM_ID = ""
@app.route('/report', methods =['POST'])
def inputArduino():
    """Receive a temperature report from the device and relay it to Spark.

    Expects a `temp` query parameter and an `X-Device-Secret` header that
    must match YOUR_DEVICE_SECRET; any missing or mismatched value aborts
    with HTTP 401.
    """
    headers = request.headers
    temperature = request.args.get('temp')
    incoming_secret = headers.get('X-Device-Secret')
    if temperature is None:
        abort(401)
    if incoming_secret is None:
        abort(401)
    elif YOUR_DEVICE_SECRET == incoming_secret:
        # we dont use it but for illustration
        json_file = request.json
        toSpark('**Temperature:** '+temperature)
        return 'Ok'
    else:
        print "Spoofed Hook"
        abort(401)
# POST Function that sends the commits & comments in markdown to a Spark room
def toSpark(commits):
    """POST a markdown message to the configured Cisco Spark room.

    Returns the raw response body from the Spark messages API.
    """
    url = 'https://api.ciscospark.com/v1/messages'
    headers = {'accept':'application/json','Content-Type':'application/json','Authorization': 'Bearer ' + YOUR_BOT_TOKEN}
    values = {'roomId': YOUR_ROOM_ID, 'markdown': commits }
    data = json.dumps(values)
    req = urllib2.Request(url = url , data = data , headers = headers)
    response = urllib2.urlopen(req)
    the_page = response.read()
    return the_page
if __name__ == '__main__':
    # Listen on all interfaces; debug mode is for development only.
    app.run(host='0.0.0.0' , port=9090, debug=True)
| Python | 0 | |
5432dd2ee2e1d20494d0b4cf8d816b298e70067c | Add test script. | protogeni/test/ma/lookup_keys.py | protogeni/test/ma/lookup_keys.py | #! /usr/bin/env python
#
# Copyright (c) 2012-2014 University of Utah and the Flux Group.
#
# {{{GENIPUBLIC-LICENSE
#
# GENI Public License
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#
# }}}
#
import sys
import pwd
import getopt
import os
import re
import xmlrpclib
from M2Crypto import X509
def Usage():
    """Print command-line usage for this script."""
    print "usage: " + sys.argv[ 0 ] + " [option...] <public|private|identifying> <user_urn [...]>"
    print """Options:
    -d, --debug be verbose about XML methods invoked
    -h, --help show options and usage
    -r file, --read-commands=file specify additional configuration file"""
# Pull in the shared helper definitions (do_method, get_self_credential, ...).
execfile( "test-common.py" )

authority = "geni-ma"

# One credential plus an empty options struct, per the GENI MA API signature.
callargs = [
    [{
        'geni_type': 'geni_sfa',
        'geni_version': '3',
        'geni_value': get_self_credential()}],
    {
    }
]

try:
    response = do_method(authority, "lookup_keys",
                         callargs,
                         response_handler=geni_am_response_handler)
    print response
except xmlrpclib.Fault, e:
    Fatal("Could not obtain keys: %s" % (str(e)))
| Python | 0 | |
d1edac38e3402ebe03f96597500c3d39e49f299d | add run_pylint.py | run_pylint.py | run_pylint.py | #!/usr/bin/python
#
# wrapper script for pylint which just shows the errors and changes the return value if there's problems
# (enforcing a minscore and/or maxerrors - defaults to perfection)
#
import sys, re, subprocess, os
MINSCORE = 10.0
MAXERRORS = 0
command = 'pylint --rcfile=pylintrc --disable=W0511,W9911,W9913 `find webui python_saml libs -name "*py"`'
# unbuffer *both* me and the pylint subprocess!
os.environ['PYTHONUNBUFFERED'] = '1'
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, universal_newlines=True)
num_errors = 0
score = 0
while True:
line = p.stdout.readline().strip()
if line is None:
break
match = re.search(r'^.+?:[0-9]+: \[.[0-9]+.+?\] ', line)
if match:
print line
num_errors += 1
continue
match = re.search(r'Your code has been rated at ([0-9.-]+)', line)
if match:
score = float(match.group(1))
break
if score < MINSCORE:
print "scored %.2f which is less than %.2f - aborting" % (score, MINSCORE)
sys.exit(3)
if num_errors < MAXERRORS:
print "%d errors which is more than %d - aborting" % (num_errors, MAXERRORS)
sys.exit(4)
| Python | 0.000011 | |
a96c25cf46cd82716b397ba61c2b67acb8b7c2d7 | Add code reading. | micro.py | micro.py | #!/usr/bin/env python
from sys import argv
def get_code():
    """Return the micro program source passed as the first CLI argument."""
    code = argv[1]
    return code
if __name__ == '__main__':
    # Echo the program source back; interpretation is not implemented yet.
    code = get_code()
    print(code)
| Python | 0 | |
084ebff19703c42c50621eb94ac070c6a471e983 | Solve the most wanted letter problem. | Home/mostWantedLetter.py | Home/mostWantedLetter.py | def checkio(word):
word = word.lower()
arr = dict()
for i in range(len(word)):
char = word[i]
if not str.isalpha(char):
continue
if not arr.__contains__(char):
arr[char] = 0
arr[char] = arr[char] + 1
result = ""
counter = 0
for k, v in arr.items():
if counter < v or (ord(k) < ord(result) and counter == v):
result = k
counter = v
return result
if __name__ == '__main__':
    # Self-checks from the kata description.
    assert checkio("Hello World!") == "l", "First"
    assert checkio("How do you do?") == "o", "Second"
    assert checkio("One") == "e", "Third"
    assert checkio("") == "", "Final"
    print('All ok')
| Python | 0.999816 | |
d437f494db827c69da7aaec00a5acf1d133e16b2 | Add basic slash command example | examples/app_commands/basic.py | examples/app_commands/basic.py | from typing import Optional
import discord
from discord import app_commands
MY_GUILD = discord.Object(id=0) # replace with your guild id
class MyClient(discord.Client):
    """Client that owns an application-command tree and syncs it to one guild."""

    def __init__(self, *, intents: discord.Intents, application_id: int):
        super().__init__(intents=intents, application_id=application_id)
        # A CommandTree is a special type that holds all the application command
        # state required to make it work. This is a separate class because it
        # allows all the extra state to be opt-in.
        # Whenever you want to work with application commands, your tree is used
        # to store it and work with it.
        # Note: When using commands.Bot instead of discord.Client, the bot will
        # maintain its own tree instead.
        self.tree = app_commands.CommandTree(self)

    # In this basic example, we just synchronize the app commands to one guild.
    # Instead of specifying a guild to every command, we copy over our global commands instead.
    # By doing so we don't have to wait up to an hour until they are shown to the end-user.
    async def setup_hook(self):
        # This copies the global commands over to your guild.
        self.tree.copy_global_to(guild=MY_GUILD)
        await self.tree.sync(guild=MY_GUILD)
intents = discord.Intents.default()

# In order to use a basic synchronization of the app commands in the setup_hook,
# you have to replace the 0 with your bot's application_id from the developer portal.
client = MyClient(intents=intents, application_id=0)
@client.event
async def on_ready():
    # Log basic identity once the gateway connection is ready.
    print(f'Logged in as {client.user} (ID: {client.user.id})')
    print('------')
@client.tree.command()
async def hello(interaction: discord.Interaction):
    """Says hello!"""
    # The docstring above doubles as the slash command's description.
    await interaction.response.send_message(f'Hi, {interaction.user.mention}')
@client.tree.command()
@app_commands.describe(
    first_value='The first value you want to add something to',
    second_value='The value you want to add to the first value',
)
async def add(interaction: discord.Interaction, first_value: int, second_value: int):
    """Adds two numbers together."""
    # describe() above supplies per-argument help text shown in the client UI.
    await interaction.response.send_message(f'{first_value} + {second_value} = {first_value + second_value}')
# To make an argument optional, you can either give it a supported default argument
# or you can mark it as Optional from the typing library. This example does both.
@client.tree.command()
@app_commands.describe(member='The member you want to get the joined date from, defaults to the user who uses the command')
async def joined(interaction: discord.Interaction, member: Optional[discord.Member] = None):
    """Says when a member joined."""
    # If no member is explicitly provided then we use the command user here
    member = member or interaction.user
    await interaction.response.send_message(f'{member} joined in {member.joined_at}')
# Replace 'token' with your bot token before running.
client.run('token')
| Python | 0.009586 | |
235a7107ca0c6d586edd9f224b9ee9132111a842 | remove debug trace | toflib.py | toflib.py | import random
from datetime import datetime, timedelta
# those commands directly trigger cmd_* actions
_simple_dispatch = set()
# those commands directly trigger confcmd_* actions
_simple_conf_dispatch = set()
def cmd(expected_args):
    """Decorator factory for cmd_* handlers taking *expected_args* arguments.

    Registers the command name (function name minus the 'cmd_' prefix) in
    the module-level _simple_dispatch set and wraps the handler so it only
    runs when called with exactly the expected number of arguments.
    """
    def deco(func):
        name = func.__name__[4:]  # strip the 'cmd_' prefix
        _simple_dispatch.add(name)
        def f(bot, chan, args):
            # Silently ignore calls with the wrong argument count.
            if(len(args) == expected_args):
                return func(bot, chan, args)
        f.__doc__ = func.__doc__
        return f
    return deco
def confcmd(expected_args):
    """Decorator factory for confcmd_* handlers (configuration commands).

    Same mechanics as cmd(): registers the name (minus the 'confcmd_'
    prefix) in _simple_conf_dispatch and enforces the argument count.
    """
    def deco(func):
        name = func.__name__[8:]  # strip the 'confcmd_' prefix
        _simple_conf_dispatch.add(name)
        def f(bot, chan, args):
            if(len(args) == expected_args):
                return func(bot, chan, args)
        f.__doc__ = func.__doc__
        return f
    return deco
def distance(string1, string2):
    """
    Levenshtein distance, computed iteratively with two rolling rows.
    http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/Levenshtein_distance#Python
    """
    # previous[j] holds the edit distance between the processed prefix of
    # string1 and the first j characters of string2.
    previous = list(range(len(string2) + 1))
    for i, ch1 in enumerate(string1, 1):
        current = [i]
        for j, ch2 in enumerate(string2, 1):
            if ch1 == ch2:
                cost = previous[j - 1]
            else:
                cost = 1 + min(previous[j],        # deletion
                               current[j - 1],     # insertion
                               previous[j - 1])    # substitution
            current.append(cost)
        previous = current
    return previous[-1]
class RiddleTeller(object):
    """
    A gentleman (and a scholar) who likes to entertain its audience.
    """
    def __init__(self, riddle, channel, writeback, max_dist):
        # riddle is a (question, answer) pair; writeback posts to the channel.
        self.riddle, self.answer = riddle
        self.channel = channel
        self.writeback = writeback
        self.remaining_msgs = 3  # attempts allowed before revealing the answer
        self.writeback(self.riddle)
        self.max_dist = max_dist  # Levenshtein tolerance for accepting answers

    def wait_answer(self, chan, msg):
        """
        Called at each try.
        Returns True iff the riddle is over.
        """
        if chan != self.channel:
            return False
        # Accept near-misses up to max_dist edits away, case-insensitively.
        if distance(msg.lower(), self.answer.lower()) < self.max_dist:
            self.writeback("10 points pour Griffondor.")
            return True
        self.remaining_msgs -= 1
        if self.remaining_msgs == 0:
            # Out of attempts: reveal the answer and end the riddle.
            self.writeback(self.answer)
            return True
        return False
class InnocentHand(object):
    """
    A cute 6 years old girl, picking a random object
    from a given pool of candidates
    """
    def __init__(self, pool):
        """
        pool: list of candidates
        """
        self.pool = pool

    def __call__(self, index=None):
        """Return pool[index % len(pool)], or a random candidate if index is None."""
        # Compare against None explicitly: the old truthiness test
        # (`if index:`) wrongly fell through to a random pick for index == 0.
        if index is not None:
            return self.pool[index % len(self.pool)]
        random.seed()
        return random.choice(self.pool)
class Plugin(object):
    """Minimal base class for bot plugins."""
    def __init__(self, bot):
        self.bot = bot

    def say(self, msg):
        # Speak on the bot's primary (first) channel.
        self.bot.msg(self.bot.channels[0], msg)
class CronEvent:
    """An event fired periodically by Cron; subclasses override fire()."""
    def __init__(self, bot):
        self.lastTick = datetime.min          # never fired yet
        self.period = timedelta(minutes=10)   # default firing interval
        self.bot = bot

    def fire(self):
        # Overridden by subclasses with the actual periodic action.
        pass
class Cron:
    """Very small scheduler: fires each event at most once per period."""

    def __init__(self):
        # Registered CronEvent-like objects.
        self.events = []

    def tick(self):
        """Fire every event whose period has elapsed since its last run."""
        now = datetime.now()
        for event in self.events:
            due = event.lastTick + event.period
            if now > due:
                event.fire()
                event.lastTick = now

    def schedule(self, ev):
        """Register *ev* for periodic firing."""
        self.events.append(ev)
| import random
from datetime import datetime
# those commands directly trigger cmd_* actions
_simple_dispatch = set()
# those commands directly trigger confcmd_* actions
_simple_conf_dispatch = set()
def cmd(expected_args):
def deco(func):
name = func.__name__[4:]
_simple_dispatch.add(name)
def f(bot, chan, args):
if(len(args) == expected_args):
return func(bot, chan, args)
f.__doc__ = func.__doc__
return f
return deco
def confcmd(expected_args):
def deco(func):
name = func.__name__[8:]
_simple_conf_dispatch.add(name)
def f(bot, chan, args):
if(len(args) == expected_args):
return func(bot, chan, args)
f.__doc__ = func.__doc__
return f
return deco
def distance(string1, string2):
"""
Levenshtein distance
http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/Levenshtein_distance#Python
"""
string1 = ' ' + string1
string2 = ' ' + string2
dists = {}
len1 = len(string1)
len2 = len(string2)
for i in range(len1):
dists[i, 0] = i
for j in range (len2):
dists[0, j] = j
for j in range(1, len2):
for i in range(1, len1):
if string1[i] == string2[j]:
dists[i, j] = dists[i-1, j-1]
else:
dists[i, j] = min(dists[i-1, j] + 1,
dists[i, j-1] + 1,
dists[i-1, j-1] + 1
)
return dists[len1-1, len2-1]
class RiddleTeller(object):
"""
A gentleman (and a scholar) who likes to entertain its audience.
"""
def __init__(self, riddle, channel, writeback, max_dist):
self.riddle, self.answer = riddle
self.channel = channel
self.writeback = writeback
self.remaining_msgs = 3
self.writeback(self.riddle)
self.max_dist = max_dist
def wait_answer(self, chan, msg):
"""
Called at each try.
Returns True iff the riddle is over.
"""
if chan != self.channel:
return False
if distance(msg.lower(), self.answer.lower()) < self.max_dist:
self.writeback("10 points pour Griffondor.")
return True
self.remaining_msgs -= 1
if self.remaining_msgs == 0:
self.writeback(self.answer)
return True
return False
class InnocentHand(object):
"""
A cute 6 years old girl, picking a random object
from a given pool of candidates
"""
def __init__(self, pool):
"""
pool: list of candidates
"""
self.pool = pool
def __call__(self, index=None):
if index:
return self.pool[index % len(self.pool)]
random.seed()
return random.choice(self.pool)
class Plugin(object):
def __init__(self, bot):
self.bot = bot
def say(self, msg):
self.bot.msg(self.bot.channels[0], msg)
class CronEvent:
def __init__(self, bot):
self.lastTick = datetime.min
self.period = timedelta(minutes=10)
self.bot = bot
def fire(self):
pass
class Cron:
def __init__(self):
self.events = []
def tick(self):
now = datetime.now ()
for ev in self.events:
print ev
if now > ev.lastTick + ev.period:
ev.fire()
ev.lastTick = now
def schedule(self, ev):
self.events.append(ev)
| Python | 0.000004 |
8fb4df5367b5c03d2851532063f6fa781fe2f980 | Add Fibonacci Series Using Recursion | Maths/fibonacciSeries.py | Maths/fibonacciSeries.py | # Fibonacci Sequence Using Recursion
def recur_fibo(n):
    """Return the n-th Fibonacci number (0-indexed) via naive recursion."""
    return n if n <= 1 else recur_fibo(n - 1) + recur_fibo(n - 2)
# Prompt for the number of terms and print the series.
# Typos fixed in the user-facing strings ("fionacci" -> "fibonacci",
# "Plese" -> "Please").
limit = int(input("How many terms to include in fibonacci series:"))
if limit <= 0:
    print("Please enter a positive integer")
else:
    print("Fibonacci series:")
    for i in range(limit):
        print(recur_fibo(i))
| Python | 0.000003 | |
5f8e01f976d75eca651e29ebdd379c865aa5bda9 | update merge_two_binary_trees_617 | Python/merge_two_binary_trees_617.py | Python/merge_two_binary_trees_617.py | # Given two binary trees and imagine that when you put one of them to cover the other, some nodes of the two trees are overlapped while the others are not.
# You need to merge them into a new binary tree. The merge rule is that if two nodes overlap, then sum node values up as the new value of the merged node.
# Otherwise, the NOT null node will be used as the node of new tree.
# Example 1:
# Input:
# Tree 1 Tree 2
# 1 2
# / \ / \
# 3 2 1 3
# / \ \
# 5 4 7
# Output:
# Merged tree:
# 3
# / \
# 4 5
# / \ \
# 5 4 7
# Note: The merging process must start from the root nodes of both trees.
# Solution notes:
# Merge the two binary trees: where both trees have a node at the same
# position, sum the two values into that node; where only one tree has a
# node, keep that node as-is. Traverse both trees this way until done.
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def mergeTrees(self, t1, t2):
        """
        :type t1: TreeNode
        :type t2: TreeNode
        :rtype: TreeNode
        """
        # If either side is missing, reuse whichever subtree exists (or None).
        if t1 is None or t2 is None:
            return t1 or t2
        merged = TreeNode(t1.val + t2.val)
        merged.left = self.mergeTrees(t1.left, t2.left)
        merged.right = self.mergeTrees(t1.right, t2.right)
        return merged
| Python | 0.000004 | |
8c6983656e550ebaf32ff714a3c22be276ba842b | Add ScribdDownloader.py | ScribdDownloader.py | ScribdDownloader.py | #Scribd Downloader
#Adam Knuckey September 2015
print ("Starting Scribd Downloader")
import os
import re
import urllib, urllib2
import threading
from time import sleep
def download(link,destination):
    """Fetch *link* and save it to *destination* (used from worker threads)."""
    #print link
    urllib.urlretrieve(link,destination)
print("Enter textbook link:")
website = raw_input(" > ")

# Fetch the document page with a browser-like User-Agent.
request = urllib2.Request(website)
request.add_header('User-Agent','Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11')
opener = urllib2.build_opener()
html = opener.open(request).read()

# Extract the document title from the page (last <title> match wins).
regex = re.compile("<title>.+</title>")
for m in regex.finditer(html):
    title = m.group().replace("<title>","").replace("</title>","")
print ("Download "+title+"?")
proceed = raw_input("(y/n) > ").lower()
if proceed == "y":
    print ("Downloading textbook - "+title+"...")
    # The asset id follows the contentUrl marker in the embedded page params.
    index = html.index('pageParams.contentUrl = "https://html2-f.scribdassets.com/')+len('pageParams.contentUrl = "https://html2-f.scribdassets.com/')
    ident = html[index:index+17]
    if not os.path.exists(title):
        os.makedirs(title)
    page = 1
    regex = re.compile(ident)
    for m in regex.finditer(html):#
        # Turn each occurrence into the corresponding page-image JPEG URL.
        link = html[m.start()-len('https://html2-f.scribdassets.com/'):m.start()+23+len(str(page))+11].replace("pages","images")+".jpg"
        # Fetch pages concurrently in daemon threads.
        t = threading.Thread(target=download,args=(link,title+"/"+str(page)+".jpg"))
        t.daemon = True
        t.start()
        sleep(0.05)  # throttle thread creation slightly
        #print link
        #urllib.urlretrieve(link,title+"/"+str(page)+".jpg")
        page+=1
    print ("Downloaded "+str(page-1)+" pages")
else:
    print ("Exiting...")
| Python | 0 | |
97ae80b08958646e0c937f65a1b396171bf61e72 | Add a proper unit test for xreload.py. | Lib/test/test_xreload.py | Lib/test/test_xreload.py | """Doctests for module reloading.
>>> from xreload import xreload
>>> from test.test_xreload import make_mod
>>> make_mod()
>>> import x
>>> C = x.C
>>> Cfoo = C.foo
>>> Cbar = C.bar
>>> Cstomp = C.stomp
>>> b = C()
>>> bfoo = b.foo
>>> b.foo()
42
>>> bfoo()
42
>>> Cfoo(b)
42
>>> Cbar()
42 42
>>> Cstomp()
42 42 42
>>> make_mod(repl="42", subst="24")
>>> xreload(x)
<module 'x' (built-in)>
>>> b.foo()
24
>>> bfoo()
24
>>> Cfoo(b)
24
>>> Cbar()
24 24
>>> Cstomp()
24 24 24
"""
SAMPLE_CODE = """
class C:
def foo(self):
print(42)
@classmethod
def bar(cls):
print(42, 42)
@staticmethod
def stomp():
print (42, 42, 42)
"""
import os
import sys
import shutil
import doctest
import xreload
import tempfile
from test.test_support import run_unittest
tempdir = None
save_path = None
def setUp(unused=None):
    """Create a temp directory and put it on sys.path."""
    global tempdir, save_path
    tempdir = tempfile.mkdtemp()
    save_path = list(sys.path)
    sys.path.append(tempdir)
def tearDown(unused=None):
    """Restore sys.path and remove the temp directory, if they were set up."""
    global tempdir, save_path
    if save_path is not None:
        sys.path = save_path
        save_path = None
    if tempdir is not None:
        shutil.rmtree(tempdir)
        tempdir = None
def make_mod(name="x", repl=None, subst=None):
    """Write the sample module into the temp dir, optionally substituting text.

    If both repl and subst are given, every occurrence of repl in
    SAMPLE_CODE is replaced with subst before writing -- used by the
    doctests to produce a changed module for xreload to pick up.
    """
    if not tempdir:
        setUp()
    assert tempdir
    fn = os.path.join(tempdir, name + ".py")
    f = open(fn, "w")
    sample = SAMPLE_CODE
    if repl is not None and subst is not None:
        sample = sample.replace(repl, subst)
    try:
        f.write(sample)
    finally:
        f.close()
def test_suite():
    # Run the module docstring's doctests with fixture setup/teardown.
    return doctest.DocTestSuite(setUp=setUp, tearDown=tearDown)
def test_main():
    # Entry point used by the regression-test driver.
    run_unittest(test_suite())
if __name__ == "__main__":
    # Allow running this test file directly.
    test_main()
| Python | 0 | |
53e851f68f106bff919a591a3516f26d5b07c375 | add unit test case for FedMsgContext.send_message | fedmsg/tests/test_core.py | fedmsg/tests/test_core.py | import unittest
import mock
import warnings
from fedmsg.core import FedMsgContext
from common import load_config
class TestCore(unittest.TestCase):
    """Tests for fedmsg.core.FedMsgContext."""

    def setUp(self):
        config = load_config()
        config['io_threads'] = 1
        self.ctx = FedMsgContext(**config)

    def test_send_message(self):
        """send_message is deprecated

        It tests
        - deprecation warning showing up appropriately
        - that we call publish method behind the scene
        """
        fake_topic = "org.fedoraproject.prod.compose.rawhide.complete"
        fake_msg = "{'arch'': 's390', 'branch': 'rawhide', 'log': 'done'}"
        # Replace publish with a spec'd mock so the call can be inspected.
        self.ctx.publish = mock.Mock(spec_set=FedMsgContext.publish)
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            self.ctx.send_message(topic=fake_topic, msg=fake_msg)
            # Exactly one warning with the expected deprecation text.
            assert len(w) == 1
            assert str(w[0].message) == ".send_message is deprecated."

        # The deprecated wrapper must delegate to publish() unchanged.
        assert self.ctx.publish.called
        topic, msg, modname = self.ctx.publish.call_args[0]
        assert topic == fake_topic
        assert msg == fake_msg
        assert modname is None
| Python | 0.000002 | |
aa38c6604476f1181903c688c0444ed87c9d75a1 | Add engine tests. | simphony/engine/tests/test_engine_metadata.py | simphony/engine/tests/test_engine_metadata.py | """Tests regarding loading engine's metadata."""
import sys
import unittest
import simphony.engine as engine_api
from simphony.engine import ABCEngineExtension, EngineInterface
from simphony.engine.extension import EngineManager, EngineManagerException
from simphony.engine.extension import EngineFeatureMetadata, EngineMetadata
class _Example1(ABCEngineExtension):
    """Module-level engine extension picked up by EngineManager.load_metadata."""

    def get_supported_engines(self):
        example_engine = \
            self.create_engine_metadata('EXAMPLE1',
                                        None,
                                        [EngineInterface.Internal,
                                         EngineInterface.FileIO])
        return [example_engine]

    def create_wrapper(self, cuds, engine_name, engine_interface):
        # Dummy engine: supports only EXAMPLE1 and creates no real wrapper.
        if engine_name == 'EXAMPLE1':
            pass
        else:
            raise Exception('Only EXAMPLE1 engine is supported. '
                            'Unsupported eninge: %s', engine_name)
def get_example_engine_extension():
    """Return a fresh ABCEngineExtension subclass supporting 'EXAMPLE2'.

    Defined inside a function so module scanning does not register it
    automatically; tests add it explicitly via EngineManager.add_extension.
    """
    class _Example2(ABCEngineExtension):
        def get_supported_engines(self):
            example_engine = \
                self.create_engine_metadata('EXAMPLE2',
                                            None,
                                            [EngineInterface.Internal,
                                             EngineInterface.FileIO])
            return [example_engine]

        def create_wrapper(self, cuds, engine_name, engine_interface):
            if engine_name == 'EXAMPLE2':
                pass
            else:
                raise Exception('Only EXAMPLE2 engine is supported. '
                                'Unsupported eninge: %s', engine_name)
    return _Example2
class TestEnginePublicAPI(unittest.TestCase):
    """Test the public simphony.engine API surface."""

    def setUp(self):
        # Use the API module's global engine manager.
        self.manager = engine_api._ENGINE_MANAGER

    def tearDown(self):
        pass

    def test_get_supported_engines(self):
        supported = engine_api.get_supported_engines()
        assert(isinstance(supported, list))

    def test_create_wrapper(self):
        pass
class TestEngineManager(unittest.TestCase):
    """Tests for EngineManager loading/registration behaviour."""

    def setUp(self):
        self.manager = EngineManager()
        # Load extensions defined in this very test module (_Example1).
        self.manager.load_metadata(sys.modules[__name__])

    def tearDown(self):
        pass

    def test_get_supported_engines(self):
        supported = self.manager.get_supported_engines()
        self.assertIn('EXAMPLE1', supported)
        self.assertNotIn('LAMMPS', supported)

    def test_engine_count(self):
        supported = self.manager.get_supported_engines()
        self.assertEqual(len(supported), 1)

    def test_assert_duplicate_engine(self):
        # Loading the same module twice must fail on the duplicate engine.
        self.assertRaises(Exception, self.manager.load_metadata, sys.modules[__name__])

    def test_add_extension(self):
        cls = get_example_engine_extension()
        self.manager.add_extension(cls())
        supported = self.manager.get_supported_engines()
        self.assertIn('EXAMPLE2', supported)
        self.assertEqual(len(supported), 2)

    def test_create_wrapper(self):
        # EXAMPLE2 was never registered here, so wrapping must fail.
        self.assertRaises(EngineManagerException, self.manager.create_wrapper, None, 'EXAMPLE2')
        # EXAMPLE1 is a dummy engine. It does not have any wrapper.
        self.assertEqual(self.manager.create_wrapper(None, 'EXAMPLE1'), None)

    def test_non_module_load(self):
        # load_metadata must reject anything that is not a module.
        class MyClass:pass
        self.assertRaises(EngineManagerException, self.manager.load_metadata, MyClass)
class TestEngineFeature(unittest.TestCase):
    """Tests for EngineFeatureMetadata validation."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_init(self):
        # A feature requires a name and a non-empty method list.
        self.assertRaises(EngineManagerException, EngineFeatureMetadata, None, None)
        self.assertRaises(EngineManagerException, EngineFeatureMetadata, None, [])
class TestEngineMetadata(unittest.TestCase):
    """Tests for the EngineMetadata value object."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_init(self):
        # Constructor must simply store its three attributes.
        m = EngineMetadata('myengine', None, EngineInterface.Internal)
        self.assertEqual(m.name, 'myengine')
        self.assertEqual(m.features, None)
        self.assertEqual(m.interfaces, EngineInterface.Internal)
| Python | 0 | |
51d20a5e10e9cdfc38f8c18f8e98a03c5db99236 | Create plot_daily_cycle.py | Python/plot_daily_cycle.py | Python/plot_daily_cycle.py | #plot_daily_cycle.py
"""plot the average daily cycle of your WRF output
NOTE: we assume variables to have dimension [time,y,x] or [time,z,y,x]
If this is not the case, adapt the dimensions where variable is read
Author: Ingrid Super
Last revisions: 3-6-2016"""
import netCDF4 as nc
import numpy as np
import matplotlib.pyplot as plt
from maptools import *
from numpy import ma
import datetime as dtm
from scipy import stats
##############################################################################################
"""specify the following:"""
"""directory of the WRF output:"""
wrfout_path='/Storage/WRF/super004/WRF/run_paper2/output'
"""x- and y-location of the location you want to plot, the WRF domain, WRF vertical level and variable of interest"""
xloc=22
yloc=28
domain=3 #1 being outer domain
lev=0 #0 being surface level
var='U10'  # variable name as it appears in the wrfout files
##############################################################################################
"""read in variable of interest, in this case from WRF (could also be observations or both)"""
vars=[]    # flat list of values for the chosen grid cell, all files concatenated
timers=[]  # matching list of datetimes
wrfout_files=[os.path.join(wrfout_path,filename) for filename in os.listdir(wrfout_path) if filename.startswith('wrfout_d%02d'%domain)]
for each_file in wrfout_files:
    mf=nc.Dataset(each_file)
    dum=mf.variables[var][:]
    wrftime=mf.variables['Times'][:]
    # Decode the character-array timestamps (YYYY-MM-DD_HH:...) into datetimes.
    for j in range(len(wrftime)):
        year=int(''.join(wrftime[j][0:4]))
        month=int(''.join(wrftime[j][5:7]))
        day=int(''.join(wrftime[j][8:10]))
        hour=int(''.join(wrftime[j][11:13]))
        dat=dtm.datetime(year,month,day,hour,0)
        timers.append(dat)
    # Select the chosen grid cell; [time,y,x] or [time,z,y,x] as noted above.
    if len(dum.shape)==3:
        dum2=dum[:,yloc,xloc]
    elif len(dum.shape)==4:
        dum2=dum[:,lev,yloc,xloc]
    vars.extend(dum2)
"""cluster the data based on hour of the day; you can add additional selection criteria in the if-statement below:
E.g. the example shows how to show differences between winter and summer; this can also be used to compare simulations or model vs. observations"""
dcy1=[]  # summer (May-September) samples, one list per hour of day
dcy2=[]  # winter (October-April) samples, one list per hour of day
for i in range(24):
    dum1=[]
    dum2=[]
    for j in range(len(timers)):
        if timers[j].hour==i and timers[j].month>4 and timers[j].month<10:
            dum1.append(vars[j])
        # Fixed: a month cannot be both >=10 and <=4; the winter window
        # (Oct-Apr) wraps the year end and therefore needs `or`.
        elif timers[j].hour==i and (timers[j].month>=10 or timers[j].month<=4):
            dum2.append(vars[j])
    dcy1.append(dum1)
    dcy2.append(dum2)
dcy1=np.array(dcy1)
dcy2=np.array(dcy2)
"""the following routines give you a confidence interval for your daily cycles"""
def effectiveSampleSize(data, stepSize = 1) :
""" Effective sample size, as computed by BEAST Tracer."""
a = 1.0*np.array(data)
n = len(a)
assert len(a) > 1,"no stats for short sequences"
maxLag = min(n//3, 1000)
gammaStat = [0,]*maxLag
varStat = 0.0;
if type(a) != np.ndarray:
a = np.array(a)
normalizedData = a - a[~np.isnan(a)].mean()
for lag in range(maxLag):
v1 = normalizedData[:n-lag]
v2 = normalizedData[lag:]
v = v1 * v2
gammaStat[lag] = np.nansum(v) / len(v)
if lag == 0 :
varStat = gammaStat[0]
elif lag % 2 == 0 :
s = gammaStat[lag-1] + gammaStat[lag]
if s > 0 :
varStat += 2.0*s
else :
break
# auto correlation time
act = stepSize * varStat / gammaStat[0]
# effective sample size
ess = (stepSize * n) / act
return act
def mean_confidence_interval(data, confidence=0.95, act=0):
    """Mean and confidence interval of *data* (a numpy array).

    1-D input: returns (mean, lower bound, upper bound).  The sample count
    is deflated by the autocorrelation time *act*, so act must be > 0 for
    1-D data (the default of 0 raises ZeroDivisionError, as before).
    2-D input: returns (column means, half-widths), computed per column;
    *act* is ignored in this branch.
    """
    if data.ndim == 1:
        mask = ~np.isnan(data)
        b = data[mask]
        a = 1.0 * np.array(b)
        # effective number of independent samples
        n = len(a) / act
        m, se = np.nanmean(a), stats.sem(a)
        # stats.t.ppf is the public API; the original used the private
        # stats.t._ppf, which is numerically identical for loc=0/scale=1.
        h = se * stats.t.ppf((1 + confidence) / 2., n - 1)
        return m, m - h, m + h
    if data.ndim == 2:
        a = 1.0 * np.array(data)
        n = len(a)  # number of rows (samples per column)
        m, se = np.nanmean(a, axis=0), stats.sem(a, axis=0)
        h = se * stats.t.ppf((1 + confidence) / 2., n - 1)
        return m, h
mn1=24*[None]
lowb1=24*[None]
upb1=24*[None]
mn2=24*[None]
lowb2=24*[None]
upb2=24*[None]
# Mean and 95% confidence bounds for each hour of the daily cycle.
# NOTE(review): `act` is computed but act=1 is then passed to
# mean_confidence_interval, so the autocorrelation correction is unused --
# confirm whether `act=act` was intended.
for i in range(24):
	act=effectiveSampleSize(data=np.array(dcy1[i]),stepSize=1)
	output=mean_confidence_interval(data=np.array(dcy1[i]),confidence=0.95,act=1)
	mn1[i]=output[0]
	lowb1[i]=output[1]
	upb1[i]=output[2]
	act=effectiveSampleSize(data=np.array(dcy2[i]),stepSize=1)
	output=mean_confidence_interval(data=np.array(dcy2[i]),confidence=0.95,act=1)
	mn2[i]=output[0]
	lowb2[i]=output[1]
	upb2[i]=output[2]
"""make plot and lay-out"""
fig=plt.figure(1,figsize=(12,8))
x=arange(24)
# Shaded bands are the confidence intervals, lines are the hourly means.
fill_between(x,lowb1,upb1,color='silver')
fill_between(x,lowb2,upb2,color='silver')
plot(x,mn1,color='k',label='summer')
plot(x,mn2,color='k',linestyle='--',label='winter')
legend(loc='upper right')
xlabel('label') #please specify
ylabel('label') #please specify
title('title') #please specify
# NOTE(review): ticks stop at x[22]; hour 23 gets no tick -- confirm intentional.
ticks=[x[0],x[1],x[2],x[3],x[4],x[5],x[6],x[7],x[8],x[9],x[10],x[11],x[12],x[13],x[14],x[15],x[16],x[17],x[18],x[19],x[20],x[21],x[22]]
labels=['','','3','','','6','','','9','','','12','','','15','','','18','','','21','','']
plt.xticks(ticks,labels)
plt.grid(True)
plt.show()
| Python | 0.001003 | |
8483174f32801318d1cd8aa33abb04819b4a7810 | Create usonic.py | usonic.py | usonic.py |
#!/usr/bin/python
# remember to change the GPIO values below to match your sensors
# GPIO output = the pin that's connected to "Trig" on the sensor
# GPIO input = the pin that's connected to "Echo" on the sensor
def reading(sensor):
    """Take one distance reading (in cm) from an HC-SR04 style sensor.

    sensor -- sensor index; only 0 is wired up.  For any other value a
    message is printed and None is returned, as before.

    Pin numbers below use BCM (GPIO) numbering, not physical pin numbers:
    trig_pin connects to "Trig" on the sensor, echo_pin to "Echo".
    """
    import time
    import RPi.GPIO as GPIO

    if sensor != 0:
        print("Incorrect usonic() function variable.")
        return

    # Disable any warning message such as GPIO pins in use.
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)
    trig_pin = 23
    echo_pin = 24
    try:
        GPIO.setup(trig_pin, GPIO.OUT)
        GPIO.setup(echo_pin, GPIO.IN)
        GPIO.output(trig_pin, GPIO.LOW)
        # The sensor can crash without a settling delay here; increase the
        # delay if you see odd crashing issues.
        time.sleep(0.3)

        # A 10us high pulse on Trig makes the sensor transmit 8 cycles of
        # ultrasonic burst at 40kHz and wait for the reflected burst.
        GPIO.output(trig_pin, True)
        time.sleep(0.00001)
        GPIO.output(trig_pin, False)

        # Echo is low until the burst goes out; remember when it went high.
        # (Initialising before the loops avoids a NameError if Echo is
        # already high/low when we start polling.)
        signaloff = time.time()
        while GPIO.input(echo_pin) == 0:
            signaloff = time.time()
        # Echo stays high for the round-trip time of the sound.
        signalon = signaloff
        while GPIO.input(echo_pin) == 1:
            signalon = time.time()

        # Speed of sound ~340 m/s: distance in cm = elapsed * 17000
        # (half the round trip, converted to centimetres).
        timepassed = signalon - signaloff
        return timepassed * 17000
    finally:
        # The original placed GPIO.cleanup() after the return, so it never
        # ran; the finally block guarantees the GPIO state is released.
        GPIO.cleanup()
print reading(0)
| Python | 0 | |
7a7d597c771ba8100957b5ca00156d7147c695c5 | Add clear_db_es_contents tests | src/encoded/tests/test_clear_db_es_contents.py | src/encoded/tests/test_clear_db_es_contents.py | import pytest
from encoded.commands.clear_db_es_contents import (
clear_db_tables,
run_clear_db_es
)
pytestmark = [pytest.mark.setone, pytest.mark.working]
def test_clear_db_tables(app, testapp):
    """Posting an item, wiping the DB tables, then fetching it should 404."""
    # post an item and make sure it's there
    post_res = testapp.post_json('/testing-post-put-patch/', {'required': 'abc'},
                                 status=201)
    testapp.get(post_res.location, status=200)
    clear_db_tables(app)
    # item should no longer be present
    testapp.get(post_res.location, status=404)
def test_run_clear_db_envs(app):
    """run_clear_db_es must refuse production envs and honor the env allowlist."""
    # With no env restriction on a non-production env, the DB is cleared.
    assert run_clear_db_es(app, None, True)

    prev_env = app.registry.settings.get('env.name')
    try:
        # should never run on the production environments
        app.registry.settings['env.name'] = 'fourfront-webprod'
        assert not run_clear_db_es(app, None, True)
        app.registry.settings['env.name'] = 'fourfront-webprod2'
        assert not run_clear_db_es(app, None, True)

        # when a specific env is requested, only that env may be cleared
        app.registry.settings['env.name'] = 'fourfront-test-env'
        assert not run_clear_db_es(app, 'fourfront-other-env', True)
        assert run_clear_db_es(app, 'fourfront-test-env', True)
    finally:
        # Restore settings even if an assertion above fails, so later
        # tests never see a leaked 'env.name'.
        if prev_env is None:
            app.registry.settings.pop('env.name', None)
        else:
            app.registry.settings['env.name'] = prev_env
| Python | 0.000002 | |
28677132dbcacd7d348262007256b3e2a9e44da2 | add gate client module | server/Mars/Client/GateClient.py | server/Mars/Client/GateClient.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2016 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import PathHelper as PH
PH.addPathes('../')
import sys
from bson import BSON
from msgpack.bMsgPack import msgPackExt, extHook
import msgpack
from MarsLog.LogManager import LogManager
from MarsRpc.ChannelClient import ChannelClient
from MarsRpc.Compressor import Compressor
from Utils.PyProto import ClientGate_pb2
from Utils.PyProto import Common_pb2
from Utils.EntityFactory import EntityFactory
from Utils.EntityManager import EntityManager
from Utils.IdCreator import IdCreator
from Utils.MessageCodec import Md5IndexDecoder, Md5IndexEncoder
from ServerProxy import ServerProxy
MARS_DEVICEID = str(IdCreator.genId())
class GateClient(ClientGate_pb2.SGate2Client):
    """Client-side connection to the gate server.

    Tracks a small connection state machine (ST_*) and identifies
    connection lifecycle events with callback ids (CB_*).
    """
    # Connection state machine values.
    ST_INIT = 0
    ST_CONNECTING = 1
    # NOTE(review): value 2 is skipped between CONNECTING and RECONNECTING;
    # confirm whether a state was removed intentionally.
    ST_RECONNECTING = 3
    ST_CONNECT_FAILED = 4
    ST_CONNECT_SUCCESSED = 5
    ST_DISCONNECTED = 6
    # Callback identifiers for connection lifecycle events.
    CB_ON_CONNECT_FAILED = 1
    CB_ON_CONNECT_SUCCESSED = 2
    CB_ON_DISCONNECTED = 3
    CB_ON_CONNECT_REPLY = 4
    CB_ON_RELIABLE_MSG_UNSENT = 5
    def __init__(self, host, port, clientConf, proto='BSON'):
        # NOTE(review): passing `self` to the parent __init__ looks unusual;
        # confirm SGate2Client really expects the handler instance here.
        super(GateClient, self).__init__(self)
        self.client = ChannelClient(host, port, self)
36fdfa89230fd08b6c28501f3f277bff642e36e3 | Create ipy_custom_action_button.py | pyside/pyside_basics/jamming/QAction/ipy_custom_action_button.py | pyside/pyside_basics/jamming/QAction/ipy_custom_action_button.py | from collections import OrderedDict
from functools import partial
from PySide import QtCore
from PySide import QtGui
##
class CustomAction(QtGui.QAction):
    """QAction that prints a fixed message every time it is triggered."""

    def __init__(self, message, *args, **kwargs):
        super(CustomAction, self).__init__(*args, **kwargs)
        self.message = message  # text echoed on every trigger
        self.triggered.connect(self.callback)

    def callback(self):
        # sender()/senderSignalIndex() identify which object emitted the
        # signal that invoked this slot.
        print self.message, self.sender(), self.senderSignalIndex()
class CustomButton(QtGui.QPushButton):
    """Push button that, when clicked, fires every QAction attached to it."""

    def __init__(self, *args, **kwargs):
        super(CustomButton, self).__init__(*args, **kwargs)
        self.clicked.connect(self.callback)

    def callback(self):
        print self.text()
        # Manually trigger each action previously attached via addAction().
        for action in self.actions():
            action.activate(QtGui.QAction.ActionEvent.Trigger)
##
# Interactive demo (run cell-by-cell in IPython; '##' marks cell boundaries).
mw = QtGui.QMainWindow()
customAction1 = CustomAction("Action 1", mw)
customAction2 = CustomAction("Action 2", mw)
button = CustomButton("Click me")
print customAction1, button
button.show()
##
# Attach actions one at a time; clicking the button now triggers them.
button.addAction(customAction1)
##
button.addAction(customAction2)
##
# Detach again; clicks revert to just the button's own print.
button.removeAction(customAction1)
##
button.removeAction(customAction2)
##
# Or attach several at once.
button.addActions([customAction1, customAction2])
| Python | 0.000004 | |
c4625a3ea98da282d9cd77acc13bba996e9fa676 | Add refresh url param to dashboard page to allow on demand worker cache updates | flower/views/dashboard.py | flower/views/dashboard.py | from __future__ import absolute_import
import logging
from functools import partial
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from tornado import web
from tornado import gen
from tornado import websocket
from tornado.ioloop import PeriodicCallback
from ..views import BaseHandler
from ..api.workers import ListWorkers
logger = logging.getLogger(__name__)
class DashboardView(BaseHandler):
    """Renders the main dashboard page listing all known workers."""

    @web.authenticated
    @gen.coroutine
    def get(self):
        # 'refresh=1' forces an on-demand update of the worker cache before
        # rendering.  NOTE(review): tornado's RequestHandler.get_argument
        # has no 'type' keyword -- presumably flower's BaseHandler overrides
        # it with type coercion; confirm.
        refresh = self.get_argument('refresh', default=False, type=bool)
        app = self.application
        events = app.events.state
        broker = app.capp.connection().as_uri()
        if refresh:
            # Ask the workers API to refresh its cached worker list.
            yield ListWorkers.update_workers(app=app)
        # Per-worker event counters, keyed by worker hostname.
        workers = dict((k, dict(v)) for (k, v) in events.counter.items())
        for name, info in workers.items():
            worker = events.workers[name]
            info.update(self._as_dict(worker))
            info.update(status=worker.alive)
        self.render("dashboard.html", workers=workers, broker=broker)

    @classmethod
    def _as_dict(cls, worker):
        # Convert the worker event object into a plain dict of its fields.
        return dict((k, worker.__getattribute__(k)) for k in worker._fields)
class DashboardUpdateHandler(websocket.WebSocketHandler):
    """Pushes periodic worker-stats updates to dashboard pages over a websocket."""

    # Class-level (shared) state: all open websocket connections and the
    # single timer that feeds them.
    listeners = []
    periodic_callback = None
    workers = None
    page_update_interval = 2000  # milliseconds between pushed updates

    def open(self):
        app = self.application
        if not app.options.auto_refresh:
            # Auto-refresh disabled: answer once with an empty update and
            # never register this connection for periodic pushes.
            self.write_message({})
            return

        if not self.listeners:
            if self.periodic_callback is None:
                cls = DashboardUpdateHandler
                cls.periodic_callback = PeriodicCallback(
                    partial(cls.on_update_time, app),
                    self.page_update_interval)
            if not self.periodic_callback._running:
                logger.debug('Starting a timer for dashboard updates')
                self.periodic_callback.start()
        self.listeners.append(self)

    def on_message(self, message):
        # Clients never send anything meaningful; ignore inbound messages.
        pass

    def on_close(self):
        if self in self.listeners:
            self.listeners.remove(self)
        if not self.listeners and self.periodic_callback:
            # Last client disconnected: stop pushing updates.
            logger.debug('Stopping dashboard updates timer')
            self.periodic_callback.stop()

    @classmethod
    def on_update_time(cls, app):
        # Timer tick: broadcast current stats to every open connection.
        update = cls.dashboard_update(app)
        if update:
            for l in cls.listeners:
                l.write_message(update)

    @classmethod
    def dashboard_update(cls, app):
        """Build the per-worker stats payload sent to the browser."""
        state = app.events.state
        workers = OrderedDict()
        for name, worker in sorted(state.workers.items()):
            counter = state.counter[name]
            started=counter.get('task-started', 0)
            processed=counter.get('task-received', 0)
            failed=counter.get('task-failed', 0)
            succeeded=counter.get('task-succeeded', 0)
            retried=counter.get('task-retried', 0)
            # "active" is inferred from counters; retries are not subtracted.
            workers[name] = dict(
                status=worker.alive,
                active=started - succeeded - failed,
                processed=processed,
                failed=failed,
                succeeded=succeeded,
                retried=retried,
                loadavg=worker.loadavg)
        return workers
| from __future__ import absolute_import
import logging
from functools import partial
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from tornado import web
from tornado import websocket
from tornado.ioloop import PeriodicCallback
from ..views import BaseHandler
logger = logging.getLogger(__name__)
class DashboardView(BaseHandler):
    """Renders the main dashboard page listing all known workers."""

    @web.authenticated
    def get(self):
        app = self.application
        events = app.events.state
        broker = app.capp.connection().as_uri()
        # Per-worker event counters, keyed by worker hostname.
        workers = dict((k, dict(v)) for (k, v) in events.counter.items())
        for name, info in workers.items():
            worker = events.workers[name]
            info.update(self._as_dict(worker))
            info.update(status=worker.alive)
        self.render("dashboard.html", workers=workers, broker=broker)

    @classmethod
    def _as_dict(cls, worker):
        # Convert the worker event object into a plain dict of its fields.
        return dict((k, worker.__getattribute__(k)) for k in worker._fields)
class DashboardUpdateHandler(websocket.WebSocketHandler):
    """Pushes periodic worker-stats updates to dashboard pages over a websocket."""

    # Class-level (shared) state: all open websocket connections and the
    # single timer that feeds them.
    listeners = []
    periodic_callback = None
    workers = None
    page_update_interval = 2000  # milliseconds between pushed updates

    def open(self):
        app = self.application
        if not app.options.auto_refresh:
            # Auto-refresh disabled: answer once with an empty update and
            # never register this connection for periodic pushes.
            self.write_message({})
            return

        if not self.listeners:
            if self.periodic_callback is None:
                cls = DashboardUpdateHandler
                cls.periodic_callback = PeriodicCallback(
                    partial(cls.on_update_time, app),
                    self.page_update_interval)
            if not self.periodic_callback._running:
                logger.debug('Starting a timer for dashboard updates')
                self.periodic_callback.start()
        self.listeners.append(self)

    def on_message(self, message):
        # Clients never send anything meaningful; ignore inbound messages.
        pass

    def on_close(self):
        if self in self.listeners:
            self.listeners.remove(self)
        if not self.listeners and self.periodic_callback:
            # Last client disconnected: stop pushing updates.
            logger.debug('Stopping dashboard updates timer')
            self.periodic_callback.stop()

    @classmethod
    def on_update_time(cls, app):
        # Timer tick: broadcast current stats to every open connection.
        update = cls.dashboard_update(app)
        if update:
            for l in cls.listeners:
                l.write_message(update)

    @classmethod
    def dashboard_update(cls, app):
        """Build the per-worker stats payload sent to the browser."""
        state = app.events.state
        workers = OrderedDict()
        for name, worker in sorted(state.workers.items()):
            counter = state.counter[name]
            started=counter.get('task-started', 0)
            processed=counter.get('task-received', 0)
            failed=counter.get('task-failed', 0)
            succeeded=counter.get('task-succeeded', 0)
            retried=counter.get('task-retried', 0)
            # "active" is inferred from counters; retries are not subtracted.
            workers[name] = dict(
                status=worker.alive,
                active=started - succeeded - failed,
                processed=processed,
                failed=failed,
                succeeded=succeeded,
                retried=retried,
                loadavg=worker.loadavg)
        return workers
| Python | 0 |
18f63b98bf7eefe3022dc4681e81ada9969d5228 | Create guess-the-word.py | Python/guess-the-word.py | Python/guess-the-word.py | # Time: O(n^2)
# Space: O(n)
# This problem is an interactive problem new to the LeetCode platform.
#
# We are given a word list of unique words, each word is 6 letters long,
# and one word in this list is chosen as secret.
#
# You may call master.guess(word) to guess a word.
# The guessed word should have type string and must be from the original
# list with 6 lowercase letters.
#
# This function returns an integer type,
# representing the number of exact matches (value and position)
# of your guess to the secret word.
# Also, if your guess is not in the given wordlist, it will return -1 instead.
#
# For each test case, you have 10 guesses to guess the word.
# At the end of any number of calls, if you have made 10 or
# less calls to master.guess
# and at least one of these guesses was the secret, you pass the testcase.
#
# Besides the example test case below,
# there will be 5 additional test cases, each with 100 words in the word list.
# The letters of each word in those testcases were chosen independently at
# random from 'a' to 'z',
# such that every word in the given word lists is unique.
#
# Example 1:
# Input: secret = "acckzz", wordlist = ["acckzz","ccbazz","eiowzz","abcczz"]
#
# Explanation:
#
# master.guess("aaaaaa") returns -1, because "aaaaaa" is not in wordlist.
# master.guess("acckzz") returns 6, because "acckzz" is secret
# and has all 6 matches.
# master.guess("ccbazz") returns 3, because "ccbazz" has 3 matches.
# master.guess("eiowzz") returns 2, because "eiowzz" has 2 matches.
# master.guess("abcczz") returns 4, because "abcczz" has 4 matches.
#
# We made 5 calls to master.guess and one of them was the secret,
# so we pass the test case.
# Note: Any solutions that attempt to circumvent the judge will result
# in disqualification.
#
# """
# This is Master's API interface.
# You should not implement it, or speculate about its implementation
# """
# class Master(object):
# def guess(self, word):
# """
# :type word: str
# :rtype int
# """
import collections
import itertools
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
class Solution(object):
    def findSecretWord(self, wordlist, master):
        """Guess the secret word within 10 calls to master.guess.

        Strategy (minimax heuristic): always guess the candidate with the
        fewest zero-match partners, then keep only candidates consistent
        with the reported match count.

        :type wordlist: List[Str]
        :type master: Master
        :rtype: None
        """
        def overlap(w1, w2):
            # Number of positions where the two (equal-length) words agree.
            # Replaces the manual index loop, which also relied on the
            # module-level xrange compatibility shim.
            return sum(1 for a, b in zip(w1, w2) if a == b)

        candidates = wordlist
        attempts, matched = 0, 0
        while attempts < 10 and matched < 6:
            # How often each candidate has zero overlap with another
            # candidate; the best probe minimises this count.
            zero_overlap = collections.Counter(
                a for a, b in itertools.permutations(candidates, 2)
                if overlap(a, b) == 0)
            # min() keeps the first minimal word, preserving the original
            # tie-breaking behaviour.
            probe = min(candidates, key=lambda w: zero_overlap[w])
            matched = master.guess(probe)
            # Keep only words consistent with the reported match count.
            candidates = [w for w in candidates if overlap(w, probe) == matched]
            attempts += 1
| Python | 0.999864 | |
803a2702a1330be1f51428f8d7533cfee27c3f90 | Add facebook_test_user support. | facepy/test.py | facepy/test.py | import facepy
class FacebookTestUser(object):
    """Wraps a Graph API test-user record plus an authenticated client for it."""

    def __init__(self, **kwargs):
        # Promote the expected payload fields to instance attributes;
        # a missing field raises KeyError, exactly as before.
        for field_name in ('id', 'access_token', 'login_url', 'email', 'password'):
            setattr(self, field_name, kwargs[field_name])
        # Graph client authenticated as this test user.
        self.graph = facepy.GraphAPI(self.access_token)
class TestUserManager(object):
    """Creates and deletes Facebook test users through the app's Graph API."""

    def __init__(self, app_id, app_secret):
        # Authenticate as the application itself (application access token).
        access_token = facepy.get_application_access_token(app_id, app_secret)
        self.graph = facepy.GraphAPI(access_token)
        self.app_id = app_id

    def create_user(self, **parameters):
        """ creates facebook test user
        Valid parameters (with default values):
            installed = true
            name = FULL_NAME
            locale = en_US
            permissions = read_stream
        """
        url = "%s/accounts/test-users" % self.app_id
        return FacebookTestUser(**self.graph.post(url, **parameters))

    def delete_user(self, user):
        # A Graph DELETE on the user's id removes the test user.
        self.graph.delete(str(user.id))
class TestUser(object):
    """Context manager yielding a temporary Facebook test user.

    The user is created on __enter__ and deleted again on __exit__.
    """

    def __init__(self, manager, **user_params):
        self.manager = manager
        self.user_params = user_params

    def __enter__(self):
        self._user = self.manager.create_user(**self.user_params)
        return self._user

    def __exit__(self, exc_type=None, exc_value=None, traceback=None):
        # Returns None, so exceptions are not suppressed; cleanup always runs.
        self.manager.delete_user(self._user)
| Python | 0 | |
332f1fc67481432f6e8dd7cd9a35b02b12c9b6f6 | Create numpy.py | numpy.py | numpy.py | # Best dimensions in each column of a matrix x.
for i in range(x.shape[0]):
dims = x[:,i].argsort()[-5:]
vals = x[dims,i]
print dims, vals
| Python | 0.000319 | |
4d4904e69e030be3f2b0e30c957507626d58a50e | Teste nas listas | _Listas/sherlock.py | _Listas/sherlock.py | # Quem é o culpado
perguntas = []
ct = 0
pt = 0
quest = input("Você telefonou a vitima: ")
perguntas.append(quest)
quest = input("Vocẽ esteve no local do crime: ")
perguntas.append(quest)
quest = input("Você mora perto da vitima? ")
perguntas.append(quest)
quest = input("Devia para a vitima? ")
perguntas.append(quest)
quest = input("Já trabalhou com a vitima? ")
perguntas.append(quest)
while ct <= len(perguntas) - 1:
if perguntas[ct] in "sim":
pt += 1
ct += 1
if pt >= 1 and pt <= 2:
print("Você é um suspeito")
elif pt >= 3 and pt <= 4:
print("Você é cumplice!")
if pt == 5:
print("CULPADO,CULPADO, VOCÊ SERÁ PRESO!!!")
| Python | 0.000002 | |
f2d79da88bb84831e40cefc79fa3e1448a1bbaab | Add a Python script to help with various cleanups of 'properties' | fixup_trips.py | fixup_trips.py | """
// LineString
// # Make sure at least 2 points
"""
import json
def fixup(points, linestrings):
    """Entry point called from __main__: applies the selected cleanup pass."""
    return valid_linestrings(points, linestrings)
def valid_linestrings(points, linestrings):
    """GeoJSON requires at least 2 sets of coordinates for a linestring."""
    for feature in linestrings:
        pts = coordinates(feature)
        # Duplicate a lone coordinate so the linestring stays valid GeoJSON.
        if len(pts) == 1:
            pts.append(pts[-1])
    return points, linestrings
def cross_ref(points, linestrings):
    """List trips visiting a city and what cities a trip visited."""
    # Map (lon, lat) tuple -> point feature for O(1) lookup per vertex.
    coordinate_to_point = points_by_coordinates(points)
    for line in linestrings:
        trip_name = line['properties']['trip']
        visited = []
        for coordinate in map(tuple, coordinates(line)):
            pt = coordinate_to_point[coordinate]
            city_name = pt['properties']['city']
            if len(visited) == 0 or visited[-1] != city_name: # If a city is listed twice in a row it's just to be valid GeoJSON.
                visited.append(city_name)
            # Record the trip on the city, each trip at most once.
            city_trips = pt['properties'].setdefault('trips', list())
            if trip_name not in city_trips:
                city_trips.append(trip_name)
        # Ordered list of distinct cities the trip passed through.
        line['properties']['visited'] = visited
    return points, linestrings
def sort(points, linestrings):
    """Validate names, then return (points sorted by country/city,
    linestrings sorted by trip date, newest first)."""
    for feature in points:
        city = feature['properties']['city']
        if ', ' not in city:
            raise ValueError(city + ' is missing a comma')
    for feature in linestrings:
        trip = feature['properties']['trip']
        if trip.count(':') > 1:
            raise ValueError(trip + ' contains a colon')

    def city_key(feature):
        # Sort by (country, city) rather than display order "City, Country".
        return tuple(reversed(feature['properties']['city'].split(', ')))

    def trip_key(feature):
        # Sort by the text after "name: ", i.e. the trip's date portion.
        return feature['properties']['trip'].partition(': ')[-1]

    return (sorted(points, key=city_key),
            sorted(linestrings, key=trip_key, reverse=True))
def rename_trips(points, linestrings):
    """Rename trips to '{name}: {when}' and delete the 'when' value."""
    for feature in linestrings:
        props = feature['properties']
        # pop() both reads and removes 'when' in one step.
        props['trip'] = '{}: {}'.format(props['trip'], props.pop('when'))
    return points, linestrings
def rename_name(points, linestrings):
    """Rename the 'name' property to 'trip' on linestrings and 'city' on points."""
    renames = [(feature, 'city') for feature in points]
    renames += [(feature, 'trip') for feature in linestrings]
    for feature, new_key in renames:
        props = feature['properties']
        props[new_key] = props.pop('name')
    return points, linestrings
def remove_duplicated_points(points, linestrings):
    """Drop points whose name repeats, printing each duplicate's name.

    Raises ValueError if the same name appears with different coordinates.
    """
    cleaned_points = []
    seen = {}  # point name -> coordinates of its first occurrence
    for point in points:
        name = point['properties']['name']
        coordinate = coordinates(point)
        if name not in seen:
            cleaned_points.append(point)
            seen[name] = coordinate
        elif coordinate != seen[name]:
            raise ValueError(name + ' seen twice, but coordinates do not match')
        else:
            # Exact duplicate: report it and drop it.
            print(name)
    return cleaned_points, linestrings
def point_visited_by(points, linestrings):
    """Record who has visited what cities based on travellers on various trips."""
    coordinates_mapping = points_by_coordinates(points)
    # point name -> set of travellers seen on any trip passing through it
    point_visiters = {pt['properties']['name']: set() for pt in points}
    for linestring in linestrings:
        travellers = linestring['properties']['travellers']
        travellers.sort()  # Just to clean up along the way.
        travellers = set(travellers)
        for coordinate in coordinates(linestring):
            try:
                point = coordinates_mapping[tuple(coordinate)]
            except KeyError:
                # A trip vertex that matches no known point is a data error.
                raise ValueError('{!r} has unrecognized point {}'.format(linestring['properties']['name'], coordinate))
            point_visiters[point['properties']['name']].update(travellers)
    for point in points:
        name = point['properties']['name']
        point['properties']['visited by'] = sorted(point_visiters[name])
    return points, linestrings
def coordinates(thing):
    """Return the coordinate array stored under a feature's geometry."""
    geometry = thing['geometry']
    return geometry['coordinates']
def points_by_coordinates(points):
    """Create a dict with keys of coordinates and values of points."""
    # Validate first: every point must have exactly a (lon, lat) pair.
    for point in points:
        coord = coordinates(point)
        if len(coord) != 2:
            raise ValueError('Point has an improper coordinate: {}'.format(coord))
    return {tuple(coordinates(point)): point for point in points}
def separate_types(data):
    """Separate out the points from the linestrings."""
    if data['type'] != 'FeatureCollection':
        raise TypeError('expected a FeatureCollection, not ' + data['type'])
    # Bucket features by geometry type; anything else is a TypeError.
    buckets = {'Point': [], 'LineString': []}
    for feature in data['features']:
        if feature['type'] != 'Feature':
            raise TypeError('expected Feature, not ' + feature['type'])
        geometry_type = feature['geometry']['type']
        if geometry_type not in buckets:
            raise TypeError('expected Point or LineString, not ' + geometry_type)
        buckets[geometry_type].append(feature)
    return buckets['Point'], buckets['LineString']
def reconstruct(point, linestrings):
    """Reconstruct the GeoJSON FeatureCollection."""
    features = list(point)
    features.extend(linestrings)
    return {'type': 'FeatureCollection', 'features': features}
if __name__ == '__main__':
    import sys
    # Usage: fixup_trips.py <input.geojson> <output.geojson>
    in_path = sys.argv[1]
    out_path = sys.argv[2]
    with open(in_path) as file:
        data = json.load(file)
    points, linestrings = separate_types(data)
    fixed_points, fixed_linestrings = fixup(points, linestrings)
    fixed_data = reconstruct(fixed_points, fixed_linestrings)
    with open(out_path, 'w') as file:
        json.dump(fixed_data, file, indent=2)
| Python | 0 | |
235bfc6db908b6701de77df11e00e89a307d738e | Create tinymongo.py | tinymongo/tinymongo.py | tinymongo/tinymongo.py | Python | 0.000621 | ||
8ab8e8f5ca34cf524f76fe57f78d73546d2f6d09 | Add make_otc_list. | tools/make_otc_list.py | tools/make_otc_list.py | # -*- coding: utf-8 -*-
import csv
import re
import urllib2
from datetime import datetime
NOW = datetime(2013, 12, 17)
SAVEPATH = './otc_list.csv'
TWSEURL = 'http://www.gretai.org.tw/ch/stock/aftertrading/otc_quotes_no1430/stk_wn1430_download.php?d=%(year)s/%(mon)s/%(day)s&se=%%s' % {
'year': NOW.year - 1911,
'mon': NOW.month,
'day': NOW.day}
TWSECLS = {'0049': u'封閉式基金',
'0099P': u'ETF',
'019919T': u'受益證券',
'0999': u'認購權證', #(不含牛證)
'0999P': u'認售權證', #(不含熊證)
'0999C': u'牛證',
'0999B': u'熊證',
'0999GA': u'附認股權特別股',
'0999GD': u'附認股權公司債',
'0999G9': u'認股權憑證',
'01': u'水泥工業',
'02': u'食品工業',
'03': u'塑膠工業',
'04': u'紡織纖維',
'05': u'電機機械',
'06': u'電器電纜',
'07': u'化學生技醫療',
'21': u'化學工業',
'22': u'生技醫療業',
'08': u'玻璃陶瓷',
'09': u'造紙工業',
'10': u'鋼鐵工業',
'11': u'橡膠工業',
'12': u'汽車工業',
'13': u'電子工業',
'24': u'半導體業',
'25': u'電腦及週邊設備業',
'26': u'光電業',
'27': u'通信網路業',
'28': u'電子零組件業',
'29': u'電子通路業',
'30': u'資訊服務業',
'31': u'其他電子業',
'14': u'建材營造',
'15': u'航運業',
'16': u'觀光事業',
'17': u'金融保險',
'18': u'貿易百貨',
'9299': u'存託憑證',
'23': u'油電燃氣業',
'19': u'綜合',
'20': u'其他',
'CB': u'可轉換公司債',}
#'ALL_1': u'全部'}
def fetch_otc_list():
    """Download the GreTai/OTC daily-quote CSV for every industry class and
    write a deduplicated, sorted security listing to SAVEPATH (Python 2)."""
    with open(SAVEPATH, 'w') as files:
        csv_file = csv.writer(files)
        # Ticker codes are 4-6 alphanumerics; Excel-style exports wrap them
        # as ="xxxx", which the pattern tolerates and re_sub strips.
        re_pattern = re.compile(r'(=")?[\d\w]{4,6}(=)?')
        re_sub = re.compile(r'[^\w\d]')
        for no in TWSECLS:
            for i in csv.reader(urllib2.urlopen(TWSEURL % no).readlines()):
                if len(i) >= 3 and re_pattern.match(i[0]):
                    pass
                else:
                    # First cell is not a ticker (header/preamble row);
                    # drop it so the ticker lines up in column 0.
                    i.pop(0)
                if len(i) >= 2 and re_pattern.match(i[0]):
                    # Output row: [code, name, industry code, industry name];
                    # names arrive as cp950 and are re-encoded to UTF-8.
                    csv_file.writerow([re_sub.sub('', i[0]),
                                       i[1].decode('cp950').encode('utf-8'),
                                       no, TWSECLS[no].encode('utf-8')])
    # Re-read and deduplicate by ticker code (last occurrence wins).
    with open(SAVEPATH, 'r') as files:
        csv_file = csv.reader(files)
        all_items = {}
        for i in csv_file:
            all_items.update({i[0]: i})
    # Rewrite sorted by code, prefixed with an update-timestamp and header row.
    with open(SAVEPATH, 'w') as files:
        csv_file = csv.writer(files)
        csv_file.writerow(['文件更新', datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'x', 'x'])
        csv_file.writerow(['證期會代碼', '公司簡稱', '分類代碼', '分類名稱'])
        for i in sorted(all_items):
            csv_file.writerow(all_items[i])
def output_industry_code():
    """Write the industry class code -> name mapping to ./industry_code.csv."""
    with open('./industry_code.csv', 'w') as files:
        csv_file = csv.writer(files)
        for i in sorted(TWSECLS):
            csv_file.writerow([i, TWSECLS[i].encode('utf-8')])
if __name__ == '__main__':
fetch_otc_list()
#output_industry_code()
| Python | 0 | |
b351e5106684b0af8b862bb6ba5375671c1f431d | include getcomments.py | getcomments.py | getcomments.py | import urllib2
import json
import datetime
import time
import pytz
import pandas as pd
from pandas import DataFrame
# Timestamp cursor: each request asks for comments created strictly before it.
ts = str(int(time.time()))
df = DataFrame()
hitsPerPage = 1000
requested_keys = ["author", "comment_text", "created_at_i", "objectID", "points"]
i = 0
# Page backwards through all HN comments via the Algolia search API.
while True:
	try:
		url = 'https://hn.algolia.com/api/v1/search_by_date?tags=comment&hitsPerPage=%s&numericFilters=created_at_i<%s' % (hitsPerPage, ts)
		req = urllib2.Request(url)
		response = urllib2.urlopen(req)
		data = json.loads(response.read())
		# Fewer hits than a full page means we reached the oldest comments.
		last = data["nbHits"] < hitsPerPage
		data = DataFrame(data["hits"])[requested_keys]
		df = df.append(data,ignore_index=True)
		# Move the cursor to the oldest comment fetched so far.
		ts = data.created_at_i.min()
		print i
		if (last):
			break
		# Stay under the API rate limit.
		time.sleep(3.6)
		i += 1
	except Exception, e:
		# NOTE(review): errors are only printed; on a persistent failure
		# this loop retries the same request forever -- confirm acceptable.
		print e
# Strip smart quotes/dashes/ellipses and commas so the CSV stays clean.
df["comment_text"] = df["comment_text"].map(lambda x: x.translate(dict.fromkeys([0x201c, 0x201d, 0x2011, 0x2013, 0x2014, 0x2018, 0x2019, 0x2026, 0x2032])).encode('utf-8').replace(',',''))
# Convert epoch seconds to a human-readable US/Eastern timestamp.
df["created_at"] = df["created_at_i"].map(lambda x: datetime.datetime.fromtimestamp(int(x), tz=pytz.timezone('America/New_York')).strftime('%Y-%m-%d %H:%M:%S'))
ordered_df = df[["comment_text","points","author","created_at","objectID"]]
ordered_df.to_csv("hacker_news_comments.csv",encoding='utf-8', index=False)
74ecac2dbca41d737f62325955fd4d0dc393ac16 | Rename flots.py to plots.py | plots.py | plots.py | import json
import re
class plot(object):
    """Converts whitespace-separated data files into a Javascript/Flot-style
    "[ [x, y], ... ]" array literal."""

    def get_data(self, fn, col1, col2):
        """Read file *fn* and return columns *col1* and *col2* (0-based)
        as a Flot-compatible string of [x, y] pairs.

        Lines containing '#' are treated as comments; lines that look like
        a header (two or more consecutive alphabetic words) and blank lines
        are skipped as well.
        """
        pairs = []
        # with-statement closes the file (the original leaked the handle);
        # mode 'rU' was removed in Python 3.11 -- plain 'r' already gives
        # universal newlines on Python 3.
        with open(fn, 'r') as fh:
            for line in fh:
                # don't parse comments
                if re.search(r'#', line):
                    continue
                fields = line.split()
                if not fields:
                    # blank line: skip (the original raised IndexError here)
                    continue
                if not re.search(r'[A-Za-z]{2,}\s+[A-Za-z]{2,}', line):
                    pairs.append('[ ' + fields[col1] + ', ' + fields[col2] + '], ')
        # join instead of repeated += (quadratic string growth), and no
        # shadowing of the builtin `str`; the output format, including the
        # trailing ', ', is unchanged.
        return "[ %s ]" % ''.join(pairs)
| Python | 0.999997 | |
8136a1a584fe23b57f416e663f479142aafb7c89 | implement expect column values valid ISBN13 code (#4668) | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_isbn13.py | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_isbn13.py | from typing import Optional
from isbnlib import is_isbn13
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.execution_engine import PandasExecutionEngine
from great_expectations.expectations.expectation import ColumnMapExpectation
from great_expectations.expectations.metrics import (
ColumnMapMetricProvider,
column_condition_partial,
)
# This method compares a string to the valid ISBN13 code
def is_valid_isbn13(code: str) -> bool:
    """Return True if *code* is a well-formed ISBN-13 (delegates to isbnlib)."""
    return is_isbn13(code)
# This class defines a Metric to support your Expectation.
# For most ColumnMapExpectations, the main business logic for calculation will live in this class.
class ColumnValuesToBeValidIsbn13(ColumnMapMetricProvider):
    """Map metric: marks each column value as a valid/invalid ISBN-13."""

    # This is the id string that will be used to reference your metric.
    condition_metric_name = "column_values.valid_isbn13"

    # This method implements the core logic for the PandasExecutionEngine
    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        # Element-wise boolean Series: True where the value parses as ISBN-13.
        return column.apply(lambda x: is_valid_isbn13(x))

    # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
    # @column_condition_partial(engine=SqlAlchemyExecutionEngine)
    # def _sqlalchemy(cls, column, _dialect, **kwargs):
    #     raise NotImplementedError

    # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
    # @column_condition_partial(engine=SparkDFExecutionEngine)
    # def _spark(cls, column, **kwargs):
    #     raise NotImplementedError
# This class defines the Expectation itself
class ExpectColumnValuesToBeValidIsbn13(ColumnMapExpectation):
"""This Expectation validates data as conforming to the valid ISBN13 format."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"well_formed_isbn13": [
"978-3-16-148410-0",
"978-1-86197-876-9",
"9783161484100",
"9783161 484100",
],
"malformed_isbn13": [
"",
"0-521-22151-X",
"978-3-16-148410-Z",
"This is not a valid ISBN13",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "well_formed_isbn13"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "malformed_isbn13"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_isbn13"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration]
) -> None:
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
"""
super().validate_configuration(configuration)
if configuration is None:
configuration = self.configuration
# # Check other things in configuration.kwargs and raise Exceptions if needed
# try:
# assert (
# ...
# ), "message"
# assert (
# ...
# ), "message"
# except AssertionError as e:
# raise InvalidExpectationConfigurationError(str(e))
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": ["experimental", "hackathon", "typed-entities"],
"contributors": [
"@voidforall",
],
"requirements": ["isbnlib"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidIsbn13().print_diagnostic_checklist()
| Python | 0.000004 | |
f13da24b8fb4cf6d8fff91e88afb1507528c2c2a | Add `.ycm_extra_conf.py` for https://github.com/Valloric/ycmd | android/.ycm_extra_conf.py | android/.ycm_extra_conf.py | import os
basePath = os.path.dirname(os.path.realpath(__file__))
def FlagsForFile(filename, **kwargs):
return {
'flags': [
'-std=c++11',
'-DFOLLY_NO_CONFIG=1',
'-DFOLLY_USE_LIBCPP',
'-I' + basePath + '/ReactAndroid/../ReactCommon/cxxreact/..',
'-I' + basePath + '/ReactAndroid/../ReactCommon/jschelpers/..',
'-I' + basePath + '/ReactAndroid/src/main/jni/first-party/fb/include',
'-I' + basePath + '/ReactAndroid/build/third-party-ndk/folly',
'-I' + basePath + '/ReactAndroid/build/third-party-ndk/jsc',
'-I' + basePath + '/ReactAndroid/build/third-party-ndk/glog/..',
'-I' + basePath + '/ReactAndroid/build/third-party-ndk/glog/glog-0.3.3/src/',
'-I' + basePath + '/ReactAndroid/build/third-party-ndk/boost/boost_1_63_0',
'-I' + basePath + '/ReactAndroid/build/third-party-ndk/double-conversion',
'-I' + basePath + '/ReactAndroid/../ReactCommon/cxxreact',
],
}
| Python | 0.000002 | |
654f21b39a68aa461b6457199403e7d89781cc79 | add migration | students/migrations/0010_auto_20161010_1345.py | students/migrations/0010_auto_20161010_1345.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-10 11:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('students', '0009_auto_20161005_1820'),
]
operations = [
migrations.AlterField(
model_name='student',
name='want_exam',
field=models.BooleanField(default=False, help_text='Die Klausur ist eine Simulation einer Uniklausur, um die Unteschiede zwischen einer Schul- und einer Universitätsklausr zu zeigen. Abgefragt werden hauptsächlich Informationen aus der Ophase. Es ist nicht verpflichtend die Klausur zu bestehen.', verbose_name='Klausur mitschreiben?'),
),
]
| Python | 0.000001 | |
b90b43ceefb78e1a94ba898ed23443567786cf25 | Add /monitoring/status handler | app/monitoring/handlers.py | app/monitoring/handlers.py | from __future__ import unicode_literals, absolute_import, division
import json
from structlog import get_logger
class StatusHandler:
def __init__(self):
self.logger = get_logger()
def on_get(self, req, res):
"""
@type req: falcon.request.Request
@type res: falcon.response.Response
"""
rv = dict(
status='OK',
settings={}, # TODO pass some/all settings here
content_type=req.content_type,
url=req.url,
remote_addr='', # TODO Use falcon or wgsi API to get remote addr
headers=req.headers,
cookies=req.cookies,
context=req.context)
res.body = json.dumps(dict(result=rv))
class AppRoutesHandler:
def __init__(self):
self.logger = get_logger()
def on_get(self, req, res):
# TODO return result: routes?: [handler, url, methods]
pass | Python | 0.000002 | |
95c71727bf340f55e17a15d475aba54438eb0b8e | add solution for Partition List | src/partitionList.py | src/partitionList.py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param head, a ListNode
# @param x, an integer
# @return a ListNode
def partition(self, head, x):
head1, head2 = ListNode(0), ListNode(0)
last1, last2 = head1, head2
while head:
if head.val < x:
last1.next = head
head = head.next
last1 = last1.next
last1.next = None
else:
last2.next = head
head = head.next
last2 = last2.next
last2.next = None
last1.next = head2.next
return head1.next
| Python | 0 | |
5bff284204a1397dbc63e83363d865213a35efe6 | add a new test file test_begin_end.py | tests/unit/selection/modules/test_begin_end.py | tests/unit/selection/modules/test_begin_end.py | # Tai Sakuma <tai.sakuma@gmail.com>
import pytest
try:
import unittest.mock as mock
except ImportError:
import mock
from alphatwirl.selection.modules import Not, NotwCount
##__________________________________________________________________||
not_classes = [Not, NotwCount]
not_classe_ids = [c.__name__ for c in not_classes]
@pytest.mark.parametrize('NotClass', not_classes, ids=not_classe_ids)
def test_not_begin(NotClass):
selection = mock.Mock()
obj = NotClass(selection)
event = mock.Mock()
obj.begin(event)
assert [mock.call(event)] == selection.begin.call_args_list
@pytest.mark.parametrize('NotClass', not_classes, ids=not_classe_ids)
def test_not_begin_absent(NotClass):
selection = mock.Mock()
del selection.begin
obj = NotClass(selection)
event = mock.Mock()
obj.begin(event)
@pytest.mark.parametrize('NotClass', not_classes, ids=not_classe_ids)
def test_not_end(NotClass):
selection = mock.Mock()
obj = NotClass(selection)
obj.end()
assert [mock.call()] == selection.end.call_args_list
@pytest.mark.parametrize('NotClass', not_classes, ids=not_classe_ids)
def test_not_end_absent(NotClass):
selection = mock.Mock()
del selection.end
obj = NotClass(selection)
obj.end()
##__________________________________________________________________||
| Python | 0.000016 | |
7655fe94decf2fc9c3a07104f8fa76cf39442ddb | implement rectified drive | rectifieddrive.py | rectifieddrive.py | import navx
import subsystems
class RectifiedDrive:
"""
This class implemented the rectifiedDrive function, which sets the motor outputs
given a desired power and angular velocity using the NavX and a PID controller.
"""
def __init__(self, kp, ki, kd, period=0.05):
self.kp = kp
self.ki = ki
self.kd = kd
self.period = period
self.prev_error = 0.0
def rectifiedDrive(self, power, angular_vel):
error = angular_vel - navx.ahrs.getRate()
output = self.calc_pid(error)
left_output = power - output
if abs(left_output) > 1.0: # normalize if magnitude greater than 1
left_output /= abs(left_output)
right_output = power + output
if abs(right_output) > 1.0: # normalize if magnitude greater than 1
right_output /= abs(right_output)
subsystems.motors.robot_drive.setLeftRightMotorOutputs(left_output, right_output)
def calc_pid(self, error):
e_deriv = (error - self.prev_error) / self.period
e_int = (error + self.prev_error) / 2 * self.period
self.prev_error = error
return self.kp * error + self.kd * e_deriv + self.ki * e_int
| Python | 0.000001 | |
dd1d0893823561efec203cdfbb927b8edac7a72a | Add a coupld tests to create exception classes from error code names | tests/unit/beanstalk/test_exception.py | tests/unit/beanstalk/test_exception.py | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.beanstalk.exception import simple
from boto.compat import unittest
class FakeError(object):
def __init__(self, code, status, reason, body):
self.code = code
self.status = status
self.reason = reason
self.body = body
class TestExceptions(unittest.TestCase):
def test_exception_class_names(self):
# Create exception from class name
error = FakeError('TooManyApplications', 400, 'foo', 'bar')
exception = simple(error)
self.assertEqual(exception.__class__.__name__, 'TooManyApplications')
# Create exception from class name + 'Exception' as seen from the
# live service today
error = FakeError('TooManyApplicationsException', 400, 'foo', 'bar')
exception = simple(error)
self.assertEqual(exception.__class__.__name__, 'TooManyApplications')
# Make sure message body is present
self.assertEqual(exception.message, 'bar')
| Python | 0 | |
de38b3e7b3d8458920b913316b06bb10b886df9f | Implement ArgumentSelector for overload disambiguation | thinglang/symbols/argument_selector.py | thinglang/symbols/argument_selector.py | import collections
import copy
from thinglang.compiler.errors import NoMatchingOverload
from thinglang.lexer.values.identifier import Identifier
SymbolOption = collections.namedtuple('SymbolOption', ['symbol', 'remaining_arguments'])
class ArgumentSelector(object):
"""
Aids in disambiguating overloaded method symbols contained in MergedSymbol objects.
Managed state regarding arguments already observed, and filters out overloads and all arguments are processed.
If a matching overload exists, it is returned - otherwise, an exception is thrown.
"""
def __init__(self, symbols):
self.symbols = symbols
self.collected_arguments = []
self.options = [SymbolOption(symbol, copy.deepcopy(symbol.arguments)) for symbol in symbols]
def constraint(self, resolved):
"""
Filters out option groups that do not expect to see the resolved type as their next argument
"""
self.collected_arguments.append(resolved)
new_options = []
for option in self.options:
if option.remaining_arguments and self.type_match(resolved, option.remaining_arguments.pop(0)):
new_options.append(option)
self.options = new_options
if not self.options:
raise NoMatchingOverload(self.symbols, self.collected_arguments)
def disambiguate(self):
"""
Selects the best matching overload
"""
option_group = [option for option in self.options if not option.remaining_arguments]
if len(option_group) != 1:
raise NoMatchingOverload(self.symbols, self.collected_arguments)
return option_group[0].symbol
@staticmethod
def type_match(resolved, expected_type):
"""
Checks if two types match (TODO: take inheritance chains into account)
"""
if expected_type == Identifier('object'):
return True
return resolved.type == expected_type
| Python | 0 | |
609bd2a0712ee488dd76bb3619aef70343adb304 | add test__doctests.py | greentest/test__doctests.py | greentest/test__doctests.py | import os
import re
import doctest
import unittest
import eventlet
base = os.path.dirname(eventlet.__file__)
modules = set()
for path, dirs, files in os.walk(base):
package = 'eventlet' + path.replace(base, '').replace('/', '.')
modules.add((package, os.path.join(path, '__init__.py')))
for f in files:
module = None
if f.endswith('.py'):
module = f[:-3]
if module:
modules.add((package + '.' + module, os.path.join(path, f)))
suite = unittest.TestSuite()
tests_count = 0
modules_count = 0
for m, path in modules:
if re.search('^\s*>>> ', open(path).read(), re.M):
s = doctest.DocTestSuite(m)
print '%s (from %s): %s tests' % (m, path, len(s._tests))
suite.addTest(s)
modules_count += 1
tests_count += len(s._tests)
print 'Total: %s tests in %s modules' % (tests_count, modules_count)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
| Python | 0.000009 | |
2885adb781ba5179e0dcc7645644bcb182e7bfe7 | Create hacks/eKoomerce/__init__.py | hacks/eKoomerce/__init__.py | hacks/eKoomerce/__init__.py | import bs4
| Python | 0.000004 | |
d9cce6f06503f1527d56d40d3037f46344c517d4 | Add PerUserData utility. | src/librement/utils/user_data.py | src/librement/utils/user_data.py | from django.db import models
from django.db.models.signals import post_save, pre_delete
from django.contrib.auth.models import User
def PerUserData(related_name=None):
"""
Class factory that returns an abstract model attached to a ``User`` object
that creates and destroys concrete child instances where required.
Example usage::
class ToppingPreferences(PerUserData('toppings')):
pepperoni = models.BooleanField(default=True)
anchovies = models.BooleanField(default=False)
>>> u = User.objects.create_user('test', 'example@example.com')
>>> u.toppings # ToppingPreferences created automatically
<ToppingPreferences: user=test>
>>> u.toppings.anchovies
False
"""
class UserDataBase(models.base.ModelBase):
def __new__(cls, name, bases, attrs):
model = super(UserDataBase, cls).__new__(cls, name, bases, attrs)
if model._meta.abstract:
return model
def on_create(sender, instance, created, *args, **kwargs):
if created:
model.objects.create(user=instance)
def on_delete(sender, instance, *args, **kwargs):
model.objects.filter(pk=instance).delete()
post_save.connect(on_create, sender=User, weak=False)
pre_delete.connect(on_delete, sender=User, weak=False)
return model
class UserData(models.Model):
user = models.OneToOneField(
'auth.User',
primary_key=True,
related_name=related_name,
)
__metaclass__ = UserDataBase
class Meta:
abstract = True
def __unicode__(self):
return 'user=%s' % self.user.username
return UserData
| Python | 0 | |
e58d30a64ae2ce2962dbaaf119e5e4c4ee33e4e7 | Create pub.py | cloud/mqtt_server/pub.py | cloud/mqtt_server/pub.py | #!/usr/bin/env python
import asyncio
from hbmqtt.client import MQTTClient
from hbmqtt.mqtt.constants import QOS_0, QOS_1, QOS_2
async def publish_test():
try:
C = MQTTClient()
ret = await C.connect('mqtt://192.168.0.4:1883/')
message = await C.publish('server', 'MESSAGE-QOS_0'.encode(), qos=QOS_0)
message = await C.publish('server', 'MESSAGE-QOS_1'.encode(), qos=QOS_1)
message = await C.publish('gateway', 'MESSAGE-QOS_2'.encode(), qos=QOS_2)
print("messages published")
await C.disconnect()
except ConnectException as ce:
print("Connection failed: %s" % ce)
asyncio.get_event_loop().stop()
if __name__ == '__main__':
asyncio.get_event_loop().run_until_complete(publish_test())
| Python | 0.000001 | |
8551c56a9fea5d21ea9dc6761eff8e93d451f6b3 | Add pip setup.py | setup.py | setup.py | """Setup script for gin-config.
See:
https://github.com/google/gin-config
"""
import codecs
from os import path
from setuptools import find_packages
from setuptools import setup
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with codecs.open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='gin-config',
version='0.1',
include_package_data=True,
packages=find_packages(exclude=['contrib', 'docs', 'tests']), # Required
extras_require={ # Optional
'tf': ['tensorflow'],
'test': ['coverage'],
},
description='Gin-config: a lightweight configuration library for Python',
long_description=long_description,
url='https://github.com/google/gin-config', # Optional
author='The Gin-Config Team', # Optional
classifiers=[ # Optional
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: ML Tools',
# Pick your license as you wish
'License :: OSI Approved :: Apache License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
project_urls={ # Optional
'Bug Reports': 'https://github.com/google/gin-config/issues',
'Source': 'https://github.com/google/gin-config',
},
)
| Python | 0.000002 | |
379c5e73d767753142a62ba57f5928acf754b508 | Add simple setup.py for ease of system-installing | setup.py | setup.py | from setuptools import setup, find_packages
setup(
name="crow2",
version="0.1.dev0",
packages=find_packages(),
scripts=["bin/crow2"],
install_requires=["twisted", "zope.interface"]
)
| Python | 0 | |
c2d14b8c3beaee3cff498fc02106751fce8e8e1c | Add setup.py | setup.py | setup.py | import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import pb2df
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
setup(
name='pb2df',
version=pb2df.__version__,
author=pb2df.__author__,
author_email='',
description='Convert ProtoBuf objects to Spark DataFrame.',
long_description=__doc__,
url='https://github.com/jason2506/pb2df',
license=pb2df.__license__,
packages=find_packages(),
zip_safe=False,
platforms='any',
install_requires=['protobuf'],
tests_require=['pytest'],
cmdclass={'test': PyTest},
)
| Python | 0.000001 | |
a7bf54f417576bfc355e1851258e711dadd73ad3 | Add python trove classifiers | setup.py | setup.py | from setuptools import setup, find_packages
from taggit import VERSION
f = open('README.rst')
readme = f.read()
f.close()
setup(
name='django-taggit',
version=".".join(map(str, VERSION)),
description='django-taggit is a reusable Django application for simple tagging.',
long_description=readme,
author='Alex Gaynor',
author_email='alex.gaynor@gmail.com',
url='http://github.com/alex/django-taggit/tree/master',
packages=find_packages(),
zip_safe=False,
package_data = {
'taggit': [
'locale/*/LC_MESSAGES/*',
],
},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Framework :: Django',
],
test_suite='runtests.runtests',
)
| from setuptools import setup, find_packages
from taggit import VERSION
f = open('README.rst')
readme = f.read()
f.close()
setup(
name='django-taggit',
version=".".join(map(str, VERSION)),
description='django-taggit is a reusable Django application for simple tagging.',
long_description=readme,
author='Alex Gaynor',
author_email='alex.gaynor@gmail.com',
url='http://github.com/alex/django-taggit/tree/master',
packages=find_packages(),
zip_safe=False,
package_data = {
'taggit': [
'locale/*/LC_MESSAGES/*',
],
},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
test_suite='runtests.runtests',
)
| Python | 0.000327 |
2b1146a1741262a9c5acec9a56dfa3e45202f1df | set version 1.0 | setup.py | setup.py | from setuptools import setup, find_packages
setup(
name='aleph',
version='1.0',
description="Document sifting web frontend",
long_description="",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
],
keywords='',
author='Friedrich Lindenberg',
author_email='friedrich@pudo.org',
url='http://grano.cc',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'test']),
namespace_packages=[],
include_package_data=True,
zip_safe=False,
install_requires=[],
test_suite='nose.collector',
entry_points={
'aleph.ingestors': [
'skip = aleph.ingest.dummy:SkipIngestor',
'pdf = aleph.ingest.text:PDFIngestor',
'doc = aleph.ingest.document:DocumentIngestor',
'ppt = aleph.ingest.document:PresentationIngestor',
'html = aleph.ingest.html:HtmlIngestor',
'img = aleph.ingest.image:ImageIngestor',
'email = aleph.ingest.email:EmailFileIngestor',
'pst = aleph.ingest.email:OutlookIngestor',
'messy = aleph.ingest.tabular:MessyTablesIngestor',
'dbf = aleph.ingest.dbf:DBFIngestor',
'rar = aleph.ingest.packages:RARIngestor',
'zip = aleph.ingest.packages:ZipIngestor',
'tar = aleph.ingest.packages:TarIngestor',
'gz = aleph.ingest.packages:GzipIngestor',
'bz2 = aleph.ingest.packages:BZ2Ingestor'
],
'aleph.analyzers': [
'lang = aleph.analyze.language:LanguageAnalyzer',
'emails = aleph.analyze.regex:EMailAnalyzer',
'urls = aleph.analyze.regex:URLAnalyzer',
'regex = aleph.analyze.regex_entity:RegexEntityAnalyzer',
'polyglot = aleph.analyze.polyglot_entity:PolyglotEntityAnalyzer'
],
'aleph.crawlers': [
# 'stub = aleph.crawlers.stub:StubCrawler',
'opennames = aleph.crawlers.opennames:OpenNamesCrawler',
'idrequests = aleph.crawlers.idashboard:IDRequests',
'idfiles = aleph.crawlers.idashboard:IDFiles',
'blacklight = aleph.crawlers.blacklight:BlacklightCrawler',
'sourceafrica = aleph.crawlers.documentcloud:SourceAfricaCrawler'
],
'aleph.init': [
],
'console_scripts': [
'aleph = aleph.manage:main',
]
},
tests_require=[
'coverage', 'nose'
]
)
| from setuptools import setup, find_packages
setup(
name='aleph',
version='0.2',
description="Document sifting web frontend",
long_description="",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
],
keywords='',
author='Friedrich Lindenberg',
author_email='friedrich@pudo.org',
url='http://grano.cc',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'test']),
namespace_packages=[],
include_package_data=True,
zip_safe=False,
install_requires=[],
test_suite='nose.collector',
entry_points={
'aleph.ingestors': [
'skip = aleph.ingest.dummy:SkipIngestor',
'pdf = aleph.ingest.text:PDFIngestor',
'doc = aleph.ingest.document:DocumentIngestor',
'ppt = aleph.ingest.document:PresentationIngestor',
'html = aleph.ingest.html:HtmlIngestor',
'img = aleph.ingest.image:ImageIngestor',
'email = aleph.ingest.email:EmailFileIngestor',
'pst = aleph.ingest.email:OutlookIngestor',
'messy = aleph.ingest.tabular:MessyTablesIngestor',
'dbf = aleph.ingest.dbf:DBFIngestor',
'rar = aleph.ingest.packages:RARIngestor',
'zip = aleph.ingest.packages:ZipIngestor',
'tar = aleph.ingest.packages:TarIngestor',
'gz = aleph.ingest.packages:GzipIngestor',
'bz2 = aleph.ingest.packages:BZ2Ingestor'
],
'aleph.analyzers': [
'lang = aleph.analyze.language:LanguageAnalyzer',
'emails = aleph.analyze.regex:EMailAnalyzer',
'urls = aleph.analyze.regex:URLAnalyzer',
'regex = aleph.analyze.regex_entity:RegexEntityAnalyzer',
'polyglot = aleph.analyze.polyglot_entity:PolyglotEntityAnalyzer'
],
'aleph.crawlers': [
# 'stub = aleph.crawlers.stub:StubCrawler',
'opennames = aleph.crawlers.opennames:OpenNamesCrawler',
'idrequests = aleph.crawlers.idashboard:IDRequests',
'idfiles = aleph.crawlers.idashboard:IDFiles',
'blacklight = aleph.crawlers.blacklight:BlacklightCrawler',
'sourceafrica = aleph.crawlers.documentcloud:SourceAfricaCrawler'
],
'aleph.init': [
],
'console_scripts': [
'aleph = aleph.manage:main',
]
},
tests_require=[
'coverage', 'nose'
]
)
| Python | 0.000002 |
733289636661f3c0034a66eaa8763058ef43796d | update setup.py | setup.py | setup.py | #!/usr/bin/env python
from distutils.core import setup
from codecs import open
setup(name='Open-Tamil',
version='0.1-dev',
description='Tamil language text processing tools',
author='Muthiah Annamalai',
author_email='ezhillang@gmail.com',
url='https://github.com/arcturusannamalai/open-tamil',
packages=['tamil'],
license='GPLv3',
platforms='PC,Linux,Mac',
classifiers='Natural Language :: Tamil',
long_description=open('README.md','r','UTF-8').read(),
download_url='https://github.com/arcturusannamalai/open-tamil/archive/latest.zip',#pip
)
| #!/usr/bin/env python
from distutils.core import setup
from codecs import open
setup(name='Open Tamil',
version='0.1-dev',
description='Tamil language text processing tools',
author='Muthiah Annamalai',
author_email='ezhillang@gmail.com',
url='https://github.com/arcturusannamalai/open-tamil',
packages=['tamil'],
license='GPLv3',
platforms='PC,Linux,Mac',
classifiers='Natural Language :: Tamil',
long_description=open('README.md','r','UTF-8').read(),
download_url='https://github.com/arcturusannamalai/open-tamil/archive/latest.zip',#pip
)
| Python | 0.000001 |
231d050fe611adb201cd7ae55f52212d0b84caa1 | Check for pandoc. add pyandoc to setup_requires | setup.py | setup.py | from setuptools import setup, find_packages, Command
from setuptools.command.build_py import build_py as _build_py
from distutils.spawn import find_executable
import os.path
import imp
import pandoc.core
pandoc.core.PANDOC_PATH = find_executable('pandoc')
assert pandoc.core.PANDOC_PATH is not None, \
"'pandoc' is a required system binary to generate documentation.\n" \
"Please install it somewhere in your PATH to run this command."
ROOT = os.path.abspath(os.path.dirname(__file__))
def read(fname):
return open(os.path.join(ROOT, fname)).read()
def read_md_as_rest(fname):
doc = pandoc.Document()
doc.markdown = read(fname)
return doc.rst
def version():
file, pathname, description = imp.find_module('sparts', [ROOT])
return imp.load_module('sparts', file, pathname, description).__version__
class gen_thrift(Command):
user_options=[]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
self.mkpath(os.path.join(ROOT, 'sparts', 'gen'))
for f in os.listdir(os.path.join(ROOT, 'thrift')):
self.spawn(['thrift', '-out', os.path.join(ROOT, 'sparts', 'gen'),
'-v', '--gen', 'py:new_style',
os.path.join(ROOT, 'thrift', f)])
class build_py(_build_py):
def run(self):
self.run_command('gen_thrift')
_build_py.run(self)
setup(
name="sparts",
version=version(),
packages=find_packages(),
description="Build services in python with as little code as possible",
long_description=read_md_as_rest("README.md"),
install_requires=[],
setup_requires=['pyandoc'],
author='Peter Ruibal',
author_email='ruibalp@gmail.com',
license='ISC',
keywords='service boostrap daemon thrift tornado',
url='http://github.com/fmoo/sparts',
test_suite="tests",
cmdclass={'gen_thrift': gen_thrift,
'build_py': build_py},
)
| from setuptools import setup, find_packages, Command
from setuptools.command.build_py import build_py as _build_py
from distutils.spawn import find_executable
import os.path
import imp
import pandoc.core
pandoc.core.PANDOC_PATH = find_executable('pandoc')
ROOT = os.path.abspath(os.path.dirname(__file__))
def read(fname):
return open(os.path.join(ROOT, fname)).read()
def read_md_as_rest(fname):
doc = pandoc.Document()
doc.markdown = read(fname)
return doc.rst
def version():
file, pathname, description = imp.find_module('sparts', [ROOT])
return imp.load_module('sparts', file, pathname, description).__version__
class gen_thrift(Command):
user_options=[]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
self.mkpath(os.path.join(ROOT, 'sparts', 'gen'))
for f in os.listdir(os.path.join(ROOT, 'thrift')):
self.spawn(['thrift', '-out', os.path.join(ROOT, 'sparts', 'gen'),
'-v', '--gen', 'py:new_style',
os.path.join(ROOT, 'thrift', f)])
class build_py(_build_py):
def run(self):
self.run_command('gen_thrift')
_build_py.run(self)
setup(
name="sparts",
version=version(),
packages=find_packages(),
description="Build services in python with as little code as possible",
long_description=read_md_as_rest("README.md"),
install_requires=[],
author='Peter Ruibal',
author_email='ruibalp@gmail.com',
license='ISC',
keywords='service boostrap daemon thrift tornado',
url='http://github.com/fmoo/sparts',
test_suite="tests",
cmdclass={'gen_thrift': gen_thrift,
'build_py': build_py},
)
| Python | 0 |
5a16ada916d719a0499d75bc5c82aaa5228dec15 | Split off IP/hostname munging to addr_util | libnamebench/addr_util.py | libnamebench/addr_util.py | # Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions related to IP Addresses & Hostnames."""
# TODO(tstromberg): Investigate replacement with ipaddr library
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import math
import re
import util
import os.path
import socket
import sys
import traceback
import zlib
# TODO(tstromberg): Find a way to combine the following two regexps.
# Used to decide whether or not to benchmark a name
INTERNAL_RE = re.compile('^0|\.pro[md]z*\.|\.corp|\.bor|\.hot$|internal|dmz|\._[ut][dc]p\.|intra|\.\w$|\.\w{5,}$', re.IGNORECASE)
# Used to decide if a hostname should be censored later.
PRIVATE_RE = re.compile('^\w+dc\.|^\w+ds\.|^\w+sv\.|^\w+nt\.|\.corp|internal|intranet|\.local', re.IGNORECASE)
# ^.*[\w-]+\.[\w-]+\.[\w-]+\.[a-zA-Z]+\.$|^[\w-]+\.[\w-]{3,}\.[a-zA-Z]+\.$
FQDN_RE = re.compile('^.*\..*\..*\..*\.$|^.*\.[\w-]*\.\w{3,4}\.$|^[\w-]+\.[\w-]{4,}\.\w+\.')
IP_RE = re.compile('^[0-9.]+$')
def ExtractIPsFromString(ip_string):
  """Return a list of the IP addresses embedded in a string.

  IPv6 matches are listed before IPv4 matches, each in order of appearance.
  """
  # Loose IPv6 pattern; see Regexp-IPv6 in CPAN for a stricter alternative.
  found = re.findall('[\dabcdef:]+:[\dabcdef:]+', ip_string, re.IGNORECASE)
  found.extend(re.findall('\d+\.\d+\.\d+\.+\d+', ip_string))
  return found
def ExtractIPTuplesFromString(ip_string):
  """Return (ip, ip) tuples for every IP address found in a string."""
  return [(ip, ip) for ip in ExtractIPsFromString(ip_string)]
def IsPrivateHostname(hostname):
  """Return True if the hostname looks like an internal/private name."""
  return PRIVATE_RE.search(hostname) is not None
def IsLoopbackIP(ip):
  """Check whether an IP address string is a loopback address.

  Args:
    ip: IPv4 or IPv6 address as a string.

  Returns:
    True for addresses in 127.0.0.0/8 or the IPv6 loopback ::1.
  """
  # Note: the previous docstring was copy-pasted from IsPrivateIP and
  # wrongly claimed a bit-count return; this is a plain boolean check.
  return ip.startswith('127.') or ip == '::1'
def IsPrivateIP(ip):
  """Determine whether an IPv4 address is in an RFC 1918 private range.

  Args:
    ip: IPv4 address as a dotted-quad string.

  Returns:
    The number of leading octets that are safe to keep visible when the
    address is masked (1 for 10/8 and 172.16/12, 2 for 192.168/16), or
    None if the address is not private.  The old docstring wrongly called
    this a "Boolean check".
  """
  if re.match('^10\.', ip):
    return 1
  elif re.match('^192\.168', ip):
    return 2
  elif re.match('^172\.(1[6-9]|2[0-9]|3[0-1])\.', ip):
    return 1
  else:
    return None
def MaskIPBits(ip, use_bits):
  """Mask an IP address, keeping a short checksum of the hidden octets.

  Args:
    ip: IPv4 address as a dotted-quad string.
    use_bits: number of leading octets to keep visible.

  Returns:
    The visible octets followed by '.x-NNNN', where NNNN is derived from a
    CRC32 of the hidden octets so distinct hosts stay distinguishable.
  """
  ip_parts = ip.split('.')
  # encode() keeps this working on Python 3, where zlib.crc32 requires
  # bytes; for these ASCII digit strings it changes nothing on Python 2.
  checksum = zlib.crc32(''.join(ip_parts[use_bits:]).encode('utf-8'))
  masked_ip = '.'.join(ip_parts[0:use_bits])
  return masked_ip + ".x-" + str(checksum)[-4:]
def MaskPrivateHost(ip, hostname, name):
"""Mask unnamed private IP's.

Returns:
A (ip, hostname, name) tuple, censored when the host looks internal
and has no real (non SYS-x.x.x.x) name.
"""
# If we have a name not listed as SYS-x.x.x.x, then we're clear.
if name and ip not in name:
return (ip, hostname, name)
# Number of leading octets safe to keep visible (None for public IPs).
use_bits = IsPrivateIP(ip)
if use_bits:
ip = MaskIPBits(ip, use_bits)
hostname = 'internal.ip'
elif IsPrivateHostname(hostname):
# Private-looking hostname on a public IP: keep two octets.
ip = MaskIPBits(ip, 2)
hostname = 'internal.name'
# NOTE(review): indentation was lost in this dump; the SYS- rename below
# presumably runs after either masking branch — confirm the nesting
# against the original libnamebench source.
if 'SYS-' in name:
name = "SYS-%s" % ip
else:
name = ''
return (ip, hostname, name)
| Python | 0.000005 | |
e458733b0aa1cbb142fc6818ae1f7cf84bef6518 | Add setup | setup.py | setup.py | import setuptools
if __name__ == "__main__":
    # Shared by the 'tests' extra and the legacy tests_require option.
    test_deps = [
        'pytest',
        'pytest-cov',
        'pytest-pep8',
        'tox',
    ]
    setuptools.setup(
        name='friendly_computing_machine',
        version="0.1.1",
        description='A starting template for Python programs',
        author='CHENXI CAI',
        author_email='ccai28@emory.edu',
        url="https://github.com/xiaohaiguicc/friendly-computing-machine",
        license='BSD-3C',
        packages=setuptools.find_packages(),
        install_requires=[
            'numpy>=1.7',
        ],
        extras_require={
            'docs': [
                'sphinx==1.2.3',  # autodoc was broken in 1.3.1
                'sphinxcontrib-napoleon',
                'sphinx_rtd_theme',
                'numpydoc',
            ],
            'tests': test_deps,
        },
        tests_require=test_deps,
        classifiers=[
            'Development Status :: 4 - Beta',
            'Intended Audience :: Science/Research',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3',
        ],
        zip_safe=True,
    )
| Python | 0.000001 | |
bc17ea522b0120ec7308ba0309d87b18ba9163d9 | Add setup.py | setup.py | setup.py | import sys
from setuptools import setup
# Package metadata for the Pingdom log backup tool.
metadata = dict(
    name='pingdombackup',
    version="0.1.0",
    description='Backup Pingdom logs',
    long_description='Backup Pingdom result logs to a SQLite database.',
    author='Joel Verhagen',
    author_email='joel.verhagen@gmail.com',
    install_requires=['requests>=2.1.0'],
    url='https://github.com/joelverhagen/PingdomBackup',
    packages=['pingdombackup'],
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Topic :: System :: Monitoring',
    ],
)
setup(**metadata)
| Python | 0.000001 | |
1dd8a34cba565f70a30a6c8ab4604a489377e752 | Add template remove script | src/utils/remove_templates.py | src/utils/remove_templates.py | def remove_templates(text):
"""Remove all text contained between '{{' and '}}', even in the case of
nested templates.
Args:
text (str): Full text of a Wikipedia article as a single string.
Returns:
str: The full text with all templates removed.
"""
start_char = 0
while '{{' in text:
depth = 0
prev_char = None
open_pos = None
close_pos = None
for pos in xrange(start_char, len(text)):
char = text[pos]
# Open Marker
if char == '{' and prev_char == '{':
if depth == 0:
open_pos = pos-1
# When we scan the string again after removing the chunk
# that starts here, we know all text before is template
# free, so we mark this position for the next while
# iteration
start_char = open_pos
depth += 1
# Close Marker
elif char == '}' and prev_char == '}':
depth -= 1
if depth == 0:
close_pos = pos
# Remove all text between the open and close markers
text = text[:open_pos] + text[close_pos+1:]
break
prev_char = char
return text
| Python | 0 | |
ad714cbf92d2984c9cc855e99e31bf622c38a220 | add setup file | setup.py | setup.py | #!/usr/bin/env python
from distutils.core import setup
# Namespace-style layout: 's7n' is the shared namespace package and
# 's7n.menu' the actual menu implementation.
setup(
    name='s7n-menu',
    version="1a1",
    packages=['s7n', 's7n.menu'],
)
| Python | 0.000001 | |
ff147838ce320c97c34e00be4dafb63b6d0603fc | Add setup.py | setup.py | setup.py | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
import sys
# Directory containing this setup.py.
here = path.abspath(path.dirname(__file__))

# The long description comes straight from the README.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='oneliner',
    # Versions should comply with PEP440.
    version='0.0.1',
    description='Module for practical Python one-liners',
    long_description=long_description,
    url='https://github.com/seguri/python-oneliner',
    author='Marco Seguri',
    author_email='marco@seguri.name',
    license='Apache 2.0',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache 2.0 License',
        'Programming Language :: Python :: 3',
    ],
    keywords='oneliner repl',
    # Single-module distribution; no packages, data files or dependencies.
    py_modules=["oneliner"],
    # Install a version-suffixed launcher, e.g. 'pyl-3.5'.
    entry_points={
        'console_scripts': [
            'pyl-{0.major}.{0.minor}=oneliner:main'.format(sys.version_info),
        ],
    },
)
| Python | 0.000001 | |
e9832b7b3bb028170562cacde3dbb52f13adba85 | Set version number | setup.py | setup.py | #!/usr/bin/env python
#
# Copyright (C) 2007 SIOS Technology, Inc.
# Copyright (C) 2011 Umea Universitet, Sweden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
    """Run the test suite through py.test via 'python setup.py test'."""

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Import here, because outside the command the eggs aren't loaded.
        import pytest
        sys.exit(pytest.main(self.test_args))
# Core runtime dependencies.
install_requires = [
    'decorator',
    'requests >= 1.0.0',
    'paste',
    'zope.interface',
    'repoze.who',
    'pycrypto',  # 'Crypto'
]

# Extra packages needed only by the test suite.
tests_require = [
    'mongodict',
    'pyasn1',
    'pymongo',
    'python-memcached == 1.51',
    'pytest',
    'mako',
    # 'pytest-coverage',
]

# importlib entered the standard library in Python 2.7.
if sys.version_info < (2, 7):
    install_requires.append('importlib')
setup(
    name='pysaml2',
    version='1.2.0beta',
    description='Python implementation of SAML Version 2 to be used in a WSGI environment',
    # long_description = read("README"),
    author='Roland Hedberg',
    author_email='roland.hedberg@adm.umu.se',
    license='Apache 2.0',
    url='https://github.com/rohe/pysaml2',
    packages=['saml2', 'xmldsig', 'xmlenc', 's2repoze', 's2repoze.plugins',
              'saml2/profile', 'saml2/schema', 'saml2/extension',
              'saml2/attributemaps', 'saml2/authn_context',
              'saml2/entity_category', 'saml2/userinfo'],
    package_dir={'': 'src'},
    package_data={'': ['xml/*.xml']},
    classifiers=["Development Status :: 4 - Beta",
                 "License :: OSI Approved :: Apache Software License",
                 "Topic :: Software Development :: Libraries :: Python Modules"],
    scripts=["tools/parse_xsd2.py", "tools/make_metadata.py",
             "tools/mdexport.py"],
    tests_require=tests_require,
    extras_require={'testing': tests_require},
    install_requires=install_requires,
    zip_safe=False,
    test_suite='tests',
    cmdclass={'test': PyTest},
)
| #!/usr/bin/env python
#
# Copyright (C) 2007 SIOS Technology, Inc.
# Copyright (C) 2011 Umea Universitet, Sweden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
    """Run the test suite through py.test via 'python setup.py test'."""

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Import here, because outside the command the eggs aren't loaded.
        import pytest
        sys.exit(pytest.main(self.test_args))


# Core runtime dependencies.
install_requires = [
    'decorator',
    'requests >= 1.0.0',
    'paste',
    'zope.interface',
    'repoze.who',
    'pycrypto',  # 'Crypto'
]

# Extra packages needed only by the test suite.
tests_require = [
    'mongodict',
    'pyasn1',
    'pymongo',
    'python-memcached == 1.51',
    'pytest',
    'mako',
    # 'pytest-coverage',
]

# importlib entered the standard library in Python 2.7.
if sys.version_info < (2, 7):
    install_requires.append('importlib')

setup(
    name='pysaml2',
    version='1.1.0',
    description='Python implementation of SAML Version 2 to be used in a WSGI environment',
    # long_description = read("README"),
    author='Roland Hedberg',
    author_email='roland.hedberg@adm.umu.se',
    license='Apache 2.0',
    url='https://github.com/rohe/pysaml2',
    packages=['saml2', 'xmldsig', 'xmlenc', 's2repoze', 's2repoze.plugins',
              'saml2/profile', 'saml2/schema', 'saml2/extension',
              'saml2/attributemaps', 'saml2/authn_context',
              'saml2/entity_category', 'saml2/userinfo'],
    package_dir={'': 'src'},
    package_data={'': ['xml/*.xml']},
    classifiers=["Development Status :: 4 - Beta",
                 "License :: OSI Approved :: Apache Software License",
                 "Topic :: Software Development :: Libraries :: Python Modules"],
    scripts=["tools/parse_xsd2.py", "tools/make_metadata.py",
             "tools/mdexport.py"],
    tests_require=tests_require,
    extras_require={'testing': tests_require},
    install_requires=install_requires,
    zip_safe=False,
    test_suite='tests',
    cmdclass={'test': PyTest},
)
| Python | 0.000032 |
73d9b80d6fa1cf75dba73e396d1f5d3bd4963df6 | Create setup.py | setup.py | setup.py | from distutils.core import setup
setup(
    name='wthen',
    packages=['wthen'],  # this must be the same as the name above
    version='0.1',
    description='A simple rule engine with YAML format',
    author='Alex Yu',
    author_email='mltest2000@aliyun.com',
    url='https://github.com/sevenbigcat/wthen',  # use the URL to the github repo
    # Fix: the archive URL misspelled the repository name ('wtehn'), so the
    # download link pointed at a nonexistent repo.
    download_url='https://github.com/sevenbigcat/wthen/archive/0.1.tar.gz',
    keywords=['rule engine', 'ECA', 'YAML'],  # arbitrary keywords
    classifiers=[],
)
| Python | 0.000001 | |
62e126908e08544f8595be368d300b0abaca82d3 | support old setuptools versions | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Read the version string out of h2/__init__.py without importing it.
version_regex = r'__version__ = ["\']([^"\']*)["\']'
with open('h2/__init__.py', 'r') as f:
    match = re.search(version_regex, f.read())

if not match:
    raise RuntimeError("No version number found!")
version = match.group(1)

# Stealing this from Kenneth Reitz
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()

packages = ['h2']

setup(
    name='h2',
    version=version,
    description='HTTP/2 State-Machine based protocol implementation',
    long_description=open('README.rst').read() + '\n\n' + open('HISTORY.rst').read(),
    author='Cory Benfield',
    author_email='cory@lukasa.co.uk',
    url='http://hyper.rtfd.org',
    packages=packages,
    package_data={'': ['LICENSE', 'README.rst', 'CONTRIBUTORS.rst', 'HISTORY.rst', 'NOTICES']},
    package_dir={'h2': 'h2'},
    include_package_data=True,
    license='MIT License',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
    # Explicit ranges instead of '~=' so old setuptools can parse them.
    install_requires=[
        'hyperframe>=3.1, <4',
        'hpack>=2.0, <3',
    ],
    extras_require={
        ':python_version == "2.7" or python_version == "3.3"': ['enum34>=1.0.4, <1.1'],
    },
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Read the version string out of h2/__init__.py without importing it.
version_regex = r'__version__ = ["\']([^"\']*)["\']'
with open('h2/__init__.py', 'r') as f:
    match = re.search(version_regex, f.read())

if not match:
    raise RuntimeError("No version number found!")
version = match.group(1)

# Stealing this from Kenneth Reitz
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()

packages = ['h2']

setup(
    name='h2',
    version=version,
    description='HTTP/2 State-Machine based protocol implementation',
    long_description=open('README.rst').read() + '\n\n' + open('HISTORY.rst').read(),
    author='Cory Benfield',
    author_email='cory@lukasa.co.uk',
    url='http://hyper.rtfd.org',
    packages=packages,
    package_data={'': ['LICENSE', 'README.rst', 'CONTRIBUTORS.rst', 'HISTORY.rst', 'NOTICES']},
    package_dir={'h2': 'h2'},
    include_package_data=True,
    license='MIT License',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
    install_requires=[
        'hyperframe~=3.1',
        'hpack~=2.0',
    ],
    extras_require={
        ':python_version<"3.4"': ['enum34~=1.0.4'],
    },
)
| Python | 0 |
1e038b40cfb61f15fb69cdf4207a739e2ebe0060 | add tests for scylla-sstable's dump commands | test/cql-pytest/test_tools.py | test/cql-pytest/test_tools.py | # Copyright 2022-present ScyllaDB
#
# SPDX-License-Identifier: AGPL-3.0-or-later
#############################################################################
# Tests for the tools hosted by scylla
#############################################################################
import glob
import json
import nodetool
import os
import pytest
import subprocess
import util
def simple_no_clustering_table(cql, keyspace):
    """Create, populate and flush a table keyed only by a partition key."""
    table = util.unique_name()
    schema = f"CREATE TABLE {keyspace}.{table} (pk int PRIMARY KEY , v int)"
    cql.execute(schema)
    for pk in range(10):
        cql.execute(f"INSERT INTO {keyspace}.{table} (pk, v) VALUES ({pk}, 0)")
    nodetool.flush(cql, f"{keyspace}.{table}")
    return table, schema


def simple_clustering_table(cql, keyspace):
    """Create, populate and flush a table with a clustering key."""
    table = util.unique_name()
    schema = f"CREATE TABLE {keyspace}.{table} (pk int, ck int, v int, PRIMARY KEY (pk, ck))"
    cql.execute(schema)
    for pk in range(10):
        for ck in range(10):
            cql.execute(f"INSERT INTO {keyspace}.{table} (pk, ck, v) VALUES ({pk}, {ck}, 0)")
    nodetool.flush(cql, f"{keyspace}.{table}")
    return table, schema


def clustering_table_with_collection(cql, keyspace):
    """Create, populate and flush a clustering table with a map column."""
    table = util.unique_name()
    schema = f"CREATE TABLE {keyspace}.{table} (pk int, ck int, v map<int, text>, PRIMARY KEY (pk, ck))"
    cql.execute(schema)
    for pk in range(10):
        for ck in range(10):
            entries = {f"{p}: '{c}'" for p in range(0, pk) for c in range(0, ck)}
            map_str = ", ".join(entries)
            cql.execute(f"INSERT INTO {keyspace}.{table} (pk, ck, v) VALUES ({pk}, {ck}, {{{map_str}}})")
    nodetool.flush(cql, f"{keyspace}.{table}")
    return table, schema


def clustering_table_with_udt(cql, keyspace):
    """Create, populate and flush a clustering table with a UDT column."""
    table = util.unique_name()
    create_type_schema = f"CREATE TYPE {keyspace}.type1 (f1 int, f2 text)"
    create_table_schema = f" CREATE TABLE {keyspace}.{table} (pk int, ck int, v type1, PRIMARY KEY (pk, ck))"
    cql.execute(create_type_schema)
    cql.execute(create_table_schema)
    for pk in range(10):
        for ck in range(10):
            cql.execute(f"INSERT INTO {keyspace}.{table} (pk, ck, v) VALUES ({pk}, {ck}, {{f1: 100, f2: 'asd'}})")
    nodetool.flush(cql, f"{keyspace}.{table}")
    # The schema file needs both statements so the tool can resolve the UDT.
    return table, "; ".join((create_type_schema, create_table_schema))


def table_with_counters(cql, keyspace):
    """Create, populate and flush a table with a counter column."""
    table = util.unique_name()
    schema = f"CREATE TABLE {keyspace}.{table} (pk int PRIMARY KEY, v counter)"
    cql.execute(schema)
    for pk in range(10):
        for _ in range(4):
            cql.execute(f"UPDATE {keyspace}.{table} SET v = v + 1 WHERE pk = {pk};")
    nodetool.flush(cql, f"{keyspace}.{table}")
    return table, schema
@pytest.fixture(scope="module", params=[
    simple_no_clustering_table,
    simple_clustering_table,
    clustering_table_with_collection,
    clustering_table_with_udt,
    table_with_counters,
])
def scylla_sstable(request, tmp_path_factory, cql, test_keyspace, scylla_only):
    """Build a table and yield (scylla path, schema file, sstable paths).

    The table is dropped again on teardown.
    """
    workdir = request.config.getoption('workdir')
    scylla_path = request.config.getoption('scylla_path')
    if not workdir or not scylla_path:
        pytest.skip('Cannot run tool tests: workdir and/or scylla_path not provided')

    table, schema = request.param(cql, test_keyspace)
    schema_file = os.path.join(tmp_path_factory.getbasetemp(), "schema.cql")
    with open(schema_file, "w") as fobj:
        fobj.write(schema)
    sstables = glob.glob(os.path.join(workdir, 'data', test_keyspace, table + '-*', '*-Data.db'))
    try:
        yield (scylla_path, schema_file, sstables)
    finally:
        cql.execute(f"DROP TABLE {test_keyspace}.{table}")
def one_sstable(sstables):
    """Parametrization helper: dump just the first sstable."""
    return [sstables[0]]


def all_sstables(sstables):
    """Parametrization helper: dump every sstable at once."""
    return sstables
@pytest.mark.parametrize("what", ["index", "compression-info", "summary", "statistics", "scylla-metadata"])
@pytest.mark.parametrize("which_sstables", [one_sstable, all_sstables])
def test_scylla_sstable_dump(scylla_sstable, what, which_sstables):
    scylla_path, schema_file, sstables = scylla_sstable
    cmd = [scylla_path, "sstable", f"dump-{what}", "--schema-file", schema_file]
    out = subprocess.check_output(cmd + which_sstables(sstables))
    print(out)
    # Each dump sub-command must produce non-empty, parseable JSON.
    assert out
    assert json.loads(out)
@pytest.mark.parametrize("merge", [True, False])
@pytest.mark.parametrize("output_format", ["text", "json"])
def test_scylla_sstable_dump_merge(scylla_sstable, merge, output_format):
    scylla_path, schema_file, sstables = scylla_sstable
    args = [scylla_path, "sstable", "dump-data", "--schema-file", schema_file,
            "--output-format", output_format]
    if merge:
        args.append("--merge")
    out = subprocess.check_output(args + sstables)
    print(out)
    assert out
    # Only the JSON output format is expected to parse as JSON.
    if output_format == "json":
        assert json.loads(out)
| Python | 0.00038 | |
4d16ae6d1ad8b308c14c23e802349001b81ae461 | Add Python-based opcode enum parser | thinglang/compiler/opcodes.py | thinglang/compiler/opcodes.py | import os
import re
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ENUM_PARSER = re.compile(r'(.*)\s*?=\s*?(\d+)')
def read_opcodes():
    """Yield (name, value) pairs parsed from the C++ Opcode enum.

    Scans thingc/execution/Opcode.h for the 'enum class Opcode' block and
    parses each 'NAME = N' member line until the closing brace.
    """
    with open(os.path.join(BASE_DIR, '..', '..', 'thingc', 'execution', 'Opcode.h')) as f:
        # Skip everything up to the enum declaration.
        for line in f:
            if 'enum class Opcode' in line:
                break

        for decl in f:
            decl = decl.strip()

            if not decl:
                continue

            if '}' in decl:
                break  # end of the enum body

            match = ENUM_PARSER.search(decl)
            if match is None:
                # Tolerate comment/non-member lines inside the enum instead
                # of crashing with AttributeError on the failed search.
                continue
            name, value = match.groups()
            yield (name.strip(), int(value))


OPCODES = dict(read_opcodes())

# Opcode values must be dense and zero-based so they can index dispatch tables.
assert set(range(len(OPCODES))) == set(OPCODES.values())
| Python | 0.001332 | |
ac823e61fd214f9818bb7a893a8ed52a3bfa3af4 | Add utils for graph visualization. | neurokernel/conn_utils.py | neurokernel/conn_utils.py | #!/usr/bin/env python
import itertools
import os
import tempfile
import conn
import matplotlib.pyplot as plt
import networkx as nx
def imdisp(f):
    """Display the specified image file using matplotlib and return it."""
    image = plt.imread(f)
    plt.imshow(image)
    plt.axis('off')
    plt.draw()
    return image
def show_pydot(g):
    """Display a networkx graph using pydot."""
    # Reserve a unique temporary filename for the rendered image.
    tmp = tempfile.NamedTemporaryFile()
    tmp.close()
    nx.to_pydot(g).write_jpg(tmp.name)
    imdisp(tmp.name)
    os.remove(tmp.name)
def show_pygraphviz(g, prog='dot', graph_attr=None, node_attr=None, edge_attr=None):
    """Display a networkx graph using pygraphviz.

    Args:
        g: graph to render.
        prog: graphviz layout program to use.
        graph_attr, node_attr, edge_attr: optional attribute dicts merged
            into the rendered graph's attributes.
    """
    # Fix: use None defaults instead of mutable {} defaults, which are
    # shared between calls.
    fd = tempfile.NamedTemporaryFile(suffix='.jpg')
    fd.close()
    p = nx.to_agraph(g)
    p.graph_attr.update(graph_attr or {})
    p.node_attr.update(node_attr or {})
    p.edge_attr.update(edge_attr or {})
    p.draw(fd.name, prog=prog)
    imdisp(fd.name)
    os.remove(fd.name)
def conn_to_bipartite(c):
    """Convert a Connectivity object into a bipartite NetworkX multigraph."""
    g = nx.MultiDiGraph()
    g.add_nodes_from(['src_%i' % i for i in xrange(c.N_src)])
    g.add_nodes_from(['dest_%i' % i for i in xrange(c.N_dest)])

    for key in c._data.keys():
        # Keys look like '<syn>/<direction>/<name>'.
        syn, direction, name = key.split('/')
        if name != 'conn':
            continue
        syn = int(syn)
        if direction == '+':
            for src, dest in itertools.product(xrange(c.N_src), xrange(c.N_dest)):
                if c[src, dest, syn, direction, name] == 1:
                    g.add_edge('src_%i' % src, 'dest_%i' % dest)
        elif direction == '-':
            for src, dest in itertools.product(xrange(c.N_src), xrange(c.N_dest)):
                if c[src, dest, syn, direction, name] == 1:
                    g.add_edge('dest_%i' % dest, 'src_%i' % src)
        else:
            raise ValueError('invalid direction')
    return g
| Python | 0 | |
e663394d1dc4de7b8e3a877f0c9870a804e804f2 | Make tests runnable from lifelines.tests | lifelines/tests/__main__.py | lifelines/tests/__main__.py | import unittest
from . import test_suite
if __name__ == '__main__':
unittest.main(module=test_suite)
| Python | 0.00001 | |
525a8438bd601592c4f878ca5d42d3dab8943be0 | Test that specific Failures are caught before parent Failures | ooni/tests/test_errors.py | ooni/tests/test_errors.py | from twisted.trial import unittest
import ooni.errors
class TestErrors(unittest.TestCase):
    def test_catch_child_failures_before_parent_failures(self):
        """
        Verify that more specific Failures are caught first by
        handleAllFailures() and failureToString().

        Fails if a subclass is listed after its parent Failure.
        """
        failures = ooni.errors.known_failures
        # Compare each Failure against every one listed after it.
        for position, (parent, _) in enumerate(failures):
            for child, _ in failures[position + 1:]:
                # A later entry must not be a subclass of an earlier one.
                self.assertNotIsInstance(child(None), parent)
| Python | 0.000001 | |
90d079928eaf48e370d21417e4d6e649ec0f5f6f | Update tasks and evaluate viewports on saving | taskwiki/taskwiki.py | taskwiki/taskwiki.py | import sys
import re
import vim
from tasklib.task import TaskWarrior, Task
# Insert the taskwiki on the python path
sys.path.insert(0, vim.eval("s:plugin_path") + '/taskwiki')
from regexp import *
from task import VimwikiTask
from cache import TaskCache
"""
How this plugin works:
1.) On startup, it reads all the tasks and syncs info TW -> Vimwiki file. Task is identified by their
uuid.
2.) When saving, the opposite sync is performed (Vimwiki -> TW direction).
a) if task is marked as subtask by indentation, the dependency is created between
"""
tw = TaskWarrior()
cache = TaskCache(tw)
def update_from_tw():
    """Sync in the TW -> Vimwiki direction.

    Refreshes every incomplete task in the vimwiki buffer whose data
    differs from its TaskWarrior counterpart, then redraws viewports.
    """
    cache.load_buffer()
    cache.update_tasks()
    cache.update_buffer()
    cache.evaluate_viewports()


def update_to_tw():
    """Sync in the Vimwiki -> TW direction.

    Saves every task whose buffer representation differs from TaskWarrior,
    then redraws the buffer and re-evaluates viewports.
    """
    cache.reset()
    cache.load_buffer()
    cache.update_tasks()
    cache.save_tasks()
    cache.update_buffer()
    cache.evaluate_viewports()


if __name__ == '__main__':
    update_from_tw()
| import sys
import re
import vim
from tasklib.task import TaskWarrior, Task
# Insert the taskwiki on the python path
sys.path.insert(0, vim.eval("s:plugin_path") + '/taskwiki')
from regexp import *
from task import VimwikiTask
from cache import TaskCache
"""
How this plugin works:
1.) On startup, it reads all the tasks and syncs info TW -> Vimwiki file. Task is identified by their
uuid.
2.) When saving, the opposite sync is performed (Vimwiki -> TW direction).
a) if task is marked as subtask by indentation, the dependency is created between
"""
tw = TaskWarrior()
cache = TaskCache(tw)
def update_from_tw():
    """Sync in the TW -> Vimwiki direction.

    Refreshes every incomplete task in the vimwiki buffer whose data
    differs from its TaskWarrior counterpart, then redraws viewports.
    """
    cache.load_buffer()
    cache.update_tasks()
    cache.update_buffer()
    cache.evaluate_viewports()


def update_to_tw():
    """Sync in the Vimwiki -> TW direction.

    Saves every task whose buffer representation differs from TaskWarrior,
    then redraws the buffer.
    """
    cache.reset()
    cache.load_buffer()
    cache.save_tasks()
    cache.update_buffer()


if __name__ == '__main__':
    update_from_tw()
| Python | 0 |
f2e9f2adbc81a37847bbe27401dd852317243486 | add a test for the session tables | test/sessionstest.py | test/sessionstest.py | #!/usr/bin/python2.4
#
# Copyright (c) 2004-2005 rpath, Inc.
#
import time
import testsuite
testsuite.setup()
import sqlite3
import rephelp
from mint_rephelp import MintRepositoryHelper
from mint import dbversion
from mint import sessiondb
class SessionTest(MintRepositoryHelper):
    """Exercise sessiondb.SessionsTable save/load/cleanup behaviour."""

    def testSessions(self):
        st = sessiondb.SessionsTable(self.db)
        # create a session that expired 10 seconds ago
        st.save("abcdefg123456", {'_data': 'data',
                                  '_accessed': time.time() - 20,
                                  '_timeout': 10})
        # load and check data
        d = st.load("abcdefg123456")
        assert(d['_data'] == 'data')
        # clean up expired sessions
        st.cleanup()
        # confirm that expired session went away
        d = st.load("abcdefg123456")
        assert(not d)

    def setUp(self):
        rephelp.RepositoryHelper.setUp(self)
        # Fix: 'os' was used here without being imported, and the bare
        # 'except:' silently swallowed the NameError, so the stale db file
        # was never actually removed.  Import locally and catch only the
        # expected "file missing" error.
        import os
        try:
            os.unlink(self.reposDir + "/db")
        except OSError:
            pass
        self.db = sqlite3.connect(self.reposDir + "/db")
        self.versionTable = dbversion.VersionTable(self.db)
        self.db.commit()
if __name__ == "__main__":
    # Hand control to the conary test harness.
    testsuite.main()
| Python | 0 | |
ddbfc403034c1ed98590088889687ff23f222aab | add package | var/spack/packages/paraview/package.py | var/spack/packages/paraview/package.py | from spack import *
class Paraview(Package):
    """Spack package recipe for the ParaView visualization suite."""

    homepage = 'http://www.paraview.org'
    url = 'http://www.paraview.org/files/v4.4/ParaView-v4.4.0-source.tar.gz'

    version('4.4.0', 'fa1569857dd680ebb4d7ff89c2227378', url='http://www.paraview.org/files/v4.4/ParaView-v4.4.0-source.tar.gz')

    variant('python', default=False, description='Enable Python support')
    variant('matplotlib', default=False, description='Enable Matplotlib support')
    variant('numpy', default=False, description='Enable NumPy support')
    variant('tcl', default=False, description='Enable TCL support')
    variant('mpi', default=False, description='Enable MPI support')
    variant('osmesa', default=False, description='Enable OSMesa support')
    variant('qt', default=False, description='Enable Qt support')

    depends_on('python', when='+python')
    depends_on('py-numpy', when='+python+numpy')
    depends_on('py-matplotlib', when='+python+matplotlib')
    depends_on('tcl', when='+tcl')
    depends_on('mpi', when='+mpi')
    depends_on('qt', when='+qt')

    depends_on('bzip2')
    depends_on('freetype')
    depends_on('hdf5')  # drags in mpi
    depends_on('jpeg')
    depends_on('libpng')
    depends_on('libtiff')
    #depends_on('libxml2') # drags in python
    depends_on('netcdf')
    #depends_on('protobuf') # version mismatches?
    #depends_on('sqlite') # external version not supported
    depends_on('zlib')

    def install(self, spec, prefix):
        with working_dir('spack-build', create=True):
            def feature_to_bool(feature, on='ON', off='OFF'):
                # Map the presence of a variant in the spec to a CMake value.
                return on if feature in spec else off

            def nfeature_to_bool(feature):
                # Inverse of feature_to_bool.
                return feature_to_bool(feature, on='OFF', off='ON')

            # Fix: the original both seeded feature_args with
            # std_cmake_args[:] AND extended it with std_cmake_args again,
            # duplicating every standard argument on the command line.
            feature_args = std_cmake_args[:]
            feature_args.append('-DPARAVIEW_BUILD_QT_GUI:BOOL=%s' % feature_to_bool('+qt'))
            feature_args.append('-DPARAVIEW_ENABLE_PYTHON:BOOL=%s' % feature_to_bool('+python'))
            feature_args.append('-DPARAVIEW_USE_MPI:BOOL=%s' % feature_to_bool('+mpi'))
            feature_args.append('-DVTK_ENABLE_TCL_WRAPPING:BOOL=%s' % feature_to_bool('+tcl'))
            feature_args.append('-DVTK_OPENGL_HAS_OSMESA:BOOL=%s' % feature_to_bool('+osmesa'))
            feature_args.append('-DVTK_USE_X:BOOL=%s' % nfeature_to_bool('+osmesa'))
            feature_args.append('-DVTK_RENDERING_BACKEND:STRING=%s' % feature_to_bool('+opengl2', 'OpenGL2', 'OpenGL'))

            cmake('..',
                  '-DCMAKE_INSTALL_PREFIX:PATH=%s' % prefix,
                  '-DBUILD_TESTING:BOOL=OFF',
                  # Fix: these flags were misspelled VTK_USER_SYSTEM_*;
                  # VTK's CMake options are named VTK_USE_SYSTEM_<LIB>.
                  '-DVTK_USE_SYSTEM_FREETYPE:BOOL=ON',
                  '-DVTK_USE_SYSTEM_HDF5:BOOL=ON',
                  '-DVTK_USE_SYSTEM_JPEG:BOOL=ON',
                  #'-DVTK_USE_SYSTEM_LIBXML2:BOOL=ON',
                  '-DVTK_USE_SYSTEM_NETCDF:BOOL=ON',
                  '-DVTK_USE_SYSTEM_TIFF:BOOL=ON',
                  '-DVTK_USE_SYSTEM_ZLIB:BOOL=ON',
                  *feature_args)
            make()
            make('install')
| Python | 0 | |
eb71a3d3319480b3f99cb44f934a51bfb1b5bd67 | Add abstract class for HAP channels | pyatv/auth/hap_channel.py | pyatv/auth/hap_channel.py | """Base class for HAP based channels (connections)."""
from abc import ABC, abstractmethod
import asyncio
import logging
from typing import Callable, Tuple, cast
from pyatv.auth.hap_pairing import PairVerifyProcedure
from pyatv.auth.hap_session import HAPSession
from pyatv.support import log_binary
_LOGGER = logging.getLogger(__name__)
class AbstractHAPChannel(ABC, asyncio.Protocol):
    """Abstract base class for connections using HAP encryption and segmenting.

    Incoming bytes are decrypted into ``self.buffer``; concrete subclasses
    consume the buffer in :meth:`handle_received`.
    """

    def __init__(self, output_key: bytes, input_key: bytes) -> None:
        """Initialize a new AbstractHAPChannel instance with session keys."""
        self.buffer: bytes = b""
        self.transport = None
        self.session: HAPSession = HAPSession()
        self.session.enable(output_key, input_key)

    def connection_made(self, transport) -> None:
        """Called by asyncio once the device connection is established."""
        sock = transport.get_extra_info("socket")
        remote_host, remote_port = sock.getpeername()
        _LOGGER.debug("Connected to %s:%d", remote_host, remote_port)
        self.transport = transport

    def data_received(self, data: bytes) -> None:
        """Decrypt an incoming segment and hand it to the subclass."""
        assert self.transport is not None
        decrypted = self.session.decrypt(data)
        log_binary(_LOGGER, "Received data", Data=data)
        self.buffer += decrypted
        self.handle_received()

    @abstractmethod
    def handle_received(self) -> None:
        """Handle received data that was put in buffer."""

    def send(self, data: bytes) -> None:
        """Encrypt *data* and write it to the device."""
        assert self.transport is not None
        payload = self.session.encrypt(data)
        log_binary(_LOGGER, "Sending data", Encrypted=payload)
        self.transport.write(payload)

    def connection_lost(self, exc) -> None:
        """Called by asyncio when the device connection drops."""
        _LOGGER.debug("Connection was lost to remote")
async def setup_channel(
    factory: Callable[[bytes, bytes], AbstractHAPChannel],
    verifier: PairVerifyProcedure,
    address: str,
    port: int,
    salt: str,
    output_info: str,
    input_info: str,
) -> Tuple[asyncio.BaseTransport, AbstractHAPChannel]:
    """Set up a new HAP channel and enable encryption.

    Derives the two session keys from the verified pairing and connects a
    protocol instance created by *factory* to ``address:port``.
    """
    output_key, input_key = verifier.encryption_keys(salt, output_info, input_info)
    transport, protocol = await asyncio.get_event_loop().create_connection(
        lambda: factory(output_key, input_key), address, port
    )
    return transport, cast(AbstractHAPChannel, protocol)
| Python | 0 | |
079ff9e471c4ad60f03e87929f0c7c44e239ddf2 | Add timer class and checkpoint class | pycqed/utilities/timer.py | pycqed/utilities/timer.py | import numpy as np
import datetime as dt
import logging
from collections import OrderedDict
from pycqed.measurement.hdf5_data import write_dict_to_hdf5
log = logging.getLogger(__name__)
import functools
class Timer(OrderedDict):
    """Ordered mapping of checkpoint name -> Checkpoint.

    Usable three ways: as a plain object via :meth:`checkpoint`, as a
    decorator (``@timer`` on a method of an object exposing ``.timer``),
    and as a context manager (records 'start'/'end' checkpoints).
    """

    # Name of the HDF5 group used by save(); well-known checkpoint names.
    HDF_GRP_NAME = "Timers"
    NAME_CKPT_START = "start"
    NAME_CKPT_END = "end"

    def __init__(self, name="timer", fmt="%Y-%m-%d %H:%M:%S.%f", name_separator=".",
                 verbose=False, auto_start=True, timer=None):
        """Create a timer, optionally copying checkpoints from *timer*.

        :param fmt: datetime format passed on to created Checkpoints.
        :param name_separator: joins qualified names in the decorator path.
        :param timer: existing mapping of checkpoints to copy.
        """
        self.fmt = fmt
        self.name = name
        self.name_separator = name_separator
        self.verbose = verbose
        if timer is not None:
            super().__init__(timer)
        # NOTE(review): auto_start only fires when an existing timer is
        # passed in; a fresh Timer() gets no 'start' checkpoint despite
        # auto_start=True. Looks inverted (`timer is None`?) — confirm.
        if auto_start and timer is not None:
            self.checkpoint(self.NAME_CKPT_START)

    @staticmethod
    def from_string(timer):
        # Placeholder: deserialization from a string is not implemented yet.
        pass

    def __call__(self, func):
        """Decorator: record start/end checkpoints on args[0].timer."""
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # print(args)
            # args[0] is the bound object (self of the wrapped method).
            if hasattr(args[0], "timer"):
                args[0].timer.checkpoint(func.__qualname__ + self.name_separator + self.NAME_CKPT_START)
            else:
                log.warning(f'Using @Timer decorator on {args[0]} but {args[0]} has no .timer attribute.'
                            'Time will not be logged.')
            output = func(*args, **kwargs)
            if hasattr(args[0], "timer"):
                args[0].timer.checkpoint(func.__qualname__ + self.name_separator + self.NAME_CKPT_END)
            return output
        return wrapper

    def __enter__(self):
        # Ensure a 'start' checkpoint exists even when auto_start did not fire.
        if self.get(self.NAME_CKPT_START, None) is None:
            # overwrite auto_start because when used in "with" statement, start must happen at beginning
            self.checkpoint(self.NAME_CKPT_START)
        if self.verbose:
            # Temporarily raise the log level so the message is visible.
            lvl = log.level
            log.setLevel(logging.INFO)
            log.info(f'Start of {self.name}: {self[self.NAME_CKPT_START].get_start()}')
            log.setLevel(lvl)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.checkpoint(self.NAME_CKPT_END)
        if self.verbose:
            lvl = log.level
            log.setLevel(logging.INFO)
            log.info(f'End of {self.name}: {self[self.NAME_CKPT_END].get_end()}. Duration: {self.duration(return_type="str")}')
            log.setLevel(lvl)

    def checkpoint(self, name, value=None, log_every_x=1):
        """Create checkpoint *name* or log another time onto an existing one.

        NOTE(review): *value* is accepted but never forwarded — an explicit
        timestamp is silently ignored on the re-log path. Confirm intent.
        """
        if name not in self:
            self[name] = Checkpoint(name, fmt=self.fmt, log_every_x=log_every_x)
        else:
            self[name].log_time()

    def duration(self, keys=None, return_type="seconds"):
        """Time between two checkpoints (default: 'start' and 'end').

        :param return_type: "seconds" (float), "time_delta" or "str".
        Logs an error (and returns None) if a checkpoint is missing.
        """
        if keys is None:
            keys = (self.NAME_CKPT_START, self.NAME_CKPT_END)
        try:
            duration = self[keys[1]].get_end() - self[keys[0]].get_start()
            if return_type == "seconds":
                return duration.total_seconds()
            elif return_type == "time_delta":
                return duration
            elif return_type == "str":
                return str(duration)
            else:
                raise ValueError(f'return_type={return_type} not understood.')
        except KeyError as ke:
            log.error(f"Could not find key in timer: {ke}. Available keys: {self.keys()}")

    def save(self, data_object, group_name=None):
        '''
        Saves metadata on the MC (such as timings)
        '''
        if group_name is None:
            group_name = self.name
        set_grp = data_object.create_group(group_name)
        # Checkpoints are stored via repr(), i.e. as formatted string lists.
        d = {k: repr(v) for k, v in self.items()}
        write_dict_to_hdf5(d, entry_point=set_grp,
                           overwrite=False)
class Checkpoint(list):
    """A named, ordered list of datetime checkpoints.

    Each call to :meth:`log_time` appends the current time (or an explicit
    value). ``log_every_x`` keeps only every x-th active call and
    ``min_timedelta`` suppresses calls arriving too soon after the last
    logged entry.
    """

    def __init__(self, name, checkpoints=(), log_every_x=1,
                 fmt="%Y-%m-%d %H:%M:%S.%f", min_timedelta=0, verbose=False):
        super().__init__()
        self.extend(checkpoints)
        self.name = name
        self.fmt = fmt                      # datetime format for __str__/__repr__
        self.log_every_x = log_every_x
        self.counter = 0
        self.min_timedelta = min_timedelta
        self.log_time()                     # record the creation time
        self.verbose = verbose

    def get_start(self):
        """Return the first logged time."""
        return self[0]

    def get_end(self):
        """Return the most recent logged time."""
        return self[-1]

    def active(self):
        """Whether enough time passed since the last entry to log again."""
        if len(self) > 0 and \
                (dt.datetime.now() - self[-1]).total_seconds() < self.min_timedelta:
            return False
        return True

    def log_time(self, value=None):
        """Append *value* (default: now) subject to rate limiting.

        BUG FIX: the call counter previously advanced only when an entry
        was actually appended, so with log_every_x > 1 only the very first
        call was ever logged and all later calls were skipped forever. The
        counter now advances on every active call, restoring the intended
        "log every x-th call" semantics.
        """
        if self.active():
            if self.counter % self.log_every_x == 0:
                self.append(dt.datetime.now() if value is None else value)
            self.counter += 1

    def duration(self, ref=None, return_type="seconds"):
        """Time between *ref* (default: first entry) and the last entry.

        :param return_type: "seconds" (float), "time_delta" or "str".
        :raises ValueError: for an unknown return_type.
        """
        if ref is None:
            ref = self[0]
        duration = self[-1] - ref
        if return_type == "seconds":
            return duration.total_seconds()
        elif return_type == "time_delta":
            return duration
        elif return_type == "str":
            return str(duration)
        raise ValueError(f'return_type={return_type} not understood.')

    def __str__(self):
        return "['" + "', '".join(dt.datetime.strftime(pt, self.fmt)
                                  for pt in self) + "']"

    def __repr__(self):
        return self.__str__()
5fa7514d9cf6bed319adb5f63b07c29feb5e29ea | add hex.cmdline.py3.py | python/hex.cmdline.py3.py | python/hex.cmdline.py3.py | #!/usr/bin/env python3
# Copyright (c) 2014 Tristan Cavelier <t.cavelier@free.fr>
# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# http://www.wtfpl.net/ for more details.
"""usage: hex [-h] [-d] [-i] [-w COLS] [FILE]
Hex encode or decode FILE, or standard input, to standard output.
positional arguments:
FILE
optional arguments:
-h, --help show this help message and exit
-d, --decode decode data
-i, --ignore-garbage when decoding, ignore non-hex digits
-w COLS, --wrap COLS wrap encoded lines after COLS character (default 76).
Use 0 to disable line wrapping
"""
import sys, os
import argparse
import binascii
def pipe_encode_no_wrap():
    """Hex-encode stdin to stdout as one continuous stream (no newlines)."""
    fd = sys.stdin.fileno()
    while True:
        chunk = os.read(fd, 1024)
        if not chunk:
            return 0
        hexed = "".join("%02X" % byte for byte in chunk)
        sys.stdout.write(hexed)
def pipe_encode_wrap(wrap):
    """Hex-encode stdin, emitting lines of `wrap` hex characters.

    Assumes an even, positive `wrap`; each input byte becomes two digits.
    """
    fd = sys.stdin.fileno()
    bytes_per_line = wrap // 2
    while True:
        chunk = os.read(fd, bytes_per_line)
        if not chunk:
            return 0
        line = "".join("%02X" % byte for byte in chunk)
        sys.stdout.write(line + "\n")
def pipe_decode_ignore_garbage():
    """Decode hex from stdin to stdout, silently skipping non-hex bytes.

    Returns 0 on success, 1 when an odd number of hex digits was seen.
    """
    fd = sys.stdin.fileno()
    pending = None                       # first digit of the current pair
    while True:
        chunk = os.read(fd, 1024)
        if not chunk:
            break
        for byte in chunk:
            # '0'-'9', 'a'-'f', 'A'-'F' — everything else is garbage.
            is_hex = (48 <= byte <= 57) or (97 <= byte <= 102) or (65 <= byte <= 70)
            if not is_hex:
                continue
            if pending is None:
                pending = byte
            else:
                os.write(sys.stdout.fileno(), binascii.unhexlify(chr(pending) + chr(byte)))
                pending = None
    if pending is None:
        return 0
    sys.stderr.write("hex: invalid input\n")
    return 1
def pipe_decode():
    """Decode hex from stdin to stdout; CR and LF between pairs are ignored.

    Returns 0 on success, 1 when an odd number of digits remains at EOF.
    Non-hex, non-newline bytes propagate binascii's error, as before.
    """
    fd = sys.stdin.fileno()
    pending = None                       # first digit of the current pair
    while True:
        chunk = os.read(fd, 1024)
        if not chunk:
            break
        for byte in chunk:
            if byte in (0x0D, 0x0A):     # skip CR / LF
                continue
            if pending is None:
                pending = byte
            else:
                os.write(sys.stdout.fileno(), binascii.unhexlify(chr(pending) + chr(byte)))
                pending = None
    if pending is None:
        return 0
    sys.stderr.write("hex: invalid input\n")
    return 1
def pipe_encode_clever_wrap(wrap):
    """Hex-encode stdin wrapping at exactly `wrap` columns (odd widths OK).

    Buffers hex digits so a byte's two digits may straddle a line break.
    BUG FIX: main() referenced this function but it was never defined, so
    any odd --wrap value crashed with NameError.
    """
    pending = ""
    while True:
        chunk = os.read(sys.stdin.fileno(), 1024)
        if len(chunk) == 0:
            break
        pending += "".join("%02X" % b for b in chunk)
        while len(pending) >= wrap:
            sys.stdout.write(pending[:wrap] + "\n")
            pending = pending[wrap:]
    if pending:
        sys.stdout.write(pending + "\n")
    return 0

def main():
    """Parse arguments and dispatch to the matching encode/decode pipe.

    Returns the process exit status (0 on success).
    """
    parser = argparse.ArgumentParser(description='Hex encode or decode FILE, or standard input, to standard output.')
    parser.add_argument("-d", "--decode", dest="decode", default=False, action="store_true", help="decode data")
    parser.add_argument("-i", "--ignore-garbage", dest="ignore_garbage", default=False, action="store_true", help="when decoding, ignore non-hex digits")
    parser.add_argument("-w", "--wrap", metavar="COLS", dest="wrap", default=76, type=int, help="wrap encoded lines after COLS character (default 76). Use 0 to disable line wrapping")
    parser.add_argument("file", metavar="FILE", nargs="?", default=None)
    args = parser.parse_args()
    if args.file is not None:
        # Redirect stdin to the named file; closed implicitly at exit.
        sys.stdin = open(args.file, "rb")
    if args.decode:
        if args.ignore_garbage:
            return pipe_decode_ignore_garbage()
        return pipe_decode()
    if args.wrap == 0:
        return pipe_encode_no_wrap()
    if args.wrap % 2 == 0:
        return pipe_encode_wrap(args.wrap)
    return pipe_encode_clever_wrap(args.wrap)

if __name__ == "__main__":
    sys.exit(main())
| Python | 0.000004 | |
0089de0eccae27bf4cd5a2f9166e8418d64171c3 | Create XOR.py | XOR.py | XOR.py | '''
Implement XOR operation
'''
def XOR(a, b):
    """Compute a XOR b for non-negative integers without using `^`.

    Walks both numbers bit by bit; a result bit is set when exactly one
    of the two operand bits is set. Only valid for a, b >= 0.
    """
    result = 0
    power = 1
    while a > 0 or b > 0:
        m = a % 2
        n = b % 2
        if m + n == 1:          # bits differ -> XOR bit is 1
            result = result + power
        power *= 2
        # BUG FIX: `a / 2` yields a float on Python 3, so the loop never
        # reached 0 correctly; floor division works on both 2 and 3.
        a = a // 2
        b = b // 2
    return result
if __name__ == '__main__':
    # Quick demo of the bitwise XOR implementation.
    a = 123
    b = 230
    # BUG FIX: `print X` was Python-2-only syntax; the call form below
    # works on both Python 2 and Python 3.
    print(XOR(a, b))
| Python | 0.000027 | |
bd01797f18012927202b87872dc33caf685306c0 | Add GDB plugin for printing ABC values | gdb.py | gdb.py | deadbeef = 0xdeadbeefdeadbeef
# GDB type handle for the runtime's tagged-value union ("union any"),
# used to reinterpret raw words via .cast() below.
abc_any = gdb.lookup_type("union any")
def color(s, c):
    """Wrap *s* in ANSI SGR escape codes using attribute string/number *c*."""
    return "\x1b[{0}m{1}\x1b[0m".format(c, s)
def gray(s):
    """Render *s* in bright-black (gray) ANSI text."""
    return color(s, 90)
def red(s):
    """Render *s* in bold red ANSI text (used for error markers)."""
    return color(s, "1;31")
def p(indent, tag, value):
    """Print `tag: value` indented by *indent* spaces."""
    prefix = " " * indent
    print("{0}{1}: {2}".format(prefix, tag, value))
def print_abc(i, v):
    """Recursively pretty-print an ABC runtime value *v* at indent *i*.

    Decodes the runtime's tagging scheme as reflected by the constants and
    field names below; the exact value layout is assumed from this code —
    confirm against the runtime's representation when in doubt.
    """
    v = v.cast(abc_any)
    vt = v['as_tagged']
    if vt == 0xdeadf00ddeadf00d:
        # dedicated sentinel bit pattern for the unit value
        p(i, "Unit", "Unit")
    elif vt == deadbeef:
        p(i, "Dead", "Beef")
    elif vt == 0:
        p(i, red("!!!NULL POINTER!!!"), "This should never happen")
    elif (vt & 0xfff0000000000000) != 0:
        # high bits set: a boxed number stored complemented — invert to read
        p(i, "Number", (~vt).cast(abc_any)['as_num'])
    elif vt < 0x00007f0000000000: # FIXME should get actual mappings -- don't know how to.
        # small addresses are treated as code pointers (blocks)
        block = gdb.block_for_pc(int(vt))
        if block == None:
            name = str(v['as_indirect'])
        else:
            name = str(block.function)
        p(i, "Block", name)
    else:
        # heap pointer with a 2-bit tag packed into the low bits
        tag = vt & 0x3
        ptr = vt & ~0x3
        hexptr = gray(hex(int(ptr)))
        v = ptr.cast(abc_any)
        try:
            if tag == 0:
                pair = v['as_pair'].dereference()
                if pair['snd']['as_tagged'] == deadbeef:
                    # a pair with dead snd encodes a Left injection
                    p(i, "Left", hexptr)
                    print_abc(i+4, pair['fst'])
                else:
                    p(i, "Pair", hexptr)
                    print_abc(i+4, pair['fst'])
                    print_abc(i+4, pair['snd'])
            elif tag == 1:
                pair = v['as_comp_block'].dereference()
                if pair['yz']['as_tagged'] == deadbeef:
                    # a composition with dead yz encodes a Right injection
                    p(i, "Right", hexptr)
                    print_abc(i+4, pair['xy'])
                else:
                    p(i, "Composed", hexptr)
                    print_abc(i+4, pair['xy'])
                    print_abc(i+4, pair['yz'])
            elif tag == 2:
                p(i, "Quoted", hexptr)
                print_abc(i+4, v['as_indirect'].dereference())
            else:
                p(i, "INVALID TAG", hexptr)
        except gdb.MemoryError:
            # dereference failed: the tagged word was not a valid pointer
            p(i, red("!!!INVALID POINTER!!!"), hexptr)
class PrintABCValue(gdb.Command):
    """GDB command `print-abc-value EXPR`: pretty-print an ABC runtime value."""

    def __init__(self):
        super(PrintABCValue, self).__init__('print-abc-value', gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL)

    def invoke(self, arg, tty):
        # Evaluate the user-supplied expression in the inferior and dump it.
        print_abc(0, gdb.parse_and_eval(arg))

# Instantiating the class registers the command with GDB as a side effect.
PrintABCValue()
| Python | 0 | |
2d320058c96f88348d8226fa4a827a6c2c973237 | Add Classical multidimensional scaling algorithm. | mds.py | mds.py | """
Simple implementation of classical MDS.
See http://www.stat.cmu.edu/~ryantibs/datamining/lectures/09-dim3-marked.pdf for more details.
"""
import numpy as np
import numpy.linalg as linalg
import matplotlib.pyplot as plt
def square_points(size):
    """Return the integer lattice points of a size x size grid.

    Result is an array of shape (size**2, 2) with rows (row, col).
    BUG FIX: `i/size` produced floats on Python 3; floor division keeps
    the coordinates integral on both Python 2 and 3.
    """
    nsensors = size**2
    return np.array([(i // size, i % size) for i in range(nsensors)])
def norm(vec):
    """Euclidean (L2) norm of *vec*."""
    squared = vec ** 2
    return np.sqrt(squared.sum())
def mds(D, dim=2):
    """
    Classical multidimensional scaling algorithm.
    Given a matrix of interpoint distances D, find a set of low dimensional points
    that have a similar interpoint distances.

    Returns (Y, S): the n x dim embedding and the full singular-value
    spectrum of the inner-product matrix.
    """
    (n,n) = D.shape
    # Double centering: turn squared distances into inner products B.
    A = (-0.5 * D**2)
    M = np.ones((n,n))/n
    I = np.eye(n)
    B = np.dot(np.dot(I-M, A),I-M)
    '''Another way to compute inner-products matrix B
    Ac = np.mat(np.mean(A, 1))
    Ar = np.mat(np.mean(A, 0))
    B = np.array(A - np.transpose(Ac) - Ar + np.mean(A))
    '''
    # B is symmetric, so its singular vectors/values play the role of its
    # eigendecomposition; scale the vectors by sqrt of the spectrum.
    [U,S,V] = linalg.svd(B)
    Y = U * np.sqrt(S)
    # Keep only the first `dim` coordinates of each point.
    return (Y[:,0:dim], S)
def test():
    """Demo: embed a 10x10 grid from its pairwise distance matrix and plot.

    Figure 1 shows the recovered embedding (correct up to rotation and
    reflection); figure 2 shows the original grid for comparison.
    """
    points = square_points(10)
    distance = np.zeros((100,100))
    for (i, pointi) in enumerate(points):
        for (j, pointj) in enumerate(points):
            distance[i,j] = norm(pointi-pointj)
    Y, eigs = mds(distance)
    plt.figure()
    plt.plot(Y[:,0], Y[:,1], '.')
    plt.figure(2)
    plt.plot(points[:,0], points[:,1], '.')
    plt.show()
def main():
    """Tiny CLI wrapper: `-d/--debug` drops into pdb before the MDS demo."""
    import sys, os, getopt, pdb

    def usage():
        # BUG FIX: this was a Python-2-only print statement (SyntaxError on
        # Python 3); the call form works on both.
        print(sys.argv[0] + "[-h] [-d]")

    try:
        (options, args) = getopt.getopt(sys.argv[1:], 'dh', ['help', 'debug'])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for o, a in options:
        if o in ('-h', '--help'):
            usage()
            sys.exit()
        elif o in ('-d', '--debug'):
            pdb.set_trace()
    test()

if __name__ == '__main__':
    main()
fe128c1172070476ba09536bbbbe81c69a46ef57 | fix bgp reggression | ryu/services/protocols/bgp/operator/commands/show/route_formatter_mixin.py | ryu/services/protocols/bgp/operator/commands/show/route_formatter_mixin.py | import six
class RouteFormatterMixin(object):
    """Mixin rendering BGP destinations/paths as a fixed-width text table."""

    # Column layout shared by the header row and every path row.
    fmtstr = ' {0:<3s} {1:<32s} {2:<8s} {3:<20s} {4:<15s} '\
             '{5:<6s} {6:<6s} {7:<}\n'

    @classmethod
    def _format_family_header(cls):
        """Return the legend lines plus the column-title row."""
        ret = ''
        ret += ('Status codes: * valid, > best\n')
        ret += ('Origin codes: i - IGP, e - EGP, ? - incomplete\n')
        ret += cls.fmtstr.format('', 'Network', 'Labels', 'Next Hop', 'Reason',
                                 'Metric', 'LocPrf', 'Path')
        return ret

    @classmethod
    def _format_family(cls, dest_list):
        """Format a list of destination dicts, each holding a 'paths' list."""
        # Pick the string buffer appropriate for the running interpreter.
        if six.PY3:
            import io
            msg = io.StringIO()
        else:
            import StringIO
            msg = StringIO.StringIO()

        def _append_path_info(buff, path, is_best, show_prefix):
            aspath = path.get('aspath')
            origin = path.get('origin')
            if origin:
                # NOTE: appends the origin code to the caller's list in place.
                aspath.append(origin)
            bpr = path.get('bpr')
            next_hop = path.get('nexthop')
            med = path.get('metric')
            labels = path.get('labels')
            localpref = path.get('localpref')
            # Construct path status string.
            path_status = '*'
            if is_best:
                path_status += '>'
            # Check if we want to show prefix.
            prefix = ''
            if show_prefix:
                prefix = path.get('prefix')
            # Append path info to String buffer.
            buff.write(cls.fmtstr.format(path_status, prefix, labels,
                                         next_hop, bpr, str(med),
                                         str(localpref),
                                         ' '.join(map(str, aspath))))

        for dist in dest_list:
            for idx, path in enumerate(dist.get('paths')):
                # only the first path of a destination shows the prefix column
                _append_path_info(msg, path, path['best'], (idx == 0))
        ret = msg.getvalue()
        msg.close()
        return ret
| import io
class RouteFormatterMixin(object):
    """Mixin rendering BGP destinations/paths as a fixed-width text table."""

    # Column layout shared by the header row and every path row.
    fmtstr = ' {0:<3s} {1:<32s} {2:<8s} {3:<20s} {4:<15s} '\
             '{5:<6s} {6:<6s} {7:<}\n'

    @classmethod
    def _format_family_header(cls):
        """Return the legend lines plus the column-title row."""
        header_parts = [
            'Status codes: * valid, > best\n',
            'Origin codes: i - IGP, e - EGP, ? - incomplete\n',
            cls.fmtstr.format('', 'Network', 'Labels', 'Next Hop', 'Reason',
                              'Metric', 'LocPrf', 'Path'),
        ]
        return ''.join(header_parts)

    @classmethod
    def _format_family(cls, dest_list):
        """Format a list of destination dicts, each holding a 'paths' list."""
        rows = []

        def _render_path(path, is_best, show_prefix):
            as_path = path.get('aspath')
            origin = path.get('origin')
            if origin:
                # NOTE: appends the origin code to the caller's list in place.
                as_path.append(origin)
            status = '*>' if is_best else '*'
            # Only the row carrying show_prefix repeats the prefix column.
            prefix = path.get('prefix') if show_prefix else ''
            rows.append(cls.fmtstr.format(
                status, prefix, path.get('labels'), path.get('nexthop'),
                path.get('bpr'), str(path.get('metric')),
                str(path.get('localpref')),
                ' '.join(map(str, as_path))))

        for dest in dest_list:
            for idx, path in enumerate(dest.get('paths')):
                _render_path(path, path['best'], idx == 0)
        return ''.join(rows)
a78d879c9c097c32c58f5246d46a4a188b17d99c | Add workup vebose name change migration. | workup/migrations/0002_add_verbose_names.py | workup/migrations/0002_add_verbose_names.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add human-readable verbose_name strings to Workup text fields.

    Auto-generated Django schema migration; it only alters field metadata
    (verbose names and blank/null options) on both the live `workup` model
    and its `historicalworkup` shadow — no column changes.
    """

    dependencies = [
        ('workup', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='historicalworkup',
            name='fam_hx',
            field=models.TextField(verbose_name=b'Family History'),
        ),
        migrations.AlterField(
            model_name='historicalworkup',
            name='labs_ordered_internal',
            field=models.TextField(null=True, verbose_name=b'Labs Ordered Internally', blank=True),
        ),
        migrations.AlterField(
            model_name='historicalworkup',
            name='labs_ordered_quest',
            field=models.TextField(null=True, verbose_name=b'Labs Ordered from Quest', blank=True),
        ),
        migrations.AlterField(
            model_name='historicalworkup',
            name='ros',
            field=models.TextField(verbose_name=b'ROS'),
        ),
        migrations.AlterField(
            model_name='historicalworkup',
            name='rx',
            field=models.TextField(null=True, verbose_name=b'Prescription Orders', blank=True),
        ),
        migrations.AlterField(
            model_name='historicalworkup',
            name='soc_hx',
            field=models.TextField(verbose_name=b'Social History'),
        ),
        migrations.AlterField(
            model_name='workup',
            name='fam_hx',
            field=models.TextField(verbose_name=b'Family History'),
        ),
        migrations.AlterField(
            model_name='workup',
            name='labs_ordered_internal',
            field=models.TextField(null=True, verbose_name=b'Labs Ordered Internally', blank=True),
        ),
        migrations.AlterField(
            model_name='workup',
            name='labs_ordered_quest',
            field=models.TextField(null=True, verbose_name=b'Labs Ordered from Quest', blank=True),
        ),
        migrations.AlterField(
            model_name='workup',
            name='ros',
            field=models.TextField(verbose_name=b'ROS'),
        ),
        migrations.AlterField(
            model_name='workup',
            name='rx',
            field=models.TextField(null=True, verbose_name=b'Prescription Orders', blank=True),
        ),
        migrations.AlterField(
            model_name='workup',
            name='soc_hx',
            field=models.TextField(verbose_name=b'Social History'),
        ),
    ]
| Python | 0 | |
d4a87c2131c02b3638743167ce32c779ece14fd5 | Create crawlerino.py | crawlerino.py | crawlerino.py | """Simple web crawler, to be extended for various uses.
Written in Python 3, uses requests and BeautifulSoup modules.
"""
def crawler(startpage, maxpages=100, singledomain=True):
    """Crawl the web starting from specified page.

    1st parameter = starting page url
    maxpages = maximum number of pages to crawl
    singledomain = whether to only crawl links within startpage's domain
    """
    import requests, re, bs4
    from urllib.parse import urldefrag, urljoin, urlparse
    from collections import deque

    pagequeue = deque()          # FIFO queue of pages still to crawl
    pagequeue.append(startpage)
    crawled = set()              # pages already crawled; set gives O(1) lookups
    domain = urlparse(startpage).netloc  # for the singledomain option
    pages = 0                    # pages successfully crawled so far
    failed = 0                   # pages that couldn't be crawled

    while pages < maxpages and pagequeue:
        url = pagequeue.popleft()  # next page to crawl (FIFO)
        try:
            response = requests.get(url)
            # .get() avoids a KeyError when no Content-Type header is present
            if not response.headers.get('content-type', '').startswith('text/html'):
                continue  # don't crawl non-HTML links
            soup = bs4.BeautifulSoup(response.text, "html.parser")
            print('Crawling:', url)
            pages += 1
            crawled.add(url)

            # PROCESSING CODE GOES HERE:
            # do something interesting with this page

            # get target URLs for all links on the page
            links = [a.attrs.get('href') for a in soup.select('a[href]')]
            # remove fragment identifiers
            links = [urldefrag(link)[0] for link in links]
            # remove any empty strings
            links = list(filter(None, links))
            # if it's a relative link, change to absolute
            links = [link if bool(urlparse(link).netloc) else urljoin(url, link)
                     for link in links]
            # if singledomain=True, remove links to other domains
            if singledomain:
                links = [link for link in links if urlparse(link).netloc == domain]

            # add these links to the queue (except if already crawled/queued)
            for link in links:
                if link not in crawled and link not in pagequeue:
                    pagequeue.append(link)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and made the crawler unstoppable.
            print("*FAILED*:", url)
            failed += 1

    print('{0} pages crawled, {1} pages failed to load.'.format(pages, failed))
# if running standalone, crawl some Microsoft pages as a quick smoke test
# (limits itself to 30 pages within www.microsoft.com)
if __name__ == "__main__":
    crawler('http://www.microsoft.com', maxpages=30, singledomain=True)
| Python | 0.000005 | |
91facfcc42e001e2a598d6d06e55270ef9239b1d | add migration | actstream/migrations/0006_auto_20170329_2048.py | actstream/migrations/0006_auto_20170329_2048.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-29 20:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an `Action.deleted` soft-delete flag and index `Action.timestamp`.

    Auto-generated Django migration (1.10.6).
    """

    dependencies = [
        ('actstream', '0005_auto_20161119_2211'),
    ]

    operations = [
        migrations.AddField(
            model_name='action',
            name='deleted',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='action',
            name='timestamp',
            field=models.DateTimeField(auto_now_add=True, db_index=True),
        ),
    ]
| Python | 0.000001 | |
1e050f30e8307a75976a52b8f1258a5b14e43733 | Add middleware for static serving | wsgi_static.py | wsgi_static.py | import wsgi_server
import os

from werkzeug.wsgi import SharedDataMiddleware

# WSGI entry point: wrap the main application so that /static/* is served
# directly from this package's ./static directory (useful when no front-end
# web server handles static files).
application = SharedDataMiddleware(wsgi_server.application, {
    '/static': os.path.join(os.path.dirname(__file__), 'static')
})
| Python | 0.000001 | |
a086307e6aac341ed8a6596d0a05b7a8d198c7ec | Add command to dump and restore user pointers. | zephyr/management/commands/dump_pointers.py | zephyr/management/commands/dump_pointers.py | from optparse import make_option
from django.core.management.base import BaseCommand
from zephyr.models import Realm, UserProfile
import simplejson
def dump():
    """Write every user's (email, pointer) pair to 'dumped-pointers' as JSON.

    Companion to restore(); one JSON list of [email, pointer] pairs plus a
    trailing newline.
    """
    pointers = [(u.user.email, u.pointer)
                for u in UserProfile.objects.select_related("user__email").all()]
    # BUG FIX: the old code used the py2-only file() builtin and never
    # closed the handle; a context manager closes it deterministically.
    with open("dumped-pointers", "w") as f:
        f.write(simplejson.dumps(pointers) + "\n")
def restore(change):
    """Read 'dumped-pointers' and restore each user's pointer.

    :param change: when False, only print what would change (dry run).
    """
    # BUG FIX: py2-only file() builtin, never closed; also converted the
    # py2 print statement to the call form (valid on py2 and py3).
    with open("dumped-pointers") as f:
        pointers = simplejson.loads(f.read())
    for (email, pointer) in pointers:
        u = UserProfile.objects.get(user__email=email)
        print("%s: pointer %s => %s" % (email, u.pointer, pointer))
        if change:
            u.pointer = pointer
            u.save()
class Command(BaseCommand):
    """Management command: dump user pointers to disk, or restore them.

    --restore        apply a previously dumped pointer file
    --dry-run / -n   with --restore, only print what would change
    (default, no flags: dump current pointers to 'dumped-pointers')
    """

    option_list = BaseCommand.option_list + (
        make_option('--restore', default=False, action='store_true'),
        make_option('--dry-run', '-n', default=False, action='store_true'),)

    def handle(self, *args, **options):
        if options["restore"]:
            # dry-run inverts into restore's `change` flag
            restore(change=not options['dry_run'])
        else:
            dump()
| Python | 0 | |
769abf579f7bd082f7c6f4295edb49b41b252bce | Add empty alembic revision | alembic/versions/4784a128a6dd_empty_revision.py | alembic/versions/4784a128a6dd_empty_revision.py | """Empty revision
This is the empty revision that can be used as the base for future
migrations.
Initial database creation shall be done via `metadata.create_all()` and
`alembic stamp head`.
Revision ID: 4784a128a6dd
Revises:
Create Date: 2017-12-13 00:48:12.079431
"""
from alembic import op
import sqlalchemy as sa
import pycroft
# revision identifiers, used by Alembic.
revision = '4784a128a6dd'
down_revision = None   # base revision: no parent to downgrade to
branch_labels = None
depends_on = None
def upgrade():
    # Intentionally a no-op: this is the empty base revision (see module
    # docstring — schema creation happens via metadata.create_all()).
    pass
def downgrade():
    # Intentionally a no-op: there is nothing below the empty base revision.
    pass
| Python | 0.000003 | |
4bf84b05b183916fd211f77ab8099ef14c9cec06 | Update migrations | app/timetables/migrations/0003_auto_20171107_1103.py | app/timetables/migrations/0003_auto_20171107_1103.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-11-07 11:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Set verbose_name (and course help_text) on the timetables name fields.

    Auto-generated Django migration (1.11); metadata-only field changes.
    """

    dependencies = [
        ('timetables', '0002_auto_20171005_2209'),
    ]

    operations = [
        migrations.AlterField(
            model_name='course',
            name='name',
            field=models.CharField(help_text='Example: appetizer, main course, dessert', max_length=150, verbose_name='Course Name'),
        ),
        migrations.AlterField(
            model_name='dish',
            name='name',
            field=models.CharField(max_length=255, verbose_name='Dish Name'),
        ),
        migrations.AlterField(
            model_name='meal',
            name='name',
            field=models.CharField(max_length=60, verbose_name='Meal Name'),
        ),
        migrations.AlterField(
            model_name='timetable',
            name='name',
            field=models.CharField(max_length=255, verbose_name='Timetable Name'),
        ),
        migrations.AlterField(
            model_name='vendor',
            name='name',
            field=models.CharField(max_length=255, verbose_name='Vendor Name'),
        ),
    ]
| Python | 0.000001 | |
74135b8289fa4b6684c54d8c9e37671c75b92447 | add admin for area settings | adhocracy4/maps/admin.py | adhocracy4/maps/admin.py | from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from . import models
@admin.register(models.AreaSettings)
class AreaSettingsAdmin(admin.ModelAdmin):
    """Admin for per-module map area settings (a GeoJSON polygon)."""

    # Filter by owning organisation/project; list rows by module.
    list_filter = ('module__project__organisation', 'module__project')
    list_display = ('module',)

    fieldsets = (
        (None, {'fields': ('module',)}),
        (_('Polygon'), {
            'fields': ('polygon',),
            'description': _('Enter a valid GeoJSON object. '
                             'To initialize a new areasetting enter the '
                             'string "false" without quotation marks.')
        })
    )
| Python | 0 | |
2dce9ed68463b536f246f01b2ac5cb275df2453b | add polynomial | regression.py | regression.py | # coding: utf8
from datetime import datetime
import itertools
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import PolynomialFeatures, Imputer
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import Ridge, BayesianRidge
from utils import *
# Load the two raw datasets (helper from the project-local utils module;
# presumably returns pandas DataFrames — confirm against utils).
data_accidents = load_data_from_csv('data/parsered1.csv', False)
data_weather = load_data_from_csv('data/weather_utf8.csv',False)
'''
Because of the fact that we don't have particular time (only dates) for accidents and do have time for weather
measurements, let's choose one time for all accident, e.g. 15:00.
'''
for i in data_accidents.index:
    #converting date to standard datetime representation, adding particular time
    # NOTE(review): DataFrame.ix is deprecated in modern pandas — .loc/.iloc
    data_accidents.ix[i,'date'] = str(datetime.strptime(str(data_accidents.ix[i,'date']) + ' 15:00:00', '%Y-%m-%d %H:%M:%S'))
for i in data_weather.index:
    data_weather.ix[i,'date'] = str(datetime.strptime(str(data_weather.ix[i,'date']),'%d.%m.%Y %H:%M'))
#merging two datasets on date
data = data_accidents.merge(data_weather, on='date')
#casting to numpy array: column 0 = accident count, column 1 = temperature
array = np.array(data[['num_dtp','T']].values, dtype=np.float64)
#preprocessing, completing missing values with the per-row median
imp = Imputer(missing_values='NaN', strategy='median', axis=1)
new_data = imp.fit_transform(array)
#sorting data by 'T', for better plotting
new_data = new_data[new_data[:, 1].argsort()]
x = new_data[:,1]     # temperature (feature)
y = new_data[:,0]     # number of accidents (target)
x_plot = x
# sklearn expects 2-D feature matrices, hence the added axis
X = x[:,np.newaxis]
X_plot = x_plot[:,np.newaxis]
plt.scatter(x, y, s = 30, label="training points")
#algos = itertools.cycle([Ridge(), BayesianRidge()])
# Fit ridge regressions on polynomial features of increasing degree and
# overlay each fitted curve on the scatter plot.
for degree in [1, 3, 5, 7]:
    model = make_pipeline(PolynomialFeatures(degree), Ridge())
    model.fit(X, y)
    y_plot = model.predict(X_plot)
    plt.plot(x_plot, y_plot, label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
| Python | 0.999999 | |
5982e1c875827569a02e9ecbe20d2a59392baf0c | move client to amcat repo for now | amcat/nlp/vunlpclient.py | amcat/nlp/vunlpclient.py | ###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of vunlp, the VU University NLP e-lab #
# #
# vunlp is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# vunlp is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with vunlp. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
"""
The VU NLP eLab offers a web service to facilitate natural language preprocessing
by running the preprocessing jobs, e.g. on a computer cluster like SARA's lisa.
This module contains a class Connection to facilitate talking to the web service
for parsing files.
Command line usage:
python client.py COMMAND argument [< text]
where COMMAND is one of:
- upload: upload the text to be parsed using the argument as parse_command,
printing the resulting handle if successful
- check: check the status of the file identified by the argument as handle
- download: retrieve the parser output of the file identified by the argument as handle
"""
from __future__ import unicode_literals, print_function, absolute_import
import requests, logging
log = logging.getLogger(__name__)
DEFAULT_URL = 'http://nlp.labs.vu.nl/webservice/index.php'

# Templates for API calls; instantiate with .format(url=..., filename=...).
# BUG FIX: the status/retrieve templates had no {filename} placeholder even
# though the Client methods pass filename=..., so every request asked the
# server about a literal placeholder instead of the uploaded file.
REQUEST_STATUS = "{url}?filstat=status&filnam={filename}"
REQUEST_RETRIEVE = "{url}?getparse=getparse&filnam={filename}"
REQUEST_ID = "{url}?getID"

serverurl = 'http://nlp.labs.vu.nl/webservice/index.php'
class Client():
    """
    Class that communicates with the vu nlp web service to upload, check, and retrieve parses.
    Since each Client lazily obtains a unique id, use the same client object for all actions on a file.
    """
    def __init__(self, url=DEFAULT_URL):
        """
        @param url: the url of the web service
        """
        self.url = url
        self._id = None  # unique client id, fetched lazily from the server

    def _get_filename(self):
        """Create a unique file name: server-assigned id + local sequence."""
        if self._id is None:
            log.debug("Requesting unique ID from server")
            r = requests.get(REQUEST_ID.format(url=self.url))
            r.raise_for_status()
            self._id = r.text.strip()
            self._filename_sequence = 0
            log.debug("Initialized parser with id: {self._id}".format(**locals()))
        else:
            self._filename_sequence += 1
        return "{self._id}_{self._filename_sequence}".format(**locals())

    def upload(self, parsecommand, text):
        """
        Upload the given text for parsing with the given command
        @param parsecommand: the command to use for parsing
        @param text: The text to parse (as string or file object)
        @returns: the handle with which the results can be checked/retrieved
        """
        filename = self._get_filename()
        command = "#!{parsecommand}\n{text}".format(**locals())
        # BUG FIX: the log template contained no placeholder, so it always
        # printed a literal "(unknown)" instead of the uploaded filename.
        log.debug("Uploading file {filename}".format(**locals()))
        r = requests.post(self.url, files=dict(infil=(filename, command)))
        r.raise_for_status()
        return filename

    def check(self, handle):
        """
        Check and return the parse status of the given file
        @param handle: a handle from a successful upload call
        @returns: a string indicating the status of the file
        """
        r = requests.get(REQUEST_STATUS.format(url=self.url, filename=handle))
        r.raise_for_status()
        log.debug("Checked status for {handle}: status={r.text!r}".format(**locals()))
        return r.text.strip()

    def download(self, handle):
        """
        Retrieve the parse results for this file. Throws if not yet parsed.
        @param handle: a handle from a successful upload call
        @return: a string containing the parse results
        """
        # TODO: Should the parser not return a 404 if a file is not found?
        r = requests.get(REQUEST_RETRIEVE.format(url=self.url, filename=handle))
        r.raise_for_status()
        if r.text == "notfound\n":
            raise Exception('Could not retrieve {handle}'.format(**locals()))
        return r.text
if __name__ == '__main__':
    # Simple CLI driver — see the module docstring for usage.
    logging.basicConfig(level=logging.DEBUG, format='[%(asctime)-15s %(name)s:%(lineno)d %(levelname)s] %(message)s')
    import sys
    if len(sys.argv) != 3:
        print(__doc__, file=sys.stderr)
        sys.exit(64)  # EX_USAGE
    command, argument = sys.argv[1:]
    if command == "upload":
        # the text to parse is read from stdin; argument is the parse command
        text = sys.stdin.read()
        handle = Client().upload(argument, text)
        print(handle)
    elif command == "check":
        # argument is the handle printed by a previous upload
        print(Client().check(argument))
    elif command == "download":
        print(Client().download(argument))
| Python | 0 | |
eb91b11930319369bc9cfc3b1b15c0b92fb4d85c | Add `OrganizationOption` tests based on `ProjectOption`. | tests/sentry/models/test_organizationoption.py | tests/sentry/models/test_organizationoption.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from sentry.models import OrganizationOption
from sentry.testutils import TestCase
class OrganizationOptionManagerTest(TestCase):
    """Exercises the custom manager helpers on ``OrganizationOption``:
    set/get/unset and bulk retrieval of per-organization key/value pairs."""

    def test_set_value(self):
        # set_value persists a row keyed by (organization, key).
        OrganizationOption.objects.set_value(self.organization, 'foo', 'bar')
        assert OrganizationOption.objects.filter(
            organization=self.organization, key='foo', value='bar').exists()

    def test_get_value(self):
        # A missing key yields None rather than raising.
        result = OrganizationOption.objects.get_value(self.organization, 'foo')
        assert result is None

        OrganizationOption.objects.create(
            organization=self.organization, key='foo', value='bar')
        result = OrganizationOption.objects.get_value(self.organization, 'foo')
        assert result == 'bar'

    def test_unset_value(self):
        # Unsetting a key that was never set must be a silent no-op.
        OrganizationOption.objects.unset_value(self.organization, 'foo')

        OrganizationOption.objects.create(
            organization=self.organization, key='foo', value='bar')
        OrganizationOption.objects.unset_value(self.organization, 'foo')
        assert not OrganizationOption.objects.filter(
            organization=self.organization, key='foo').exists()

    def test_get_value_bulk(self):
        # Bulk lookup maps each organization to its value, or None if unset.
        result = OrganizationOption.objects.get_value_bulk([self.organization], 'foo')
        assert result == {self.organization: None}

        OrganizationOption.objects.create(
            organization=self.organization, key='foo', value='bar')
        result = OrganizationOption.objects.get_value_bulk([self.organization], 'foo')
        assert result == {self.organization: 'bar'}
| Python | 0 | |
f264f8804c208f2b55471f27f92a9e8c1ab5d778 | Test our new happenings-by-year view. | tests/correlations/test_views.py | tests/correlations/test_views.py | # -*- coding: utf-8 -*-
import datetime
import pytest
from django.core.urlresolvers import reverse
from components.people.factories import GroupFactory, IdolFactory
@pytest.mark.django_db
def test_happenings_by_year_view(client):
    """The happenings-by-year page lists groups started in the given year."""
    # Idiom fix: the factories were created via a list comprehension whose
    # result (and loop variable) were discarded; use a plain loop with `_`.
    for _ in xrange(5):
        GroupFactory(started=datetime.date(2013, 1, 1))
    response = client.get(reverse('happenings-by-year', kwargs={'year': 2013}))
    assert response.status_code == 200
    assert 'object_list' in response.context
    assert '2010s' in response.context['years']
    assert 'correlations/happenings_year.html' in [template.name for template in response.templates]
| Python | 0 | |
43c4595ae26a7663538e712af37553c7a64fade7 | Add a couple unit tests for teuthology.parallel | teuthology/test/test_parallel.py | teuthology/test/test_parallel.py | from ..parallel import parallel
def identity(item, input_set=None, remove=False):
    """Return *item* unchanged.

    When *input_set* is given, assert that *item* is a member of it and,
    if *remove* is true, also remove *item* from the set in place.
    """
    if input_set is None:
        return item
    assert item in input_set
    if remove:
        input_set.remove(item)
    return item
class TestParallel(object):
    """Smoke tests for the ``parallel`` context manager."""

    def test_basic(self):
        # Spawn one task per item; each task asserts membership of its item
        # and removes it. Afterwards the pool must report that spawning
        # happened and that it counted one spawn per input item.
        in_set = set(range(10))
        with parallel() as para:
            for i in in_set:
                para.spawn(identity, i, in_set, remove=True)
        assert para.any_spawned is True
        assert para.count == len(in_set)

    def test_result(self):
        # Iterating the pool yields each spawned call's return value; every
        # yielded result must correspond to exactly one input item.
        in_set = set(range(10))
        with parallel() as para:
            for i in in_set:
                para.spawn(identity, i, in_set)
            for result in para:
                in_set.remove(result)
| Python | 0 | |
f370ee48c8aec312f9ea8a9ce1737214e51e2eaf | Disable repaint.key_mobile_sites_repaint. | tools/perf/benchmarks/repaint.py | tools/perf/benchmarks/repaint.py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from core import perf_benchmark
from benchmarks import silk_flags
from measurements import smoothness
from telemetry import benchmark
import page_sets
class _Repaint(perf_benchmark.PerfBenchmark):
    """Shared base for repaint benchmarks: runs the smoothness ``Repaint``
    measurement over the key-mobile-sites repaint page set."""

    @classmethod
    def AddBenchmarkCommandLineArgs(cls, parser):
        # Invalidation strategy and, for fixed_size mode, the invalidation
        # rectangle dimensions.
        parser.add_option('--mode', type='string',
                          default='viewport',
                          help='Invalidation mode. '
                          'Supported values: fixed_size, layer, random, viewport.')
        parser.add_option('--width', type='int',
                          default=None,
                          help='Width of invalidations for fixed_size mode.')
        parser.add_option('--height', type='int',
                          default=None,
                          help='Height of invalidations for fixed_size mode.')

    @classmethod
    def Name(cls):
        return 'repaint'

    def CreateStorySet(self, options):
        # width/height are only meaningful when mode == 'fixed_size'.
        return page_sets.KeyMobileSitesRepaintPageSet(
            options.mode, options.width, options.height)

    def CreatePageTest(self, options):
        return smoothness.Repaint()
#crbug.com/499320
#@benchmark.Enabled('android')
@benchmark.Disabled()
class RepaintKeyMobileSites(_Repaint):
    """Measures repaint performance on the key mobile sites.

    Currently disabled everywhere (see crbug.com/499320); re-enable on
    Android by restoring the commented-out decorator above.
    http://www.chromium.org/developers/design-documents/rendering-benchmarks"""

    @classmethod
    def Name(cls):
        return 'repaint.key_mobile_sites_repaint'
@benchmark.Enabled('android')
class RepaintGpuRasterizationKeyMobileSites(_Repaint):
    """Measures repaint performance on the key mobile sites with forced GPU
    rasterization.
    http://www.chromium.org/developers/design-documents/rendering-benchmarks"""
    tag = 'gpu_rasterization'

    def SetExtraBrowserOptions(self, options):
        # Force GPU rasterization via the shared silk flags helper.
        silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)

    @classmethod
    def Name(cls):
        return 'repaint.gpu_rasterization.key_mobile_sites_repaint'
| # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from core import perf_benchmark
from benchmarks import silk_flags
from measurements import smoothness
from telemetry import benchmark
import page_sets
class _Repaint(perf_benchmark.PerfBenchmark):
@classmethod
def AddBenchmarkCommandLineArgs(cls, parser):
parser.add_option('--mode', type='string',
default='viewport',
help='Invalidation mode. '
'Supported values: fixed_size, layer, random, viewport.')
parser.add_option('--width', type='int',
default=None,
help='Width of invalidations for fixed_size mode.')
parser.add_option('--height', type='int',
default=None,
help='Height of invalidations for fixed_size mode.')
@classmethod
def Name(cls):
return 'repaint'
def CreateStorySet(self, options):
return page_sets.KeyMobileSitesRepaintPageSet(
options.mode, options.width, options.height)
def CreatePageTest(self, options):
return smoothness.Repaint()
@benchmark.Enabled('android')
class RepaintKeyMobileSites(_Repaint):
"""Measures repaint performance on the key mobile sites.
http://www.chromium.org/developers/design-documents/rendering-benchmarks"""
@classmethod
def Name(cls):
return 'repaint.key_mobile_sites_repaint'
@benchmark.Enabled('android')
class RepaintGpuRasterizationKeyMobileSites(_Repaint):
"""Measures repaint performance on the key mobile sites with forced GPU
rasterization.
http://www.chromium.org/developers/design-documents/rendering-benchmarks"""
tag = 'gpu_rasterization'
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'repaint.gpu_rasterization.key_mobile_sites_repaint'
| Python | 0.000002 |
3d523bca7377c0f4c80a4f697b0c41d340eb8200 | add a command to clear the celery queue | crate_project/apps/crate/management/clear_celery.py | crate_project/apps/crate/management/clear_celery.py | import redis
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
r = redis.StrictRedis(host=settings.GONDOR_REDIS_HOST, port=settings.GONDOR_REDIS_PORT, password=settings.GONDOR_REDIS_PASSWORD)
r.delete("celery")
| Python | 0.000001 | |
33e7216ae9b367c509b5075496fce08d346743e2 | Implement channel limit | txircd/modules/rfc/cmode_l.py | txircd/modules/rfc/cmode_l.py | from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.module_interface import IMode, IModuleData, Mode, ModuleData
from txircd.utils import ModeType
from zope.interface import implements
class LimitMode(ModuleData, Mode):
implements(IPlugin, IModuleData, IMode)
name = "LimitMode"
core = True
affectedActions = [ "joinpermission" ]
def hookIRCd(self, ircd):
self.ircd = ircd
def channelModes(self):
return [ ("l", ModeType.Param, self) ]
def actions(self):
return [ ("modeactioncheck-channel-l-joinpermission", 10, self.isModeSet) ]
def isModeSet(self, channel, alsoChannel, user):
if "l" in channel.modes:
return channel.modes["l"]
return None
def checkSet(self, param):
try:
return [ int(param) ]
except ValueError:
return None
def apply(self, actionType, channel, param, alsoChannel, user):
if len(channel.users) >= param:
user.sendMessage(irc.ERR_CHANNELISFULL, channel.name, ":Cannot join channel (Channel is full)")
return False
return None
limitMode = LimitMode() | Python | 0.000001 | |
fc03641455ce005c340bdd0baf2463a7db41ba8f | test with captured packet from chrome | test/b.py | test/b.py | from spdy.connection import Connection, SERVER
b = b'\x80\x02\x00\x01\x01\x00\x01\x0e\x00\x00\x00\x01\x00\x00\x00\x00\x00\x008\xea\xdf\xa2Q\xb2b\xe0b`\x83\xa4\x17\x06{\xb8\x0bu0,\xd6\xae@\x17\xcd\xcd\xb1.\xb45\xd0\xb3\xd4\xd1\xd2\xd7\x02\xb3,\x18\xf8Ps,\x83\x9cg\xb0?\xd4=:`\x07\x81\xd5\x99\xeb@\xd4\x1b3\xf0\xa3\xe5i\x06A\x90\x8bu\xa0N\xd6)NI\xce\x80\xab\x81%\x03\x06\xbe\xd4<\xdd\xd0`\x9d\xd4<\xa8\xa5,\xa0<\xce\xc0\x07J\x089 \xa6\x95\xa5\xa9\xa5%\x03[.\xb0l\xc9Oa`vw\ra`+\x06&\xc7\xdcT\x06\xd6\x8c\x92\x92\x82b\x06f\x90\xbf\x19\xf5\x19\xb8\x10\x99\x95\x01\x18\xf5U\x9999\x89\xfa\xa6z\x06\n\x1a\x11\x00\x19\x1aZ+\xf8d\xe6\x95V(d\x9aY\x98i*8\x02}\x9e\x1a\x9e\x9a\xe4\x9dY\xa2ojl\xaagh\xa8\xa0\xe1\xed\x11\xe2\xeb\xa3\xa3\x90\x93\x99\x9d\xaa\xe0\x9e\x9a\x9c\x9d\xaf\xa9\xe0\x9c\x01,sR\xf5\r\xcd\xf5\x80\x01cf\xacgn\xa9\x10\x9c\x98\x96X\x94\t\xd5\xc4\xc0\x0e\ry\x06\x0eX\x84\x00\x00\x00\x00\xff\xff\x80\x02\x00\x06\x00\x00\x00\x04\x00\x00\x00\x01'
c = Connection(SERVER)
c.incoming(b)
print(c.get_frame())
print(c.get_frame())
print(c.get_frame())
print(c.get_frame())
| Python | 0 | |
ddc9a02ba64c24f8243bc299cd898bd337e5ce9a | isscalar predicate | datashape/predicates.py | datashape/predicates.py | from .util import collect, dshape
from .internal_utils import remove
from .coretypes import *
# https://github.com/ContinuumIO/datashape/blob/master/docs/source/types.rst
__all__ = ['isdimension', 'ishomogeneous', 'istabular', 'isfixed']
dimension_types = (Fixed, Var, Ellipsis)
isunit = lambda x: isinstance(x, Unit)
def isdimension(ds):
""" Is a component a dimension?
>>> isdimension(Fixed(10))
True
>>> isdimension(Var())
True
>>> isdimension(int32)
False
"""
return isinstance(ds, dimension_types)
def ishomogeneous(ds):
""" Does datashape contain only one dtype?
>>> ishomogeneous(int32)
True
>>> ishomogeneous('var * 3 * string')
True
>>> ishomogeneous('var * {name: string, amount: int}')
False
"""
ds = dshape(ds)
return len(set(remove(isdimension, collect(isunit, ds)))) == 1
def _dimensions(ds):
""" Number of dimensions of datashape
Interprets records as dimensional
>>> _dimensions(int32)
0
>>> _dimensions(10 * int32)
1
>>> _dimensions('var * 10 * int')
2
>>> _dimensions('var * {name: string, amount: int}')
2
"""
ds = dshape(ds)
if isdimension(ds[0]):
return 1 + _dimensions(ds.subarray(1))
if isinstance(ds[0], Record):
return 1 + max(map(_dimensions, ds[0].types))
if len(ds) == 1 and isunit(ds[0]):
return 0
if isinstance(ds[0], Option):
return _dimensions(ds[0].ty)
raise NotImplementedError('Can not compute dimensions for %s' % ds)
def isfixed(ds):
""" Contains no variable dimensions
>>> isfixed('10 * int')
True
>>> isfixed('var * int')
False
>>> isfixed('10 * {name: string, amount: int}')
True
>>> isfixed('10 * {name: string, amounts: var * int}')
False
"""
ds = dshape(ds)
if isinstance(ds[0], TypeVar):
return None # don't know
if isinstance(ds[0], Var):
return False
if isinstance(ds[0], Record):
return all(map(isfixed, ds[0].types))
if len(ds) > 1:
return isfixed(ds.subarray(1))
return True
def istabular(ds):
""" Can be represented by a two dimensional with fixed columns
>>> istabular('var * 3 * int')
True
>>> istabular('var * {name: string, amount: int}')
True
>>> istabular('var * 10 * 3 * int')
False
>>> istabular('10 * var * int')
False
"""
ds = dshape(ds)
return _dimensions(ds) == 2 and isfixed(ds.subarray(1))
def isscalar(ds):
""" Has no dimensions
>>> isscalar('int')
True
>>> isscalar('3 * int')
False
>>> isscalar('{name: string, amount: int}')
True
"""
ds = dshape(ds)
return not ds.shape
| from .util import collect, dshape
from .internal_utils import remove
from .coretypes import *
# https://github.com/ContinuumIO/datashape/blob/master/docs/source/types.rst
__all__ = ['isdimension', 'ishomogeneous', 'istabular', 'isfixed']
dimension_types = (Fixed, Var, Ellipsis)
isunit = lambda x: isinstance(x, Unit)
def isdimension(ds):
""" Is a component a dimension?
>>> isdimension(Fixed(10))
True
>>> isdimension(Var())
True
>>> isdimension(int32)
False
"""
return isinstance(ds, dimension_types)
def ishomogeneous(ds):
""" Does datashape contain only one dtype?
>>> ishomogeneous(int32)
True
>>> ishomogeneous('var * 3 * string')
True
>>> ishomogeneous('var * {name: string, amount: int}')
False
"""
ds = dshape(ds)
return len(set(remove(isdimension, collect(isunit, ds)))) == 1
def _dimensions(ds):
""" Number of dimensions of datashape
Interprets records as dimensional
>>> _dimensions(int32)
0
>>> _dimensions(10 * int32)
1
>>> _dimensions('var * 10 * int')
2
>>> _dimensions('var * {name: string, amount: int}')
2
"""
ds = dshape(ds)
if isdimension(ds[0]):
return 1 + _dimensions(ds.subarray(1))
if isinstance(ds[0], Record):
return 1 + max(map(_dimensions, ds[0].types))
if len(ds) == 1 and isunit(ds[0]):
return 0
if isinstance(ds[0], Option):
return _dimensions(ds[0].ty)
raise NotImplementedError('Can not compute dimensions for %s' % ds)
def isfixed(ds):
""" Contains no variable dimensions
>>> isfixed('10 * int')
True
>>> isfixed('var * int')
False
>>> isfixed('10 * {name: string, amount: int}')
True
>>> isfixed('10 * {name: string, amounts: var * int}')
False
"""
ds = dshape(ds)
if isinstance(ds[0], TypeVar):
return None # don't know
if isinstance(ds[0], Var):
return False
if isinstance(ds[0], Record):
return all(map(isfixed, ds[0].types))
if len(ds) > 1:
return isfixed(ds.subarray(1))
return True
def istabular(ds):
""" Can be represented by a two dimensional with fixed columns
>>> istabular('var * 3 * int')
True
>>> istabular('var * {name: string, amount: int}')
True
>>> istabular('var * 10 * 3 * int')
False
>>> istabular('10 * var * int')
False
"""
ds = dshape(ds)
return _dimensions(ds) == 2 and isfixed(ds.subarray(1))
| Python | 0.999798 |
7c24ffe52fe96339d14f522dc7c67122d01cead6 | add istabular predicate | datashape/predicates.py | datashape/predicates.py | from .util import collect, remove, dshape
from .coretypes import *
# https://github.com/ContinuumIO/datashape/blob/master/docs/source/types.rst
dimension_types = (Fixed, Var, Ellipsis)
isunit = lambda x: isinstance(x, Unit)
def isdimension(ds):
""" Is a component a dimension?
>>> isdimension(Fixed(10))
True
>>> isdimension(Var())
True
>>> isdimension(int32)
False
"""
return isinstance(ds, dimension_types)
def ishomogenous(ds):
""" Does datashape contain only one dtype?
>>> ishomogenous(int32)
True
>>> ishomogenous(var * (3 * string))
True
>>> ishomogenous(var * Record([('name', string), ('amount', int32)]))
False
"""
return len(set(remove(isdimension, collect(isunit, ds)))) == 1
def dimensions(ds):
""" Number of dimensions of datashape
Interprets records as dimensional
>>> dimensions(int32)
0
>>> dimensions(10 * int32)
1
>>> dimensions(var * (10 * int32))
2
>>> dimensions(var * Record([('name', string), ('amount', int32)]))
2
"""
if not isinstance(ds, DataShape):
ds = dshape(ds)
if isdimension(ds[0]):
return 1 + dimensions(ds.subarray(1))
if isinstance(ds[0], Record):
return 1 + max(map(dimensions, ds[0].fields.values()))
if len(ds) == 1 and isunit(ds[0]):
return 0
raise NotImplementedError('Can not compute dimensions for %s' % ds)
def isfixed(ds):
""" Contains no variable dimensions
>>> isfixed('10 * int')
True
>>> isfixed('var * int')
False
>>> isfixed('10 * {name: string, amount: int}')
True
>>> isfixed('10 * {name: string, amounts: var * int}')
False
"""
if not isinstance(ds, DataShape):
ds = dshape(ds)
if isinstance(ds[0], Var):
return False
if isinstance(ds[0], Record):
return all(map(isfixed, ds[0].fields.values()))
if len(ds) > 1:
return isfixed(ds.subarray(1))
return True
def istabular(ds):
""" Can be represented by a two dimensional with fixed columns
>>> istabular('var * 3 * int')
True
>>> istabular('var * {name: string, amount: int}')
True
>>> istabular('var * 10 * 3 * int')
False
>>> istabular('10 * var * int')
False
"""
if not isinstance(ds, DataShape):
ds = dshape(ds)
return dimensions(ds) == 2 and isfixed(ds.subarray(1))
| from .util import collect, remove, dshape
from .coretypes import *
# https://github.com/ContinuumIO/datashape/blob/master/docs/source/types.rst
dimension_types = (Fixed, Var, Ellipsis)
isunit = lambda x: isinstance(x, Unit)
def isdimension(ds):
""" Is a component a dimension?
>>> isdimension(Fixed(10))
True
>>> isdimension(Var())
True
>>> isdimension(int32)
False
"""
return isinstance(ds, dimension_types)
def ishomogenous(ds):
""" Does datashape contain only one dtype?
>>> ishomogenous(int32)
True
>>> ishomogenous(var * (3 * string))
True
>>> ishomogenous(var * Record([('name', string), ('amount', int32)]))
False
"""
return len(set(remove(isdimension, collect(isunit, ds)))) == 1
def dimensions(ds):
""" Number of dimensions of datashape
Interprets records as dimensional
>>> dimensions(int32)
0
>>> dimensions(10 * int32)
1
>>> dimensions(var * (10 * int32))
2
>>> dimensions(var * Record([('name', string), ('amount', int32)]))
2
"""
if not isinstance(ds, DataShape):
ds = dshape(ds)
if isdimension(ds[0]):
return 1 + dimensions(ds.subarray(1))
if isinstance(ds[0], Record):
return 1 + max(map(dimensions, ds[0].fields.values()))
if len(ds) == 1 and isunit(ds[0]):
return 0
raise NotImplementedError('Can not compute dimensions for %s' % ds)
def isfixed(ds):
""" Contains no variable dimensions
>>> isfixed('10 * int')
True
>>> isfixed('var * int')
False
>>> isfixed('10 * {name: string, amount: int}')
True
>>> isfixed('10 * {name: string, amounts: var * int}')
False
"""
if not isinstance(ds, DataShape):
ds = dshape(ds)
if isinstance(ds[0], Var):
return False
if isinstance(ds[0], Record):
return all(map(isfixed, ds[0].fields.values()))
if len(ds) > 1:
return isfixed(ds.subarray(1))
return True
| Python | 0.999995 |
3406467f3d17621d436fc05d8820e21b7399a241 | add simple depth frame network benchmark | scripts/depth_client.py | scripts/depth_client.py | #!/usr/bin/env python
"""
Simple benchmark of how fast depth frames are delivered.
"""
import logging
import threading
import time
from tornado.ioloop import IOLoop, PeriodicCallback
from streamkinect2.server import ServerBrowser
from streamkinect2.client import Client
# Install the zmq ioloop
from zmq.eventloop import ioloop
ioloop.install()
# Get our logger
log = logging.getLogger(__name__)
class Benchmark(object):
    """Counts incoming depth frames per kinect and logs a cumulative
    frames-per-second report once a second on the tornado IO loop."""

    def __init__(self, io_loop=None):
        self.io_loop = io_loop or IOLoop.instance()
        # kinect_id -> {'start': wall-clock start time, 'count': frames seen}
        self.records = {}
        self.report_callback = PeriodicCallback(self._report, 1000, self.io_loop)
        self.report_callback.start()

    def on_depth_frame(self, client, depth_frame, kinect_id):
        self.records[kinect_id]['count'] += 1

    def on_add_kinect(self, client, kinect_id):
        # sender=client restricts the signal to frames from this client.
        client.on_depth_frame.connect(self.on_depth_frame, sender=client)
        log.info('Enabling depth streaming on kinect "{0}"'.format(kinect_id))
        client.enable_depth_frames(kinect_id)
        self.records[kinect_id] = { 'start': time.time(), 'count': 0, }

    def new_client(self, client, io_loop):
        """Called when a new client has been created. Enable depth streaming on all
        devices and benchmark result."""
        # Register interest in devices
        client.on_add_kinect.connect(self.on_add_kinect, sender=client)

    def _report(self):
        # Log the cumulative mean fps per kinect (not a sliding window).
        now = time.time()
        for k, v in self.records.items():
            delta = now - v['start']
            # BUG FIX: the format spec was '{3:1f}' (min field width 1, six
            # default decimals); '{3:.1f}' -- one decimal place -- was meant.
            log.info('Kinect "{0}", {1} frames in {2:.0f} seconds => {3:.1f} fps'.format(
                k, v['count'], delta, v['count']/delta))
# Our listening class
class Listener(object):
    """Reacts to server discovery: creates a Client per discovered server
    and hands it to the Benchmark; disconnects it when the server goes."""

    def __init__(self, browser, benchmark, io_loop = None):
        self.benchmark = benchmark
        self.io_loop = io_loop or IOLoop.instance()
        browser.on_add_server.connect(self.add_server, sender=browser)
        browser.on_remove_server.connect(self.remove_server, sender=browser)

        # Keep a reference to browser since we remain interested and do not
        # wish it garbage collected.
        self.browser = browser

        # Keep a list of clients for each server which appears
        self.clients = {}

    def add_server(self, browser, server_info):
        log.info('Discovered server "{0.name}" at "{0.endpoint}"'.format(server_info))
        client = Client(server_info.endpoint, connect_immediately=True)
        self.clients[server_info.endpoint] = client
        self.benchmark.new_client(client, self.io_loop)

    def remove_server(self, browser, server_info):
        log.info('Server "{0.name}" at "{0.endpoint}" went away'.format(server_info))

        try:
            client = self.clients[server_info.endpoint]
        except KeyError:
            # We didn't have a client for this server
            return

        client.disconnect()
        del self.clients[server_info.endpoint]
class IOLoopThread(threading.Thread):
    """Runs the zmq/tornado IO loop on a background thread so the main
    thread stays free to block on user input."""

    def run(self):
        # Create the server browser
        log.info('Creating server browser...')
        listener = Listener(ServerBrowser(), Benchmark())

        # Run the ioloop
        log.info('Running...')
        ioloop.IOLoop.instance().start()

        log.info('Stopping')

    def stop(self):
        # Schedule the stop on the loop's own thread, then wait up to 3s.
        io_loop = ioloop.IOLoop.instance()
        io_loop.add_callback(io_loop.stop)
        self.join(3)
def main():
    """Entry point: run the IO loop in the background until Enter is pressed."""
    # Set log level
    logging.basicConfig(level=logging.INFO)

    print('=============================================')
    print('Press Enter to exit')
    print('=============================================')

    # Start the event loop
    ioloop_thread = IOLoopThread()
    ioloop_thread.start()

    # Wait for input
    # NOTE(review): under Python 2, input() evaluates the typed text; the
    # intent here is only "wait for Enter" -- confirm the target interpreter.
    input()

    # Stop thread
    ioloop_thread.stop()

if __name__ == '__main__':
    main()
| Python | 0 | |
9dd20f8361cff99329a5ab4b526e29edddac9a61 | add session.py | session.py | session.py | #!/usr/bin/python
#A quick and dirty interface to end a session
# This assumes systemd and xinitrc (for logout)
#By Charles Bos
from tkinter import *
import os
import sys
def getWm():
    """Return the pkill target: argv[1] (the window manager name) when
    given, otherwise '-u $USER' to end the whole session."""
    if len(sys.argv) == 1:
        return "-u $USER"
    return sys.argv[1]
def runAction():
    """Run the shell command matching the selected radio button (1..5)."""
    commands = {
        1: "pkill " + getWm(),
        2: "systemctl suspend",
        3: "systemctl hibernate",
        4: "systemctl reboot",
        5: "systemctl poweroff",
    }
    command = commands.get(option.get())
    if command is not None:
        os.system(command)
class UI() :
    """Builds the row of session-action radio buttons plus an Ok button.

    Stores the selection in the module-global ``option`` IntVar so that
    runAction() can read it when Ok is pressed."""

    def __init__(self, parent) :
        global option
        option = IntVar()
        # Note: .grid() returns None, so r1..r5/b1 are always None; the
        # assignments only serve to name the widgets in the source.
        r1 = Radiobutton(parent, text = "Logout", variable = option, value = 1).grid(row = 2, column = 1)
        r2 = Radiobutton(parent, text = "Suspend", variable = option, value = 2).grid(row = 2, column = 2)
        r3 = Radiobutton(parent, text = "Hibernate", variable = option, value = 3).grid(row = 2, column = 3)
        r4 = Radiobutton(parent, text = "Reboot", variable = option, value = 4).grid(row = 2, column = 4)
        r5 = Radiobutton(parent, text = "Poweroff", variable = option, value = 5).grid(row = 2, column = 5)
        b1 = Button(parent, text = "Ok", command = runAction).grid(row = 3, column = 1, columnspan = 5)
# Build the toplevel window and hand control to Tk's event loop.
top = Tk()
top.title("End session")
ui = UI(top)
top.mainloop()
| Python | 0.000001 | |
c43d929f9ee2f21a7e93986171307cd0f17fa96c | add unittests of helpers | tests/test_helper.py | tests/test_helper.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from types import BuiltinFunctionType
from clime.helper import *
class TestClimeHelper(unittest.TestCase):
    """Unit tests for the clime.helper utilities."""

    def test_autotype(self):
        # Numeric-looking strings become int/float; everything else (and
        # None) passes through unchanged.
        cases   = ('string', '100', '100.0', None)
        answers = ('string', 100 , 100.0 , None)
        for case, answer in zip(cases, answers):
            self.assertEqual(autotype(case), answer)

    def test_getargspec(self):
        # Usage strings in docstrings are parsed into (arg names, number of
        # optional args); `trans` reduces the full spec to that pair.
        docs = [
            None,
            '',
            'abcd',
            'f1()',
            'f2(x)',
            'f3(x, y)',
            'f4(x[, a])',
            'f5(x, y[, a])',
            'f6(x, y[, a[, b]])',
            'f7([a])',
            'f8([a[, b]])',
        ]
        answers = [
            (None, 0),
            (None, 0),
            (None, 0),
            (None, 0),
            (['x'], 0),
            (['x', 'y'], 0),
            (['x', 'a'], 1),
            (['x', 'y', 'a'], 1),
            (['x', 'y', 'a', 'b'], 2),
            (['a'], 1),
            (['a', 'b'], 2),
        ]
        # A throwaway object whose __doc__ we can rewrite per case.
        f = type('Dummy', tuple(), {'__doc__': None})()
        trans = lambda x: (x[0], len(x[-1] or []))
        for doc, answer in zip(docs, answers):
            f.__doc__ = doc
            self.assertEqual(trans(getargspec( f )), answer)

    def test_getoptmetas(self):
        # NOTE(review): the column alignment inside this docstring may have
        # been collapsed by whitespace mangling -- verify against the
        # original file if the parser is column-sensitive.
        doc = """
        -d, --debug enable debug mode
        -q, -s, --quiet, --slient enable slient mode
        -n N, --times N how many times do you want
        """
        answer = [ [('d', None), ('debug', None)],
                   [('q', None), ('s', None), ('quiet', None), ('slient', None)],
                   [('n', 'N'), ('times', 'N')] ]
        self.assertEqual(list(getoptmetas(doc)), answer)

if __name__ == '__main__':
    unittest.main()
| Python | 0 | |
dd8496c61543b3e39c5ee3ccb8bc7b9f69e9487f | add tests for packet | tests/test_packet.py | tests/test_packet.py | from zope.interface.verify import verifyClass, verifyObject
from ironman.packet import IPBusPacket
from ironman.interfaces import IIPbusPacket
def test_ipbus_packet_create():
    """Constructing a packet must succeed and yield a real object."""
    packet = IPBusPacket()
    assert packet is not None
def test_ipbus_packet_class_iface():
    # Assure the class implements the declared interface
    # BUG FIX: this referred to the undefined name ``IIPBusPacket`` while
    # the module imports ``IIPbusPacket`` -- a guaranteed NameError.
    assert verifyClass(IIPbusPacket, IPBusPacket)
def test_ipbus_packet_instance_iface():
    # Assure instances of the class provide the declared interface
    # BUG FIX: use the imported ``IIPbusPacket`` name (was ``IIPBusPacket``).
    assert verifyObject(IIPbusPacket, IPBusPacket())
| Python | 0.000001 | |
a6e4516f4cae0d42e1b7ccf30b34d25837a5123a | add tableTemplateClass (for IE) | src/ObjectModel/tableTemplateClass.py | src/ObjectModel/tableTemplateClass.py | # -*- coding: utf-8 -*-
"""
Table Template
Hervé Déjean
cpy Xerox 2017
READ project
"""
from templateClass import templateClass
import numpy as np
from ObjectModel.XMLDSTABLEClass import XMLDSTABLEClass
class tableTemplateClass(templateClass):
    """
    table template class

    Describes a table regarding an IE extraction problem.  The template is
    a list of labelling instructions, each pairing a 2D numpy-style slice
    of the table with the fields to attach to every cell in that slice,
    e.g. ([1:, 2], (fieldname, None)) tags column 2 skipping the first row.
    """
    def __init__(self):
        templateClass.__init__(self)
        # list of labelling instructions: (slice tuple, list of fields)
        self._lLabellingInstruction = []

    def __str__(self): return 'tableTemplate'
    def __repr__(self): return 'tableTemplate'

    @classmethod
    def string2slice(cls, s):
        """
        Convert a string such as "1:", "1:5" or "1:5:2" into a slice.

        BUG FIX: the previous version built slices from the raw string
        tokens (e.g. slice('1', '5'), unusable for indexing) and raised
        UnboundLocalError for inputs without a colon.  Empty tokens now map
        to None (open-ended); non-empty tokens are parsed as int.
        """
        parts = s.split(":")
        if len(parts) > 3:
            raise ValueError('invalid slice specification: %r' % s)
        values = [int(tok) if tok.strip() else None for tok in parts]
        if len(values) == 1:
            # A bare value behaves like Python's one-argument slice(stop).
            return slice(values[0])
        while len(values) < 3:
            values.append(None)
        return slice(values[0], values[1], values[2])

    def buildFromPattern(self, p):
        """
        Store the labelling instructions of pattern *p*.

        a small language, a la numpy:
            ([:],(fieldname,value))
            ([:,2],(fieldname,value))
            ([1:,2],(fieldname,None))   column 2, ignoring the first row
        Each entry is (index, lFields): a 2D slice tuple plus the fields to
        tag on every cell in that region.
        """
        for index, lFields in p:
            # (debug print of each instruction removed)
            self._lLabellingInstruction.append((index, lFields))

    def labelTable(self, table):
        """
        Apply every stored instruction to *table*: clone each field into
        every cell selected by the instruction's slice.

        np.nditer iterates the sliced object ndarray of cells; each item is
        a 0-d array, so cell[()] unwraps the actual cell instance.
        """
        for sslice, lFields in self._lLabellingInstruction:
            for field in lFields:
                for cell in np.nditer(table.getNPArray()[sslice], ['refs_ok'],
                                      op_dtypes=np.dtype(object)):
                    cell[()].addField(field.cloneMe())

    # The hooks below belong to the templateClass contract and are
    # intentionally unimplemented here.  BUG FIX: raising a plain string
    # ('raise "..."') is a TypeError on any modern Python; raise a real
    # exception type instead.
    def registration(self, o):
        raise NotImplementedError("tableTemplateClass.registration is not implemented")

    def describeMe(self):
        """
        a plain text description of this template
        """
        raise NotImplementedError("tableTemplateClass.describeMe is not implemented")

    def tagDom(self, dom):
        raise NotImplementedError("tableTemplateClass.tagDom is not implemented")
# --- AUTO-TESTS --------------------------------------------------------------------------------------------
#def test_template():
if __name__ == "__main__":
    # Ad-hoc smoke test: build a 2x2 table of cells by hand, apply a
    # template, and print the fields attached to every cell.
    from XMLDSCELLClass import XMLDSTABLECELLClass
    from recordClass import fieldClass

    table = XMLDSTABLEClass()
    cell1 = XMLDSTABLECELLClass()
    cell1.setIndex(0, 0)
    cell2 = XMLDSTABLECELLClass()
    cell2.setIndex(0, 1)
    cell2.setContent('Maria Schmidt')
    cell3 = XMLDSTABLECELLClass()
    cell3.setIndex(1, 0)
    cell3.setContent('kindlein')
    cell4 = XMLDSTABLECELLClass()
    cell4.setIndex(1, 1)
    table.addCell(cell1)
    table.addCell(cell2)
    table.addCell(cell3)
    table.addCell(cell4)
    # Derive rows/columns and the ndarray representation from the cells.
    table.buildColumnFromCells()
    table.buildRowFromCells()
    table.buildNDARRAY()
    print table.getNPArray()

    # ([1:,2],(fieldname,None))
    # Template: tag column 0 (rows 1..) with name/fistname and row 1
    # (columns 1..) with 'ledig'.
    stemplate = '[((slice(1,None),slice(0,1)) ,["name", "fistname"]),((slice(1,2),slice(1,None)) ,["ledig"])]'
    myTemplate = tableTemplateClass()
    # build ltemplate with record
    ltemplate = eval(stemplate)
    myTemplate.buildFromPattern(ltemplate)
    myTemplate.labelTable(table)
    for cell in table.getCells():
        print cell.getIndex(), cell.getFields()

    ## extract data using field information
    field = fieldClass('name')
| Python | 0 | |
b2aace3212f51ac7db83281903e6282849a58adb | add portmanteau finder | portmanteau.py | portmanteau.py | """
Let's find pairs of words that blend nicely, like
book + hookup --> bookup
Strategy: given a wordlist, first remove generative affixes like un-
and -ly. Find all reasonably-long substrings of every word. Match
suffixes of candidate first words with midparts of candidate second
words.
TODO: get better at stripping affixes
(though that's not always a win: e.g.:
contendresse contendress + tendresse)
(also: bunchawed from bunch and unchawed)
TODO: currently we're matching suffixes against prefixes instead
of midparts, so the motivating example above doesn't even appear...
TODO: the pronunciations should blend, not just the spelling.
"""
import re
# Load the word list once at import time (one lowercase word per line).
raw_words = set(unicode(line.rstrip('\n'), 'utf8').lower()
                for line in open('words')) #open('/usr/share/dict/words'))

# Generative prefixes: a word counts as "noisy" when stripping one of these
# leaves another dictionary word.
left_noise = """
be bi em en di duo im iso non oct octo out pre quad quadra quadri re
sub tri un uni
""".split()

# Likewise for generative suffixes.
right_noise = """
ability able adian age an ation d ed en ent er es escent ful ian ic
ies ily iness ing ish ite ize less let log like liness ly ness og
ogy proof r ress ry s ship tion y
""".split()
def noisy(w):
    """True when *w* is derived from other dictionary words: it carries a
    generative prefix or suffix, or splits into two dictionary words."""
    if any(w.startswith(ln) and w[len(ln):] in raw_words for ln in left_noise):
        return True
    if any(w.endswith(rn) and w[:-len(rn)] in raw_words for rn in right_noise):
        return True
    return any(w[:i] in raw_words and w[i:] in raw_words
               for i in range(1, len(w)))
# Keep only "base" words, i.e. words not derived from other words.
words = set(w for w in raw_words if not noisy(w))

# Flip to True to dump the filtered word list and exit early (debug aid).
if False:
    for word in sorted(words):
        print word
    import sys
    sys.exit(0)

# Map every prefix of length >= 3 (up to and including the whole word) to
# the words that start with it.
prefixes = {}
for w in words:
    if 3 < len(w):
        for i in range(3, len(w)+1):
            p = w[:i]
            prefixes.setdefault(p, []).append(w)

# Map every suffix of length >= 4 (up to and including the whole word) to
# the words that end with it.
suffixes = {}
for w in words:
    if 3 < len(w):
        for i in range(len(w)-3):
            p = w[i:]
            suffixes.setdefault(p, []).append(w)

# An affix is "common" when it ends some word and starts another, and at
# least one such pair blends into a string not already in the dictionary.
common = set()
for prefix, prefix_words in prefixes.iteritems():
    if prefix in suffixes:
        suffix_words = suffixes[prefix]
        if suffix_words != prefix_words:
            if any(not p.startswith(s)
                   and not s.endswith(p)
                   and (s + p[len(prefix):]) not in raw_words
                   for p in prefix_words
                   for s in suffix_words):
                common.add(prefix)

print len(common)
print max(common, key=len)
def portmanteaus(affix):
    """Yield (suffix_word, prefix_word, affix) for every pair of words
    sharing *affix* whose blend is not already a dictionary word."""
    for prefix_word in prefixes[affix]:
        for suffix_word in suffixes[affix]:
            if prefix_word.startswith(suffix_word):
                continue
            if suffix_word.endswith(prefix_word):
                continue
            blend = suffix_word + prefix_word[len(affix):]
            if blend not in raw_words:
                yield suffix_word, prefix_word, affix
import math
import pdist
def score((s, p, affix)):
return -math.log10(pdist.Pw(s) * pdist.Pw(p) * 1.1**len(affix))
L = len(s) + len(p) - len(affix)
return -math.log10(pdist.Pw(s) * pdist.Pw(p) * 2**(-float(L)/len(affix)))
# Score every candidate blend, then print them best (lowest score) first.
results = [(score(triple), triple)
           for affix in common
           for triple in portmanteaus(affix)]
# NOTE(review): the loop variable below shadows the score() function;
# harmless here since all scoring is already done, but worth renaming.
for score, (s, p, affix) in sorted(results):
    combo = s + p[len(affix):]
    print ' %6.2f %-30s %s + %s' % (score, combo, s, p)
| Python | 0.000679 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.