commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
c658aeed099b4ac85ae4a5868e9e2c9d4d96e335 | Create downloadBingWallpaperNew.pyw | downloadBingWallpaperNew.pyw | downloadBingWallpaperNew.pyw | #!/usr/bin/env python
# -- coding: utf-8 --
import urllib,re,urllib.request,os,win32api,win32gui
from win32api import *
from win32gui import *
import win32con
import sys
import struct
import time
import json
class WindowsBalloonTip:
def __init__(self, title, msg):
message_map = {
win32con.WM_DESTROY: self.OnDestroy,
}
# Register the Window class.
wc = WNDCLASS()
hinst = wc.hInstance = GetModuleHandle(None)
wc.lpszClassName = "PythonTaskbar"
wc.lpfnWndProc = message_map # could also specify a wndproc.
classAtom = RegisterClass(wc)
# Create the Window.
style = win32con.WS_OVERLAPPED | win32con.WS_SYSMENU
self.hwnd = CreateWindow( classAtom, "Taskbar", style, \
0, 0, win32con.CW_USEDEFAULT, win32con.CW_USEDEFAULT, \
0, 0, hinst, None)
UpdateWindow(self.hwnd)
iconPathName = os.path.abspath(os.path.join( sys.path[0], "bing.ico" ))
icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
try:
hicon = LoadImage(hinst, iconPathName, \
win32con.IMAGE_ICON, 0, 0, icon_flags)
except:
hicon = LoadIcon(0, win32con.IDI_APPLICATION)
flags = NIF_ICON | NIF_MESSAGE | NIF_TIP
nid = (self.hwnd, 0, flags, win32con.WM_USER+20, hicon, "tooltip")
Shell_NotifyIcon(NIM_ADD, nid)
Shell_NotifyIcon(NIM_MODIFY, \
(self.hwnd, 0, NIF_INFO, win32con.WM_USER+20,\
hicon, "Balloon tooltip",msg,200,title))
# self.show_balloon(title, msg)
time.sleep(10)
DestroyWindow(self.hwnd)
def OnDestroy(self, hwnd, msg, wparam, lparam):
nid = (self.hwnd, 0)
Shell_NotifyIcon(NIM_DELETE, nid)
PostQuitMessage(0) # Terminate the app.
def balloon_tip(title, msg):
w=WindowsBalloonTip(title, msg)
def tryDownloadVideo(result):
vid = result['images'][0]
if 'vid' in vid.keys():
src = vid['vid']['sources'][1]
video_url = src[2]
if(not video_url):
return False;
video_url = video_url.replace('\\',"")
print(video_url)
video_name = video_url.split('/')[-1]
print(video_name)
if(video_url.find('http') > 0):
video_src = urllib.request.urlopen(video_url).read()
else:
video_src = urllib.request.urlopen("http:" + video_url).read()
video_file = video_folder + "/" + video_name
with open(video_file, 'wb') as f:
f.write(video_src)
balloon_tip("视频也已下载完毕","本日的壁纸包含对应视频,已经保存到您的电脑目录")
return True;
pic_folder = "D:/OneDrive/图片/必应壁纸"
video_folder = "D:/OneDrive/图片/必应视频"
bing_url = "http://cn.bing.com/HPImageArchive.aspx?format=js&idx=0&n=1&nc=1439260838289&pid=hp&video=1"
#未联网的情况下等待
times = 10
waitSeconds = 15
i = 0;
if_connected = False
try:
urllib.request.urlopen("http://www.baidu.com")
if_connected = True
except:
if_connected = False
while not if_connected:
i = i + 1
print("未连接到互联网,等待重新尝试:当前是第" + str(i) + "次尝试")
time.sleep(waitSeconds)
try:
urllib.request.urlopen("http://www.baidu.com")
if_connected = True
except:
if_connected = False
if i == times:
print("长时间未联网,自动断开")
exit()
content = urllib.request.urlopen(bing_url).read().decode('utf-8')
result = json.loads(content)
wallpaper_url = result['images'][0]['url']
print(wallpaper_url)
pic_name = wallpaper_url.split('/')[-1]
print(pic_name)
img_src = urllib.request.urlopen(wallpaper_url).read()
img_file = pic_folder + "/" + pic_name
if os.path.isfile(img_file):
print("文件已经存在")
balloon_tip("壁纸已经是最新了","最新的壁纸已经保存到您的图片目录下了,不必重新下载")
else:
with open(img_file, 'wb') as f:
f.write(img_src)
k = win32api.RegOpenKeyEx(win32con.HKEY_CURRENT_USER,"Control Panel\\Desktop",0,win32con.KEY_SET_VALUE)
win32api.RegSetValueEx(k, "WallpaperStyle", 0, win32con.REG_SZ, "0")
win32api.RegSetValueEx(k, "TileWallpaper", 0, win32con.REG_SZ, "0")
win32gui.SystemParametersInfo(win32con.SPI_SETDESKWALLPAPER, img_file, 1+2)
balloon_tip("今日的壁纸已经更新",result['images'][0]['copyright'])
tryDownloadVideo(result)
| Python | 0 | |
93548efe9eb04dd9659e3cc76c711d967e8770df | Create filereader.py | filereader.py | filereader.py | #!/usr/bin/python
import os
import re
from optparse import OptionParser
SUFFIX=".out"
def main () :
global filename
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="the file to update", metavar="FILE")
parser.add_option("-n", "--name", dest="name",
help="the name to replace the original name with", metavar="NAME")
parser.add_option("-c", "--fromname", dest="fromname",
help="the name be replaced", metavar="FROMNAME")
(options, args) = parser.parse_args()
if not options.filename :
print "You must specify the file to modify"
exit(-1)
if not options.name :
print "You must specify the name to replace Tim with"
exit(-1)
if not options.fromname :
print "You must specify the name to be replaced"
exit(-1)
fin = open(options.filename, 'r')
fout = open(options.filename + SUFFIX, 'w')
for line in fin :
fout.write(re.sub(options.fromname, options.name, line))
fin.close()
fout.close()
main()
| Python | 0 | |
23ab301f4773892f6db7321105f79ba0c48404a3 | add urls | src/doc/expedient/source/developer/sshaggregate/urls.py | src/doc/expedient/source/developer/sshaggregate/urls.py | from django.conf.urls.defaults import *
urlpatterns = patterns('sshaggregate.views',
url(r'^aggregate/create/$', 'aggregate_crud', name='sshaggregate_aggregate_create'),
url(r'^aggregate/(?P<agg_id>\d+)/edit/$', 'aggregate_crud', name='sshaggregate_aggregate_edit'),
url(r'^aggregate/(?P<agg_id>\d+)/servers/$', 'aggregate_add_servers', name='sshaggregate_aggregate_servers'),
)
| Python | 0.000006 | |
fed2e3f9bdb3a00b077b5e7df1aed4d927b77b6c | Add test for Clifford drudge by quaternions | tests/clifford_test.py | tests/clifford_test.py | """Test for the Clifford algebra drudge."""
from drudge import CliffordDrudge, Vec, inner_by_delta
def test_clifford_drudge_by_quaternions(spark_ctx):
"""Test basic functionality of Clifford drudge by quaternions.
"""
dr = CliffordDrudge(
spark_ctx, inner=lambda v1, v2: -inner_by_delta(v1, v2)
)
e_ = Vec('e')
i_ = dr.sum(e_[2] * e_[3]).simplify()
j_ = dr.sum(e_[3] * e_[1]).simplify()
k_ = dr.sum(e_[1] * e_[2]).simplify()
for i in [i_, j_, k_]:
assert (i * i).simplify() == -1
assert (i_ * j_ * k_).simplify() == -1
assert (i_ * j_).simplify() == k_
assert (j_ * k_).simplify() == i_
assert (k_ * i_).simplify() == j_
| Python | 0 | |
09a0689b8e521c1d5c0ea68ac448dc9ae7abcff5 | Read the header of a fits file and/or look up a single key (case insensitive). | fitsHeader.py | fitsHeader.py | #!/usr/bin/env python
# -*- coding: utf8 -*-
# My imports
from __future__ import division
from astropy.io import fits
from pydoc import pager
import argparse
def _parser():
parser = argparse.ArgumentParser(description='View the header of a fits file')
parser.add_argument('input', help='File name of fits file')
parser.add_argument('-key', help='Look up a given key (case insensitive)', default=None)
return parser.parse_args()
if __name__ == '__main__':
args = _parser()
h = fits.getheader(args.input)
h.keys = map(str.lower, h.keys())
if args.key:
args.key = args.key.lower()
try:
print h[args.key]
except KeyError:
raise KeyError('Key was not found')
else:
string = '\n'.join("{!s} : {!r}".format(key, val) for (key, val) in h.items())
pager(string)
| Python | 0 | |
b674f921a8e5cffb2d3e320f564c61ca01455a9f | Add command to generate a csv of talk titles and video reviewers | wafer/management/commands/wafer_talk_video_reviewers.py | wafer/management/commands/wafer_talk_video_reviewers.py | import sys
import csv
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
from wafer.talks.models import Talk, ACCEPTED, PROVISIONAL
class Command(BaseCommand):
help = ("List talks and the associated video_reviewer emails."
" Only reviewers for accepted talks are listed")
def _video_reviewers(self, options):
talks = Talk.objects.filter(status=ACCEPTED)
csv_file = csv.writer(sys.stdout)
for talk in talks:
reviewer = talk.video_reviewer
if not reviewer:
reviewer = 'NO REVIEWER'
row = [x.encode("utf-8") for x in (
talk.title,
reviewer,
)]
csv_file.writerow(row)
def handle(self, *args, **options):
self._video_reviewers(options)
| Python | 0.000011 | |
3db3c22d83071550d8bbd70062f957cf43c5e54a | Add a compatibility module, because of Python 2/3 compatibility issues. | cart/_compatibility.py | cart/_compatibility.py | import sys
is_py3 = sys.version_info[0] >= 3
def utf8(string):
"""Cast to unicode DAMMIT!
Written because Python2 repr always implicitly casts to a string, so we
have to cast back to a unicode (and we now that we always deal with valid
unicode, because we check that in the beginning).
"""
if is_py3:
return str(string)
elif not isinstance(string, unicode):
return unicode(str(string), 'UTF-8')
return string
| Python | 0 | |
156b7dfc11f24a7d77d2280e8ddade3cb7a474b7 | Add a script for listing all Elasticsearch indexes | misc/list_all_es_indexes.py | misc/list_all_es_indexes.py | #!/usr/bin/env python
# -*- encoding: utf-8
import boto3
import hcl
import requests
def get_terraform_vars():
s3_client = boto3.client("s3")
tfvars_body = s3_client.get_object(
Bucket="wellcomecollection-platform-infra",
Key="terraform.tfvars"
)["Body"]
return hcl.load(tfvars_body)
def build_url(es_credentials):
protocol = es_credentials["protocol"]
name = es_credentials["name"]
region = es_credentials["region"]
port = es_credentials["port"]
return f"{protocol}://{name}.{region}.aws.found.io:{port}"
def get_all_indexes(es_url, username, password):
resp = requests.get(
f"{es_url}/_cat/indices",
auth=(username, password),
params={"format": "json"}
)
resp.raise_for_status()
return resp.json()
if __name__ == "__main__":
terraform_vars = get_terraform_vars()
es_cluster_credentials = terraform_vars["es_cluster_credentials"]
es_url = build_url(es_cluster_credentials)
username = es_cluster_credentials["username"]
password = es_cluster_credentials["password"]
indexes = get_all_indexes(es_url, username=username, password=password)
print(
'\n'.join(sorted(
idx["index"]
for idx in indexes
if not idx["index"].startswith(".")
))
)
| Python | 0 | |
006a921f19f6c4f64d694c86346ad85ada2c8bb8 | Add tests for subclass support | tests/subclass_test.py | tests/subclass_test.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et
try:
import unittest2 as unittest
except ImportError:
import unittest
import pycurl
CLASSES = (pycurl.Curl, pycurl.CurlMulti, pycurl.CurlShare)
class SubclassTest(unittest.TestCase):
def test_baseclass_init(self):
# base classes do not accept any arguments on initialization
for baseclass in CLASSES:
try:
baseclass(0)
except TypeError:
pass
else:
raise AssertionError('Base class accepted invalid args')
try:
baseclass(a=1)
except TypeError:
pass
else:
raise AssertionError('Base class accepted invalid kwargs')
def test_subclass_create(self):
for baseclass in CLASSES:
# test creation of a subclass
class MyCurlClass(baseclass):
pass
# test creation of its object
obj = MyCurlClass()
# must be of type subclass, but also an instance of base class
assert type(obj) == MyCurlClass
assert isinstance(obj, baseclass)
def test_subclass_init(self):
for baseclass in CLASSES:
class MyCurlClass(baseclass):
def __init__(self, x, y=4):
self.x = x
self.y = y
# subclass __init__ must be able to accept args and kwargs
obj = MyCurlClass(3)
assert obj.x == 3
assert obj.y == 4
obj = MyCurlClass(5, y=6)
assert obj.x == 5
assert obj.y == 6
# and it must throw TypeError if arguments don't match
try:
MyCurlClass(1, 2, 3, kwarg=4)
except TypeError:
pass
else:
raise AssertionError('Subclass accepted invalid arguments')
def test_subclass_method(self):
for baseclass in CLASSES:
class MyCurlClass(baseclass):
def my_method(self, x):
return x + 1
obj = MyCurlClass()
# methods must be able to accept arguments and return a value
assert obj.my_method(1) == 2
def test_subclass_method_override(self):
# setopt args for each base class
args = {
pycurl.Curl: (pycurl.VERBOSE, 1),
pycurl.CurlMulti: (pycurl.M_MAXCONNECTS, 3),
pycurl.CurlShare: (pycurl.SH_SHARE, pycurl.LOCK_DATA_COOKIE),
}
for baseclass in CLASSES:
class MyCurlClass(baseclass):
def setopt(self, option, value):
# base method must not be overwritten
assert super().setopt != self.setopt
# base method mut be callable, setopt must return None
assert super().setopt(option, value) is None
# return something else
return 'my setopt'
obj = MyCurlClass()
assert obj.setopt(*args[baseclass]) == 'my setopt'
| Python | 0 | |
c8816f509a661ed53c166d843ebfb7dcb6b8d75a | use only single threaded svrlight | examples/undocumented/python_modular/regression_svrlight_modular.py | examples/undocumented/python_modular/regression_svrlight_modular.py | ###########################################################################
# svm light based support vector regression
###########################################################################
from numpy import array
from numpy.random import seed, rand
from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
label_traindat = lm.load_labels('../data/label_train_twoclass.dat')
parameter_list = [[traindat,testdat,label_traindat,1.2,1,1e-5,1e-2,1],[traindat,testdat,label_traindat,2.3,0.5,1e-5,1e-6,1]]
def regression_svrlight_modular(fm_train=traindat,fm_test=testdat,label_train=label_traindat, \
width=1.2,C=1,epsilon=1e-5,tube_epsilon=1e-2,num_threads=3):
from shogun.Features import Labels, RealFeatures
from shogun.Kernel import GaussianKernel
try:
from shogun.Regression import SVRLight
except ImportError:
print 'No support for SVRLight available.'
return
feats_train=RealFeatures(fm_train)
feats_test=RealFeatures(fm_test)
kernel=GaussianKernel(feats_train, feats_train, width)
labels=Labels(label_train)
svr=SVRLight(C, epsilon, kernel, labels)
svr.set_tube_epsilon(tube_epsilon)
svr.parallel.set_num_threads(num_threads)
svr.train()
kernel.init(feats_train, feats_test)
out = svr.classify().get_labels()
return out, kernel
if __name__=='__main__':
print 'SVRLight'
regression_svrlight_modular(*parameter_list[0])
| ###########################################################################
# svm light based support vector regression
###########################################################################
from numpy import array
from numpy.random import seed, rand
from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
label_traindat = lm.load_labels('../data/label_train_twoclass.dat')
parameter_list = [[traindat,testdat,label_traindat,1.2,1,1e-5,1e-2,3],[traindat,testdat,label_traindat,2.3,0.5,1e-5,1e-6,1]]
def regression_svrlight_modular(fm_train=traindat,fm_test=testdat,label_train=label_traindat, \
width=1.2,C=1,epsilon=1e-5,tube_epsilon=1e-2,num_threads=3):
from shogun.Features import Labels, RealFeatures
from shogun.Kernel import GaussianKernel
try:
from shogun.Regression import SVRLight
except ImportError:
print 'No support for SVRLight available.'
return
feats_train=RealFeatures(fm_train)
feats_test=RealFeatures(fm_test)
kernel=GaussianKernel(feats_train, feats_train, width)
labels=Labels(label_train)
svr=SVRLight(C, epsilon, kernel, labels)
svr.set_tube_epsilon(tube_epsilon)
svr.parallel.set_num_threads(num_threads)
svr.train()
kernel.init(feats_train, feats_test)
out = svr.classify().get_labels()
return out, kernel
if __name__=='__main__':
print 'SVRLight'
regression_svrlight_modular(*parameter_list[0])
| Python | 0 |
7327250621dc34a1e7c2f1998333d65024583168 | add simple test | tests/test_commands.py | tests/test_commands.py | # Copyright 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from zpmlib import commands
def test_all_commands_sorted():
cmd_names = [cmd.__name__ for cmd in commands.all_commands()]
assert cmd_names == sorted(cmd_names)
| Python | 0.00057 | |
2b2f11cc7650fc5c40cd21a6e8ad671656fc9b21 | add quicksort | quicksort.py | quicksort.py | '''
QuickSort implementation
'''
def quick_sort(arr, l, r):
i = l
j = r
x = arr[(l + r) / 2]
if len(arr) == 0:
return arr
else:
while True:
while arr[i] < x:
i += 1
while arr[j] > x:
j -= 1
if i <= j:
tmp = arr[i]
arr[i] = arr[j]
arr[j] = tmp
i += 1
j -= 1
if i >= j:
break
if l < j:
quick_sort(arr, l, j)
if i < r:
quick_sort(arr, i, r)
if __name__ == '__main__':
arr = [12, 4, 5, 6, 7, 3, 1, 15]
quick_sort(arr, 0, len(arr) - 1)
print arr
| Python | 0.00001 | |
ef76498542aec046c2307562db01e4764ae68b50 | Add gce_resize | gce_resize.py | gce_resize.py | #!/usr/bin/env python
# import section
import argparse, os, time
from googleapiclient import discovery
from oauth2client.service_account import ServiceAccountCredentials
from pprint import pprint
# functions
def get_instanceGroup(service, project,zone, instanceGroup):
"""
Returns instance group object.
"""
try:
result = service.instanceGroups().get(project=project, zone=zone, instanceGroup=instanceGroup).execute()
except Exception as error:
print("Error getting instance group: %s." % str(error.message))
exit(1)
return result
def get_instanceGroupManager(service, project,zone, instanceGroup):
"""
Return instance group manager object.
"""
try:
result = service.instanceGroupManagers().get(project=project, zone=zone, instanceGroupManager=instanceGroup).execute()
except Exception as error:
print("Error getting instance group manager: %s." % str(error.message))
exit(1)
return result
def resize_instanceGroup(service, project, zone, instanceGroup, instances_num):
"""
Resize instanceGroup manager to instances_num. Usually returns immediatly.
"""
operation = service.instanceGroupManagers().resize(project=project, zone=zone, instanceGroupManager=instanceGroup, size=instances_num).execute()
try:
result = wait_for_operation(service, project, zone, operation)
except Exception as error:
print("Error executing resize: %s." % str(error.message))
exit(1)
return result
def wait_instanceGroupManager(service, project, zone, instanceGroup, timeout=None):
"""
Checks and waits for any operation on an instance group until complete. Consider use of timeout.
"""
n = 0
all_actions = 1
while all_actions > 0:
result = get_instanceGroupManager(service, project, zone, instanceGroup)
all_actions = sum(result['currentActions'].values()) - result['currentActions']['none']
if timeout != None and n > timeout:
print("Timeout while checking for finish actions on instance group manager")
exit(1)
n+=1
time.sleep(1)
def wait_for_operation(service, project, zone, operation):
"""
Keep waiting for an operation object to finish on gcp to complete.
"""
print('Waiting for operation to finish...')
while True:
result = service.zoneOperations().get(
project=project,
zone=zone,
operation=operation['name']).execute()
if result['status'] == 'DONE':
print("done.")
if 'error' in result:
raise Exception(result.error)
return result
print("progress: %i" % (operation.progress))
time.sleep(1)
# main
def main(project_id, zone, credentials_file, instance_group, instances_num):
# start credentials, service
scopes = ['https://www.googleapis.com/auth/compute']
if credentials_file is not None:
credentials = ServiceAccountCredentials.from_json_keyfile_name(credentials_file, scopes)
else:
credentials = GoogleCredentials.get_application_default()
service = discovery.build('compute', 'v1', credentials=credentials)
# do resize
instancegroup = get_instanceGroup(service, project_id, zone, instance_group)
print("Got instance group")
instancegroup_resize = resize_instanceGroup(service, project_id, zone, instance_group, instances_num)
wait_instanceGroupManager(service, project_id, zone, instance_group, 100)
print("Instance group resize successfuly. %s intances on %s group." % (instances_num, instance_group))
if __name__ == '__main__':
""" Script for resizing an Instance Group on GCP.
Example: gce_resize.py --project_id=<project> --instance_group=<instance_group_name> --instance_num=<int> [--zone=<gce_zone>] [--credentials_file=json_gcp_application_credentials>]
Arguments:
--project_id
--instance_group
--instance_num
[--zone]
[--credentials_file]
"""
parser = argparse.ArgumentParser( description =__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-p', '--project_id', help='Your Google Cloud project ID.', required=True)
parser.add_argument('-i', '--instance_group', help='Instance Group to resize.', required=True)
parser.add_argument('-n', '--instances_num', help='Number of instances to grow or shrink instance group to.', required=True)
parser.add_argument('-z', '--zone', default='europe-west1-d', help='Compute Engine zone to deploy to.', required=False)
parser.add_argument('-c', '--credentials_file', default=None, help='Optional service credentials from json file.', required=False)
args = parser.parse_args()
main(project_id=args.project_id, zone=args.zone, credentials_file=args.credentials_file, instance_group=args.instance_group, instances_num=args.instances_num)
| Python | 0.000012 | |
b102a2769dc70deb2055a2d4ae0bf11f48c13f9d | add game window | core/core.py | core/core.py | # -*- coding: utf-8 -*-
import pygame
from pygame.locals import *
class App:
def __init__(self):
self._running = True
self._display_surf = None
self.size = self.weight, self.height = 1024, 576
def on_init(self):
pygame.init()
self._display_surf = pygame.display.set_mode(self.size, pygame.HWSURFACE | pygame.DOUBLEBUF)
self._running = True
def on_event(self, event):
if event.type == pygame.QUIT:
self._running = False
def on_loop(self):
pass
def on_render(self):
pass
def on_cleanup(self):
pygame.quit()
def on_execute(self):
if self.on_init() == False:
self._running = False
while( self._running ):
for event in pygame.event.get():
self.on_event(event)
self.on_loop()
self.on_render()
self.on_cleanup()
if __name__ == "__main__" :
theApp = App()
theApp.on_execute() | Python | 0.000001 | |
fb6dd1a92471697b8665364dfaa7fedc519d00ed | Create properties.py | data/properties.py | data/properties.py | import libtcodpy as libtcod
class Object():
def __init__(self, x, y, char, color, screen):
self.x = x
self.y = y
self.char = char
self.color = color
self.screen = screen
def draw_object(self):
#Set the color of the character and draw it
libtcod.console_set_default_foreground(self.screen, self.color)
libtcod.console_put_char(self.screen, self.x, self.y, self.char, libtcod.BKGND_NONE)
def delete(self):
#Erase the char
libtcod.console_put_char(self.screen, self.x, self.y, self.char, libtcod.BKGND_NONE)
class Tile():
#Properties of a map's tiles, theres not much to it like there is to Object
def __init__(self, blocked, blocked_sight):
self.blocked = blocked
self.blocked_sight = blocked_sight
#blocked_sight's variable depends on blocked if its None
if blocked_sight == None: blocked_sight = blocked
| Python | 0.000001 | |
a2ba0c1658850064f55de1a99c3c2a49ef847b8d | Add join_by draft | drafts/join_by.py | drafts/join_by.py | def join_by(op, dicts, start=EMPTY):
dicts = list(dicts)
if not dicts:
return {}
elif len(dicts) == 1:
return dicts[0]
result = {}
for d in dicts:
for k, v in iteritems(d):
if k in result:
result[k] = op(result[k], v)
else:
result[k] = v if start is EMPTY else op(start, v)
# result[k] = v if start is EMPTY else start(v)
# result[k] = v if start is EMPTY else op(start(), v)
# result[k] = v if start is EMPTY else op(start() if callable(start) else start, v)
return result
join_by(operator.__add__, dnfs, start=list)
join_with(cat, dnfs)
join_by(list.extend, dnfs, start=list)
join_by(lambda c, _: c + 1, dnfs, start=lambda _: 1)
join_by(lambda l, v: l + len(v), dnfs, start=len)
# join_by(list.append, dnfs, initial=[])
join_by(lambda l, v: l + len(v), dnfs, 0)
| Python | 0 | |
79602383ece3835e6ed94d14f3254190104bd03d | Fix aliases with bash | thefuck/shells/bash.py | thefuck/shells/bash.py | import os
from ..conf import settings
from ..const import ARGUMENT_PLACEHOLDER
from ..utils import memoize
from .generic import Generic
class Bash(Generic):
def app_alias(self, alias_name):
# It is VERY important to have the variables declared WITHIN the function
return '''
function {name} () {{
TF_PREVIOUS=$(fc -ln -1);
TF_CMD=$(
export TF_ALIAS={name}
export TF_SHELL_ALIASES=$(alias)
export PYTHONIOENCODING=utf-8
thefuck $TF_PREVIOUS {argument_placeholder} $@
) && eval $TF_CMD;
{alter_history}
}}
'''.format(
name=alias_name,
argument_placeholder=ARGUMENT_PLACEHOLDER,
alter_history=('history -s $TF_CMD;'
if settings.alter_history else ''))
def _parse_alias(self, alias):
name, value = alias.replace('alias ', '', 1).split('=', 1)
if value[0] == value[-1] == '"' or value[0] == value[-1] == "'":
value = value[1:-1]
return name, value
@memoize
def get_aliases(self):
raw_aliases = os.environ.get('TF_SHELL_ALIASES', '').split('\n')
return dict(self._parse_alias(alias)
for alias in raw_aliases if alias and '=' in alias)
def _get_history_file_name(self):
return os.environ.get("HISTFILE",
os.path.expanduser('~/.bash_history'))
def _get_history_line(self, command_script):
return u'{}\n'.format(command_script)
def how_to_configure(self):
if os.path.join(os.path.expanduser('~'), '.bashrc'):
config = '~/.bashrc'
elif os.path.join(os.path.expanduser('~'), '.bash_profile'):
config = '~/.bash_profile'
else:
config = 'bash config'
return self._create_shell_configuration(
content=u'eval $(thefuck --alias)',
path=config,
reload=u'source {}'.format(config))
| import os
from ..conf import settings
from ..const import ARGUMENT_PLACEHOLDER
from ..utils import memoize
from .generic import Generic
class Bash(Generic):
def app_alias(self, alias_name):
# It is VERY important to have the variables declared WITHIN the function
return '''
function {name} () {{
TF_PREVIOUS=$(fc -ln -1);
TF_CMD=$(
TF_ALIAS={name}
TF_SHELL_ALIASES=$(alias)
PYTHONIOENCODING=utf-8
thefuck $TF_PREVIOUS {argument_placeholder} $@
) && eval $TF_CMD;
{alter_history}
}}
'''.format(
name=alias_name,
argument_placeholder=ARGUMENT_PLACEHOLDER,
alter_history=('history -s $TF_CMD;'
if settings.alter_history else ''))
def _parse_alias(self, alias):
name, value = alias.replace('alias ', '', 1).split('=', 1)
if value[0] == value[-1] == '"' or value[0] == value[-1] == "'":
value = value[1:-1]
return name, value
@memoize
def get_aliases(self):
raw_aliases = os.environ.get('TF_SHELL_ALIASES', '').split('\n')
return dict(self._parse_alias(alias)
for alias in raw_aliases if alias and '=' in alias)
def _get_history_file_name(self):
return os.environ.get("HISTFILE",
os.path.expanduser('~/.bash_history'))
def _get_history_line(self, command_script):
return u'{}\n'.format(command_script)
def how_to_configure(self):
if os.path.join(os.path.expanduser('~'), '.bashrc'):
config = '~/.bashrc'
elif os.path.join(os.path.expanduser('~'), '.bash_profile'):
config = '~/.bash_profile'
else:
config = 'bash config'
return self._create_shell_configuration(
content=u'eval $(thefuck --alias)',
path=config,
reload=u'source {}'.format(config))
| Python | 0.000001 |
0d8bfef0a629f6f8fb07415df21812eb1d458cde | Remove unnecessary lines after Android gyp fix Review URL: https://codereview.appspot.com/6353066 | gyp/bench.gyp | gyp/bench.gyp | # GYP file to build performance testbench.
#
{
'includes': [
'apptype_console.gypi',
],
'targets': [
{
'target_name': 'bench',
'type': 'executable',
'include_dirs' : [
'../src/core',
'../src/gpu',
],
'includes': [
'bench.gypi'
],
'dependencies': [
'core.gyp:core',
'effects.gyp:effects',
'gpu.gyp:gr',
'gpu.gyp:skgr',
'images.gyp:images',
'ports.gyp:ports',
'utils.gyp:utils',
'bench_timer',
],
},
{
'target_name' : 'bench_timer',
'type': 'static_library',
'sources': [
'../bench/BenchTimer.h',
'../bench/BenchTimer.cpp',
'../bench/BenchSysTimer_mach.h',
'../bench/BenchSysTimer_mach.cpp',
'../bench/BenchSysTimer_posix.h',
'../bench/BenchSysTimer_posix.cpp',
'../bench/BenchSysTimer_windows.h',
'../bench/BenchSysTimer_windows.cpp',
'../bench/BenchGpuTimer_gl.h',
'../bench/BenchGpuTimer_gl.cpp',
],
'include_dirs': [
'../src/core',
'../src/gpu',
],
'dependencies': [
'core.gyp:core',
'gpu.gyp:gr',
],
'conditions': [
[ 'skia_os != "mac"', {
'sources!': [
'../bench/BenchSysTimer_mach.h',
'../bench/BenchSysTimer_mach.cpp',
],
}],
[ 'skia_os not in ["linux", "freebsd", "openbsd", "solaris", "android"]', {
'sources!': [
'../bench/BenchSysTimer_posix.h',
'../bench/BenchSysTimer_posix.cpp',
],
}],
[ 'skia_os in ["linux", "freebsd", "openbsd", "solaris"]', {
'link_settings': {
'libraries': [
'-lrt',
],
},
}],
[ 'skia_os != "win"', {
'sources!': [
'../bench/BenchSysTimer_windows.h',
'../bench/BenchSysTimer_windows.cpp',
],
}],
],
}
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:
| # GYP file to build performance testbench.
#
{
'includes': [
'apptype_console.gypi',
],
'targets': [
{
'target_name': 'bench',
'type': 'executable',
'include_dirs' : [
'../src/core',
'../src/gpu',
],
'includes': [
'bench.gypi'
],
'dependencies': [
'core.gyp:core',
'effects.gyp:effects',
'gpu.gyp:gr',
'gpu.gyp:skgr',
'images.gyp:images',
'ports.gyp:ports',
'utils.gyp:utils',
'bench_timer',
],
},
{
'target_name' : 'bench_timer',
'type': 'static_library',
'sources': [
'../bench/BenchTimer.h',
'../bench/BenchTimer.cpp',
'../bench/BenchSysTimer_mach.h',
'../bench/BenchSysTimer_mach.cpp',
'../bench/BenchSysTimer_posix.h',
'../bench/BenchSysTimer_posix.cpp',
'../bench/BenchSysTimer_windows.h',
'../bench/BenchSysTimer_windows.cpp',
'../bench/BenchGpuTimer_gl.h',
'../bench/BenchGpuTimer_gl.cpp',
],
'include_dirs': [
'../src/core',
'../src/gpu',
],
'dependencies': [
'core.gyp:core',
'gpu.gyp:gr',
],
'conditions': [
[ 'skia_os != "mac"', {
'sources!': [
'../bench/BenchSysTimer_mach.h',
'../bench/BenchSysTimer_mach.cpp',
],
}],
[ 'skia_os not in ["linux", "freebsd", "openbsd", "solaris", "android"]', {
'sources!': [
'../bench/BenchSysTimer_posix.h',
'../bench/BenchSysTimer_posix.cpp',
],
}],
[ 'skia_os in ["linux", "freebsd", "openbsd", "solaris"]', {
'link_settings': {
'libraries': [
'-lrt',
],
},
}],
[ 'skia_os != "win"', {
'sources!': [
'../bench/BenchSysTimer_windows.h',
'../bench/BenchSysTimer_windows.cpp',
],
}],
[ 'skia_os == "android"', {
'dependencies!': [
'android_system.gyp:Android_EntryPoint',
],
}],
],
}
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:
| Python | 0.000264 |
235cc3a7529b36e11a7935e15c90f496210d7c31 | implement method for generating request signature | scup/auth.py | scup/auth.py | import hashlib
import time
def get_request_signature(private_key):
current_time = int(time.time())
message = '{}{}'.format(current_time, private_key)
digest = hashlib.md5(message).hexdigest()
return current_time, digest
| Python | 0 | |
5834f2e259834b325cf076b36af634dc6b64f442 | Add info if not parsed | intelmq/bots/parsers/generic/parser.py | intelmq/bots/parsers/generic/parser.py | from intelmq.lib.bot import Bot, sys
from intelmq.lib.message import Event
from intelmq.bots import utils
import re
class GenericBot(Bot):
    r"""Generic regex-driven parser bot.

    Applies ``self.parameters.regex`` to every line of the incoming
    report and copies each named capture group into the event.  For
    example the regex '^\s*(?P<ip>(?:(?:\d){1,3}\.){3}\d{1,3})' yields
    an 'ip' item in the event.  Feed metadata from the bot parameters is
    only added for keys the regex did not already capture.
    """

    def process(self):
        """Parse one report into zero or more events and acknowledge it."""
        report = self.receive_message()
        self.logger.debug("Will apply regex %s" % self.parameters.regex)
        if report:
            rowcount = 0
            for row in report.split('\n'):  # For each line
                self.logger.debug(self.parameters.regex)
                match = re.search(self.parameters.regex, row)
                if not match:
                    continue  # skip lines without matching regex
                rowcount += 1
                event = Event()
                groups = match.groupdict()
                # BUG FIX: the original referenced the undefined name
                # ``matchtuple`` here, raising NameError on the first
                # matching line.
                for key, value in groups.items():
                    event.add(key, value)
                # Get detail from parser parameters, will be nice to have it by
                # source parameters.. Avoid adding if parsed
                if 'feed' not in groups:
                    event.add('feed', self.parameters.feed)
                if 'feed_url' not in groups:
                    event.add('feed_url', self.parameters.feed_url)
                if 'type' not in groups:
                    event.add('type', self.parameters.type)
                event = utils.parse_source_time(event, "source_time")
                event = utils.generate_observation_time(event,
                                                        "observation_time")
                event = utils.generate_reported_fields(event)
                self.send_message(event)
            self.logger.info("Processed %d event" % rowcount)
            self.acknowledge_message()
if __name__ == "__main__":
    # Entry point: the bot id is passed as the first CLI argument.
    GenericBot(sys.argv[1]).start()
| from intelmq.lib.bot import Bot, sys
from intelmq.lib.message import Event
from intelmq.bots import utils
import re
class GenericBot(Bot):
    r"""Generic regex-driven parser bot.

    Applies ``self.parameters.regex`` to every report line and copies
    each named capture group into the event, e.g.
    '^\s*(?P<ip>(?:(?:\d){1,3}\.){3}\d{1,3})' yields an 'ip' item.
    """

    def process(self):
        """Parse one report into zero or more events and acknowledge it."""
        report = self.receive_message()
        if report:
            rowcount = 0
            for row in report.split('\n'):  # For each line
                self.logger.debug(row)
                self.logger.debug(self.parameters.regex)
                match = re.search(self.parameters.regex, row)
                if not match:
                    continue  # skip lines without matching regex
                rowcount += 1
                event = Event()
                # BUG FIX: the original referenced the undefined name
                # ``matchtuple``, raising NameError on the first match.
                for key, value in match.groupdict().items():
                    event.add(key, value)
                # Get detail from parser parameters, will be nice to have it by
                # source parameters..
                event.add('feed', self.parameters.feed)
                event.add('feed_url', self.parameters.feed_url)
                event.add('type', self.parameters.type)
                event = utils.parse_source_time(event, "source_time")
                event = utils.generate_observation_time(event,
                                                        "observation_time")
                event = utils.generate_reported_fields(event)
                self.send_message(event)
            self.logger.info("Processed %d event" % rowcount)
            self.acknowledge_message()
if __name__ == "__main__":
    # Entry point: sys.argv[1] is the bot id used to look up configuration.
    bot = GenericBot(sys.argv[1])
    bot.start()
| Python | 0 |
9b5590458463744597da1769694e826ed9c27414 | Comment failing doctests. | scikits/learn/utils/crossval.py | scikits/learn/utils/crossval.py | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD Style.
# $Id$
import exceptions
import numpy as np
def leave_one_out(n):
    """
    Leave-One-Out cross validation:
    Provides train/test indexes to split data in train test sets

    Each of the n folds holds out exactly one sample for testing and
    keeps the remaining n - 1 samples for training.

    Parameters
    ===========
    n: int
        Total number of elements

    Yields
    ======
    train_index, test_index: boolean arrays of shape (n,)

    (The usage example formerly embedded here was a doctest that could
    not run -- see issue #34.)
    """
    # ``range`` (not Py2-only ``xrange``) and builtin ``bool`` (the
    # ``np.bool`` alias was removed in NumPy 1.24) work on both Python
    # versions without changing behavior.
    for i in range(n):
        test_index = np.zeros(n, dtype=bool)
        test_index[i] = True
        train_index = np.logical_not(test_index)
        yield train_index, test_index
def k_fold(n, k):
    """
    K-Folds cross validation:
    Provides train/test indexes to split data in train test sets

    Parameters
    ===========
    n: int
        Total number of elements
    k: int
        number of folds

    Yields
    ======
    train_index, test_index: boolean arrays of shape (n,)

    Raises
    ======
    ValueError: if k is not in the range 1 <= k < n (raised on first
        iteration, since this is a generator).

    Note
    ====
    All the folds have size trunc(n/k), the last one has the complementary
    """
    # Validate with real exceptions: ``assert`` is stripped when Python
    # runs with -O, and its second operand was an unused ValueError.
    if k <= 0:
        raise ValueError('cannot have k below 1')
    if k >= n:
        raise ValueError('cannot have k=%d greater than %d' % (k, n))
    # Integer fold size trunc(n/k), as documented.  Floor division keeps
    # the slice bounds integral; ``np.ceil(n/k)`` produced float indices,
    # which recent NumPy rejects.
    j = n // k
    for i in range(k):
        test_index = np.zeros(n, dtype=bool)
        if i < k - 1:
            test_index[i * j:(i + 1) * j] = True
        else:
            # Last fold takes whatever remains.
            test_index[i * j:] = True
        train_index = np.logical_not(test_index)
        yield train_index, test_index
def split(train_indexes, test_indexes, *args):
    """
    For each arg return a train and test subsets defined by indexes provided
    in train_indexes and test_indexes

    Parameters
    ==========
    train_indexes, test_indexes: boolean arrays selecting rows (axis 0).
    *args: arrays to split; may be 1-D (targets) or n-D (design matrices).

    Returns
    =======
    list alternating [arg0_train, arg0_test, arg1_train, arg1_test, ...]
    """
    ret = []
    for arg in args:
        # Index along the first axis only: the original ``arg[mask, :]``
        # raised IndexError for 1-D targets such as ``y``; ``arg[mask]``
        # is equivalent for 2-D arrays and works for any dimensionality.
        ret.append(arg[train_indexes])
        ret.append(arg[test_indexes])
    return ret
if __name__ == "__main__":
    # Demo driver (Python 2 print syntax): exercises both CV splitters
    # on a small random data set.
    print "Leave One Out crossvalidation"
    n_samples, n_features = 4, 2
    X = np.random.randn(n_samples, n_features)
    y = np.random.randn(n_samples)
    print X
    loo = leave_one_out(n_samples)
    for train_index, test_index in loo:
        print "TRAIN:", train_index, "TEST:", test_index
        # NOTE(review): y is 1-D here; ensure split() supports 1-D arrays.
        Xtrain, Xtest, Ytrain, Ytest = split(train_index, test_index, X, y)
        print Xtrain, Xtest, Ytrain, Ytest
    print "K-Fold crossvalidation"
    k = 2
    kf = k_fold(n_samples, k)
    for train_index, test_index in kf:
        print "TRAIN:", train_index, "TEST:", test_index
| # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD Style.
# $Id$
import exceptions
import numpy as np
def leave_one_out(n):
    """
    Leave-One-Out cross validation:
    Provides train/test indexes to split data in train test sets

    Parameters
    ===========
    n: int
        Total number of elements

    Examples
    ========
    # Illustrative session only (not an executable doctest): it relies
    # on random data and on names (``y``, ``split``) that are never
    # defined, so doctest cannot run it verbatim.
    # >>> import numpy as np
    # >>> from scikits.learn.utils import crossval
    # >>> n_samples, n_features = 5, 10
    # >>> X = np.random.randn(n_samples, n_features)
    # >>> loo = crossval.leave_one_out(n_samples)
    # >>> for train_index, test_index in loo:
    # ...    print "TRAIN:", train_index, "TEST:", test_index
    # ...    Xtrain, Xtest, Ytrain, Ytest = split(train_index, test_index, X, y)
    # ...    print Xtrain, Xtest, Ytrain, Ytest
    """
    for i in xrange(n):
        # Boolean mask with a single True at the held-out position i.
        test_index = np.zeros(n, dtype=np.bool)
        test_index[i] = True
        train_index = np.logical_not(test_index)
        yield train_index, test_index
def k_fold(n, k):
    """
    K-Folds cross validation:
    Provides train/test indexes to split data in train test sets

    Parameters
    ===========
    n: int
        Total number of elements
    k: int
        number of folds

    Yields
    ======
    train_index, test_index: boolean arrays of shape (n,)

    Raises
    ======
    ValueError: if k is not in the range 1 <= k < n (raised on first
        iteration, since this is a generator).

    Note
    ====
    All the folds have size trunc(n/k), the last one has the complementary
    """
    # Validate with real exceptions: ``assert`` is stripped when Python
    # runs with -O, and its second operand was an unused ValueError.
    if k <= 0:
        raise ValueError('cannot have k below 1')
    if k >= n:
        raise ValueError('cannot have k=%d greater than %d' % (k, n))
    # Integer fold size trunc(n/k), as documented.  Floor division keeps
    # the slice bounds integral; ``np.ceil(n/k)`` produced float indices,
    # which recent NumPy rejects.
    j = n // k
    for i in range(k):
        test_index = np.zeros(n, dtype=bool)
        if i < k - 1:
            test_index[i * j:(i + 1) * j] = True
        else:
            # Last fold takes whatever remains.
            test_index[i * j:] = True
        train_index = np.logical_not(test_index)
        yield train_index, test_index
def split(train_indexes, test_indexes, *args):
    """
    For each arg return a train and test subsets defined by indexes provided
    in train_indexes and test_indexes

    Parameters
    ==========
    train_indexes, test_indexes: boolean arrays selecting rows (axis 0).
    *args: arrays to split; may be 1-D (targets) or n-D (design matrices).

    Returns
    =======
    list alternating [arg0_train, arg0_test, arg1_train, arg1_test, ...]
    """
    ret = []
    for arg in args:
        # Index along the first axis only: the original ``arg[mask, :]``
        # raised IndexError for 1-D targets such as ``y``; ``arg[mask]``
        # is equivalent for 2-D arrays and works for any dimensionality.
        ret.append(arg[train_indexes])
        ret.append(arg[test_indexes])
    return ret
if __name__ == "__main__":
    # Demo driver (Python 2 print syntax): exercises both CV splitters
    # on a small random data set.
    print "Leave One Out crossvalidation"
    n_samples, n_features = 4, 2
    X = np.random.randn(n_samples, n_features)
    y = np.random.randn(n_samples)
    print X
    loo = leave_one_out(n_samples)
    for train_index, test_index in loo:
        print "TRAIN:", train_index, "TEST:", test_index
        # NOTE(review): y is 1-D here; ensure split() supports 1-D arrays.
        Xtrain, Xtest, Ytrain, Ytest = split(train_index, test_index, X, y)
        print Xtrain, Xtest, Ytrain, Ytest
    print "K-Fold crossvalidation"
    k = 2
    kf = k_fold(n_samples, k)
    for train_index, test_index in kf:
        print "TRAIN:", train_index, "TEST:", test_index
| Python | 0 |
7490c39f958291cc99913d0f36581439d8efdf77 | Add a command to fix candidate image metadata | candidates/management/commands/candidates_fix_image_metadata.py | candidates/management/commands/candidates_fix_image_metadata.py | from PIL import Image
from hashlib import md5
import re
import requests
import sys
from StringIO import StringIO
from candidates.popit import PopItApiMixin, popit_unwrap_pagination
from candidates.update import fix_dates
from moderation_queue.views import PILLOW_FORMAT_MIME_TYPES
from django.core.management.base import BaseCommand
from slumber.exceptions import HttpClientError
def fix_image_mime_type(image):
    """Fill in image['mime_type'] by sniffing the actual image bytes.

    Leaves the dict untouched when a mime_type is already present.
    Mutates ``image`` in place.
    """
    mime_type = image.get('mime_type')
    if mime_type:
        return
    try:
        image_url = image['url']
        # Download the file and let Pillow identify the real format.
        r = requests.get(image_url)
        pillow_image = Image.open(StringIO(r.content))
    except IOError as e:
        # Pillow raises IOError with this message for unknown formats;
        # skip such images instead of aborting the whole run.
        if 'cannot identify image file' in unicode(e):
            print "Unknown image format in {0}".format(image_url)
            return
        raise
    # Map Pillow's format name (e.g. 'JPEG') onto a mime type string.
    new_mime_type = PILLOW_FORMAT_MIME_TYPES[pillow_image.format]
    image['mime_type'] = new_mime_type
    print "  Setting mime_type to", new_mime_type
def fix_image_metadata(image):
    """Migrate legacy metadata layouts on ``image`` in place.

    Handles two historical formats: md5sums embedded in the notes field
    (old PPC scraper output) and the pre-split why_allowed /
    justification_for_use fields.
    """
    notes = image.get('notes', '')
    # If the notes field has an MD5sum in it, then it was from the
    # import PPC script, so move that to an md5sum field (as
    # organization images have) and set the moderator_why_allowed to
    # 'profile-photo'
    m = re.search(r'^md5sum:([a-f0-9]+)', notes)
    if m:
        image['md5sum'] = m.group(1)
        image['moderator_why_allowed'] = 'profile-photo'
        image['notes'] = 'Scraped from the official party PPC page'
        print "  Migrated old PPC scraped image"
    # If there is a 'why_allowed' and 'justification_for_use' field,
    # this was from before we switched to separating the user's and
    # moderator's reason for allowing the photo, so migrate those
    # fields.
    if image.get('why_allowed') and image.get('justification_for_use'):
        why_allowed = image.pop('why_allowed')
        justification_for_use = image.pop('justification_for_use')
        image['moderator_why_allowed'] = why_allowed
        image['user_why_allowed'] = why_allowed
        image['user_justification_for_use'] = justification_for_use
        print "  Migrated from old why_allowed", why_allowed
        print "  Migrated from old justification_for_use", justification_for_use
def ensure_md5sum_present(image):
    """Compute and store image['md5sum'] from the downloaded file if absent."""
    if image.get('md5sum'):
        return
    image_url = image['url']
    # Otherwise get the file and calculate its MD5sum
    r = requests.get(image_url)
    md5sum = md5(r.content).hexdigest()
    image['md5sum'] = md5sum
    print "  Setting md5sum field to", md5sum
def fix_image(image):
    """Apply every image-metadata fixup, in order, mutating ``image``."""
    for fixup in (fix_image_mime_type, fix_image_metadata,
                  ensure_md5sum_present):
        fixup(image)
class Command(PopItApiMixin, BaseCommand):
    """Management command: normalize image metadata for every PopIt person."""
    def handle(self, **options):
        """Iterate all persons, fix each image in place and PUT the person back."""
        for person in popit_unwrap_pagination(
                self.api.persons,
                embed='',
                per_page=100
        ):
            msg = "Person {0}persons/{1}"
            print msg.format(self.get_base_url(), person['id'])
            for image in person.get('images', []):
                print "  Image with URL:", image['url']
                fix_image(image)
                # Internal id field; drop it before writing back.
                image.pop('_id', None)
                # Some images have an empty 'created' field, which
                # causes an Elasticsearch indexing error, so remove
                # that if it's the case:
                if not image.get('created'):
                    image.pop('created', None)
            fix_dates(person)
            try:
                self.api.persons(person['id']).put(person)
            except HttpClientError as e:
                # Abort the whole run on the first failed update.
                print "HttpClientError", e.content
                sys.exit(1)
| Python | 0.999987 | |
ae66cf3153f7285d3ff4430af79c380881b2eb32 | Add a very primitive clang based multifile 'delta'. - Interface is more or less like multidelta. | utils/token-delta.py | utils/token-delta.py | #!/usr/bin/env python
import os
import re
import subprocess
import sys
import tempfile
###
class DeltaAlgorithm(object):
    """Generic delta-debugging minimizer (ddmin).

    Subclasses implement ``test(changes)``; ``run`` then searches for a
    small subset of the changes for which the test still passes.
    Failing changesets are memoized in ``self.cache`` so the (possibly
    expensive) external test never runs twice on the same set.
    """
    def __init__(self):
        self.cache = set()
    def test(self, changes):
        # Abstract method: the bare name raises NameError if a subclass
        # forgets to override it (pre-`abc`, Python 2 era idiom).
        abstract
    ###
    def getTestResult(self, changes):
        """Run ``test`` with negative-result caching."""
        # There is no reason to cache successful tests because we will
        # always reduce the changeset when we see one.
        changeset = frozenset(changes)
        if changeset in self.cache:
            return False
        elif not self.test(changes):
            self.cache.add(changeset)
            return False
        else:
            return True
    def run(self, changes, force=False):
        """Minimize *changes*; ``force`` skips the initial sanity test."""
        # Make sure the initial test passes, if not then (a) either
        # the user doesn't expect monotonicity, and we may end up
        # doing O(N^2) tests, or (b) the test is wrong. Avoid the
        # O(N^2) case unless user requests it.
        if not force:
            if not self.getTestResult(changes):
                raise ValueError,'Initial test passed to delta fails.'
        # Check empty set first to quickly find poor test functions.
        if self.getTestResult(set()):
            return set()
        else:
            return self.delta(changes, self.split(changes))
    def split(self, S):
        """split(set) -> [sets]
        Partition a set into one or two pieces.
        """
        # There are many ways to split, we could do a better job with more
        # context information (but then the API becomes grosser).
        L = list(S)
        mid = len(L)//2
        if mid==0:
            return L,
        else:
            return L[:mid],L[mid:]
    def delta(self, c, sets):
        """Core ddmin recursion over the current partition *sets* of *c*."""
        # assert(reduce(set.union, sets, set()) == c)
        # If there is nothing left we can remove, we are done.
        if len(sets) <= 1:
            return c
        # Look for a passing subset.
        res = self.search(c, sets)
        if res is not None:
            return res
        # Otherwise, partition sets if possible; if not we are done.
        refined = sum(map(list, map(self.split, sets)), [])
        if len(refined) == len(sets):
            return c
        return self.delta(c, refined)
    def search(self, c, sets):
        """Try each subset and (for >2 sets) its complement; None if neither passes."""
        for i,S in enumerate(sets):
            # If test passes on this subset alone, recurse.
            if self.getTestResult(S):
                return self.delta(S, self.split(S))
            # Otherwise if we have more than two sets, see if test
            # pases without this subset.
            if len(sets) > 2:
                complement = sum(sets[:i] + sets[i+1:],[])
                if self.getTestResult(complement):
                    return self.delta(complement, sets[:i] + sets[i+1:])
###
class Token:
    """One raw lexer token plus the source location it came from."""

    def __init__(self, type, data, flags, file, line, column):
        # Store every constructor argument verbatim on the instance.
        (self.type, self.data, self.flags,
         self.file, self.line, self.column) = (type, data, flags,
                                               file, line, column)
# Matches one token line of `clang -dump-raw-tokens` output:
#   kind 'spelling'<TAB>flags<TAB>Loc=<file:line:col>
# DOTALL lets the quoted spelling span embedded newlines.
kTokenRE = re.compile(r"""([a-z_]+) '(.*)'\t(.*)\tLoc=<(.*):(.*):(.*)>""",
                      re.DOTALL | re.MULTILINE)
def getTokens(path):
    """Lex *path* with ``clang -dump-raw-tokens`` and return Token objects.

    clang writes the token dump to stderr, and token spellings may
    contain embedded newlines, so lines are accumulated until one ends
    with the closing ``>`` of a ``Loc=<...>`` marker.
    """
    p = subprocess.Popen(['clang','-dump-raw-tokens',path],
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    out,err = p.communicate()
    tokens = []
    collect = None
    for ln in err.split('\n'):
        # Silly programmers refuse to print in simple machine readable
        # formats. Whatever.
        if collect is None:
            collect = ln
        else:
            collect = collect + '\n' + ln
        if 'Loc=<' in ln and ln.endswith('>'):
            # Complete token record gathered; parse and reset.
            ln,collect = collect,None
            tokens.append(Token(*kTokenRE.match(ln).groups()))
    return tokens
###
class TMBDDelta(DeltaAlgorithm):
    """Token-based multi-file delta: one change = (file index, token index).

    Candidate token subsets are written to ``<name>.tmp.<ext>`` files and
    the user-supplied test program is run on them; every passing subset
    is mirrored to the ``<name>.ok.<ext>`` files.
    """
    def __init__(self, testProgram, tokenLists, log):
        # patchName('a/b.c', 'tmp') -> 'a/b.tmp.c'
        def patchName(name, suffix):
            base,ext = os.path.splitext(name)
            return base + '.' + suffix + ext
        super(TMBDDelta, self).__init__()
        self.testProgram = testProgram
        self.tokenLists = tokenLists
        self.tempFiles = [patchName(f,'tmp')
                          for f,_ in self.tokenLists]
        self.targetFiles = [patchName(f,'ok')
                            for f,_ in self.tokenLists]
        self.log = log
        self.numTests = 0
    def writeFiles(self, changes, fileNames):
        """Write the token subset *changes* to *fileNames*; return per-file index lists."""
        assert len(fileNames) == len(self.tokenLists)
        byFile = [[] for i in self.tokenLists]
        for i,j in changes:
            byFile[i].append(j)
        for i,(file,tokens) in enumerate(self.tokenLists):
            f = open(fileNames[i],'w')
            for j in byFile[i]:
                f.write(tokens[j])
            f.close()
        return byFile
    def test(self, changes):
        """Run the external test program on the candidate subset (Python 2 print syntax)."""
        self.numTests += 1
        byFile = self.writeFiles(changes, self.tempFiles)
        if self.log:
            print >>sys.stderr, 'TEST - ',
            if self.log > 1:
                # Verbose mode: show contiguous index ranges per file.
                for i,(file,_) in enumerate(self.tokenLists):
                    indices = byFile[i]
                    if i:
                        sys.stderr.write('\n      ')
                    sys.stderr.write('%s:%d tokens: [' % (file,len(byFile[i])))
                    prev = None
                    for j in byFile[i]:
                        if prev is None or j != prev + 1:
                            if prev:
                                sys.stderr.write('%d][' % prev)
                            sys.stderr.write(str(j))
                            sys.stderr.write(':')
                        prev = j
                    if byFile[i]:
                        sys.stderr.write(str(byFile[i][-1]))
                    sys.stderr.write('] ')
            else:
                print >>sys.stderr, ', '.join(['%s:%d tokens' % (file, len(byFile[i]))
                                               for i,(file,_) in enumerate(self.tokenLists)]),
        p = subprocess.Popen([self.testProgram] + self.tempFiles)
        res = p.wait() == 0
        if res:
            # Snapshot every passing subset so progress survives interruption.
            self.writeFiles(changes, self.targetFiles)
        if self.log:
            print >>sys.stderr, '=> %s' % res
        else:
            if res:
                print '\nSUCCESS (%d tokens)' % len(changes)
            else:
                sys.stderr.write('.')
        return res
    def run(self):
        """Minimize starting from every (file, token) pair; returns the result set."""
        res = super(TMBDDelta, self).run([(i,j)
                                          for i,(file,tokens) in enumerate(self.tokenLists)
                                          for j in range(len(tokens))])
        self.writeFiles(res, self.targetFiles)
        if not self.log:
            print >>sys.stderr
        return res
def tokenBasedMultiDelta(program, files, log):
    """Minimize *files* as token lists, using *program* as the pass/fail test."""
    # Read in the lists of tokens.
    tokenLists = [(file, [t.data for t in getTokens(file)])
                  for file in files]
    numTokens = sum([len(tokens) for _,tokens in tokenLists])
    print "Delta on %s with %d tokens." % (', '.join(files), numTokens)
    tbmd = TMBDDelta(program, tokenLists, log)
    res = tbmd.run()
    print "Finished %s with %d tokens (in %d tests)." % (', '.join(tbmd.targetFiles),
                                                         len(res),
                                                         tbmd.numTests)
def main():
    """Parse the command line and run the token-based multi-file delta."""
    from optparse import OptionParser
    parser = OptionParser("%prog <test program> {files+}")
    parser.add_option("", "--debug", dest="debugLevel",
                      help="set debug level [default %default]",
                      action="store", type=int, default=0)
    (opts, args) = parser.parse_args()
    if len(args) <= 1:
        parser.error('Invalid number of arguments.')
    program, files = args[0], args[1:]
    # The return value was never used; run purely for the side effects
    # (writing the minimized '<name>.ok.<ext>' files).
    tokenBasedMultiDelta(program, files, log=opts.debugLevel)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Python 2 print syntax; report the interrupt and exit
        # immediately via os._exit (skips interpreter teardown).
        print >>sys.stderr,'Interrupted.'
        os._exit(1) # Avoid freeing our giant cache.
| Python | 0.003954 | |
d046968c5b16239b4ce3fbe17b6359339f3e7b9b | Add vcf convertor | utils/vcf_convertor.py | utils/vcf_convertor.py | #! -*- coding: utf-8 -*-
import re
import json
# Patterns for one vCard entry, its full-name line, and phone-number lines.
person_patten = re.compile(r'BEGIN:VCARD(.*?)END:VCARD', re.DOTALL)
fullname_patten = re.compile(r'FN:(.*?)\n')
mobile_patten = re.compile(r':\+*?(\d{9}\d*?)\n')
# Close the input file deterministically (the original leaked the handle).
with open(r'iCloud vCard.vcf') as f:
    fc = f.read()
people = person_patten.findall(fc)
names = {}
for p in people:
    # Last FN: line of the card wins.  Reset per card so a card without
    # an FN line cannot silently inherit the previous card's name.
    name = None
    for i in fullname_patten.findall(p):
        name = i
    # Drop dashes so formatted numbers still match the digit pattern.
    p = p.replace("-", "")
    for i in mobile_patten.findall(p):
        # Strip a leading country code 86 from 13-digit numbers.
        if len(i) == 13 and i[:2] == "86":
            i = i[2:]
        names[i] = name
# Write the number -> name mapping; ``with`` guarantees flush and close.
with open("dump", "w") as fl:
    fl.write(json.dumps(names))
| Python | 0.000001 | |
3a1b4ceb2ae989495d2453c612ac6645fdf59726 | Create cisco_vlan_extract.py | cisco/cisco_vlan_extract.py | cisco/cisco_vlan_extract.py | from ciscoconfparse import CiscoConfParse as ccp
def extract_vlan(vlans):
    """
    Print a ``vlan <id>`` / `` name <name>`` configuration stanza for
    every ACTIVE vlan found in parsed ``show vlan`` output, skipping the
    default vlan 1.

    ``vlans`` is a CiscoConfParse object wrapping the command output;
    each matching line looks like ``<id> <name> active [ports...]``.
    """
    for entry in vlans.find_objects("active"):
        fields = entry.text.split()
        vlan_id = " ".join(fields[0:1])
        vlan_name = " ".join(fields[1:2])
        if vlan_id == "1":
            continue
        print("vlan", vlan_id)
        print(" name", vlan_name)
# Parse a saved `show vlan` capture from the working directory and print
# the equivalent vlan configuration.
extract_vlan(ccp("show_vlan.txt"))
| Python | 0.000049 | |
08d66a82ea47832654aa17f0323df6ce57691fcb | add setup.py | verdenskart/setup.py | verdenskart/setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
# Declarative package metadata; sources use a src/ layout, so both
# find_packages() and package_dir point at "src".  No console scripts or
# extra data files are shipped yet.
setup(
    name="bokeh-worldmap",
    version="0.1.0",
    packages=find_packages("src"),
    package_data={},
    package_dir={"": "src"},
    entry_points={"console_scripts": []},
)
| Python | 0.000001 | |
d33bd223ec35712d0aa9e4ab3da83a19cf1a1120 | Create httpclient.py | httpclient.py | httpclient.py | #!/usr/bin/env python
# coding: utf-8
# Copyright 2013 Abram Hindle
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Do not use urllib's HTTP GET and POST mechanisms.
# Write your own HTTP GET and POST
# The point is to understand what you have to send and get experience with it
import sys
import socket
import re
# you may use urllib to encode data appropriately
import urllib
def help():
    """Print the usage banner (Python 2 print statement)."""
    print "httpclient.py [GET/POST] [URL]\n"
class HTTPRequest(object):
    """Holds the status code and body of a completed HTTP exchange."""

    def __init__(self, code=200, body=""):
        self.code, self.body = code, body
class HTTPClient(object):
    """Minimal hand-rolled HTTP/1.1 client (Python 2 syntax)."""
    def get_host_port(self,url):
        """Resolve the URL's host to an IP address; the port is always 80.

        NOTE(review): an explicit ``:port`` in the URL is ignored --
        confirm only port-80 targets are expected.
        """
        #sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        hostname = url.split('://')[1]
        url2 = hostname.split('/')[0]
        host = socket.gethostbyname(url2)
        return (host,80)
    # creates a socket connected to host via port
    # REMEMBER TO CLOSE THE SOCKETS WHEN YOU USE THEM
    def connect(self, host, port):
        """Open a TCP connection with a 1 second receive timeout."""
        # use sockets!
        # sew the sock
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(1)
        # put on the sock
        sock.connect((host,port))
        print 'connected to ' + host, port
        return sock
    def get_code(self, data):
        """Return the status code token from a raw response (a string, e.g. '200')."""
        return data.split()[1]
    def get_headers(self,data):
        """Intended to return the header section of a raw response.

        BUG: ``list - str`` is undefined, so the second line raises
        TypeError whenever this is called; presumably ``data[:-1]``
        (drop the body element) was meant.  Unused by GET/POST below.
        """
        data = data.split('\r\n\r\n')
        data = data - data[-1]
        return data
    def get_body(self, data):
        """Return everything after the last blank line (the response body)."""
        return data.split('\r\n\r\n')[-1]
    # read everything from the socket
    def recvall(self, sock):
        """Read until EOF; the 1s timeout (caught by the bare except) also ends the read."""
        buffer = bytearray()
        done = False
        while not done:
            try:
                part = sock.recv(1024)
                if (part):
                    buffer.extend(part)
                else:
                    done = not part
            except:
                return str(buffer)
        return str(buffer)
    # Perform an HTTP GET request
    def GET(self, url, args=None):
        """Send a GET for *url*; return HTTPRequest(code, body).

        ``code`` is the status string from the response, or the int 404
        when connecting or parsing fails.  ``body`` holds the outgoing
        request first, then is rebound to the response body.
        """
        code = 200
        (http, uri) = re.split('://',url)
        target = ""
        hostname = ""
        try:
            hostname = uri.split('/')[0]
            target = uri.split('/')[1]
        except:
            hostname = uri
            target = ""
        body = "GET /"+target+" HTTP/1.1 \r\nHost: "+hostname+" \r\n\r\n"
        host = ""
        port = 80
        try:
            (host,port) = self.get_host_port(url)
            sock = self.connect(host,port)
            sock.sendall(body)
            buff = self.recvall(sock)
            code = self.get_code(buff)
            body = self.get_body(buff)
            if len(buff) == 0:
                code = 404
            sock.close()
        except:
            code = 404
        return HTTPRequest(code, body)
    # Perform an HTTP POST request
    def POST(self, url, args=None):
        """Send a POST for *url*; mirrors GET.

        NOTE(review): the Content-Length branch concatenates the int
        ``len(query)`` with a string, which raises TypeError; the bare
        except then appends only a blank line, so no body or length is
        ever actually sent -- confirm intended.
        """
        code = 200
        (http, uri) = re.split('://',url)
        target = ""
        hostname = ""
        try:
            hostname = uri.split('/')[0]
            target = uri.split('/')[1]
        except:
            hostname = uri
            target = ""
        body = "POST "+ target +" / HTTP/1.1 \r\n content-type:application/x-www-form-urlencoded;charset=utf-8 \r\n Host: www."+hostname+" \r\n "
        try:
            query = re.split('\?', target)
            query = query[1]
            body += len(query)+"\r\n" +query + '\r\n\r\n'
        except:
            body += "\r\n"
        #sock_host = ""
        host = ""
        port = 80
        try:
            (host,port) = self.get_host_port(url)
            sock = self.connect(host,port)
            sock.sendall(body)
            buff = self.recvall(sock)
            code = self.get_code(buff)
            body = self.get_body(buff)
            if len(buff) == 0:
                code = 404
            sock.close()
        except:
            code = 404
        return HTTPRequest(code, body)
    def command(self, url, command="GET", args=None):
        """Dispatch to GET or POST; note the URL is the first positional."""
        if (command == "POST"):
            return self.POST( url, args )
        else:
            return self.GET( url, args )
if __name__ == "__main__":
    client = HTTPClient()
    command = "GET"
    if (len(sys.argv) <= 1):
        help()
        sys.exit(1)
    elif (len(sys.argv) == 3):
        # BUG: per the usage string argv[1] is the verb and argv[2] the
        # URL, but command() takes (url, command) -- arguments swapped.
        print client.command( sys.argv[1], sys.argv[2] )
    else:
        # BUG: same swap -- the verb "GET" is passed as the url positional.
        print client.command( command, sys.argv[1] )
| Python | 0.000001 | |
5bb7d25765655f83c42b5e7abc1093f7f85f7950 | bump version to 0.8.16 | mycroft/version/__init__.py | mycroft/version/__init__.py | # Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
import json
from genericpath import exists, isfile
from mycroft.util.log import getLogger
__author__ = 'augustnmonteiro'
# The following lines are replaced during the release process.
# START_VERSION_BLOCK
CORE_VERSION_MAJOR = 0
CORE_VERSION_MINOR = 8
CORE_VERSION_BUILD = 16
# END_VERSION_BLOCK
CORE_VERSION_STR = (str(CORE_VERSION_MAJOR) + "." +
str(CORE_VERSION_MINOR) + "." +
str(CORE_VERSION_BUILD))
LOG = getLogger(__name__)
class VersionManager(object):
    """Reads the installed core/enclosure versions from version.json."""
    __location = "/opt/mycroft/version.json"

    @staticmethod
    def get():
        """Return the version dict from version.json.

        Falls back to ``{"coreVersion": None, "enclosureVersion": None}``
        when the file is missing, unreadable or not valid JSON.
        """
        if (exists(VersionManager.__location) and
                isfile(VersionManager.__location)):
            try:
                with open(VersionManager.__location) as f:
                    return json.load(f)
            # Catch only expected failures (I/O errors and malformed
            # JSON -- JSONDecodeError subclasses ValueError) instead of
            # a bare ``except``, which would also swallow programming
            # errors, KeyboardInterrupt and SystemExit.
            except (IOError, OSError, ValueError):
                LOG.error("Failed to load version from '%s'"
                          % VersionManager.__location)
        return {"coreVersion": None, "enclosureVersion": None}
| # Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
import json
from genericpath import exists, isfile
from mycroft.util.log import getLogger
__author__ = 'augustnmonteiro'
# The following lines are replaced during the release process.
# START_VERSION_BLOCK
CORE_VERSION_MAJOR = 0
CORE_VERSION_MINOR = 8
CORE_VERSION_BUILD = 15
# END_VERSION_BLOCK
CORE_VERSION_STR = (str(CORE_VERSION_MAJOR) + "." +
str(CORE_VERSION_MINOR) + "." +
str(CORE_VERSION_BUILD))
LOG = getLogger(__name__)
class VersionManager(object):
    """Reads the installed core/enclosure versions from version.json."""
    __location = "/opt/mycroft/version.json"

    @staticmethod
    def get():
        """Return the version dict from version.json.

        Falls back to ``{"coreVersion": None, "enclosureVersion": None}``
        when the file is missing, unreadable or not valid JSON.
        """
        if (exists(VersionManager.__location) and
                isfile(VersionManager.__location)):
            try:
                with open(VersionManager.__location) as f:
                    return json.load(f)
            # Catch only expected failures (I/O errors and malformed
            # JSON -- JSONDecodeError subclasses ValueError) instead of
            # a bare ``except``, which would also swallow programming
            # errors, KeyboardInterrupt and SystemExit.
            except (IOError, OSError, ValueError):
                LOG.error("Failed to load version from '%s'"
                          % VersionManager.__location)
        return {"coreVersion": None, "enclosureVersion": None}
| Python | 0 |
bfc6083d9f08e33ec1f96fe66252915da4e1a0d8 | Test suite | minusconf_test.py | minusconf_test.py | #!/usr/bin/env python
import unittest
import minusconf
import socket
import time
class MinusconfUnitTest(unittest.TestCase):
    """Unit tests for the minusconf service-discovery library."""
    def setUp(self):
        # chr(223): U+00DF (sharp s) on Python 3, byte 0xdf on Python 2;
        # used to exercise non-ASCII service names throughout.
        sharp_s = chr(223)
        self.svc1 = minusconf.Service('-conf-test-service', 'strangeport', 'some name')
        self.svc2 = minusconf.Service('-conf-test-service' + sharp_s, 'strangeport', 'some name')
        self.svc3 = minusconf.Service('-conf-test-service' + sharp_s, 'svcp3', 'svc3: sharp s = ' + sharp_s)
        self.svc4 = minusconf.Service('-conf-test-service' + sharp_s, 'svcp4', 'svc4')
    def testServiceMatching(self):
        """Advertiser.services_matching filters by type/name; '' is a wildcard."""
        a = minusconf.Advertiser()
        def assert_sm(stype, sname, expected):
            self.assertEquals(set(a.services_matching(stype, sname)), set(expected))
        assert_sm('', '', [])
        a.services.append(self.svc1)
        assert_sm(self.svc1.stype, self.svc1.sname, [self.svc1])
        assert_sm(self.svc1.stype, '', [self.svc1])
        a.services.append(self.svc2)
        assert_sm(self.svc2.stype, self.svc2.sname, [self.svc2])
        a.services.append(self.svc3)
        assert_sm(self.svc3.stype, self.svc3.sname, [self.svc3])
        assert_sm('', self.svc3.sname, [self.svc3])
        assert_sm('', '', [self.svc1, self.svc2, self.svc3])
    def testServiceRepresentation(self):
        """repr() and str() of services must contain type, port and name."""
        for reprfunc in (repr,str):
            for svc in [self.svc1, self.svc2, self.svc3, self.svc4, minusconf.ServiceAt('a', 'b', 'c', 'd', 'e', 'f')]:
                r = reprfunc(svc)
                self.assertTrue(r.find(svc.stype) >= 0)
                self.assertTrue(r.find(svc.port) >= 0)
                self.assertTrue(r.find(svc.sname) >= 0)
    def testRealExample(self):
        """End-to-end: two advertisers on the network, one seeker finding them."""
        a1 = minusconf.Advertiser([self.svc1])
        a1.start()
        a2 = minusconf.Advertiser([self.svc3])
        a2.start()
        self.assertEquals(self.svc2.stype, self.svc3.stype)
        self.assertEquals(self.svc2.stype, self.svc4.stype)
        a1.services.append(self.svc2)
        a2.services.append(self.svc4)
        # Wait for advertisers
        time.sleep(0.5)
        s = minusconf.Seeker(self.svc2.stype, timeout=0.5)
        # Compare by (sname, stype, port); svcat instances are distinct objects.
        svc_eq = lambda svc, exp: (svc.sname == exp.sname and svc.stype == exp.stype and svc.port == exp.port)
        svc_in = lambda svc, svcs: any((svc_eq(svc, s) for s in svcs))
        s.find_callback = lambda seeker,svcat: self.assertTrue(svc_in(svcat, [self.svc2, self.svc3, self.svc4]))
        s.error_callback = lambda seeker,errorstr: self.fail('Got error ' + repr(errorstr) + ' from ' + repr(seeker))
        s.run()
        # svc1 has a different stype and must not be found.
        self.assertTrue(not svc_in(self.svc1, s.results))
        self.assertTrue(svc_in(self.svc2, s.results))
        self.assertTrue(svc_in(self.svc3, s.results))
        self.assertTrue(svc_in(self.svc4, s.results))
    def testInetPton(self):
        """Both inet_pton implementations agree on valid and invalid addresses."""
        bts = minusconf._compat_bytes
        testVals = [
            (socket.AF_INET, '1.2.3.4', bts('\x01\x02\x03\x04')),
            (socket.AF_INET, '255.254.253.252', bts('\xff\xfe\xfd\xfc')),
            (socket.AF_INET6, '::', bts('\x00')*16),
            (socket.AF_INET6, '::1', bts('\x00')*15 + bts('\x01')),
            (socket.AF_INET6, '100::', bts('\x01') + bts('\x00')*15),
            (socket.AF_INET6, '0100::', bts('\x01') + bts('\x00')*15),
            (socket.AF_INET6, '1000::', bts('\x10') + bts('\x00')*15),
            (socket.AF_INET6, 'ff25::12:2:254.232.3.4', bts('\xff\x25\x00\x00\x00\x00\x00\x00\x00\x12\x00\x02\xfe\xe8\x03\x04')),
            (socket.AF_INET6, 'ffff:2:3:4:ffff::', bts('\xff\xff\x00\x02\x00\x03\x00\x04\xff\xff') + bts('\x00') * 6),
            ]
        invalidVals = [
            (socket.AF_INET, '1.2.3'),
            (socket.AF_INET, '1.2.3.4.5'),
            (socket.AF_INET, '301.2.2.2'),
            (socket.AF_INET, '::1.2.2.2'),
            (socket.AF_INET6, '1:2:3:4:5:6:7'),
            (socket.AF_INET6, '1:2:3:4:5:6:7:'),
            (socket.AF_INET6, ':2:3:4:5:6:7:8'),
            (socket.AF_INET6, '1:2:3:4:5:6:7:8:9'),
            (socket.AF_INET6, '1:2:3:4:5:6:7:8:'),
            (socket.AF_INET6, '1::3:4:5:6::8'),
            (socket.AF_INET6, 'a:'),
            (socket.AF_INET6, ':'),
            (socket.AF_INET6, ':::'),
            (socket.AF_INET6, '::a:'),
            (socket.AF_INET6, ':a::'),
            (socket.AF_INET6, '1ffff::'),
            (socket.AF_INET6, '0xa::'),
            (socket.AF_INET6, '1:2:3:4:5:6:300.2.3.4'),
            (socket.AF_INET6, '1:2:3:4:5:6:1a.2.3.4'),
            (socket.AF_INET6, '1:2:3:4:5:1.2.3.4:8'),
            ]
        for ptonf in (minusconf._inet_pton, minusconf._compat_inet_pton):
            for (family, arg, expected) in testVals:
                self.assertEquals(ptonf(family, arg), expected)
            for (family, arg) in invalidVals:
                self.assertRaises((ValueError, socket.error), ptonf, family, arg)
if __name__ == '__main__':
    # Discover and run all TestCase classes defined in this module.
    unittest.main()
| Python | 0.000001 | |
32a1781bb5ba4f143e5910fbd841ca6aeeebc8fe | Add test script for color histogram matcher | jsk_2015_05_baxter_apc/node_scripts/test_color_histogram_matcher.py | jsk_2015_05_baxter_apc/node_scripts/test_color_histogram_matcher.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
"""This script is to test color histogram & its matcher
Usage
-----
$ # to extract color histogram
$ roslaunch jsk_2014_picking_challenge extract_color_histogram.launch
input_image:=/test_color_histogram/train_image
$ rosrun jsk_2014_picking_challenge test_color_histogram.py --extract
$ # to test color histogram matcher
$ roslaunch jsk_2014_picking_challenge \
test_color_histogram_matching.launch
$ rosrun jsk_2014_picking_challenge test_color_histogram.py --test
"""
from __future__ import division
import os
import argparse
import numpy as np
import rospy
from jsk_2014_picking_challenge.srv import ObjectMatch, StringEmpty
from extract_color_histogram import ExtractColorHistogram
from matcher_common import listdir_for_img
from test_object_matching import TestObjectMatching
def get_nations():
    """Return the nation names: one sub-directory per nation under data/national_flags."""
    flags_root = os.path.join(
        os.path.dirname(__file__), '../data/national_flags')
    return os.listdir(flags_root)
def get_data_dirs():
    """Yield the flag data directory path for each nation."""
    flags_root = os.path.join(
        os.path.dirname(__file__), '../data/national_flags')
    for nation_nm in get_nations():
        yield os.path.join(flags_root, nation_nm)
def prepare_train_data():
    """Extract and save a color histogram per nation flag and color channel."""
    for data_dir in get_data_dirs():
        nation_nm = os.path.basename(data_dir)
        # Materialize the paths as a list: on Python 3 ``map`` returns a
        # one-shot iterator that would be exhausted by the first color's
        # extractor, leaving 'blue' and 'green' with no images.  A list
        # behaves identically on Python 2.
        raw_paths = [os.path.join(data_dir, fname)
                     for fname in listdir_for_img(data_dir)]
        for color in ['red', 'blue', 'green']:
            extractor = ExtractColorHistogram(object_nm=nation_nm,
                color=color, raw_paths=raw_paths)
            extractor.extract_and_save()
def test():
    """Publish each nation's flag image and query the histogram matcher,
    logging whether the most probable match equals the published nation."""
    client_of_matcher = rospy.ServiceProxy('/semi/color_histogram_matcher',
                                           ObjectMatch)
    client_of_img = rospy.ServiceProxy('/image_publish_server', StringEmpty)
    nations = np.array(get_nations())
    for i, target_obj in enumerate(nations):
        # request to publish image
        rospy.loginfo('target: {}'.format(target_obj))
        imgpath = os.path.join(os.path.dirname(os.path.abspath(__file__)),
            '../data/national_flags/{0}/{0}.png'.format(target_obj))
        client_of_img(string=imgpath)
        # Give the image publisher time to actually push the image out.
        rospy.sleep(3)
        # request to object matcher
        probs = client_of_matcher(objects=nations).probabilities
        probs = np.array(probs)
        # Correct when the highest-probability index is the published nation.
        rospy.loginfo('correct?: {}'.format(probs.argmax() == i))
        # Top-3 most similar nations, most probable first.
        rospy.loginfo('similar: {}'.format(
            nations[probs.argsort()][::-1][:3]))
def parse_args():
    """Parse -e/--extract and -t/--test; exactly one of them must be set."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--extract', action='store_true',
                        help='flag to extract color histogram')
    parser.add_argument('-t', '--test', action='store_true',
                        help='flag to test color histogram matcher')
    args = parser.parse_args(rospy.myargv()[1:])
    chosen = dict(args._get_kwargs()).values()
    # Error out when both flags are set, or when neither is.
    if all(chosen) or not any(chosen):
        print('either -e or -t should be set (both is not allowed)')
        parser.print_help()
        parser.exit()
    return args
def main():
    """Dispatch to histogram extraction or matcher testing based on flags."""
    args = parse_args()
    if args.extract:
        prepare_train_data()
        return
    if args.test:
        test()
        return
    rospy.logerr('Unknown args')
if __name__ == '__main__':
rospy.init_node('test_color_histogram_matcher')
main()
| Python | 0 | |
3088fcd2d42b4e59601c103cc01cec1d949f6f57 | Improve OldPersian | ielex/lexicon/migrations/0093_fix_oldPersian.py | ielex/lexicon/migrations/0093_fix_oldPersian.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def forwards_func(apps, schema_editor):
    '''
    OldPersian doesn't have lexemes for some meanings.
    This migration generates them.
    '''
    # Historical model versions, fetched via the migration state registry:
    Language = apps.get_model('lexicon', 'Language')
    MeaningList = apps.get_model('lexicon', 'MeaningList')
    Lexeme = apps.get_model('lexicon', 'Lexeme')
    # Data to work with:
    target = Language.objects.get(ascii_name='OldPersian')
    # Mapping meaning.id -> Lexeme, for existing OldPersian lexemes
    mIdLexemeMap = {}
    for l in Lexeme.objects.filter(language=target).all():
        mIdLexemeMap[l.meaning_id] = l
    # Searching for missing lexemes in the Jena200 meaning list:
    mList = MeaningList.objects.get(name='Jena200')
    for m in mList.meanings.all():
        if m.id not in mIdLexemeMap:
            # Create an empty lexeme so the meaning is represented.
            Lexeme.objects.create(
                meaning=m,
                language=target)
def reverse_func(apps, schema_editor):
    """No-op: lexemes added by the forward migration are left in place."""
    return None
class Migration(migrations.Migration):
    # Data-only migration: fills in missing OldPersian lexemes.
    dependencies = [('lexicon', '0092_set_cjc_reliabilities_high')]
    operations = [
        migrations.RunPython(forwards_func, reverse_func),
    ]
| Python | 0 | |
6516b73210a575376bc78005ae28c0e843303b24 | add theano how-to-perform | Theano/how-to-perform-stencil-computations-element-wise-on-a-matrix-in-theano.py | Theano/how-to-perform-stencil-computations-element-wise-on-a-matrix-in-theano.py | import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import conv2d
# Demo: apply a [1, 0, 1] stencil element-wise to each channel of an
# RGB image by expressing it as a 2D convolution in Theano.
# original image 3D (3x3x4) (RGB Channel, height, width)
img = [[[1, 2, 3, 4],
        [1, 1, 3, 1],
        [1, 3, 1, 1]],
       [[2, 2, 3, 4],
        [2, 2, 3, 2],
        [2, 3, 2, 2]],
       [[3, 2, 3, 4],
        [3, 3, 3, 3],
        [3, 3, 3, 3]]]
# separate and reshape each channel to 4D (batch, channel, height, width)
# separated because convolution works on each channel only
R = np.asarray([[img[0]]], dtype='float32')
G = np.asarray([[img[1]]], dtype='float32')
B = np.asarray([[img[2]]], dtype='float32')
# 4D kernel from the original : [1,0,1]
# rotated because convolution works only on column
kernel = np.asarray([[[[1],[0],[1]]]], dtype='float32')
# theano convolution; 'half' border mode pads so output keeps input size
t_img = T.ftensor4("t_img")
t_kernel = T.ftensor4("t_kernel")
result = conv2d(
    input = t_img,
    filters=t_kernel,
    filter_shape=(1,1,1,3),
    border_mode = 'half')
f = theano.function([t_img,t_kernel],result)
# compute each channel with the same compiled function
R = f(R,kernel)
G = f(G,kernel)
B = f(B,kernel)
# merge and reshape again to (channel, height, width)
img = np.asarray([R,G,B])
img = np.reshape(img,(3,3,4))
print img
a99f0678815c2e998c25a0aaf9f2c79ad0d18610 | Add package 'ui' | source/ui/__init__.py | source/ui/__init__.py | # -*- coding: utf-8 -*-
## \package ui
# MIT licensing
# See: LICENSE.txt
| Python | 0.000034 | |
00b995719aaf11c2d7c3126e29b94b74f0edf8d2 | add test | osf_tests/test_downloads_summary.py | osf_tests/test_downloads_summary.py | # encoding: utf-8
import mock
import pytest
import pytz
import datetime
from django.utils import timezone
from addons.osfstorage import utils
from addons.osfstorage.tests.utils import StorageTestCase
from osf_tests.factories import ProjectFactory
from scripts.analytics.download_count_summary import DownloadCountSummary
@pytest.mark.django_db
class TestDownloadCount(StorageTestCase):
    """Verify that a recorded download shows up in the daily summary."""
    def test_download_count(self):
        # Keen does not allow same day requests so we have to do some time traveling to my birthday
        # NOTE(review): timezone.now is monkeypatched module-wide and never
        # restored -- confirm this cannot leak into other tests.
        timezone.now = mock.Mock(return_value=datetime.datetime(1991, 9, 25).replace(tzinfo=pytz.utc))
        node = ProjectFactory()
        utils.update_analytics(node, 'fake id', {'contributors': node.contributors})
        # Now back to the future, querying old date.
        timezone.now = mock.Mock(return_value=datetime.datetime.now().replace(tzinfo=pytz.utc))
        query_date = datetime.date(1991, 9, 25)
        event = DownloadCountSummary().get_events(query_date)
        # Exactly one download was recorded on the queried day.
        assert event[0]['files']['total'] == 1
| Python | 0.000002 | |
d764a483497afc5d029a82db14cc5cc88f45f4c0 | Add an extension to allow for an addFixedIp action on instances | nova/api/openstack/contrib/multinic.py | nova/api/openstack/contrib/multinic.py | # Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The multinic extension."""
from webob import exc
from nova import compute
from nova import log as logging
from nova.api.openstack import extensions
from nova.api.openstack import faults
LOG = logging.getLogger("nova.api.multinic")
class Multinic(extensions.ExtensionDescriptor):
    """API extension adding addFixedIp/removeFixedIp server actions."""
    def __init__(self, *args, **kwargs):
        super(Multinic, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()
    def get_name(self):
        """Human-readable extension name."""
        return "Multinic"
    def get_alias(self):
        """Short alias used in API URLs/headers."""
        return "NMN"
    def get_description(self):
        return "Multiple network support"
    def get_namespace(self):
        return "http://docs.openstack.org/ext/multinic/api/v1.1"
    def get_updated(self):
        return "2011-06-09T00:00:00+00:00"
    def get_actions(self):
        """Return the server actions contributed by this extension."""
        actions = []
        # Add the add_fixed_ip action
        act = extensions.ActionExtension("servers", "addFixedIp",
                                         self._add_fixed_ip)
        actions.append(act)
        # Add the remove_fixed_ip action
        act = extensions.ActionExtension("servers", "removeFixedIp",
                                         self._remove_fixed_ip)
        actions.append(act)
        return actions
    def _add_fixed_ip(self, input_dict, req, id):
        """Adds an IP on a given network to an instance."""
        try:
            # Validate the input entity
            if 'networkId' not in input_dict['addFixedIp']:
                LOG.exception(_("Missing 'networkId' argument for addFixedIp"))
                return faults.Fault(exc.HTTPUnprocessableEntity())
            # Add the fixed IP
            network_id = input_dict['addFixedIp']['networkId']
            self.compute_api.add_fixed_ip(req.environ['nova.context'], id,
                                          network_id)
        # Python 2 except syntax; any failure maps to a 400 response.
        except Exception, e:
            LOG.exception(_("Error in addFixedIp %s"), e)
            return faults.Fault(exc.HTTPBadRequest())
        return exc.HTTPAccepted()
    def _remove_fixed_ip(self, input_dict, req, id):
        # Not yet implemented
        raise faults.Fault(exc.HTTPNotImplemented())
| Python | 0 | |
c13d1347889cf574d3e6b9b835dadbca5fdc2d6c | Add wheel module for the salt key system | salt/wheel/key.py | salt/wheel/key.py | '''
Wheel system wrapper for key system
'''
import salt.key
def list_all():
    '''
    Return all minion keys, grouped by status
    '''
    return salt.key.Key(__opts__).list_all()
def accept(match):
    '''
    Accept the keys matched by the glob expression
    '''
    return salt.key.Key(__opts__).accept(match)
def delete(match):
    '''
    Delete the keys matched by the glob expression
    '''
    return salt.key.Key(__opts__).delete(match)
def reject(match):
    '''
    Reject keys based on a glob match
    '''
    # Docstring fixed: it previously said "Delete", copy-pasted from delete().
    skey = salt.key.Key(__opts__)
    return skey.reject(match)
def key_str(match):
    '''
    Return the key strings for keys matching the glob
    '''
    return salt.key.Key(__opts__).key_str(match)
def finger(match):
    '''
    Return fingerprints for keys matching the glob
    '''
    return salt.key.Key(__opts__).finger(match)
| Python | 0 | |
c0ebb74ad0ee2eb210266e3610e0b44474628872 | add ismount function from python Lib/posixpath.py | lib/ansible/module_utils/ismount.py | lib/ansible/module_utils/ismount.py | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is based on
# Lib/posixpath.py of cpython
# It is licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
# analyze, test, perform and/or display publicly, prepare derivative works,
# distribute, and otherwise use Python alone or in any derivative version,
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved"
# are retained in Python alone or in any derivative version prepared by Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
import os
import stat
def ismount(path):
    """Test whether a path is a mount point.

    Clone of os.path.ismount (from cpython Lib/posixpath.py),
    fixed to solve https://github.com/ansible/ansible-modules-core/issues/2186
    and workaround non-fixed http://bugs.python.org/issue2466.
    Unlike os.path.ismount at the time, an OSError from lstat() is treated
    as "not a mount" instead of propagating to the caller.
    """
    try:
        s1 = os.lstat(path)
    except OSError:
        # Could be "permission denied" while path is still a mount; we
        # deliberately report False rather than raise.
        return False
    # A symlink can never be a mount point.
    # Bug fix: the original called os.stat.S_ISLNK, but S_ISLNK lives in the
    # 'stat' module -- every call raised AttributeError.
    if stat.S_ISLNK(s1.st_mode):
        return False
    # Resolve the parent directory (following symlinks) and stat it.
    parent = os.path.realpath(os.path.join(path, os.path.pardir))
    try:
        s2 = os.lstat(parent)
    except OSError:
        # As above: unreadable parent is reported as "not a mount".
        return False
    if s1.st_dev != s2.st_dev:
        return True     # path/.. on a different device as path
    if s1.st_ino == s2.st_ino:
        return True     # path/.. is the same i-node as path, i.e. path=='/'
    return False
| Python | 0.000004 | |
95a8ed6dcb19f322c9a14957da207efb8be10f5d | Customize makemessages to support ignoring fuzzy | hqscripts/management/commands/makemessages.py | hqscripts/management/commands/makemessages.py | from django.core.management.commands import makemessages
class Command(makemessages.Command):
    """makemessages subclass adding a --no-fuzzy option that strips fuzzy
    translations via msgattrib."""
    def add_arguments(self, parser):
        super().add_arguments(parser)
        parser.add_argument('--no-fuzzy', action='store_true', help='Remove fuzzy strings.')
    def handle(self, *args, **options):
        """Run makemessages, injecting --no-fuzzy into msgattrib_options."""
        no_fuzzy = options['no_fuzzy']
        if no_fuzzy:
            # The underlying parser only passes custom msgattrib_options if '--no-obsolete' is true,
            # so we have to do a bit of hacking here
            no_obsolete = options['no_obsolete']
            if no_obsolete:
                # If we are removing obsolete messages already, just add in removing fuzzy messages
                self.msgattrib_options += ['--no-fuzzy']
            else:
                # Otherwise, we need to fake obsolete messages while only actually removing fuzzy messages
                options['no_obsolete'] = True
                self.msgattrib_options = ['--no-fuzzy']
        super().handle(*args, **options)
| Python | 0 | |
1bd6d53c7ab8d7b2c2fdfbb8eb2fab2e1cfa1537 | Implement statistics & logger class | mugloar/logger.py | mugloar/logger.py | from datetime import datetime
from tabulate import tabulate
import sys
# ANSI terminal escape sequences used for coloured console output.
RED = "\033[1;31m"
BLUE = "\033[1;34m"
CYAN = "\033[1;36m"
GREEN = "\033[0;32m"
RESET = "\033[0;0m"
BOLD = "\033[;1m"
class Logger:
    """Console logger for Mugloar games: battle events plus final stats."""
    # Per-weather win/lose tallies, keyed by the game's weather codes.
    stats = {'NMR': {'win': 0, 'lose': 0},
             'FUNDEFINEDG': {'win': 0, 'lose': 0},
             'HVA': {'win': 0, 'lose': 0},
             'SRO': {'win': 0, 'lose': 0},
             'T E': {'win': 0, 'lose': 0}}
    # Weather code of the battle currently in flight (set by dragon()).
    weather_code = ""
    def new_game(self, params):
        """Log the start of a game and the opposing knight's stats."""
        text = '------------------------------------------\n' + \
               time() + 'Started game id ' + str(params['gameId']) + ' against ' + params['knight']['name'] + \
               ' (\u2694: ' + str(params['knight']['attack']) + ', ' + \
               '\u26E8: ' + str(params['knight']['armor']) + ', ' + \
               '\N{RUNNER}: ' + str(params['knight']['agility']) + ', ' + \
               '\N{RAM}: ' + str(params['knight']['endurance']) + ')\n'
        # Write encoded bytes directly so the emoji survive any console encoding.
        sys.stdout.buffer.write(text.encode('utf8'))
    def dragon(self, dragon, weather):
        """Log the dragon sent into battle and remember the weather code."""
        self.weather_code = weather['code']
        text = time() + 'Sending dragon (\u26E8: ' + str(dragon.scaleThickness) + ', ' + \
               '\u2694: ' + str(dragon.clawSharpness) + ', ' + \
               '\N{DRAGON}: ' + str(dragon.wingStrength) + ', ' + \
               '\N{FIRE}: ' + str(dragon.fireBreath) + \
               ') in ' + weather['code'] + ' weather.\n'
        sys.stdout.buffer.write(text.encode('utf8'))
    def result(self, result):
        """Log the battle outcome in colour and update the weather tallies."""
        print(time(), end='')
        if result['status'] == 'Victory':
            sys.stdout.write(GREEN)
            self.stats[self.weather_code]['win'] += 1
        else:
            sys.stdout.write(RED)
            self.stats[self.weather_code]['lose'] += 1
        print(result['status'], end='')
        sys.stdout.write(RESET)
        print(': ' + result['message'])
    def print_stats(self):
        """Print a per-weather summary table and an overall success rate."""
        # NOTE(review): 'Iterations: ' prints no number -- the count was
        # presumably meant to be filled in here.
        print('------------------------------------------\n' +
              'STATISTICS\n' +
              'Iterations: ' + '\n\n' +
              'By weather type:')
        table = []
        for weather_code, stat in self.stats.items():
            table.append([weather_code, str(stat['win'] + stat['lose']), str(stat['win']), str(stat['lose']), str(survival_rate(stat['win'], stat['lose']))])
        print(tabulate(table, headers=['Weather', 'Battles', 'Wins', 'Losses', 'Survival rate']))
        print('\n\nOVERALL SUCCESS RATE: ', end='')
        # NOTE(review): success_rate is hard-coded to 0, so the overall rate
        # always prints as 0% in red -- looks like an unfinished computation.
        success_rate = 0
        if success_rate < 60:
            sys.stdout.write(RED)
        else:
            sys.stdout.write(GREEN)
        print(str(success_rate) + '%')
def survival_rate(wins, losses):
    """Return the win percentage as a trimmed string, or '-' for no battles."""
    battles = wins + losses
    if not battles:
        return '-'
    if not wins:
        return '0%'
    # '{0:g}' trims trailing zeros (e.g. 50 rather than 50.0).
    return '{0:g}%'.format(wins / battles * 100)
def time():
    """Return the current local time as '[DD.MM.YYYY HH:MM:SS] '."""
    # Bug fix: the format used '%H:%m:%S' -- %m is the MONTH directive, so
    # timestamps showed the month where the minutes belong; %M is minutes.
    return '[' + datetime.now().strftime('%d.%m.%Y %H:%M:%S') + '] '
597a1c12223fec5deefcd31b3a00b06d1095b32d | Add check replication step | dbaas/workflow/steps/util/region_migration/check_replication.py | dbaas/workflow/steps/util/region_migration/check_replication.py | # -*- coding: utf-8 -*-
import logging
from util import full_stack
from workflow.steps.util.base import BaseStep
from workflow.exceptions.error_codes import DBAAS_0020
from time import sleep
LOG = logging.getLogger(__name__)
class CheckReplication(BaseStep):
    """Migration step: wait for replication on the future instance."""
    def __unicode__(self):
        return "Checking replication..."
    def do(self, workflow_dict):
        """Poll replication status up to 21 times, 10s apart (~210s max).

        Returns True once replication is OK, False on error; falls through
        to an implicit None (falsy) if replication never catches up.
        """
        try:
            databaseinfra = workflow_dict['databaseinfra']
            driver = databaseinfra.get_driver()
            # Replication is checked on the migrated (future) instance.
            instance = workflow_dict['source_instances'][0].future_instance
            for attempt in range(0, 21):
                LOG.info("Waiting 10s to check replication...")
                sleep(10)
                if driver.is_replication_ok(instance):
                    return True
        except Exception:
            # Record the failure in the workflow's error bookkeeping.
            traceback = full_stack()
            workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
            workflow_dict['exceptions']['traceback'].append(traceback)
            return False
    def undo(self, workflow_dict):
        # Nothing to roll back for a read-only check.
        LOG.info("Running undo...")
        return True
| Python | 0 | |
fb6c84e7703092f495324fe57041717403803e7f | Add scrape_symbols.py placeholder. | scrape_symbols.py | scrape_symbols.py | #!/usr/bin/env python
# encoding: utf-8
def main():
    """Placeholder entry point; symbol scraping is not yet implemented."""
    return None
if __name__ == '__main__':
main()
| Python | 0 | |
fe479bf2a8ec547922c6643bbdf0ba768eb79c9d | Add script to simulate multiple games | ludo/simulator.py | ludo/simulator.py | #!/usr/bin/env python3
from game import Game
# Simulate NUM_GAMES complete ludo games and report throw statistics.
print("Welcome to a game of ludo!")
average_throw_counter = 0
# Sentinel larger than any realistic game length; replaced on first game.
min_throws_per_game = 10000000
max_throws_per_game = 0
NUM_GAMES = 100
for i in range(0, NUM_GAMES):
    game = Game()
    throw_counter = 0
    # Play the game to completion, counting dice throws.
    while game.next_move():
        throw_counter += 1
    average_throw_counter += throw_counter
    if throw_counter < min_throws_per_game:
        min_throws_per_game = throw_counter
    if throw_counter > max_throws_per_game:
        max_throws_per_game = throw_counter
    print("Game:", i+1)
print("Average throws:", average_throw_counter/NUM_GAMES)
print("Min", min_throws_per_game)
print("Max", max_throws_per_game)
| Python | 0 | |
b5b21a151b219ae5f9a017ea0bda95c1d0be92ca | Fix Csv validation | tools/telemetry/telemetry/csv_page_benchmark_results.py | tools/telemetry/telemetry/csv_page_benchmark_results.py | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page_benchmark_results import PageBenchmarkResults
class CsvPageBenchmarkResults(PageBenchmarkResults):
  """Benchmark results sink that renders each page's measurements as CSV.

  When output_after_every_page is true, a row is written as each page
  finishes; otherwise all rows are written in PrintSummary().
  """
  def __init__(self, results_writer, output_after_every_page):
    super(CsvPageBenchmarkResults, self).__init__()
    self._results_writer = results_writer
    self._did_output_header = False
    self._header_names_written_to_writer = None
    self._output_after_every_page = output_after_every_page
  def DidMeasurePage(self):
    assert self.values_for_current_page, 'Failed to call WillMeasurePage'
    if not self._output_after_every_page:
      super(CsvPageBenchmarkResults, self).DidMeasurePage()
      return
    # The first measured page defines the header; later pages must match it.
    if not self._did_output_header:
      self._OutputHeader()
    else:
      self._ValidateOutputNamesForCurrentPage()
    self._OutputValuesForPage(self.values_for_current_page)
    super(CsvPageBenchmarkResults, self).DidMeasurePage()
  def PrintSummary(self, trace_tag):
    # In deferred mode, emit the header and all accumulated rows now.
    if not self._output_after_every_page:
      self._OutputHeader()
      for page_values in self.all_values_for_all_pages:
        self._OutputValuesForPage(page_values)
    super(CsvPageBenchmarkResults, self).PrintSummary(trace_tag)
  def _ValidateOutputNamesForCurrentPage(self):
    assert self._did_output_header
    # Compare as sets: pages may report measurements in any order.
    current_page_measurement_names = \
        set(self.values_for_current_page.measurement_names)
    header_names_written_to_writer = \
        set(self._header_names_written_to_writer)
    if header_names_written_to_writer == current_page_measurement_names:
      return
    assert False, """To use CsvPageBenchmarkResults, you must add the same
result names for every page. In this case, first page output:
   %s
Thus, all subsequent pages must output this as well. Instead, the current page
output:
   %s
Change your test to produce the same thing each time, or modify
MultiPageBenchmark.results_are_the_same_on_every_page to return False.
""" % (repr(header_names_written_to_writer),
       repr(current_page_measurement_names))
  def _OutputHeader(self):
    # Header columns are sorted measurement names so output is deterministic.
    assert not self._did_output_header
    all_measurement_names = list(
      self.all_measurements_that_have_been_seen.keys())
    all_measurement_names.sort()
    self._did_output_header = True
    self._header_names_written_to_writer = list(all_measurement_names)
    row = ['url']
    for measurement_name in all_measurement_names:
      measurement_data = \
          self.all_measurements_that_have_been_seen[measurement_name]
      row.append('%s (%s)' % (measurement_name, measurement_data['units']))
    self._results_writer.writerow(row)
  def _OutputValuesForPage(self, page_values):
    # One CSV row per page: url, then each measurement's value ('-' if absent).
    row = [page_values.page.url]
    for measurement_name in self._header_names_written_to_writer:
      value = page_values.FindValueByMeasurementName(measurement_name)
      if value:
        row.append('%s' % value.output_value)
      else:
        row.append('-')
    self._results_writer.writerow(row)
| # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page_benchmark_results import PageBenchmarkResults
class CsvPageBenchmarkResults(PageBenchmarkResults):
  """Benchmark results sink that renders each page's measurements as CSV.

  When output_after_every_page is true, a row is written as each page
  finishes; otherwise all rows are written in PrintSummary().
  """
  def __init__(self, results_writer, output_after_every_page):
    super(CsvPageBenchmarkResults, self).__init__()
    self._results_writer = results_writer
    self._did_output_header = False
    self._header_names_written_to_writer = None
    self._output_after_every_page = output_after_every_page
  def DidMeasurePage(self):
    assert self.values_for_current_page, 'Failed to call WillMeasurePage'
    if not self._output_after_every_page:
      super(CsvPageBenchmarkResults, self).DidMeasurePage()
      return
    # The first measured page defines the header; later pages must match it.
    if not self._did_output_header:
      self._OutputHeader()
    else:
      self._ValidateOutputNamesForCurrentPage()
    self._OutputValuesForPage(self.values_for_current_page)
    super(CsvPageBenchmarkResults, self).DidMeasurePage()
  def PrintSummary(self, trace_tag):
    # In deferred mode, emit the header and all accumulated rows now.
    if not self._output_after_every_page:
      self._OutputHeader()
      for page_values in self.all_values_for_all_pages:
        self._OutputValuesForPage(page_values)
    super(CsvPageBenchmarkResults, self).PrintSummary(trace_tag)
  def _ValidateOutputNamesForCurrentPage(self):
    assert self._did_output_header
    # Bug fix: compare as sets. The header is written in sorted order while
    # pages may report their measurements in any order, so the previous
    # direct comparison against the header list could fail on ordering alone.
    current_page_measurement_names = \
        set(self.values_for_current_page.measurement_names)
    header_names_written_to_writer = \
        set(self._header_names_written_to_writer)
    if header_names_written_to_writer == current_page_measurement_names:
      return
    assert False, """To use CsvPageBenchmarkResults, you must add the same
result names for every page. In this case, first page output:
   %s
Thus, all subsequent pages must output this as well. Instead, the current page
output:
   %s
Change your test to produce the same thing each time, or modify
MultiPageBenchmark.results_are_the_same_on_every_page to return False.
""" % (repr(header_names_written_to_writer),
       repr(current_page_measurement_names))
  def _OutputHeader(self):
    # Header columns are sorted measurement names so output is deterministic.
    assert not self._did_output_header
    all_measurement_names = list(
      self.all_measurements_that_have_been_seen.keys())
    all_measurement_names.sort()
    self._did_output_header = True
    self._header_names_written_to_writer = list(all_measurement_names)
    row = ['url']
    for measurement_name in all_measurement_names:
      measurement_data = \
          self.all_measurements_that_have_been_seen[measurement_name]
      row.append('%s (%s)' % (measurement_name, measurement_data['units']))
    self._results_writer.writerow(row)
  def _OutputValuesForPage(self, page_values):
    # One CSV row per page: url, then each measurement's value ('-' if absent).
    row = [page_values.page.url]
    for measurement_name in self._header_names_written_to_writer:
      value = page_values.FindValueByMeasurementName(measurement_name)
      if value:
        row.append('%s' % value.output_value)
      else:
        row.append('-')
    self._results_writer.writerow(row)
| Python | 0.000011 |
a8f172752a72d93537820322b9ce62b601be6c5f | Fix cpplint warning. | script/cpplint.py | script/cpplint.py | #!/usr/bin/env python
import fnmatch
import os
import subprocess
import sys
# Files excluded from linting (platform-specific headers, generated sources).
IGNORE_FILES = [
  'browser/atom_application_mac.h',
  'browser/atom_application_delegate_mac.h',
  'browser/native_window_mac.h',
  'browser/resources/win/resource.h',
  'browser/ui/cocoa/event_processing_window.h',
  'browser/ui/cocoa/atom_menu_controller.h',
  'browser/ui/cocoa/nsalert_synchronous_sheet.h',
  'common/api/api_messages.cc',
  'common/api/api_messages.h',
  'common/atom_version.h',
  'common/swap_or_assign.h',
]
# Repository root: parent of the directory containing this script.
SOURCE_ROOT = os.path.dirname(os.path.dirname(__file__))
def main():
  # Run from the repo root so the relative paths in IGNORE_FILES match.
  os.chdir(SOURCE_ROOT)
  files = list_files(['app', 'browser', 'common', 'renderer'],
                     ['*.cc', '*.h'])
  call_cpplint(list(set(files) - set(IGNORE_FILES)))
def list_files(directories, filters):
  """Recursively collect file paths under *directories* whose basenames
  match any glob pattern in *filters*."""
  matched = []
  for base in directories:
    for root, _, names in os.walk(base):
      for pattern in filters:
        matched.extend(os.path.join(root, name)
                       for name in fnmatch.filter(names, pattern))
  return matched
def call_cpplint(files):
  # Run the bundled depot_tools cpplint, with header-guard and
  # include-what-you-use checks disabled (not applicable to this layout).
  cpplint = os.path.join(SOURCE_ROOT, 'vendor', 'depot_tools', 'cpplint.py')
  rules = '--filter=-build/header_guard,-build/include_what_you_use'
  subprocess.check_call([sys.executable, cpplint, rules] + files)
if __name__ == '__main__':
sys.exit(main())
| #!/usr/bin/env python
import fnmatch
import os
import subprocess
import sys
# Files excluded from linting (platform-specific headers, generated sources).
IGNORE_FILES = [
  'app/win/resource.h',
  'browser/atom_application_mac.h',
  'browser/atom_application_delegate_mac.h',
  'browser/native_window_mac.h',
  'browser/ui/cocoa/event_processing_window.h',
  'browser/ui/cocoa/atom_menu_controller.h',
  'browser/ui/cocoa/nsalert_synchronous_sheet.h',
  'common/api/api_messages.cc',
  'common/api/api_messages.h',
  'common/atom_version.h',
  'common/swap_or_assign.h',
]
# Repository root: parent of the directory containing this script.
SOURCE_ROOT = os.path.dirname(os.path.dirname(__file__))
def main():
  # Run from the repo root so the relative paths in IGNORE_FILES match.
  os.chdir(SOURCE_ROOT)
  files = list_files(['app', 'browser', 'common', 'renderer'],
                     ['*.cc', '*.h'])
  call_cpplint(list(set(files) - set(IGNORE_FILES)))
def list_files(directories, filters):
  # Walk each directory, collecting paths whose basenames match any filter.
  matches = []
  for directory in directories:
    for root, _, filenames, in os.walk(directory):
      for f in filters:
        for filename in fnmatch.filter(filenames, f):
          matches.append(os.path.join(root, filename))
  return matches
def call_cpplint(files):
  # Run the bundled depot_tools cpplint, with header-guard and
  # include-what-you-use checks disabled (not applicable to this layout).
  cpplint = os.path.join(SOURCE_ROOT, 'vendor', 'depot_tools', 'cpplint.py')
  rules = '--filter=-build/header_guard,-build/include_what_you_use'
  subprocess.check_call([sys.executable, cpplint, rules] + files)
if __name__ == '__main__':
sys.exit(main())
| Python | 0 |
334aa288fc38636f10e25b0d8ab4ecb91d198c9b | Add example SNP analysis script. | examples/nature_protocols/phylogeny/summarize_heterozygosity.py | examples/nature_protocols/phylogeny/summarize_heterozygosity.py | #!/usr/bin/python
#
# Copyright (c) 2012 Mikkel Schubert <MSchubert@snm.ku.dk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import sys
import pysam
from pypeline.common.vcfwrap import \
get_ml_genotype
import pypeline.common.timer as timer
def read_bed_records(filename):
    """Reads a bed-file (i.e. for a set of regions of interest), and returns
    a list of its records (contig name, start position, end position) in
    file order; blank lines and '#' comment lines are skipped. Note that
    the list is NOT sorted here -- callers sort it themselves."""
    regions = []
    bed_parser = pysam.asBed()
    with open(filename) as bed_file:
        for line in bed_file:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            # pysam's BED parser wants the record text and its length.
            regions.append(bed_parser(line, len(line)))
    return regions
def select_vcf_records(bed_records, vcf_records):
    """Returns an iterable of VCF records, corresponding to the contents of each
    region specified by the BED records. Records are returned at most once, even
    if covered by multiple BED records."""
    contigs = frozenset(vcf_records.contigs)
    vcf_parser = pysam.asVCF()
    # Timer class used processing progress; meant primarily for BAM files
    progress = timer.BAMTimer(None)
    # Cache of positions observed for this contig, to prevent returning
    # positions in overlapping regions multiple times
    contig_cache = None
    contig_cache_name = None
    # Sorting groups BED records by contig, which is what makes the
    # per-contig cache reset below correct.
    for bed in sorted(bed_records):
        if bed.contig not in contigs:
            # Skip contigs for which no calls have been made (e.g. due to
            # low coverage. Otherwise Pysam raises an exception.
            continue
        elif contig_cache_name != bed.contig:
            # Reset cache per contig, to save memory
            contig_cache = set()
            contig_cache_name = bed.contig
        for record in vcf_records.fetch(bed.contig, bed.start, bed.end, parser = vcf_parser):
            progress.increment()
            if record.pos in contig_cache:
                # We've already reported this VCF record
                continue
            contig_cache.add(record.pos)
            # Skip records filtered by VCF_filter
            if record.filter in ('.', "PASS"):
                yield record
    progress.finalize()
def main(argv):
if len(argv) != 2:
sys.stderr.write("Usage: %s <BED-file> <VCF.bgz>\n")
return 1
sites = 0
sites_non_ref = 0
sites_homo_non_ref = 0
sites_het_one_non_ref = 0
sites_het_two_non_ref = 0
vcf_records = pysam.Tabixfile(argv[1])
bed_records = read_bed_records(argv[0])
for record in select_vcf_records(bed_records, vcf_records):
if record.alt != '.':
# Get the most likely diploid genotype
nt_a, nt_b = get_ml_genotype(record)
if (nt_a, nt_b) == ('N', 'N'):
# Skip sites with no most likely genotype
continue
sites += 1
sites_non_ref += 1
if nt_a == nt_b:
sites_homo_non_ref += 1
elif record.ref not in (nt_a, nt_b):
sites_het_two_non_ref += 1
else:
sites_het_one_non_ref += 1
else:
# Heterozygous for the reference allele
sites += 1
print
print "%i sites kept after filtering:" % (sites,)
print " % 10i homozygous sites containing the reference allele (%.2f%%)" % (sites - sites_non_ref, 100.0 * (sites - sites_non_ref) / float(sites))
print " % 10i heterozygous sites containing the reference and a non-reference allele (%.2f%%)" % (sites_het_one_non_ref, (100.0 * sites_het_one_non_ref) / sites)
print " % 10i homozygous sites containing a single non-reference allele (%.2f%%)" % (sites_homo_non_ref, (100.0 * sites_homo_non_ref) / sites)
print " % 10i heterozygous sites containing two different non-reference alleles (%.2f%%)" % (sites_het_two_non_ref, (100.0 * sites_het_two_non_ref) / sites)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| Python | 0 | |
4c5e4cb960a266482dac21eaeb0b568359c58b39 | Add py-backcall (#8701) | var/spack/repos/builtin/packages/py-backcall/package.py | var/spack/repos/builtin/packages/py-backcall/package.py | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyBackcall(PythonPackage):
    """Specifications for callback functions passed in to an API"""
    # Upstream project page and the sdist Spack downloads.
    homepage = "https://github.com/takluyver/backcall"
    url = "https://pypi.io/packages/source/b/backcall/backcall-0.1.0.tar.gz"
    # Second argument is the checksum Spack uses to verify the download.
    version('0.1.0', '87ce0c7839808e6a3427d57df6a792e7')
| Python | 0.000001 | |
7975ef9f34cc578de968e1a1c8e6f731c164641a | Create 1.5_countstrings.py | CrackingCodingInterview/1.5_countstrings.py | CrackingCodingInterview/1.5_countstrings.py | """
given a string, return a string counting all the occurences
of each character if the count > 1
"""
def compress(string_to_compress):
    """Run-length encode a string: each run of a repeated character becomes
    the character followed by its count (e.g. 'aabccc' -> 'a2b1c3').

    The compressed form is returned only when it is strictly shorter than
    the input; otherwise the original string is returned unchanged.
    Strings shorter than two characters are returned as-is.
    """
    # Fix: the original guard was missing its colon (SyntaxError).
    if len(string_to_compress) < 2:
        return string_to_compress
    groups = []
    previous_character = string_to_compress[0]
    counter = 1
    for character in string_to_compress[1:]:
        if character == previous_character:
            counter += 1
        else:
            # Run ended: record it and start counting the new character.
            groups.append(previous_character + str(counter))
            previous_character = character
            counter = 1
    # Flush the final run (previous_character equals the last character here).
    groups.append(previous_character + str(counter))
    result = ''.join(groups)
    return result if len(result) < len(string_to_compress) else string_to_compress
| Python | 0.001266 | |
c89cce1a47c1e379958d7cced624ec0317cd3407 | Add demo for non-blocking with poll(). | examples/demo3.py | examples/demo3.py | import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import logging
import threading
import xmpp2
import time
import select
from xmpp2 import XML
# non-blocking, poll example.
# Connection credentials for the demo account.
USERNAME = 'yourusername'
PASSWORD = 'yourpassword'
SERVER = 'example.com'

# Verbose logging overall, but keep the XML handler quieter.
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('xmpp2.xml.handler').setLevel(logging.INFO)

# Connect, authenticate, and announce presence with priority 1.
client = xmpp2.Client(SERVER, stream_log_level=xmpp2.LOG_NONE)
client.connect()
client.auth(USERNAME, password=PASSWORD)
client.write(XML.presence.add(XML.priority.add(1)))

# Switch to non-blocking mode and wait for readability via poll().
client.setblocking(False)
poller = select.poll()
poller.register(client, select.POLLIN)

while True:
    for descriptor, event in poller.poll():
        stanza = client.gen.next()
        if stanza:
            sys.stdout.write(stanza.pretty_print() + '\n')
| Python | 0 | |
bb1ce480184d4e78f121f9e473e58f47b80de53a | Create FirstLinuxFile.py | FirstLinuxFile.py | FirstLinuxFile.py | #!/usr/bin
| Python | 0 | |
be5db45702c01aadb5ac323cbb6b0ef53c5d1d4c | add mobility/debug.py | mobility/debug.py | mobility/debug.py | #!/usr/bin/python
#coding:utf-8
import numpy as np
import math
import sys
import os
import time
import matplotlib.pyplot as plt
from pprint import pprint
import matplotlib.animation as animation
import cPickle as pickle
from copy import deepcopy
def load_coordiantes(file_path):
    """Generator over the pickled list of coordinate snapshots stored in
    *file_path*: the file holds one pickled list, and each of its items is
    yielded in order."""
    with open(file_path, 'rb') as pickled:
        snapshots = pickle.load(pickled)
    for snapshot in snapshots:
        yield snapshot
def init2():
    """Fire off two backgrounded ntpq queries (peer list and association
    list) through the shell; output goes straight to the terminal."""
    # Implicit string concatenation yields the same command line the
    # original built with repeated "+": both queries run in the background.
    cmd = ("/usr/bin/ntpq -c peers & "
           "/usr/bin/ntpq -c assoc & ")
    os.system(cmd)
# One-shot guard for main()'s nested init() closure: init() flips this to 1
# on its first call so the patch/axes setup runs only once.
flag = 0
def main():
    """Replay pickled node coordinates as colored circles on a matplotlib
    figure, starting near a wall-clock time given on the command line."""
    try:
        start_time = float(sys.argv[1])
        #default time_unit 997
        time_unit = int(sys.argv[2])
    except:
        # NOTE(review): on missing/invalid args this usage string is built
        # but never printed, and start_time stays undefined, so the wait
        # loop below would raise NameError — confirm intended behavior.
        help_info = "Usage:%s <start_time> <time_unit(ms)>\n" % sys.argv[0]
    # One color per group of five nodes (see index/5 in init() below).
    colors = ['b', 'g', 'r', 'c', 'm' , 'y', 'k', 'slategrey', 'orange',
            'mediumblue', 'brown', 'orchid']
    # print(len(colors))
    file_path = './coordiantes_l'
    init2()
    coordiantes_iter = load_coordiantes(file_path)
    patchs = []
    # Figure set up as a 3000x1500 plot area with coarse ticks.
    fig = plt.figure()
    fig.set_dpi(100)
    fig.set_size_inches(7, 6.5)
    axes = plt.axes(xlim=(0, 3000), ylim=(0, 1500))
    axes.grid(True)
    # axes.set_xticks([0, 750, 1500, 2250, 3000])
    # axes.set_xticks(range(0, 3750, 375))
    # axes.set_yticks(range(0, 1750, 250))
    axes.set_xticks(range(0, 3750, 750))
    axes.set_yticks(range(0, 1750, 500))
    def init():
        # First call: build one circle patch per node from the first
        # snapshot; later calls just return the existing patches thanks to
        # the module-level `flag` guard.
        global flag
        if flag == 0:
            flag = 1
        else:
            return tuple(patchs)
        coordiantes = coordiantes_iter.next()
        # (Chinese: "executes only once")
        print "只执行一次"
        print time.time()
        for index, coordiante in enumerate(coordiantes):
            # Nodes are grouped in fives; each group shares a color.
            i = index/5
            # if i >0: break
            # coordiante = [int(e) for e in coordiante]
            patch = plt.Circle(coordiante, radius=50, color=colors[i],alpha=0.5)
            patchs.append(patch)
            axes.add_patch(patch)
        axes.axis('equal')
        axes.margins(0)
        return tuple(patchs)
    def animate(i):
        # Advance to the next snapshot; when the iterator is exhausted (or
        # .next() fails for any reason) terminate the process.
        try:
            coordiantes = coordiantes_iter.next()
        except:
            sys.exit(0)
        for index,coordiante in enumerate(coordiantes):
            patchs[index].center = coordiante
        return tuple(patchs)
    # Busy-wait until 5 seconds before the requested start time.
    print "Waiting for starting"
    while time.time() < (start_time - 5):
        time.sleep(0.1)
    print "Start now!!!"
    print time.time()
    # Animation loop left disabled; only the first frame is drawn below.
    #anim = animation.FuncAnimation(fig, animate,
    #                               init_func=init,
    #                               frames=360,
    #                               interval=time_unit,
    #                               blit=True)
    #plt.title('IPLAB Community Mobility Models', fontsize=10)
    init()
    plt.xlabel('1500m')
    plt.ylabel('3000m')
    plt.show()
if __name__ == '__main__':
main()
| Python | 0.000001 | |
f724f5b488f23a6ceb2314aa18933b5fac3f5aab | Add courseware migration. | lms/djangoapps/courseware/migrations/0013_auto_20191001_1858.py | lms/djangoapps/courseware/migrations/0013_auto_20191001_1858.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.25 on 2019-10-01 18:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Follows 0012; no schema changes, only the Meta-option resets below.
    dependencies = [
        ('courseware', '0012_adjust_fields'),
    ]
    operations = [
        # options={} clears any previously declared Meta options on these
        # two configuration models, restoring Django's defaults.
        migrations.AlterModelOptions(
            name='coursedynamicupgradedeadlineconfiguration',
            options={},
        ),
        migrations.AlterModelOptions(
            name='orgdynamicupgradedeadlineconfiguration',
            options={},
        ),
    ]
| Python | 0 | |
1e65555a08ff3ee1a06e92d9dd054abf3cfaf711 | Add a migration to update to final tree fields | media_tree/migrations/0003_alter_tree_fields.py | media_tree/migrations/0003_alter_tree_fields.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    dependencies = [
        ('media_tree', '0002_mptt_to_treebeard'),
    ]
    operations = [
        # Redefine the tree bookkeeping columns (depth/lft/rgt/tree_id) on
        # FileNode as indexed PositiveIntegerFields — the final field
        # definitions after the mptt -> treebeard conversion in 0002.
        migrations.AlterField(
            model_name='filenode',
            name='depth',
            field=models.PositiveIntegerField(db_index=True),
        ),
        migrations.AlterField(
            model_name='filenode',
            name='lft',
            field=models.PositiveIntegerField(db_index=True),
        ),
        migrations.AlterField(
            model_name='filenode',
            name='rgt',
            field=models.PositiveIntegerField(db_index=True),
        ),
        migrations.AlterField(
            model_name='filenode',
            name='tree_id',
            field=models.PositiveIntegerField(db_index=True),
        ),
    ]
| Python | 0 | |
fb5f6bf999b2cd8b674bc2c89f74f1413fc8ee1e | Add command line interface to play | command_line_tic_tac_toe.py | command_line_tic_tac_toe.py | #!/usr/bin/env python3
import cmd
from tictactoe.ai_player import AIPlayer
from tictactoe.human_player import HumanPlayer
from tictactoe.game_controller import GameController
from tictactoe.board_stringification import BoardStringification
class CommandLineTicTacToe(cmd.Cmd):
def __init__(self,
intro="Tic Tac Toe CLI. Type help for help.\n\nHuman. You are X. Good luck. Your move\n\n",
prompt="→ "):
cmd.Cmd.__init__(self)
self.intro = intro
self.prompt = prompt
self._human = HumanPlayer("X", self._notify_move)
self._ai = AIPlayer("O", "X")
self._controller = GameController(self._human, self._ai, self._won_notification, self._draw_notification)
def _won_notification(self):
print("Game over. It was won\n\n")
self._print_board()
self.do_reset(None)
def _draw_notification(self):
print("Game over. It was a draw\n\n")
self._print_board()
self.do_reset(None)
def do_end(self, args):
return True
def help_end(self):
print("End session")
do_EOF = do_end
help_EOF = help_end
def do_reset(self, args):
self.do_human_start(None)
def help_reset(self):
print("Reset the current game")
def do_move(self, args):
print("Move passed in is: {0}".format(args))
try:
self._controller.place_move(self._human, int(args))
except ValueError as e:
print("Sorry, can't make that move: {0}".format(e.args[0]))
def help_move(self):
print("move x: Make a move at position x on the board")
def do_show_board(self, args):
print("Current game state\n")
self._print_board()
def help_show_board(self):
print("Shows the current state of the game")
def do_ai_start(self, args):
self._controller = GameController(self._ai, self._human, self._won_notification, self._draw_notification)
self._controller.notify_play()
def help_ai_start(self):
print("Initiate a new game where the AI starts")
def do_human_start(self, args):
self._controller = GameController(self._human, self._ai, self._won_notification, self._draw_notification)
self._controller.notify_play()
def help_human_start(self):
print("Initiate a new game where the AI starts")
def _notify_move(self):
print("Human, your move:\n")
self._print_board()
def _print_board(self):
print(BoardStringification().print_game_positions(self._controller._board))
if __name__ == '__main__':
cli = CommandLineTicTacToe()
cli.cmdloop()
| Python | 0.000001 | |
f91db461b5745689ed356dd740ed7ff3b27524e4 | Add page base classes | feincms3/pages.py | feincms3/pages.py | from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.core.validators import RegexValidator
from django.db import models
from django.db.models import signals
from django.dispatch import receiver
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from mptt.models import MPTTModel, TreeForeignKey
from mptt.signals import node_moved
@python_2_unicode_compatible
class AbstractPage(MPTTModel):
parent = TreeForeignKey(
'self',
on_delete=models.CASCADE,
null=True, blank=True, related_name='children', db_index=True)
is_active = models.BooleanField(_('is active'), default=True)
title = models.CharField(_('title'), max_length=200)
slug = models.SlugField(_('slug'))
# Who even cares about MySQL
path = models.CharField(
_('path'), max_length=1000, blank=True, unique=True,
help_text=_('Generated automatically if \'static path\' is unset.'),
validators=[RegexValidator(
regex=r'^/(|.+/)$',
message=_('Path must start and end with a slash (/).'),
)])
static_path = models.BooleanField(_('static path'), default=False)
class Meta:
abstract = True
verbose_name = _('page')
verbose_name_plural = _('pages')
def __str__(self):
return self.title
def save(self, *args, **kwargs):
save_descendants = kwargs.pop('save_descendants', True)
if not self.static_path:
self.path = '{0}{1}/'.format(
self.parent.path if self.parent else '/',
self.slug)
super(AbstractPage, self).save(*args, **kwargs)
if save_descendants:
nodes = {self.pk: self}
for node in self.get_descendants():
# Assign already-saved instance
node.parent = nodes[node.parent_id]
# Descendants of inactive nodes cannot be active themselves.
if not node.parent.is_active:
node.is_active = False
node.save(save_descendants=False)
nodes[node.id] = node
save.alters_data = True
def get_absolute_url(self):
if self.path == '/':
return reverse('pages:root')
return reverse('pages:page', kwargs={'path': self.path.strip('/')})
@receiver(node_moved)
def handle_node_moved(instance, **kwargs):
print(instance, kwargs)
if not instance._meta.abstract and 'position' in kwargs:
# We were called from move_node, not from save()
instance.save()
class MenuMixin(models.Model):
menu = models.CharField(
_('menu'),
max_length=20,
blank=True,
)
class Meta:
abstract = True
@receiver(signals.class_prepared)
def _fill_in_menu_choices(sender, **kwargs):
if issubclass(sender, MenuMixin) and not sender._meta.abstract:
field = sender._meta.get_field('menu')
field.choices = sender.MENUS
field.default = field.choices[0][0]
class TemplatesMixin(models.Model):
template_key = models.CharField(_('template'), max_length=100)
class Meta:
abstract = True
@property
def template(self):
for t in self.TEMPLATES:
if t.key == self.template_key:
return t
else:
return None
@property
def regions(self):
return self.template.regions if self.template else []
@receiver(signals.class_prepared)
def _fill_template_choices(sender, **kwargs):
if issubclass(sender, TemplatesMixin) and not sender._meta.abstract:
field = sender._meta.get_field('template_key')
field.choices = [
(t.key, t.title) for t in sender.TEMPLATES
]
field.default = sender.TEMPLATES[0].key
| Python | 0.000001 | |
6d59e6d37d6f33f3513a1c6b1cb7d0d9062f391e | Create ClassesandInstances.py | EmployeeManagementSystem/Findings/ClassesandInstances.py | EmployeeManagementSystem/Findings/ClassesandInstances.py | #Creating and instantiating python classes
#classes - they allow us to logically group data(attributes) and functions (methods)
'''class Employee:
pass
print ("Class (Blueprint) vs Instance")
emp1 = Employee()
emp2 = Employee()
print (emp1)
print (emp2)
print ("instance variables contains data unique to each instance")
emp1.first ='Manoj'
emp1.last = 'Putchala'
emp1.email = 'manojkumar@gmail.com'
emp1.pay = 5000
emp2.first ='Lalitha'
emp2.last = 'Putchala'
emp2.email = 'Lalithakumar@gmail.com'
emp2.pay = 6000
print (emp1.email)
print (emp2.email)
'''
class Employee:
#Constructor or Initializer #Instance is called as self(by default should be used)
#Name, email and pay are attributes
def __init__(self,first,last, pay):
self.fname = first
self.lname = last
self.epay = pay
self.email = first+'.'+last+'@company.com'
def fullname(self): #method for code reuse #self should not be forgotten (one common mistake)
return '{} {}'.format(emp1.fname,self.lname)
emp1 = Employee('Manoj','Kumar',100000)
print (emp1.epay)
print (emp1.fullname())
print (Employee.fullname(emp1)) # another way of calling the instance using class
| Python | 0 | |
86618e2e30aa4a129041bd2b6b8c312b00de9ce5 | use separate modules for netlink | shadow/netlink.py | shadow/netlink.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import struct
import os
# Flag values
NLM_F_REQUEST = 1
NLM_F_MULTI = 2
NLM_F_ACK = 4
NLM_F_ECHO = 8
NLM_F_DUMP_INTR = 16
# Modifiers to GET request
NLM_F_ROOT = 0x100
NLM_F_MATCH = 0x200
NLM_F_ATOMIC = 0x400
NLM_F_DUMP = (NLM_F_ROOT | NLM_F_MATCH)
# Modifiers to NEW requests
NLM_F_REPLACE = 0x100
NLM_F_EXCL = 0x200
NLM_F_CREATE = 0x400
NLM_F_APPEND = 0x800
NETLINK_GENERIC = 16
NLMSG_ALIGNTO = 4
NLMSG_MIN_TYPE = 0x10
NLMSG_ERROR = 0x2
GENL_ID_CTRL = NLMSG_MIN_TYPE
class Nlmsghdr(object):
'''
The NetlinkMessage class handles the assembly of netlink headers.
'''
def __init__(self):
super(NetlinkMessage, self).__init__()
self.pid = os.getpid()
self.flags = NLM_F_REQUEST
self.genl_version = 0
def build_nlmsghdr(self, nlmsg_type, nlmsg_len):
seq = 0
nlmsg_len += struct.calcsize('IHHII')
hdr = [nlmsg_len, nlmsg_type, self.flags, seq, self.pid]
nlmsghdr = struct.pack('IHHII', *hdr)
return nlmsghdr
# Genetlink Controller command and attribute values
CTRL_CMD_UNSPEC = 0
CTRL_CMD_NEWFAMILY = 1
CTRL_CMD_DELFAMILY = 2
CTRL_CMD_GETFAMILY = 3
CTRL_CMD_NEWOPS = 4
CTRL_CMD_DELOPS = 5
CTRL_CMD_GETOPS = 6
CTRL_CMD_NEWMCAST_GRP = 7
CTRL_CMD_DELMCAST_GRP = 8
CTRL_CMD_GETMCAST_GRP = 9
__CTRL_CMD_MAX = 10
CTRL_ATTR_UNSPEC = 0
CTRL_ATTR_FAMILY_ID = 1
CTRL_ATTR_FAMILY_NAME = 2
CTRL_ATTR_VERSION = 3
CTRL_ATTR_HDRSIZE = 4
CTRL_ATTR_MAXATTR = 5
CTRL_ATTR_OPS = 6
CTRL_ATTR_MCAST_GROUPS = 7
__CTRL_ATTR_MAX = 8
CTRL_ATTR_OP_UNSPEC = 0
CTRL_ATTR_OP_ID = 1
CTRL_ATTR_OP_FLAGS = 2
__CTRL_ATTR_OP_MAX = 3
class Genlmsghdr(object):
def __init__(self):
pass
def build_genlmsghdr(self, cmd):
genlhdr = struct.pack('BBxx', cmd, self.genl_version)
genl_len = struct.calcsize('BBxx')
return genlhdr, genl_len
class Nlattr(object):
def __init__(self):
pass
def build_nlattr(self, nla_type, nla_data):
if isinstance(nla_data, str):
padding = self.calc_alignment(nla_data)
nla_len = struct.calcsize('HH') + padding
nla_hdr = struct.pack('HH', nla_len, nla_type)
data = struct.pack('%ds' % padding, nla_data)
nlattr = b''.join([nla_hdr, data])
elif isinstance(nla_data, int):
nla_len = struct.calcsize('HHI')
nla = [nla_len, nla_type, nla_data]
nlattr = struct.pack('HHI', *nla)
else:
return [], 0
return nlattr, nla_len
| Python | 0 | |
ec22c2d82ff4f045b992014d17ada850359c2ab6 | change folder layout | patterning_algorithm/color_halftone.py | patterning_algorithm/color_halftone.py | # This program takes a raster color image and produces its raster color halftone using patterning algorithm .
# Split the image into C, M, Y, K.
# Rotate each separated image by 0, 15, 30, and 45 degrees respectively.
# Take the half-tone of each image (dot size will be proportional to the intensity).
# Rotate back each half-toned image.
# Now you have your colour separated images. The rotation step reduces
# dot alignment issues (which would mess everything up), and things like Moire pattern
# effects will be reasonably minimized.
import numpy as np
from PIL import Image
from patterning_clustered_dot import intensity, patterning
def gcr(im, percentage):
    # Basic "Gray Component Replacement": convert to CMYK and move
    # `percentage` of the gray component (the common part of C, M and Y)
    # into the K channel, ie. for percentage=100,
    # (41, 100, 255, 0) >> (0, 59, 214, 41).
    cmyk_image = im.convert('CMYK')
    if not percentage:
        return cmyk_image
    bands = cmyk_image.split()
    pixels = [band.load() for band in bands]
    width, height = im.size
    for x in range(width):
        for y in range(height):
            # Gray component: smallest of C/M/Y, scaled by the percentage.
            gray = min(pixels[0][x, y], pixels[1][x, y], pixels[2][x, y]) * percentage / 100
            for channel in range(3):
                pixels[channel][x, y] = pixels[channel][x, y] - gray
            pixels[3][x, y] = gray
    # The per-band pixel edits above mutated the split bands in place.
    return Image.merge('CMYK', bands)
def color_halftoning_with_rotation(cmyk, increment_in_angle):
    # Halftone each of the four CMYK channels at a different screen angle
    # (0, increment, 2*increment, 3*increment degrees) to reduce dot
    # alignment artifacts, then rotate each result back and crop it to
    # three times the original channel size.
    dots = []
    for index in range(4):
        angle = index * increment_in_angle
        halftoned = Image.fromarray(patterning(cmyk[index].rotate(angle, expand=1))).convert('L')
        halftoned = halftoned.rotate(-angle, expand=1)
        width_half, height_half = halftoned.size
        # Center the crop window inside the rotated-back image.
        left = (width_half - cmyk[index].size[0] * 3) / 2
        top = (height_half - cmyk[index].size[1] * 3) / 2
        halftoned = halftoned.crop((left, top,
                                    left + cmyk[index].size[0] * 3,
                                    top + cmyk[index].size[1] * 3))
        dots.append(halftoned)
    return dots
def main():
    # Halftone a sample image: full gray-component replacement, then
    # screen the four CMYK channels at angles 15 degrees apart.
    source = Image.open('tree.jpg')
    separated = gcr(source, 100)
    dots = color_halftoning_with_rotation(separated.split(), 15)
    halftone = Image.merge('CMYK', dots)
    halftone.save("output.jpg")
    halftone.show()
if __name__=="__main__":
main()
| Python | 0.000001 | |
eced1499c4b82ce83f954a0364b02f2116a11326 | Add quick verification checker. | src/Scripts/verify.py | src/Scripts/verify.py | # Take a ground truth file produced by the verifier and a match file and compare them.
# Output is in fully normalized format, the same as VerifyCommand.cpp produces.
#
# TODO: remove hardcoded paths.
# file format:
# term,docId,[0-3]
# 0: true positive
# 1: false postive
# 2: false negative
# 3: unverified
from collections import defaultdict
import csv
# Load the ground truth: remember each (term, docId) pair whose flag marks
# it as a real match — '0' (true positive) or '2' (false negative).
true_matches = defaultdict(set)
with open("/tmp/groundTruth.csv") as truth_file:
    for record in csv.reader(truth_file):
        if len(record) == 3 and record[2] in ('0', '2'):
            true_matches[record[0]].add(record[1])

# Re-classify each unverified row against the ground truth.
# TODO: assert that the incoming value is '3'.
# TODO: handle false negatives. Could keep a counter of how many matches
# we've seen and compare, then iterate over the set in the rare instance
# we see a false negative.
with open("/tmp/unknowns.csv") as match_file:
    for record in csv.reader(match_file):
        if len(record) == 3:
            verdict = "0" if record[1] in true_matches[record[0]] else "1"
            print(record[0] + "," + record[1] + "," + verdict)
| Python | 0 | |
d16d66e520c5f80870957c63694708118d6f9f69 | Add module for MOC (music on console) | i3pystatus/moc.py | i3pystatus/moc.py | import re
from i3pystatus import IntervalModule
from i3pystatus import formatp
from i3pystatus.core.command import run_through_shell
from i3pystatus.core.util import TimeWrapper
class Moc(IntervalModule):
    """
    Display various information from MOC (music on console)
    .. rubric:: Available formatters
    * `{status}` — current status icon (paused/playing/stopped)
    * `{song_elapsed}` — song elapsed time (mm:ss format)
    * `{song_length}` — total song duration (mm:ss format)
    * `{artist}` — artist
    * `{title}` — title
    * `{album}` — album
    * `{tracknumber}` — tracknumber
    * `{file}` — file or url name
    """
    # Setting descriptions fixed: they previously referred to "cmus",
    # a copy-paste leftover from the cmus module.
    settings = (
        ('format', 'formatp string'),
        ('format_not_running', 'Text to show if moc is not running'),
        ('color', 'The color of the text'),
        ('color_not_running', 'The color of the text, when moc is not running'),
        ('status', 'Dictionary mapping status to output'),
    )
    color = '#ffffff'
    color_not_running = '#ffffff'
    format = '{status} {song_elapsed}/{song_length} {artist} - {title}'
    format_not_running = 'Not running'
    interval = 1
    # Icons keyed by mocp's State field, lower-cased in run().
    status = {
        'pause': '▷',
        'play': '▶',
        'stop': '◾',
    }
    on_leftclick = 'toggle_pause'
    on_rightclick = 'next_song'
    on_upscroll = 'next_song'
    on_downscroll = 'previous_song'
    def _moc_command(self, command):
        # Invoke the MOC client with a single long option, e.g. `mocp --info`.
        cmdline = 'mocp --{command}'.format(command=command)
        return run_through_shell(cmdline, enable_shell=True)
    def _query_moc(self):
        # Renamed from _query_cmus (copy-paste leftover): parse the
        # "Key: value" lines of `mocp --info` into a dict. Returns an empty
        # dict when mocp exits non-zero (e.g. the server is not running).
        response = {}
        cmd = self._moc_command('info')
        if not cmd.rc:
            for line in cmd.out.splitlines():
                key, _, value = line.partition(': ')
                response[key] = value
        return response
    def run(self):
        response = self._query_moc()
        if response:
            fdict = {
                'album': response.get('Album', ''),
                'artist': response.get('Artist', ''),
                'file': response.get('File', ''),
                'song_elapsed': TimeWrapper(response.get('CurrentSec', 0)),
                'song_length': TimeWrapper(response.get('TotalSec', 0)),
                'status': self.status[response['State'].lower()],
                'title': response.get('SongTitle', ''),
                # Leading digits of the Title field, e.g. "03 Foo" -> "03".
                'tracknumber': re.match(r'(\d*).*', response.get('Title', '')).group(1) or 0,
            }
            self.output = {
                'full_text': formatp(self.format, **fdict),
                'color': self.color,
            }
        else:
            self.output = {
                'full_text': self.format_not_running,
                'color': self.color_not_running,
            }
    def toggle_pause(self):
        self._moc_command('toggle-pause')
    def next_song(self):
        self._moc_command('next')
    def previous_song(self):
        self._moc_command('previous')
| Python | 0 | |
b1ef133904540b7f49e22ac52a0f844963be829e | Add basic test for discovery loader | nose2/tests/functional/test_discovery_loader.py | nose2/tests/functional/test_discovery_loader.py | from nose2.tests._common import FunctionalTestCase, support_file
from nose2 import events, loader, session
from nose2.plugins.loader.discovery import DiscoveryLoader
class Watcher(events.Plugin):
    """Minimal plugin that records every loadTestsFromModule hook event it
    receives, so the test below can assert that the hook fired.

    NOTE(review): instantiated as ``Watcher(session=...)`` even though
    __init__ takes no arguments — presumably the Plugin machinery consumes
    the keyword; confirm against nose2's plugin base class.
    """
    def __init__(self):
        self.called = []
    def loadTestsFromModule(self, event):
        self.called.append(event)
class DiscoveryFunctionalTest(FunctionalTestCase):
    def setUp(self):
        # Fresh session with the discovery loader plugin and a pluggable
        # test loader attached to it.
        self.session = session.Session()
        self.plug = DiscoveryLoader(session=self.session)
        self.loader = loader.PluggableTestLoader(self.session)
    def test_createTests_hook(self):
        """createTests on a package dir should yield a suite and fire the
        loadTestsFromModule hook at least once."""
        self.plug.start_dir = support_file('scenario/tests_in_package')
        watcher = Watcher(session=self.session)
        watcher.register()
        event = events.CreateTestsEvent(self.loader, None, None)
        result = self.session.hooks.createTests(event)
        assert isinstance(result, self.loader.suiteClass)
        assert watcher.called
| Python | 0 | |
2de3ab69c0725312663ecd94378c5b267a6c5ab1 | Add graph_data.py with a graph_ratings function | graph_data.py | graph_data.py | """Graph properties and patterns of the raw data
.. moduleauthor:: Jan Van Bruggen <jancvanbruggen@gmail.com>
"""
import matplotlib.pyplot as plt
def graph_ratings():
    """Show a bar chart of how many ratings fall on each rating value,
    sampled from the first 100,000 rows of data/mu/all.dta."""
    num_points = 1e5
    counts = rating_counts('data/mu/all.dta', num_points)
    values = sorted(counts)
    # Shift each bar left by half its width so it is centered on the value.
    bar_lefts = [value - 0.4 for value in values]
    bar_heights = [counts[value] for value in values]
    plt.bar(bar_lefts, bar_heights, width=0.8)
    plt.title('Number of Ratings by Rating ({:n} points)'.format(num_points))
    plt.xlabel('Rating')
    plt.xlim(-0.4, 5.4)
    plt.ylabel('Number of Ratings')
    plt.show()
def rating_counts(data_file_name, num_points=float('inf'), rating_column=3):
    """Tally how many times each rating value appears in the whitespace-
    delimited file *data_file_name*, reading at most *num_points* lines.

    Returns a dict mapping rating value -> occurrence count.
    """
    counts = {}
    lines_read = 0
    with open(data_file_name, 'r') as data_file:
        for line in data_file:
            lines_read += 1
            if lines_read > num_points:
                break
            rating = int(line.split()[rating_column])
            counts[rating] = counts.get(rating, 0) + 1
    return counts
def run():
    # Entry point: draw the ratings histogram.
    graph_ratings()
if __name__ == '__main__':
run()
| Python | 0.000053 | |
7a861623987225bd786301dfe6dea78173ddaf1a | Create generator.py | Testing_Hadoop/generator.py | Testing_Hadoop/generator.py | import time
start_time = time.time()

# Generate ~630 MB of test data: 9 batches of 10,000,000 copies of
# "Hadoop ". Build one batch string and write it 9 times instead of
# issuing 90 million one-word writes, and open the file in text mode so
# writing a str works on both Python 2 and 3 (the original "wb" + str
# write raises TypeError on Python 3). The with-statement guarantees the
# file is closed even on error; it also removes the shadowed loop index.
batch = "Hadoop " * 10000000
with open("hadoop_test_data.txt", "w") as fo:
    for _ in range(9):
        fo.write(batch)

print("--- %s seconds ---" % (time.time() - start_time))
| Python | 0.000001 | |
7c782954134bbfb7603af7cefd265f85afaf081e | add version.py back | grizli/version.py | grizli/version.py | # Autogenerated by Astropy-affiliated package grizli's setup.py on 2019-10-31 17:36:12 UTC
import datetime
import locale
import os
import subprocess
import warnings
__all__ = ['get_git_devstr']
def _decode_stdio(stream):
try:
stdio_encoding = locale.getdefaultlocale()[1] or 'utf-8'
except ValueError:
stdio_encoding = 'utf-8'
try:
text = stream.decode(stdio_encoding)
except UnicodeDecodeError:
# Final fallback
text = stream.decode('latin1')
return text
def update_git_devstr(version, path=None):
    """Refresh the git revision suffix in *version* when *path* lives in a
    git working copy; otherwise return *version* unchanged. This keeps the
    ``.dev<count>`` part of a development version string accurate."""
    try:
        # Quick probe: an empty string means "not inside a git repo".
        sha = get_git_devstr(sha=True, show_warning=False, path=path)
    except OSError:
        return version

    if not sha or 'dev' not in version:
        # Not in git, or already a true/release version: nothing to update.
        return version

    # Replace the trailing ".dev<count>" with the current revision count.
    base = version.split('.dev', 1)[0]
    count = get_git_devstr(sha=False, show_warning=False, path=path)
    return base + '.dev' + count
def get_git_devstr(sha=False, show_warning=True, path=None):
    """
    Determines the number of revisions in this repository.
    Parameters
    ----------
    sha : bool
        If True, the full SHA1 hash will be returned. Otherwise, the total
        count of commits in the repository will be used as a "revision
        number".
    show_warning : bool
        If True, issue a warning if git returns an error code, otherwise errors
        pass silently.
    path : str or None
        If a string, specifies the directory to look in to find the git
        repository. If `None`, the current working directory is used, and must
        be the root of the git repository.
        If given a filename it uses the directory containing that file.
    Returns
    -------
    devversion : str
        Either a string with the revision number (if `sha` is False), the
        SHA1 hash of the current commit (if `sha` is True), or an empty string
        if git version info could not be identified.
    """
    # Normalize *path*: default to the CWD; a file path means its directory.
    if path is None:
        path = os.getcwd()
    if not os.path.isdir(path):
        path = os.path.abspath(os.path.dirname(path))
    if sha:
        # Faster for getting just the hash of HEAD
        cmd = ['rev-parse', 'HEAD']
    else:
        cmd = ['rev-list', '--count', 'HEAD']
    # Helper: run ``git <cmd>`` in *path*, returning (returncode, stdout,
    # stderr) and turning the common failure modes into warnings rather
    # than exceptions.
    def run_git(cmd):
        try:
            p = subprocess.Popen(['git'] + cmd, cwd=path,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 stdin=subprocess.PIPE)
            stdout, stderr = p.communicate()
        except OSError as e:
            if show_warning:
                warnings.warn('Error running git: ' + str(e))
            return (None, b'', b'')
        if p.returncode == 128:
            if show_warning:
                warnings.warn('No git repository present at {0!r}! Using '
                              'default dev version.'.format(path))
            return (p.returncode, b'', b'')
        if p.returncode == 129:
            if show_warning:
                warnings.warn('Your git looks old (does it support {0}?); '
                              'consider upgrading to v1.7.2 or '
                              'later.'.format(cmd[0]))
            return (p.returncode, stdout, stderr)
        elif p.returncode != 0:
            if show_warning:
                warnings.warn('Git failed while determining revision '
                              'count: {0}'.format(_decode_stdio(stderr)))
            return (p.returncode, stdout, stderr)
        return p.returncode, stdout, stderr
    returncode, stdout, stderr = run_git(cmd)
    if not sha and returncode == 128:
        # git returns 128 if the command is not run from within a git
        # repository tree. In this case, a warning is produced above but we
        # return the default dev version of '0'.
        return '0'
    elif not sha and returncode == 129:
        # git returns 129 if a command option failed to parse; in
        # particular this could happen in git versions older than 1.7.2
        # where the --count option is not supported
        # Also use --abbrev-commit and --abbrev=0 to display the minimum
        # number of characters needed per-commit (rather than the full hash)
        cmd = ['rev-list', '--abbrev-commit', '--abbrev=0', 'HEAD']
        returncode, stdout, stderr = run_git(cmd)
        # Fall back on the old method of getting all revisions and counting
        # the lines
        if returncode == 0:
            return str(stdout.count(b'\n'))
        else:
            return ''
    elif sha:
        return _decode_stdio(stdout)[:40]
    else:
        return _decode_stdio(stdout).strip()
# This function is tested but it is only ever executed within a subprocess when
# creating a fake package, so it doesn't get picked up by coverage metrics.
def _get_repo_path(pathname, levels=None): # pragma: no cover
"""
Given a file or directory name, determine the root of the git repository
this path is under. If given, this won't look any higher than ``levels``
(that is, if ``levels=0`` then the given path must be the root of the git
repository and is returned if so.
Returns `None` if the given path could not be determined to belong to a git
repo.
"""
if os.path.isfile(pathname):
current_dir = os.path.abspath(os.path.dirname(pathname))
elif os.path.isdir(pathname):
current_dir = os.path.abspath(pathname)
else:
return None
current_level = 0
while levels is None or current_level <= levels:
if os.path.exists(os.path.join(current_dir, '.git')):
return current_dir
current_level += 1
if current_dir == os.path.dirname(current_dir):
break
current_dir = os.path.dirname(current_dir)
return None
# Values baked in at build time by setup.py, used as fallbacks when git
# metadata is unavailable at import time.
_packagename = "grizli"
_last_generated_version = "1.0.dev1353"
_last_githash = "9b6125d14a8934007645d49c07841bc1c24ee13f"
# Determine where the source code for this module
# lives. If __file__ is not a filesystem path then
# it is assumed not to live in a git repo at all.
if _get_repo_path(__file__, levels=len(_packagename.split('.'))):
    version = update_git_devstr(_last_generated_version, path=__file__)
    githash = get_git_devstr(sha=True, show_warning=False,
                             path=__file__) or _last_githash
else:
    # The file does not appear to live in a git repo so don't bother
    # invoking git
    version = _last_generated_version
    githash = _last_githash
# Public version metadata.
major = 1
minor = 0
bugfix = 0
version_info = (major, minor, bugfix)
release = False
timestamp = datetime.datetime(2019, 10, 31, 17, 36, 12)
debug = False
astropy_helpers_version = "3.2.2"
| Python | 0 | |
963aa3fd9830d1a4817a26a2e8a5676174e30d19 | Add new migration | planner/migrations/0005_auto_20150711_1117.py | planner/migrations/0005_auto_20150711_1117.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    dependencies = [
        ('planner', '0004_auto_20150616_1926'),
    ]
    operations = [
        # Rename Route.destination -> Route.end and Route.origin ->
        # Route.start; pure column renames, so existing data is preserved.
        migrations.RenameField(
            model_name='route',
            old_name='destination',
            new_name='end',
        ),
        migrations.RenameField(
            model_name='route',
            old_name='origin',
            new_name='start',
        ),
    ]
| Python | 0 | |
ea9b6920c88ac40a72aadd70199a52f27a1c097e | Create RespostaListar.py | backend/Models/Predio/RespostaListar.py | backend/Models/Predio/RespostaListar.py | from Framework.Resposta import Resposta
from Models.Predio.Predio import Predio as ModelPredio
class RespostaListar(Resposta):
    """Resposta (response) listing buildings: wraps each raw ``predio``
    record in a ``ModelPredio`` and exposes the list as ``corpo``."""
    def __init__(self, predios):
        # One ModelPredio per input record, in the original order.
        self.corpo = [ModelPredio(predio) for predio in predios]
| Python | 0 | |
87804aef17874339e7b58df0c3bcb29338fa412a | add country regions include Minsk | belarus_region_borders_include_minsk.py | belarus_region_borders_include_minsk.py | from _helpers import cursor_wrap, dump
@cursor_wrap
def main(cursor):
    # Select admin_level-4 regions contained in relation -59065 (the
    # country polygon — per the script/file name, Belarus) as GeoJSON.
    # The first SELECT returns five regions as-is; the UNION branch merges
    # relations -59752 and -59195 into a single geometry reported under id
    # -59752 (per the commit intent, folding the city of Minsk into its
    # surrounding region — confirm the IDs against OSM).
    sql = """
    SELECT r.osm_id, c.name AS country, r.name AS region, ST_AsGeoJSON(r.way)
    FROM osm_polygon c
    LEFT JOIN osm_polygon r ON ST_Contains(c.way, r.way)
    WHERE c.osm_id = -59065 AND r.admin_level = '4'
    AND r.osm_id IN (-59189, -59506, -59161, -59275, -59162)
    UNION
    SELECT -59752, FIRST(c.name) AS country, FIRST(r.name) AS region, ST_AsGeoJSON(ST_Union(r.way))
    FROM osm_polygon c
    LEFT JOIN osm_polygon r ON ST_Contains(c.way, r.way)
    WHERE c.osm_id = -59065 AND r.admin_level = '4'
    AND r.osm_id IN (-59752, -59195)
    """
    cursor.execute(sql)
    # Rows sorted by (country, region); dump() writes them with a header.
    dump(__file__, sorted(cursor.fetchall(), key=lambda item: item[1:3]),
         ('osmid', 'country', 'region', 'geojson'))
if __name__ == '__main__':
main()
| Python | 0.999999 | |
b5bc7827fb2452e82789129b918861157010c58e | Create pokebot.py | pokebot.py | pokebot.py | #!/usr/bin/python3
#
# Author: Luke
import time, ts3, sys, traceback
USER = 'serveradmin' # Query user
PASS = '' # Query Password
HOST = 'localhost' # Query Server-host
PORT = '10011' # Query Server-Port
SID = 1 # Serveradmin sid (dont touch)
def usage():
    """Print command-line usage and terminate the process.

    Exits with status 1 so shells and wrapper scripts can detect the
    argument error (the original exited 0, which signals success).
    """
    print('\n./Poke-bot.py <Name> <how many times> <Message>\n')
    sys.exit(1)
def Poke(ts3conn,target,timesMany,msg):
    """Find the client named *target* and poke it *timesMany* times with *msg*.

    Never returns normally: always terminates the process via sys.exit().
    """
    try:
        clientlist = ts3conn.clientlist()
        # Drop server-query clients (client_type == "1"); keep real users.
        clientlist = [client for client in clientlist \
            if client["client_type"] != "1"]
        for client in clientlist:
            clid = client['clid']
            nickname = client['client_nickname']
            if str(nickname) == str(target):
                print (' \nFound target',target,'\n\nPoking now!...\n')
                # Poke twice per second; clid identifies the target client.
                for i in range(int(timesMany)):
                    time.sleep(0.5)
                    ts3conn.clientpoke(clid=clid, msg=msg)
                # SystemExit is not a subclass of Exception, so the broad
                # handler below does not swallow these exits.
                sys.exit(0)
        sys.exit(0)
    except KeyboardInterrupt:
        print (' \nExiting...\n')
    except Exception:
        traceback.print_exc(file=sys.stdout)
        # NOTE(review): exits with status 0 even after printing an error
        # traceback -- confirm whether a non-zero status was intended.
        sys.exit(0)
def main(target,timesMany,message):
    """Connect to the TS3 server-query interface, log in, and run Poke()."""
    with ts3.query.TS3Connection(HOST,PORT) as ts3conn:
        try:
            ts3conn.login(client_login_name=USER, client_login_password=PASS)
            ts3conn.use(sid=SID)
            Poke(ts3conn,target,timesMany,message)
        except ts3.query.TS3QueryError as err:
            # Query error id 520 == invalid login credentials; other query
            # errors are silently ignored here.
            if err.resp.error["id"] == "520":
                print ('\nWrong Username Or Password!\n')
                sys.exit(0)
if __name__ == '__main__':
    try:
        if len(sys.argv) != 4:
            usage()
        # Validate that the repeat count parses as an integer before use.
        int(sys.argv[2])
        main(sys.argv[1],sys.argv[2],sys.argv[3])
    except ValueError:
        print ('\nSecond Arg \''+sys.argv[2]+'\' Must Be Integer Value!\n')
        usage()
| Python | 0.000005 | |
4f87a0e144bf738e523cd1f8d914f39090275fee | add review status to individuals | xbrowse_server/base/migrations/0008_individual_review_status.py | xbrowse_server/base/migrations/0008_individual_review_status.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-10-05 09:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the single-character ``review_status`` choice field to Individual."""

    dependencies = [
        ('base', '0007_auto_20160826_1327'),
    ]

    operations = [
        migrations.AddField(
            model_name='individual',
            name='review_status',
            # One-letter review codes; blank/null allowed, default empty.
            field=models.CharField(blank=True, choices=[(b'A', b'Accepted'), (b'E', b'Accepted - Exome'), (b'G', b'Accepted - Genome'), (b'R', b'Not Accepted'), (b'N', b'See Notes'), (b'H', b'Hold')], default=b'', max_length=1, null=True),
        ),
    ]
| Python | 0 | |
85b9d1eed3aea2ed56b85819f6e2269aef9dd128 | Add MemAvailable to default keys. | src/collectors/memory/memory.py | src/collectors/memory/memory.py | # coding=utf-8
"""
This class collects data on memory utilization
Note that MemFree may report no memory free. This may not actually be the case,
as memory is allocated to Buffers and Cache as well. See
[this link](http://www.linuxatemyram.com/) for more details.
#### Dependencies
* /proc/meminfo or psutil
"""
import diamond.collector
import diamond.convertor
import os
try:
import psutil
except ImportError:
psutil = None
# Subset of /proc/meminfo keys published by default; every key found in the
# file is published when the 'detailed' config option is enabled.
_KEY_MAPPING = [
    'MemAvailable',
    'MemTotal',
    'MemFree',
    'Buffers',
    'Cached',
    'Active',
    'Dirty',
    'Inactive',
    'Shmem',
    'SwapTotal',
    'SwapFree',
    'SwapCached',
    'VmallocTotal',
    'VmallocUsed',
    'VmallocChunk',
    'Committed_AS',
]
class MemoryCollector(diamond.collector.Collector):
    """Publish memory utilization metrics from /proc/meminfo, falling back
    to psutil on platforms without /proc (or when it is unreadable)."""

    PROC = '/proc/meminfo'

    def get_default_config_help(self):
        """Describe the collector-specific configuration options."""
        config_help = super(MemoryCollector, self).get_default_config_help()
        config_help.update({
            'detailed': 'Set to True to Collect all the nodes',
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(MemoryCollector, self).get_default_config()
        config.update({
            'path': 'memory',
            # Collect all the nodes or just a few standard ones?
            # Uncomment to enable
            # 'detailed': 'True'
        })
        return config

    def collect(self):
        """
        Collect memory stats.

        Returns True on success, None when no metrics could be gathered.
        """
        if os.access(self.PROC, os.R_OK):
            # Use a context manager so the handle is released even if read()
            # raises (the original left it open on exceptions) and avoid
            # shadowing the ``file`` builtin.
            with open(self.PROC) as proc_file:
                data = proc_file.read()

            for line in data.splitlines():
                try:
                    name, value, units = line.split()
                    name = name.rstrip(':')
                    value = int(value)

                    if (name not in _KEY_MAPPING
                            and 'detailed' not in self.config):
                        continue

                    for unit in self.config['byte_unit']:
                        value = diamond.convertor.binary.convert(value=value,
                                                                 oldUnit=units,
                                                                 newUnit=unit)
                        self.publish(name, value, metric_type='GAUGE')

                        # TODO: We only support one unit node here. Fix it!
                        break
                except ValueError:
                    # Lines that are not exactly "name value units" (e.g.
                    # "HugePages_Total: 0") are skipped.
                    continue
            return True
        else:
            if not psutil:
                self.log.error('Unable to import psutil')
                self.log.error('No memory metrics retrieved')
                return None

            phymem_usage = psutil.phymem_usage()
            virtmem_usage = psutil.virtmem_usage()
            units = 'B'

            for unit in self.config['byte_unit']:
                value = diamond.convertor.binary.convert(
                    value=phymem_usage.total, oldUnit=units, newUnit=unit)
                self.publish('MemTotal', value, metric_type='GAUGE')

                value = diamond.convertor.binary.convert(
                    value=phymem_usage.available, oldUnit=units, newUnit=unit)
                self.publish('MemAvailable', value, metric_type='GAUGE')

                value = diamond.convertor.binary.convert(
                    value=phymem_usage.free, oldUnit=units, newUnit=unit)
                self.publish('MemFree', value, metric_type='GAUGE')

                value = diamond.convertor.binary.convert(
                    value=virtmem_usage.total, oldUnit=units, newUnit=unit)
                self.publish('SwapTotal', value, metric_type='GAUGE')

                value = diamond.convertor.binary.convert(
                    value=virtmem_usage.free, oldUnit=units, newUnit=unit)
                self.publish('SwapFree', value, metric_type='GAUGE')

                # TODO: We only support one unit node here. Fix it!
                break

            return True
| # coding=utf-8
"""
This class collects data on memory utilization
Note that MemFree may report no memory free. This may not actually be the case,
as memory is allocated to Buffers and Cache as well. See
[this link](http://www.linuxatemyram.com/) for more details.
#### Dependencies
* /proc/meminfo or psutil
"""
import diamond.collector
import diamond.convertor
import os
try:
import psutil
except ImportError:
psutil = None
# Subset of /proc/meminfo keys published by default ('detailed' publishes all).
# NOTE(review): 'MemAvailable' is absent here although the psutil fallback in
# collect() publishes it -- confirm whether it should be listed as well.
_KEY_MAPPING = [
    'MemTotal',
    'MemFree',
    'Buffers',
    'Cached',
    'Active',
    'Dirty',
    'Inactive',
    'Shmem',
    'SwapTotal',
    'SwapFree',
    'SwapCached',
    'VmallocTotal',
    'VmallocUsed',
    'VmallocChunk',
    'Committed_AS',
]
class MemoryCollector(diamond.collector.Collector):
    """Publish memory utilization metrics from /proc/meminfo, with a psutil
    fallback for platforms without /proc."""

    PROC = '/proc/meminfo'

    def get_default_config_help(self):
        # Document the collector-specific 'detailed' option.
        config_help = super(MemoryCollector, self).get_default_config_help()
        config_help.update({
            'detailed': 'Set to True to Collect all the nodes',
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(MemoryCollector, self).get_default_config()
        config.update({
            'path': 'memory',
            # Collect all the nodes or just a few standard ones?
            # Uncomment to enable
            # 'detailed': 'True'
        })
        return config

    def collect(self):
        """
        Collect memory stats
        """
        if os.access(self.PROC, os.R_OK):
            # NOTE(review): plain open() without a context manager leaks the
            # handle if read() raises, and 'file' shadows the builtin.
            file = open(self.PROC)
            data = file.read()
            file.close()

            for line in data.splitlines():
                try:
                    name, value, units = line.split()
                    name = name.rstrip(':')
                    value = int(value)

                    if (name not in _KEY_MAPPING
                            and 'detailed' not in self.config):
                        continue

                    for unit in self.config['byte_unit']:
                        value = diamond.convertor.binary.convert(value=value,
                                                                 oldUnit=units,
                                                                 newUnit=unit)
                        self.publish(name, value, metric_type='GAUGE')

                        # TODO: We only support one unit node here. Fix it!
                        break
                except ValueError:
                    # Skip /proc/meminfo lines without a units column.
                    continue
            return True
        else:
            if not psutil:
                self.log.error('Unable to import psutil')
                self.log.error('No memory metrics retrieved')
                return None

            phymem_usage = psutil.phymem_usage()
            virtmem_usage = psutil.virtmem_usage()
            units = 'B'

            for unit in self.config['byte_unit']:
                value = diamond.convertor.binary.convert(
                    value=phymem_usage.total, oldUnit=units, newUnit=unit)
                self.publish('MemTotal', value, metric_type='GAUGE')

                # NOTE(review): 'MemAvailable' is published here but missing
                # from _KEY_MAPPING above -- confirm that is intended.
                value = diamond.convertor.binary.convert(
                    value=phymem_usage.available, oldUnit=units, newUnit=unit)
                self.publish('MemAvailable', value, metric_type='GAUGE')

                value = diamond.convertor.binary.convert(
                    value=phymem_usage.free, oldUnit=units, newUnit=unit)
                self.publish('MemFree', value, metric_type='GAUGE')

                value = diamond.convertor.binary.convert(
                    value=virtmem_usage.total, oldUnit=units, newUnit=unit)
                self.publish('SwapTotal', value, metric_type='GAUGE')

                value = diamond.convertor.binary.convert(
                    value=virtmem_usage.free, oldUnit=units, newUnit=unit)
                self.publish('SwapFree', value, metric_type='GAUGE')

                # TODO: We only support one unit node here. Fix it!
                break

            return True
        # Unreachable: both branches above return before this point.
        return None
| Python | 0 |
34bc4b9e5731c94ae4655deb338d67aa3f9a1f63 | Create project.py | project.py | project.py | from ggame import App, RectangleAsset, ImageAsset, SoundAsset, Sprite, Sound
from ggame import LineStyle, Color
# Window dimensions for the game.
SCREEN_WIDTH = 640
SCREEN_HEIGHT = 480

green = Color(0x00ff00, 1)
black = Color(0, 1)
noline = LineStyle(0, black)

# Full-screen green background rectangle.
bg_asset = RectangleAsset(SCREEN_WIDTH, SCREEN_HEIGHT, noline, green)
bg = Sprite(bg_asset, (0,0))

# Sounds (loaded once; the original script created these assets twice).
pew1_asset = SoundAsset("sounds/pew1.mp3")
pew1 = Sound(pew1_asset)
pop_asset = SoundAsset("sounds/reappear.mp3")
pop = Sound(pop_asset)

# A ball! This is already in the ggame-tutorials repository
ball_asset = ImageAsset("images/orb-150545_640.png")
ball = Sprite(ball_asset, (0, 0))
# Original image is too big. Scale it to 1/10 its original size
ball.scale = 0.1
ball.y = 200
# custom attributes: horizontal direction (+1/-1) and whether it is moving
ball.dir = 1
ball.go = True


def reverse(b):
    """Flip the sprite's travel direction and play the bounce sound."""
    b.dir *= -1
    pop.play()


# Set up function for handling screen refresh
def step():
    """Per-frame update: advance the ball and bounce off the side walls."""
    if ball.go:
        ball.x += ball.dir
        if ball.x + ball.width > SCREEN_WIDTH or ball.x < 0:
            # Undo the step that crossed the edge, then reverse direction.
            ball.x -= ball.dir
            reverse(ball)


# Handle the space key
def spaceKey(event):
    """Toggle ball motion on/off."""
    ball.go = not ball.go


# Handle the "reverse" key
def reverseKey(event):
    """Reverse the ball's direction on demand."""
    reverse(ball)


# Handle the mouse click
def mouseClick(event):
    """Teleport the ball to the click position with a sound effect."""
    ball.x = event.x
    ball.y = event.y
    pew1.play()


myapp = App(SCREEN_WIDTH, SCREEN_HEIGHT)
# Set up event handlers for the app
myapp.listenKeyEvent('keydown', 'space', spaceKey)
myapp.listenKeyEvent('keydown', 'r', reverseKey)
myapp.listenMouseEvent('click', mouseClick)
myapp.run(step)
| Python | 0.000001 | |
080df88609ac25eff0b4379e31acb63654d3314c | Create randfor.py | randfor.py | randfor.py | #!/usr/bin/env python
#This script performs randomforests on the blocks for the three variation of the method.
import sys
blocn=sys.argv[1]
min_samples_leaf=int(sys.argv[2])
import math
#The function evi for evidence is meant to make the result homogeneous to
#logistic regression. The if loop avoids having any infinity in the output as
#evidence is not defined for 0 or 1.
def evi(x):
    """Convert a probability into log-odds evidence, capped at +/-7."""
    # Guard the extremes first: log-odds diverge at 0 and 1, so clamp
    # anything outside (0.001, 0.999) to a finite cap.
    if x < 0.001:
        return -7
    if x > 0.999:
        return 7
    return math.log(x / (1 - x))
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier

# Haplotype matrices: after transposition each row is one haplotype; two
# consecutive rows belong to the same individual.
dischaps=pd.read_csv("discovery.phased."+str(blocn)+".haps",sep=" ",header=None)
validhaps=pd.read_csv("validation.phased."+str(blocn)+".haps",sep=" ",header=None)
# Drop the five leading variant-annotation columns.
dischaps=dischaps.ix[:,5:]
validhaps=validhaps.ix[:,5:]
dischaps=dischaps.T
validhaps=validhaps.T
dischaps=np.array(dischaps)
validhaps=np.array(validhaps)
# Phenotype labels duplicated per haplotype (the "double" label file).
label=pd.read_csv("discovery.double.label",header=None)
label=np.array(label)

#c refers to PH. Each haplotype is treated as an observation and then the evidence is combined
#to create a new variable.
rfc = RandomForestClassifier(n_estimators=500, max_features="auto",min_samples_leaf=min_samples_leaf,oob_score=True)
rfc.fit(dischaps,np.ravel(label))
# Out-of-bag class-1 probabilities -> evidence; sum the two haplotypes of
# each individual (even row + following odd row).
predc=rfc.oob_decision_function_
predc=predc[:,1]
predc=map(evi,predc)
predc=np.array([predc[i] for i in range(0,len(predc),2)]) +np.array([predc[i] for i in range(1,len(predc),2)])
predc=pd.DataFrame(predc)
predc.to_csv("c.disc.bloc"+str(blocn),na_rep='NA',sep=" ",line_terminator=" ",header=False,index=False)
validc=rfc.predict_proba(validhaps)
validc=validc[:,1]
validc=map(evi,validc)
validc=np.array([validc[i] for i in range(0,len(validc),2)])+np.array([validc[i] for i in range(1,len(validc),2)])
validc=pd.DataFrame(validc)
validc.to_csv("c.valid.bloc"+str(blocn),na_rep='NA',sep=" ",line_terminator=" ",header=False,index=False)

#To see the interest of using haplotypes, we take this information out and see what happens.
# Genotype matrices: element-wise sum of each individual's two haplotypes.
disc=np.array([dischaps[i,:] for i in range(0,len(dischaps),2)])+np.array([dischaps[i,:] for i in range(1,len(dischaps),2)])
valid=np.array([validhaps[i,:] for i in range(0,len(validhaps),2)])+np.array([validhaps[i,:] for i in range(1,len(validhaps),2)])
labelsimple=[label[i] for i in range(0,len(label),2)]

#b refers to PwoH
rfc = RandomForestClassifier(n_estimators=500, max_features="auto", min_samples_leaf=min_samples_leaf,oob_score=True)
rfc.fit(disc,np.ravel(labelsimple))
predb=rfc.oob_decision_function_
predb=predb[:,1]
predb=map(evi,predb)
predb=pd.DataFrame(predb)
predb.to_csv("b.disc.bloc"+str(blocn),na_rep='NA',sep=" ",line_terminator=" ",header=False,index=False)
validb=rfc.predict_proba(valid)
validb=validb[:,1]
validb=map(evi,validb)
validb=pd.DataFrame(validb)
validb.to_csv("b.valid.bloc"+str(blocn),na_rep='NA',sep=" ",line_terminator=" ",header=False,index=False)

#This is to try and capture dominace effect. We concatenate the two haplotypes twice (in the two possible orders)
#and we take the mean of the prediction. d refers to PHd
# swch maps every haplotype row index to its sibling (i+1 for even i,
# i-1 for odd i), pairing each haplotype with its partner.
swch=[1,-1]*len(disc)
swch=np.array(range(len(dischaps)))+swch
discd=np.concatenate((dischaps,dischaps[swch,:]),axis=1)
swch=[1,-1]*len(valid)
swch=np.array(range(len(validhaps)))+swch
validd=np.concatenate((validhaps,validhaps[swch,:]),axis=1)
# NOTE(review): discd/validd are built above but never used below -- the
# model is fit on dischaps and scored on validhaps again, so this "d"
# section duplicates the "c" computation. Presumably rfc.fit(discd, ...)
# and rfc.predict_proba(validd) were intended; confirm before relying on
# the PHd outputs.
rfc = RandomForestClassifier(n_estimators=500, max_features="auto", min_samples_leaf=min_samples_leaf,oob_score=True)
rfc.fit(dischaps,np.ravel(label))
predd=rfc.oob_decision_function_
predd=predd[:,1]
predd=map(evi,predd)
predd=np.array([predd[i] for i in range(0,len(predd),2)]) +np.array([predd[i] for i in range(1,len(predd),2)])
predd=pd.DataFrame(predd)
predd.to_csv("d.disc.bloc"+str(blocn),na_rep='NA',sep=" ",line_terminator=" ",header=False,index=False)
validd=rfc.predict_proba(validhaps)
validd=validd[:,1]
validd=map(evi,validd)
validd=np.array([validd[i] for i in range(0,len(validd),2)])+np.array([validd[i] for i in range(1,len(validd),2)])
validd=pd.DataFrame(validd)
validd.to_csv("d.valid.bloc"+str(blocn),na_rep='NA',sep=" ",line_terminator=" ",header=False,index=False)
| Python | 0.000008 | |
6a33fe22f3de00ada2650007731ff19803b60381 | Add script to compute efficiency from gsc parameters | projects/whydense/computation_table.py | projects/whydense/computation_table.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import click
import numpy as np
from tabulate import tabulate
from nupic.research.support import parse_config
# Convolution hyper-parameters: KERNEL_SIZE drives the output-size math in
# main(). STRIDE and PADDING are not referenced in this chunk -- presumably
# kept to document the network configuration.
STRIDE = 1
PADDING = 0
KERNEL_SIZE = 5
@click.command()
@click.option(
    "-c",
    "--config",
    metavar="FILE",
    type=open,
    default="gsc/experiments.cfg",
    show_default=True,
    help="your experiments config file",
)
@click.option(
    "-e",
    "--experiment",
    default=["denseCNN2", "sparseCNN2"],
    multiple=True,
    help="Selected 2 experiments to compare.",
)
@click.option(
    "-f",
    "--format",
    "tablefmt",
    help="Table format",
    type=click.Choice(choices=["grid", "latex"]),
    show_default=True,
    default="grid",
)
@click.option(
    "-l",
    "--list",
    "show_list",
    is_flag=True,
    help="show list of available experiments.",
)
def main(config, experiment, tablefmt, show_list):
    """Tabulate per-layer non-zero multiplies for a dense/sparse network
    pair and print the resulting computation-efficiency ratios."""
    assert len(experiment) == 2, "Select 2 experiments (denseCNN2, sparseCNN2)"
    configs = parse_config(config, experiment, globals_param=globals())
    if show_list:
        print("Experiments:", list(configs.keys()))
        return

    # Sort with dense configurations first
    configs = sorted(configs.items(),
                     key=lambda x: 0 if x[0].lower().startswith("dense") else 1)

    params_table = [
        [
            " ",
            "L1 non-zero multiplies",
            "L2 non-zero multiplies",
            "L3 non-zero multiplies",
            "Output non-zero multiplies"
        ]
    ]
    # Running denominators: after the first (dense) pass each holds the
    # dense multiply count, so the second (sparse) pass leaves
    # sparse/dense ratios here.
    l1_ratio = l2_ratio = l3_ratio = output_ratio = 1.0
    for name, params in configs:
        input_shape = params["input_shape"]
        input_c, height, width = input_shape

        # CNN configuration
        cnn_out_channels = params["cnn_out_channels"]
        cnn_percent_on = params["cnn_percent_on"]
        cnn_weight_sparsity = params["cnn_weight_sparsity"]

        # Linear configuration
        linear_n = params["linear_n"]
        linear_percent_on = params["linear_percent_on"]
        weight_sparsity = params["weight_sparsity"]

        # Compute L1 non-zero weights
        l1_out_c = cnn_out_channels[0]
        l1_w = input_c * l1_out_c * KERNEL_SIZE * KERNEL_SIZE
        l1_w = l1_w * cnn_weight_sparsity[0]

        # L1 multiplies = input * L1 weights
        l1_mul = np.prod(input_shape) * l1_w

        # L1 Output after pool (valid conv followed by 2x2 pooling)
        l1_out_width = (width - KERNEL_SIZE + 1) / 2
        l1_out_height = (height - KERNEL_SIZE + 1) / 2
        l1_out = [l1_out_c, l1_out_height, l1_out_width]

        # L1 activation after k-winner
        l1_nnz_out = np.prod(l1_out) * cnn_percent_on[0]

        # Compute L2 non-zero weights
        l2_out_c = cnn_out_channels[1]
        l2_w = l1_out_c * l2_out_c * KERNEL_SIZE * KERNEL_SIZE
        l2_w = l2_w * cnn_weight_sparsity[1]

        # L2 multiplies = L1 non-zero output * L2 weights
        l2_mul = l1_nnz_out * l2_w

        # L2 Output after pool
        l2_out_height = (l1_out[1] - KERNEL_SIZE + 1) / 2
        l2_out_width = (l1_out[2] - KERNEL_SIZE + 1) / 2
        l2_out = [l2_out_c, l2_out_height, l2_out_width]

        # L2 activation after k-winner
        l2_nnz_out = np.prod(l2_out) * cnn_percent_on[1]

        # Compute L3 non-zero weights
        l3_w = np.prod(l2_out) * linear_n[0]
        l3_w = l3_w * weight_sparsity[0]

        # L3 multiplies = l2 non-zero output * L3 weights
        l3_mul = l2_nnz_out * l3_w

        # L3 Output
        l3_out = linear_n[0]
        l3_nnz_out = l3_out * linear_percent_on[0]

        # Output layer multiplies = l3 non-zero output * weights
        output_w = l3_out * params["num_classes"]
        output_mul = l3_nnz_out * output_w

        # Compute gain ratio against previous configuration
        l1_ratio = l1_mul / l1_ratio
        l2_ratio = l2_mul / l2_ratio
        l3_ratio = l3_mul / l3_ratio
        output_ratio = output_mul / output_ratio

        params_table.append([name,
                             "{:,.0f}".format(l1_mul),
                             "{:,.0f}".format(l2_mul),
                             "{:,.0f}".format(l3_mul),
                             "{:,.0f}".format(output_mul)])

    # Efficiency row uses the ratios left by the final (sparse) iteration.
    params_table.append(["Computation Efficiency",
                         "{:.0f} x".format(1.0 / l1_ratio),
                         "{:.0f} x".format(1.0 / l2_ratio),
                         "{:.0f} x".format(1.0 / l3_ratio),
                         "{:.0f} x".format(1 / output_ratio)])

    print(tabulate(params_table, headers="firstrow", tablefmt=tablefmt,
                   stralign="center", floatfmt=",.0f"))


if __name__ == "__main__":
    main()
| Python | 0 | |
681cc0a4160373fe82de59946b52e0e21611af84 | Print out all links on a page | linkLister.py | linkLister.py | import requests
import re
# Prompt for a fully-qualified URL (Python 2: raw_input returns a str).
url = raw_input("Enter URL with http or https prefix : " )
print url
website= requests.get(url)
html = website.text
print html
# Each findall() match is a tuple (full URL, scheme) because the pattern
# contains two capture groups; the URL must be enclosed in double quotes.
linklist = re.findall('"((http|ftp)s?://.*?)"',html)
print linklist
for link in linklist:
    # link[0] is the full URL captured by the outer group.
    print link[0]
| Python | 0 | |
6d4c3b77c9f0b4889ad5265113d9a87a0dc88377 | Add space in beused | src/ggrc/converters/errors.py | src/ggrc/converters/errors.py | # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
# Import/export user-facing error and warning message templates.
# Callers fill the named placeholders with str.format().

WRONG_FILE_TYPE = (u"Line {line}: Wrong file type. Only .csv files are"
                   " supported. Please upload a .csv file.")

MISSING_COLUMN = (u"Line {line}: Missing mandatory column{s} {column_names},"
                  " when adding object.")

MISSING_CUSTOM_ATTRIBUTE_COLUMN = (u"Line {line}: Missing custom column"
                                   " {column_name}, when adding object.")

WRONG_OBJECT_TYPE = "Line {line}: Unknown object type '{object_name}'"

UNKNOWN_COLUMN = (u"Line {line}: Attribute '{column_name}' does not"
                  " exist. Column will be ignored.")

DELETE_UNKNOWN_OBJECT = (u"Line {line}: Object '{slug}' does not exist, so it"
                         " can't be deleted.")

# Fixed: the two fragments previously joined as "exists.Record".
DUPLICATE_VALUE = (u"Line {line}: {column_name} '{title}' already exists."
                   " Record will be ignored.")

DUPLICATE_VALUE_IN_CSV = (u"Lines {line_list} have same {column_name}"
                          " '{value}'. Line{s} {ignore_lines} will be"
                          " ignored.")

MAP_UNMAP_CONFLICT = (u"Line {line}: Object '{slug}' scheduled for mapping and"
                      " unmapping at the same time. Mapping rule update will"
                      " be ignored.")

UNKNOWN_OBJECT = (u"Line {line}: {object_type} '{slug}' doesn't exist, so it"
                  " can't be mapped/unmapped.")

# Fixed: the two fragments previously joined as "multiplespaces".
WHITESPACE_WARNING = (u"Line {line}: Field {column_name} contains multiple"
                      " spaces together, that will be merged into one.")

UNKNOWN_USER_WARNING = (u"Line {line}: Specified user '{email}' does not exist."
                        " That user will be ignored.")

UNKNOWN_USER_ERROR = (u"Specified user '{email}' does not exist. That user will"
                      " be ignored.")

OWNER_MISSING = (u"Line {line}: Owner field does not contain a valid owner."
                 " You will be assigned as object owner.")

WRONG_MULTI_VALUE = (u"Line {line}: {column_name} contains invalid line. The"
                     " value '{value}' will be ignored.")

WRONG_VALUE = (u"Line {line}: {column_name} contains invalid data. The value"
               " will be ignored.")

WRONG_VALUE_ERROR = (u"Line {line}: {column_name} contains invalid data. The"
                     " line will be ignored.")

WRONG_REQUIRED_VALUE = (u"Line {line}: Required field {column_name} contains"
                        " invalid data '{value}'. The default value will be"
                        " used.")

MISSING_VALUE_ERROR = (u"Line {line}: Field {column_name} is required. The line"
                       " will be ignored.")

PERMISSION_ERROR = (u"Line {line}: You don't have permission to update/delete"
                    " this record.")

# MAPPING_PERMISSION_ERROR was previously bound twice; only the second
# binding was ever visible, so the dead first definition has been removed.
MAPPING_PERMISSION_ERROR = (u"Line {line}: You don't have permission to update"
                            " mappings for {object_type}: {title} ({slug}).")

DELETE_NEW_OBJECT_ERROR = (u"Line {line}: Tried to create and delete the same"
                           " object {object_type}: {slug} in one import.")

DELETE_CASCADE_ERROR = (u"Line {line}: Cannot delete object {object_type}:"
                        " {slug} without deleting other objects")

UNKNOWN_ERROR = "Line {line}: Import failed due to unknown error."
| # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
# Older revision of the import/export message catalogue.
WRONG_FILE_TYPE = (u"Line {line}: Wrong file type. Only .csv files are"
                   " supported. Please upload a .csv file.")

MISSING_COLUMN = (u"Line {line}: Missing mandatory column{s} {column_names},"
                  " when adding object.")

MISSING_CUSTOM_ATTRIBUTE_COLUMN = (u"Line {line}: Missing custom column"
                                   " {column_name}, when adding object.")

WRONG_OBJECT_TYPE = "Line {line}: Unknown object type '{object_name}'"

UNKNOWN_COLUMN = (u"Line {line}: Attribute '{column_name}' does not"
                  " exist. Column will be ignored.")

DELETE_UNKNOWN_OBJECT = (u"Line {line}: Object '{slug}' does not exist, so it"
                         " can't be deleted.")

# NOTE(review): fragments join as "exists.Record" -- missing space.
DUPLICATE_VALUE = (u"Line {line}: {column_name} '{title}' already exists."
                   "Record will be ignored.")

DUPLICATE_VALUE_IN_CSV = (u"Lines {line_list} have same {column_name}"
                          " '{value}'. Line{s} {ignore_lines} will be"
                          " ignored.")

MAP_UNMAP_CONFLICT = (u"Line {line}: Object '{slug}' scheduled for mapping and"
                      " unmapping at the same time. Mapping rule update will"
                      " be ignored.")

UNKNOWN_OBJECT = (u"Line {line}: {object_type} '{slug}' doesn't exist, so it"
                  " can't be mapped/unmapped.")

# NOTE(review): fragments join as "multiplespaces" -- missing space.
WHITESPACE_WARNING = (u"Line {line}: Field {column_name} contains multiple"
                      "spaces together, that will be merged into one.")

UNKNOWN_USER_WARNING = (u"Line {line}: Specified user '{email}' does not exist."
                        " That user will be ignored.")

UNKNOWN_USER_ERROR = (u"Specified user '{email}' does not exist. That user will"
                      " be ignored.")

OWNER_MISSING = (u"Line {line}: Owner field does not contain a valid owner."
                 " You will be assigned as object owner.")

WRONG_MULTI_VALUE = (u"Line {line}: {column_name} contains invalid line. The"
                     " value '{value}' will be ignored.")

WRONG_VALUE = (u"Line {line}: {column_name} contains invalid data. The value"
               " will be ignored.")

WRONG_VALUE_ERROR = (u"Line {line}: {column_name} contains invalid data. The"
                     " line will be ignored.")

# NOTE(review): fragments join as "beused." -- missing space.
WRONG_REQUIRED_VALUE = (u"Line {line}: Required field {column_name} contains"
                        " invalid data '{value}'. The default value will be"
                        "used.")

MISSING_VALUE_ERROR = (u"Line {line}: Field {column_name} is required. The line"
                       " will be ignored.")

# NOTE(review): MAPPING_PERMISSION_ERROR is bound twice; this first value
# is dead -- the definition further below overwrites it.
MAPPING_PERMISSION_ERROR = (u"Line {line}: You don't have permission to"
                            " map/unmap this record. Value {value} will be"
                            " ignored.")

PERMISSION_ERROR = (u"Line {line}: You don't have permission to update/delete"
                    " this record.")

MAPPING_PERMISSION_ERROR = (u"Line {line}: You don't have permission to update"
                            " mappings for {object_type}: {title} ({slug}).")

DELETE_NEW_OBJECT_ERROR = (u"Line {line}: Tried to create and delete the same"
                           " object {object_type}: {slug} in one import.")

DELETE_CASCADE_ERROR = (u"Line {line}: Cannot delete object {object_type}:"
                        " {slug} without deleting other objects")

UNKNOWN_ERROR = "Line {line}: Import failed due to unknown error."
| Python | 0.00469 |
51f8b228ff1096769a06b47d026e81a166503a82 | add missing unit tests for previous commit | pymatgen/util/tests/test_decorators.py | pymatgen/util/tests/test_decorators.py | import unittest
from pymatgen.util.decorators import lru_cache
class TestLRUCache(unittest.TestCase):
    """Exercise the lru_cache decorator on plain functions and on methods."""

    def test_function(self):
        # Cache of size 2: a third distinct key evicts the least recently
        # used entry, which forces extra misses below.
        @lru_cache(2)
        def cached_func(a, b):
            return a + b

        #call a few times to get some stats
        self.assertEqual(cached_func(1, 2), 3)
        self.assertEqual(cached_func(3, 2), 5)
        self.assertEqual(cached_func(3, 2), 5)
        self.assertEqual(cached_func(1, 2), 3)
        self.assertEqual(cached_func(4, 2), 6)
        self.assertEqual(cached_func(4, 2), 6)
        self.assertEqual(cached_func(3, 2), 5)
        self.assertEqual(cached_func(1, 2), 3)

        # 3 hits / 5 misses given the size-2 eviction order above.
        self.assertEqual(cached_func.cache_info().hits, 3)
        self.assertEqual(cached_func.cache_info().misses, 5)

    def test_class_method(self):
        # The cache must key on the bound instance: the same argument used
        # through two different instances counts as two distinct entries.
        class TestClass():
            @lru_cache(10)
            def cached_func(self, x):
                return x

        a = TestClass()
        b = TestClass()
        self.assertEqual(a.cached_func(1), 1)
        self.assertEqual(b.cached_func(2), 2)
        self.assertEqual(b.cached_func(3), 3)
        self.assertEqual(a.cached_func(3), 3)
        self.assertEqual(a.cached_func(1), 1)

        # Only the repeated a.cached_func(1) call is a hit.
        self.assertEqual(a.cached_func.cache_info().hits, 1)
        self.assertEqual(a.cached_func.cache_info().misses, 4)
| Python | 0 | |
3554160654a1cb8e7000ebeea06aecdabc91af8e | Create JustPremium.py | module/plugins/hooks/JustPremium.py | module/plugins/hooks/JustPremium.py | # -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: mazleu
"""
from module.plugins.Hook import Hook
from module.plugins.Account import Account
from module.plugins.Hoster import Hoster
class JustPremium(Hook):
    """pyLoad hook: when a batch of links contains at least one premium
    hoster link, drop the non-premium links from the batch."""
    __name__ = "JustPremium"
    __version__ = "0.15"
    __description__ = "If you add multiple links with at least one premium hoster link, all non premium links get removed"
    __config__ = [("activated", "bool", "Activated", "False"),
                  ("freehosters","bool", "Allow all freehosters and other unknown sites", "false"),
                  ("nicehoster", "str", "unblock this hosters (comma seperated)", "Zippyshare.com")]
    __author_name__ = ("mazleu")
    __author_mail__ = ("mazleica@gmail.com")

    event_list = ["linksAdded"]

    def coreReady(self) :
        """Build the module-level 'hosts' (premium) and 'badhosts'
        (non-premium) comma-separated hoster lists.

        NOTE(review): this parses the str() repr of getAccountInfos()
        instead of walking the returned structure -- extremely fragile;
        confirm it still matches the current repr format.
        """
        accs=str(self.core.accountManager.getAccountInfos())
        global badhosts
        global hosts
        hosts = ""
        # Hosters with account data appear as "name': [{...}]" in the repr;
        # cut each such section out and collect its name as premium.
        while "[{" in accs:
            startid=accs.rfind("[], ", 0, accs.find("[{"))+2
            endid=accs.find("}]",startid)+2
            hosts=hosts+","+accs[startid+3:accs.find("'",startid+3)]
            accs=accs[0:startid]+accs[endid:]
        # What remains are hosters with empty account lists -> non-premium.
        badhosts=accs.replace("': [], '",",")[2:-6]
        hosts=hosts[1:]
        # User-whitelisted hosters are treated as premium too.
        hosts=hosts+","+self.getConfig("nicehoster")
        self.logDebug("good hosts:",hosts)
        self.logDebug("bad hosts:",badhosts)

    def filterLinks(self, t):
        """Remove non-premium links from the link list *t* in place, but
        only when at least one premium-hoster link is present."""
        links = self.core.api.checkURLs(t)
        hosterlist =""  # NOTE(review): appears unused -- confirm.
        bhosters = [x.strip() for x in badhosts.split(",")]
        ghosters = [x.strip() for x in hosts.split(",")]
        premhoster = False
        for hoster in links:
            self.logDebug(hoster)
            if hoster in ghosters:
                premhoster = True

        if premhoster :
            for hoster in links:
                if self.getConfig("freehosters"):
                    # Permissive mode: drop only known non-premium hosters;
                    # unknown/free hosters are kept.
                    if hoster in bhosters:
                        for link in links[hoster]:
                            t.remove(link)
                            self.logDebug("removed link '%s'because hoster was: '%s' " % (link,hoster))
                else:
                    # Strict mode: drop everything that is not premium.
                    if not hoster in ghosters:
                        for link in links[hoster]:
                            t.remove(link)
                            self.logDebug("removed link '%s'because hoster was: '%s' " % (link,hoster))

    def linksAdded(self, links, pid):
        # Hook entry point: filter freshly added package links in place.
        self.filterLinks(links)
| Python | 0 | |
61a6f6468462ed5db6c8e6c55bf29f0c503ff899 | add solution for H-Index | algorithms/hIndex/hIndex.py | algorithms/hIndex/hIndex.py | class Solution(object):
def hIndex(self, citations):
    """Return the h-index: the largest h such that at least h papers
    have h or more citations each.

    :type citations: List[int]
    :rtype: int

    Counting-sort approach, O(n) time and O(n) space. Unlike the
    previous one-liner, it does not depend on ``collections`` and
    ``reduce``, which were never imported in this module.
    """
    n = len(citations)
    # Bucket-count citations, capping at n: any count >= n contributes
    # the same as exactly n toward the h-index.
    counts = [0] * (n + 1)
    for c in citations:
        counts[min(c, n)] += 1
    # Scan from the highest possible h downward; the first h where the
    # number of papers with >= h citations reaches h is the answer.
    papers = 0
    for h in range(n, -1, -1):
        papers += counts[h]
        if papers >= h:
            return h
    return 0
| Python | 0.000002 | |
c2f0f5184665250949c32d16db0b521c357e3aa7 | Add solution to linkedListCycle problem. | python/src/linkedListCycle/linkedListCycle.py | python/src/linkedListCycle/linkedListCycle.py | # Given a linked list, determine if it has a cycle in it.
# Follow up:
# Can you solve it without using extra space?
# Definition for singly-linked list.
class ListNode:
    def __init__(self, x):
        # Node payload and pointer to the next node (None marks list end).
        self.val = x
        self.next = None
class Solution:
    # @param head, a ListNode
    # @return a boolean
    def hasCycle0(self, head):
        """Detect a cycle with a visited list: O(n^2) time, O(n) space."""
        visited = []
        while head is not None:
            if head in visited:
                return True
            visited.append(head)
            head = head.next
        return False

    def hasCycle1(self, head):
        """Floyd's tortoise-and-hare, iterative: O(n) time, O(1) space."""
        if head is None or head.next is None:
            return False
        tortise = head.next
        hare = head.next.next
        while hare is not None and hare.next is not None:
            if tortise == hare:
                return True
            tortise = tortise.next
            hare = hare.next.next
        return False

    def hasCycle(self, head):
        """Floyd cycle detection via the tortoise/hare helper:
        O(n) time, O(1) space."""
        if head is None or head.next is None:
            return False
        return self.hasCycleRecurse(head.next, head.next.next)

    def hasCycleRecurse(self, tortise, hare):
        """Advance the tortoise/hare pair until they meet (cycle) or the
        hare runs off the end (no cycle).

        Implemented iteratively: the previous recursive version used one
        stack frame per step, so it was O(n) memory (contradicting its
        stated O(1) claim) and could overflow the call stack on long
        acyclic lists.
        """
        while hare is not None and hare.next is not None:
            if tortise == hare:
                return True
            tortise = tortise.next
            hare = hare.next.next
        return False
| Python | 0 | |
2ab5d0bfdfe90279f3fffeeb51882cdbcb4e9135 | test genesis tests | tests/unit/modules/genesis_test.py | tests/unit/modules/genesis_test.py | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Rupesh Tare <rupesht@saltstack.com>`
'''
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
from salt.modules import genesis
# Globals
# The salt loader normally injects these dunder dicts; stub them out so the
# genesis module can be imported and patched in isolation during tests.
genesis.__grains__ = {}
genesis.__salt__ = {}
genesis.__context__ = {}
genesis.__opts__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class GenesisTestCase(TestCase):
    '''
    Test cases for salt.modules.genesis
    '''
    def test_bootstrap(self):
        '''
        Test for Create an image for a specific platform.
        '''
        # An exception raised by file.mkdir must surface as an
        # {'Error': ...} return value rather than propagate.
        mock = MagicMock(return_value=False)
        with patch.dict(genesis.__salt__, {'file.directory_exists': mock}):
            mock = MagicMock(side_effect=Exception('foo'))
            with patch.dict(genesis.__salt__, {'file.mkdir': mock}):
                self.assertEqual(genesis.bootstrap('platform', 'root'),
                                 {'Error': "Exception('foo',)"})

        # Each packaging flavour dispatches to its dedicated helper.
        with patch.object(genesis, '_bootstrap_yum', return_value='A'):
            self.assertEqual(genesis.bootstrap('rpm', 'root', 'dir1'), 'A')

        with patch.object(genesis, '_bootstrap_deb', return_value='A'):
            self.assertEqual(genesis.bootstrap('deb', 'root', 'dir1'), 'A')

        with patch.object(genesis, '_bootstrap_pacman', return_value='A'):
            self.assertEqual(genesis.bootstrap('pacman', 'root', 'dir1'), 'A')

    def test_avail_platforms(self):
        '''
        Test for Return which platforms are available
        '''
        # 'deb' tooling is expected to be absent in the test environment.
        self.assertFalse(genesis.avail_platforms()['deb'])

    def test_pack(self):
        '''
        Test for Pack up a directory structure, into a specific format
        '''
        with patch.object(genesis, '_tar', return_value='tar'):
            self.assertEqual(genesis.pack('name', 'root'), None)

    def test_unpack(self):
        '''
        Test for Unpack an image into a directory structure
        '''
        with patch.object(genesis, '_untar', return_value='untar'):
            self.assertEqual(genesis.unpack('name', 'root'), None)


if __name__ == '__main__':
    from integration import run_tests
    run_tests(GenesisTestCase, needs_daemon=False)
| Python | 0.000001 | |
4ec7abe5df2bdd4a68528fc9af14288b57fd72cc | add integration utest on Session | tests_with_openerp/test_session.py | tests_with_openerp/test_session.py | from unittest import TestCase
from anybox.recipe.odoo.runtime.session import Session
from openerp.tests.common import get_db_name
class SessionTestCase(TestCase):
    """Integration tests for Session against a live Odoo database."""

    def setUp(self):
        super(SessionTestCase, self).setUp()
        # parse_config=False: the Odoo configuration is assumed to have
        # been loaded already by the test launcher.
        self.session = Session(None, None, parse_config=False)

    def open_session(self):
        # Open the session on the database provided by the test harness.
        self.session.open(db=get_db_name())

    def test_env_after_install_module(self):
        # The v8-style `env` lookup must keep working both before and
        # after install_modules() runs (checked by the repeated assert).
        self.open_session()
        self.assertAdminPresentWithV8API()
        self.session.install_modules(['decimal_precision'])
        self.assertAdminPresentWithV8API()

    def assertAdminPresentWithV8API(self):
        # Custom assertion: the admin user is reachable via the v8 API.
        self.assertEqual(
            u"Administrator",
            self.session.env['res.users'].search([('login', '=', 'admin')]).name
        )
| Python | 0 | |
da1bda146b4762bc572cb28da30cfb09b1d083aa | add hikvision (#243) | netdisco/discoverables/hikvision.py | netdisco/discoverables/hikvision.py | """Discover Hikvision cameras."""
from . import MDNSDiscoverable
class Discoverable(MDNSDiscoverable):
    """Add support for discovering Hikvision cameras."""

    def __init__(self, nd):
        """Initialize Hikvision camera discovery."""
        # Hikvision cameras advertise themselves as plain HTTP services
        # over mDNS; filtering happens by device name in get_entries().
        super(Discoverable, self).__init__(nd, '_http._tcp.local.')

    def get_entries(self):
        """Return mDNS entries matched by the device name 'HIKVISION'
        (exact matching semantics live in find_by_device_name)."""
        return self.find_by_device_name('HIKVISION')
| Python | 0 | |
273ab1b5f402e09a6f42fcfdb985fdf2dfa6b3ec | add test for program confirmation | web/impact/impact/tests/test_user_confirm_participation_view.py | web/impact/impact/tests/test_user_confirm_participation_view.py | from unittest.mock import patch
from django.urls import reverse
from accelerator.tests.contexts.context_utils import get_user_role_by_name
from accelerator.models import UserRole
from accelerator.tests.factories import (
ProgramFactory,
ProgramRoleFactory,
ProgramRoleGrantFactory,
UserFactory,
)
from impact.tests.api_test_case import APITestCase
from impact.v1.views import UserProgramConfirmationDetailView
from impact.v1.views.user_program_confirmation_detail_view import INVALID_INPUT_ERROR
# Dotted path of the project email backend, patched in via
# self.settings() in the email test below.
IMPACT_BACKEND_PATH = 'impact.impact_email_backend.ImpactEmailBackend'


class TestUserConfirmParticipationView(APITestCase):
    """Tests for the per-user program participation confirmation view.

    GET returns the user's confirmed/deferred program pks; POST moves
    programs between the two states (each state clears the other).
    """

    def setUp(self):
        # One program offering both a MENTOR and a DEFERRED_MENTOR role,
        # so individual tests can grant either participation state.
        self.program = ProgramFactory()
        ProgramRoleFactory(
            user_role=get_user_role_by_name(UserRole.MENTOR),
            program=self.program
        )
        ProgramRoleFactory(
            user_role=get_user_role_by_name(UserRole.DEFERRED_MENTOR),
            program=self.program
        )

    def test_user_get_program_participation_status(self):
        # GET reports one program per state, keyed 'confirmed'/'deferred'.
        user = self.basic_user()
        deferred_program = _get_programrolegrant_for_role(
            UserRole.DEFERRED_MENTOR, user).program_role.program
        confirmed_program = _get_programrolegrant_for_role(
            UserRole.MENTOR, user).program_role.program
        with self.login(email=user.email):
            url = reverse(UserProgramConfirmationDetailView.view_name,
                          args=[user.pk])
            response = self.client.get(url)
            expected = {
                'confirmed': [confirmed_program.pk],
                'deferred': [deferred_program.pk],
            }
            self.assertEqual(response.data['program_confirmation'], expected)

    def test_user_post_confirmed_participation(self):
        # POSTing a pk under 'confirmed' grants the MENTOR role.
        user = self.basic_user()
        with self.login(email=user.email):
            url = reverse(UserProgramConfirmationDetailView.view_name,
                          args=[user.pk])
            self.client.post(url, {
                'confirmed': [self.program.pk, ],
            })
            expected = _get_user_programs_for_role(
                user, UserRole.MENTOR)
            self.assertIn(self.program.pk, expected)

    def test_user_post_deferred_participation(self):
        # POSTing a pk under 'deferred' grants the DEFERRED_MENTOR role.
        user = self.basic_user()
        with self.login(email=user.email):
            url = reverse(UserProgramConfirmationDetailView.view_name,
                          args=[user.pk])
            self.client.post(url, {
                'deferred': [self.program.pk],
            })
            expected = _get_user_programs_for_role(
                user, UserRole.DEFERRED_MENTOR)
            self.assertIn(self.program.pk, expected)

    def test_confirming_program_removes_deferred_status(self):
        # Confirming must clear a pre-existing deferred grant.
        user = self.basic_user()
        _get_programrolegrant_for_role(
            UserRole.DEFERRED_MENTOR, user, self.program)
        with self.login(email=user.email):
            url = reverse(UserProgramConfirmationDetailView.view_name,
                          args=[user.pk])
            self.client.post(url, {
                'confirmed': [self.program.pk],
            })
            response = self.client.get(url)
            expected = {
                'deferred': [],
                'confirmed': [self.program.pk]
            }
            self.assertEqual(response.data['program_confirmation'], expected)

    def test_deferring_program_removes_confirmed_status(self):
        # Deferring must clear a pre-existing confirmed grant.
        user = self.basic_user()
        _get_programrolegrant_for_role(
            UserRole.MENTOR, user, self.program)
        with self.login(email=user.email):
            url = reverse(UserProgramConfirmationDetailView.view_name,
                          args=[user.pk])
            self.client.post(url, {
                'deferred': [self.program.pk],
            })
            response = self.client.get(url)
            expected = {
                'deferred': [self.program.pk],
                'confirmed': []
            }
            self.assertEqual(response.data['program_confirmation'], expected)

    def test_only_owner_can_access_participation_view(self):
        # Accessing another user's record is forbidden (403).
        non_owner_user = UserFactory()
        with self.login(email=self.basic_user_without_api_groups().email):
            url = reverse(UserProgramConfirmationDetailView.view_name,
                          args=[non_owner_user.pk])
            response = self.client.get(url)
            self.assertEqual(response.status_code, 403)

    def test_cannot_post_invalid_input(self):
        # Non-integer program ids are rejected with the view's error.
        user = self.basic_user()
        with self.login(email=user.email):
            url = reverse(UserProgramConfirmationDetailView.view_name,
                          args=[user.pk])
            response = self.client.post(url, {
                'deferred': ["test"],
            })
            expected = {
                'detail': INVALID_INPUT_ERROR.format('deferred'),
            }
            self.assertEqual(response.data, expected)

    @patch("django.core.mail.backends.smtp.EmailBackend.send_messages")
    def test_email_is_sent_after_confirming_participation(
            self,
            mocked_backend):
        # Confirming participation triggers an outgoing email through the
        # project backend (the SMTP transport itself is mocked out).
        user = self.basic_user()
        with self.settings(
                EMAIL_BACKEND=IMPACT_BACKEND_PATH):
            with self.login(email=user.email):
                url = reverse(UserProgramConfirmationDetailView.view_name,
                              args=[user.pk])
                self.client.post(url, {
                    'confirmed': [self.program.pk],
                })
        self.assertTrue(mocked_backend.called)
def _get_user_programs_for_role(user, role):
    """Return the distinct program pks on which `user` holds `role`."""
    return list(
        user.programrolegrant_set.filter(
            program_role__user_role__name=role,
        ).values_list('program_role__program', flat=True).distinct())


def _get_programrolegrant_for_role(role, user, program=None):
    """Grant `role` to `user` on `program` (a fresh program by default)
    and return the created ProgramRoleGrant."""
    return ProgramRoleGrantFactory(
        person=user,
        program_role__program=program or ProgramFactory(),
        program_role__user_role=get_user_role_by_name(role))
| Python | 0 | |
54181cf08a60a91abac3a5c7079e39d6496a4583 | Add unit tests for Binomial node | bayespy/inference/vmp/nodes/tests/test_binomial.py | bayespy/inference/vmp/nodes/tests/test_binomial.py | ######################################################################
# Copyright (C) 2014 Jaakko Luttinen
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
######################################################################
######################################################################
# This file is part of BayesPy.
#
# BayesPy is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# BayesPy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BayesPy. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Unit tests for `binomial` module.
"""
import numpy as np
import scipy
from bayespy.nodes import Binomial
from bayespy.nodes import Beta
from bayespy.utils import utils
from bayespy.utils import random
from bayespy.utils.utils import TestCase
class TestBinomial(TestCase):
    """
    Unit tests for Binomial node
    """

    def test_init(self):
        """
        Test the creation of binomial nodes.
        """
        # Some simple initializations
        X = Binomial(0.5, n=10)
        X = Binomial(Beta([2,3]), n=10)

        # Check that plates are correct
        X = Binomial(0.7, n=10, plates=(4,3))
        self.assertEqual(X.plates,
                         (4,3))
        X = Binomial(0.7*np.ones((4,3)), n=10)
        self.assertEqual(X.plates,
                         (4,3))
        # NOTE(review): np.int was removed in NumPy 1.24; this line needs
        # a plain `int` (or np.int_) on modern NumPy -- confirm pinning.
        X = Binomial(0.7, n=10*np.ones((4,3), dtype=np.int))
        self.assertEqual(X.plates,
                         (4,3))
        X = Binomial(Beta([4,3], plates=(4,3)),
                     n=10)
        self.assertEqual(X.plates,
                         (4,3))

        # Missing the number of trials
        self.assertRaises(ValueError,
                          Binomial,
                          0.5)

        # Invalid probability
        self.assertRaises(ValueError,
                          Binomial,
                          -0.5,
                          n=10)
        self.assertRaises(ValueError,
                          Binomial,
                          1.5,
                          n=10)

        # Invalid number of trials
        self.assertRaises(ValueError,
                          Binomial,
                          0.5,
                          n=-1)
        self.assertRaises(ValueError,
                          Binomial,
                          0.5,
                          n=8.5)

        # Inconsistent plates
        # NOTE(review): no n= is passed in the two cases below, so the
        # ValueError may come from the missing-trials check rather than
        # the plate mismatch -- consider adding n=10 to pin the cause.
        self.assertRaises(ValueError,
                          Binomial,
                          0.5*np.ones(4),
                          plates=(3,))

        # Explicit plates too small
        self.assertRaises(ValueError,
                          Binomial,
                          0.5*np.ones(4),
                          plates=(1,))

        pass

    def test_moments(self):
        """
        Test the moments of binomial nodes.
        """
        # Simple test: E[X] = n*p for a single trial.
        X = Binomial(0.7, n=1)
        u = X._message_to_child()
        self.assertEqual(len(u), 1)
        self.assertAllClose(u[0],
                            0.7)

        # Test n
        X = Binomial(0.7, n=10)
        u = X._message_to_child()
        self.assertAllClose(u[0],
                            10*0.7)

        # Test plates in p
        n = np.random.randint(1, 10)
        p = np.random.rand(3)
        X = Binomial(p, n=n)
        u = X._message_to_child()
        self.assertAllClose(u[0],
                            p*n)

        # Test plates in n
        n = np.random.randint(1, 10, size=(3,))
        p = np.random.rand()
        X = Binomial(p, n=n)
        u = X._message_to_child()
        self.assertAllClose(u[0],
                            p*n)

        # Test plates in p and n (broadcast against each other)
        n = np.random.randint(1, 10, size=(4,1))
        p = np.random.rand(3)
        X = Binomial(p, n=n)
        u = X._message_to_child()
        self.assertAllClose(u[0],
                            p*n)

        # Test with beta prior: p0 is the softmax of the Beta's expected
        # log-probabilities, i.e. the prior-implied success probability.
        P = Beta([7, 3])
        logp = P._message_to_child()[0]
        p0 = np.exp(logp[0]) / (np.exp(logp[0]) + np.exp(logp[1]))
        X = Binomial(P, n=1)
        u = X._message_to_child()
        self.assertAllClose(u[0],
                            p0)

        # Test with broadcasted plates
        P = Beta([7, 3], plates=(10,))
        X = Binomial(P, n=5)
        u = X._message_to_child()
        self.assertAllClose(u[0] * np.ones(X.get_shape(0)),
                            5*p0*np.ones(10))

        pass
| Python | 0 | |
f7b2b511bd6cca122782b39c9eb75ed4a4736717 | add benchmark | test/benchmark.py | test/benchmark.py | import urllib2
import json

# Endpoint exercised by the benchmark; each iteration performs one full
# request/response round-trip and persists the payload for inspection.
url = "http://localhost:3000/api?package=com.whatsapp"

for i in range(5):
    print('Downloading ' + str(i))
    res = urllib2.urlopen(url).read()
    # `res` is already the JSON text returned by the API.  Parse it first
    # so a malformed response fails loudly, then serialize the parsed
    # object.  The previous json.dump(res, outfile) double-encoded the
    # payload: passing a str to json.dump writes it out as one big JSON
    # *string literal* instead of the original document.
    payload = json.loads(res)
    # `out_path` replaces the old variable `file`, which shadowed the
    # builtin of the same name.
    out_path = "data-" + str(i) + ".json"
    with open(out_path, 'w') as outfile:
        json.dump(payload, outfile)
| Python | 0.000002 | |
27622185e04bb652284597783287262e23bafa7d | Add minimal test case (failing) | plenum/test/node_request/test_apply_stashed_partially_ordered.py | plenum/test/node_request/test_apply_stashed_partially_ordered.py | import pytest
from plenum.common.constants import DOMAIN_LEDGER_ID
from plenum.common.startable import Mode
from plenum.common.txn_util import reqToTxn
from plenum.test.delayers import cDelay
from plenum.test.helper import sdk_get_and_check_replies, sdk_send_random_requests, logger
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.stasher import delay_rules
from plenum.test.test_node import getNonPrimaryReplicas
from stp_core.loop.eventually import eventually
# Number of requests sent by the test; Max3PCBatchSize is set to the
# same value so all of them land in a single 3PC batch.
TOTAL_REQUESTS = 10


@pytest.fixture(scope="module")
def tconf(tconf):
    # Override the batching knobs for the whole module: a long batch
    # wait plus batch size == TOTAL_REQUESTS forces one big batch.
    old_max_batch_wait = tconf.Max3PCBatchWait
    old_max_batch_size = tconf.Max3PCBatchSize
    tconf.Max3PCBatchWait = 1000
    tconf.Max3PCBatchSize = TOTAL_REQUESTS
    yield tconf

    # Restore the original configuration after the module's tests run.
    tconf.Max3PCBatchWait = old_max_batch_wait
    tconf.Max3PCBatchSize = old_max_batch_size
def test_apply_stashed_partially_ordered(looper,
                                         txnPoolNodeSet,
                                         sdk_pool_handle,
                                         sdk_wallet_client):
    # Scenario: a non-primary node stashes an Ordered message while in
    # syncing mode; half of that batch's requests are then written to its
    # ledger directly (simulating partial catch-up) before the stashed
    # message is re-applied.  The node must not double-apply and the pool
    # must converge to the same data.
    test_node = getNonPrimaryReplicas(txnPoolNodeSet)[0].node
    test_stasher = test_node.nodeIbStasher
    ledger_size = max(node.domainLedger.size for node in txnPoolNodeSet)

    def check_pool_ordered_some_requests():
        assert max(node.domainLedger.size for node in txnPoolNodeSet) > ledger_size

    def check_test_node_has_stashed_ordered_requests():
        assert len(test_node.stashedOrderedReqs) > 0

    # Delay commits on the test node so the rest of the pool orders the
    # requests while this node ends up stashing them.
    with delay_rules(test_stasher, cDelay()):
        reqs = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, TOTAL_REQUESTS)
        looper.run(eventually(check_pool_ordered_some_requests))
        test_node.mode = Mode.syncing
        looper.run(eventually(check_test_node_has_stashed_ordered_requests))

    # Manually apply the first half of the stashed batch to the domain
    # ledger, as catch-up would have done.
    req_idr = test_node.stashedOrderedReqs[0].reqIdr
    req_idr = req_idr[:len(req_idr) // 2]
    assert len(req_idr) > 1
    ledger_info = test_node.ledgerManager.getLedgerInfoByType(DOMAIN_LEDGER_ID)
    # NOTE(review): the loop variable `id` shadows the builtin of the
    # same name; consider renaming on the next functional change.
    for id in req_idr:
        txn = reqToTxn(test_node.requests[id].finalised)
        ledger_info.ledger.add(txn)
        ledger_info.postTxnAddedToLedgerClbk(DOMAIN_LEDGER_ID, txn)
    test_node.mode = Mode.participating
    test_node.processStashedOrderedReqs()

    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    sdk_get_and_check_replies(looper, reqs)
| Python | 0.000001 | |
95da7f3b6c03d3d8e711aea4195017a17cb63d5f | Add another version of write libsvm data format. | scripts/python/write_libsvm_data_format_v2.py | scripts/python/write_libsvm_data_format_v2.py | """
A script to write out lib svm expected data format from my collecting data
"""
import os
import sys
import csv
import json
import getopt
import subprocess
CMD_USAGE = """
usage: write_libsvm_data_format.py --inputs="/inputs/folder/" --output="/output/lib_svm_data" <options>
<options>:
-f, --features specify the feature space size, default is 10
-v, --verify the tool to verify output data no format error
"""
# by default feature space to be 10
# Number of consecutive RSSI readings forming one sample (window size);
# overridable at runtime via -f/--features in main().
FEATURE_SPACE = 10
# Upper bound on the number of samples emitted per beacon.
LIMIT_SAMPLE_SIZE = 1000
def write_libsvm_data(input_files, output_file):
    """Convert the collected beacon JSON files to libsvm format.

    Each output row is ``<realDistance> <idx>:<rssi> ...`` built from a
    non-overlapping window of FEATURE_SPACE consecutive RSSI readings;
    zero readings are omitted (libsvm sparse convention).

    :param input_files: list of ``.raw_filter`` JSON file paths; each is
        expected to contain an 'interestedBeacons' list whose entries
        carry 'rssis' and 'realDistance'.
    :param output_file: path of the space-delimited libsvm data file.
    """
    # NOTE: Python 2 file handling ('wb' + print statement below); the
    # csv writer emits one space-separated row per window.
    with open(output_file, 'wb') as output_csv_file:
        output_writer = csv.writer(output_csv_file, delimiter=' ')
        for input_file in input_files:
            print "Write file: ", input_file
            with open(input_file, 'rb') as signal_file:
                json_data = json.load(signal_file)
                beacons = json_data['interestedBeacons']
                for beacon in beacons:
                    rssis = beacon['rssis']
                    length = len(rssis)
                    # Skip beacons with fewer readings than one window.
                    if length < FEATURE_SPACE:
                        continue
                    rows = 0
                    ending = FEATURE_SPACE
                    # Slide in non-overlapping FEATURE_SPACE-sized steps.
                    while ending <= length:
                        # Label column first: the measured real distance.
                        line = [beacon['realDistance']]
                        for idx, val in enumerate(rssis[(ending - FEATURE_SPACE):ending]):
                            # libsvm features are 1-indexed; zeros are
                            # left out as implicit sparse entries.
                            if val != 0:
                                line.append(':'.join([str(idx + 1), str(val)]))
                        output_writer.writerow(line)
                        ending += FEATURE_SPACE
                        rows += 1
                        # Cap the samples contributed by a single beacon.
                        if rows >= LIMIT_SAMPLE_SIZE:
                            break
def check_data(check_tool, data_file):
    """Verify a generated file with libsvm's ``checkdata.py`` tool.

    :param check_tool: path to libsvm's ``checkdata.py`` script; when it
        does not exist the check is skipped with a message.
    :param data_file: the input lib svm format data, to be verified.
    """
    check_py = check_tool
    if not os.path.exists(check_py):
        print("checkdata.py not exist.")
        return
    # Run the checker through the current interpreter with the arguments
    # as a list and WITHOUT shell=True.  The previous
    # subprocess.call([...], shell=True) combination is broken on POSIX:
    # with shell=True only the first list element becomes the command and
    # the remaining elements are arguments to the *shell*, so `data_file`
    # was silently dropped and the check never saw the file.
    subprocess.call([sys.executable, os.path.abspath(check_py), data_file])
def main(argv):
    """Parse command line arguments and drive the conversion.

    :param argv: ``sys.argv``-style argument list (program name first).
    :return: process exit code -- 0 on success, 1 on failure.
    """
    try:
        # Fix: the long option must be declared "verify=" (with '='), to
        # match the short form "v:" which requires a value.  With the
        # previous bare "verify", getopt never delivered an argument for
        # --verify, so the check tool path was silently empty.
        optlist, _ = getopt.getopt(argv[1:], "hi:o:f:v:",
                                   ["help", "inputs=", "output=",
                                    "features=", "verify="])
    except getopt.GetoptError:
        print("Command line arguments error, please try --help for help")
        return 1

    inputs = ''
    output_file = ''
    check_tool = ''
    for opt, opt_arg in optlist:
        if opt in ("-h", "--help"):
            # print(X) with a single argument behaves identically under
            # Python 2 and 3, unlike the old `print CMD_USAGE` statement.
            print(CMD_USAGE)
            return 0
        if opt in ("-i", "--inputs"):
            inputs = opt_arg
            if not os.path.exists(inputs):
                print("Input files folder not exist")
                return 1
        elif opt in ("-o", "--output"):
            output_file = opt_arg
        elif opt in ("-f", "--features"):
            # Override the module-level window size.
            global FEATURE_SPACE
            FEATURE_SPACE = int(opt_arg)
        elif opt in ("-v", "--verify"):
            check_tool = opt_arg

    # Echo the effective configuration before doing any work.
    print("Inputs folder: " + inputs)
    print("Output file: " + output_file)
    print("Feature space size: " + str(FEATURE_SPACE))
    print("Check tool: " + check_tool)
    assert isinstance(output_file, str)
    assert isinstance(inputs, str)

    # Collect every collected-data file (.raw_filter) under the inputs
    # folder, recursively.
    input_files = []
    for root, _, files in os.walk(inputs):
        for name in files:
            if name.endswith('.raw_filter'):
                input_files.append(os.path.abspath(os.path.join(root, name)))
    if len(input_files) == 0:
        print("No input files.")
        return 1

    write_libsvm_data(input_files, output_file)
    check_data(check_tool, output_file)
    return 0
sys.exit(main(sys.argv)) | Python | 0 | |
cc7eb329a7d132947861ca1f2d4713cba1e4274a | Add tests! | test_processor.py | test_processor.py | from ivl_enums import IvlElabType, IvlPortType, IvlDataDirection
from parsers import parse_modules_and_elabs
from utils import IvlNetManager
import pytest
import sure # noqa
@pytest.yield_fixture
def read_netlist():
    # Read a netlist and parse it into modules and elabs.
    # Create a new net manager.
    # Function-scoped fixture: the netlist is re-read and re-parsed for
    # every test, so tests cannot leak parsed state into each other.
    # Assumes 'test.netlist' sits in the pytest working directory.
    with open('test.netlist') as f:
        test_netlist = f.read()
    net_manager = IvlNetManager()
    modules, elabs = parse_modules_and_elabs(test_netlist, net_manager)
    yield (modules, elabs, net_manager)
def test_counts(read_netlist):
    # Make sure the right number of things are produced
    modules, elabs, net_manager = read_netlist
    len(modules).should.be.equal(6)
    len(elabs).should.be.equal(27)


def test_types(read_netlist):
    modules, elabs, net_manager = read_netlist
    # Make sure the right types appear
    len([m for m in modules if m.xtype == 'tff']).should.be.equal(3)
    net_part_selects = [e for e in elabs if
                        e.xtype is IvlElabType.net_part_select]
    len(net_part_selects).should.be.equal(18)
    posedges = [e for e in elabs if e.xtype is IvlElabType.posedge]
    len(posedges).should.be.equal(3)
    logics = [e for e in elabs if e.xtype is IvlElabType.logic]
    len(logics).should.be.equal(6)


def test_ports(read_netlist):
    # Make sure ports are generated properly
    modules, elabs, net_manager = read_netlist
    tb = [m for m in modules if m.xtype == 'bargraph_testbench'][0]
    len(tb.ports).should.be.equal(3)
    regs = [p for p in tb.ports if p.xtype is IvlPortType.reg]
    len(regs).should.be.equal(1)
    wires = [p for p in tb.ports if p.xtype is IvlPortType.wire]
    len(wires).should.be.equal(2)


def test_local_ports(read_netlist):
    # Check for generation of local wire-type ports
    modules, elabs, net_manager = read_netlist
    bg = [m for m in modules if m.xtype == 'bargraph3'][0]
    local_ports = [p for p in bg.ports if p.is_local]
    len(local_ports).should.be.equal(15)


def test_port_types(read_netlist):
    # Check for proper port typing
    modules, elabs, net_manager = read_netlist
    tff = [m for m in modules if m.xtype == 'tff'][0]
    inputs = [p for p in tff.ports if
              p.direction is IvlDataDirection.input]
    len(inputs).should.be.equal(2)
    outputs = [p for p in tff.ports if
               p.direction is IvlDataDirection.output]
    len(outputs).should.be.equal(2)


def test_nets(read_netlist):
    # Check for proper net generation
    modules, elabs, net_manager = read_netlist
    # NOTE(review): '0x7fbd08d0a950' looks like a pointer-style id baked
    # into test.netlist; this is brittle if the fixture file is ever
    # regenerated -- confirm the id is stable in the checked-in netlist.
    to_bg = net_manager.get_net('0x7fbd08d0a950')
    len(to_bg.members).should.be.equal(3)
| Python | 0 | |
823d10795b22b751647e79e77eecd381cf7a809d | create test file | test_threetaps.py | test_threetaps.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for threetaps."""
import unittest
if __name__ == '__main__':
    # Placeholder runner: no TestCase classes exist yet, so this
    # currently executes an empty suite.
    unittest.main()
| Python | 0.000001 | |
f9fd2e3dcc4c25fd7561f8898e3845992553a8a8 | add wrapper script to launch tests | tests/run.py | tests/run.py | #!/usr/bin/python
import os
import subprocess
import sys


def main():
    """Run the qdjango test binary with LD_LIBRARY_PATH pointing at the
    freshly built shared libraries, and propagate its exit status."""
    here = os.path.dirname(__file__)
    root = os.path.join(here, '..')
    prog = os.path.join(here, 'qdjango-tests')

    # The built libraries live under src/<component>.
    lib_dirs = [os.path.join(root, 'src', c) for c in ['db', 'http', 'script']]

    # Build an explicit environment and argument list instead of the old
    # os.system() string interpolation: immune to spaces and shell
    # metacharacters in the checkout path, and preserves any existing
    # LD_LIBRARY_PATH entries.
    env = dict(os.environ)
    existing = env.get('LD_LIBRARY_PATH')
    env['LD_LIBRARY_PATH'] = ':'.join(lib_dirs + ([existing] if existing else []))
    return subprocess.call([prog], env=env)


if __name__ == '__main__':
    # Propagate the test binary's exit code (the old version dropped it).
    sys.exit(main())
| Python | 0.000001 | |
e8a6c0adc3aa77f8e0b1399fe076b43720acb823 | Test the API can run | tests/test_api.py | tests/test_api.py | # -*- coding: utf-8 -*-
import subprocess
import requests
from unittest import TestCase
from nose.tools import assert_equal
class Test(TestCase):
    """Smoke test: the API server starts and answers HTTP requests."""

    def setUp(self):
        import time
        self.process = subprocess.Popen("openfisca-serve")
        # Popen returns as soon as the child is forked, long before the
        # HTTP socket is listening; poll until the server answers (or a
        # timeout expires) so test_response is not flaky.
        deadline = time.time() + 30
        while True:
            if self.process.poll() is not None:
                raise AssertionError("openfisca-serve exited prematurely")
            try:
                requests.get("http://localhost:2000")
                break
            except requests.ConnectionError:
                if time.time() > deadline:
                    raise AssertionError(
                        "openfisca-serve did not come up within 30s")
                time.sleep(0.5)

    def tearDown(self):
        self.process.terminate()
        # Reap the child so repeated runs do not accumulate zombies.
        self.process.wait()

    def test_response(self):
        # The root endpoint must answer 200 once the server is up.
        assert_equal(
            requests.get("http://localhost:2000").status_code,
            200
        )
| Python | 0 | |
690c08b2b35df2d81dc0977d8bd593c45806e1c2 | Add dumb log view test cases | tests/test_log.py | tests/test_log.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from flask import url_for
def test_view_build_log(test_client):
    # Smoke test only: asserts the route resolves and the view does not
    # raise; the response status/body is deliberately not checked.
    test_client.get(url_for('log.build_log', sha='123456'))


def test_view_lint_log(test_client):
    # Same smoke-test approach for the lint log view.
    test_client.get(url_for('log.lint_log', sha='123456'))
| Python | 0 | |
31d8447c689616b5b8d1377461ab70c6ce6d6cb9 | Add doc2vecs module | thinc/doc2vecs.py | thinc/doc2vecs.py | from collections import defaultdict
from .base import Model
class SpacyWindowEncode(Model):
    """Maxout window encoder over spaCy-style token vectors.

    For each token, the word vectors in a +-2 window (5 positions) are
    combined through a piecewise-linear ("maxout") layer with `nr_piece`
    pieces, yielding one `nr_out`-dimensional output per token.
    Parameters live in a pooled allocation managed by `self.ops`.
    """
    nr_piece = 3    # maxout pieces per output unit
    nr_feat = 5     # window positions: -2, -1, 0, +1, +2
    nr_out = None   # output width (resolved in setup())
    nr_in = None    # input word-vector width (resolved in setup())

    @property
    def nr_weight(self):
        # Size of the parameter pool.
        # NOTE(review): W needs nr_out*nr_piece*nr_feat*nr_in floats and
        # b only nr_out*nr_piece, yet this budget is
        # nr_feat*nr_piece*(nr_out*nr_in + nr_out): the bias share is
        # over-allocated by a factor of nr_feat.  Harmless (pool too big,
        # not too small) but worth confirming against Model/ops.
        nr_W = self.nr_out * self.nr_in
        nr_b = self.nr_out
        return self.nr_feat * self.nr_piece * (nr_W + nr_b)

    def setup(self, *args, **kwargs):
        """Resolve dimensions (possibly from a supplied W) and allocate
        weights/gradients; copy in any provided W/b values."""
        self.data = None
        self.W = None
        if kwargs.get('W') is not None:
            # Infer all dimensions from the provided weight tensor,
            # shaped (nr_out, nr_piece, nr_feat, nr_in).
            self.nr_out = kwargs.get('W').shape[0]
            self.nr_piece = kwargs.get('W').shape[1]
            self.nr_in = kwargs.get('W').shape[2]
        if self.nr_out is not None and self.nr_in is not None \
                and self.nr_piece is not None:
            self.set_weights(initialize=True)
            self.set_gradient()
            if kwargs.get('W') is not None:
                self.W[:] = kwargs.get('W')
            if kwargs.get('b') is not None:
                self.b[:] = kwargs.get('b')

    def set_weights(self, data=None, initialize=True, example=None):
        """Carve W and b out of the parameter pool; optionally apply a
        uniform Glorot-style initialization to W."""
        if data is None:
            if self.data is None:
                self.data = self.ops.allocate_pool(self.nr_weight,
                                                   name=(self.name, 'pool'))
            data = self.data
        self.W = data.allocate_shape((self.nr_out, self.nr_piece,
                                      self.nr_feat, self.nr_in))
        self.b = data.allocate_shape((self.nr_out, self.nr_piece))

        if initialize:
            # Scale derived from fan-in/fan-out of one maxout piece.
            scale = self.ops.xp.sqrt(2. / (self.W.shape[0] + self.W.shape[2] * self.W.shape[3]))
            shape = (self.W.shape[0], self.W.shape[2], self.W.shape[3])
            for i in range(self.nr_piece):
                self.W[:,i] = self.ops.xp.random.uniform(-scale, scale, shape)

    def set_gradient(self, data=None, initialize=False):
        """Allocate d_W/d_b mirroring W/b, from a fresh or given pool."""
        if data is None:
            self.d_data = self.ops.allocate_pool(self.nr_weight,
                                                 name=(self.name, 'pool'))
        else:
            self.d_data = data
        self.d_W = self.d_data.allocate_shape(self.W.shape)
        self.d_b = self.d_data.allocate_shape(self.b.shape)

    def predict_batch(self, X):
        """Forward pass only: encode a batch of docs and flatten the
        per-doc outputs into one array."""
        out, _ = self._forward(X)
        out = self.ops.flatten(out)
        return out

    def begin_update(self, docs, dropout=0.0):
        """Forward pass plus a backprop callback.

        Returns (flattened outputs, finish_update); the maxout winner
        indices (`whiches`) are captured for the backward pass.
        """
        outputs, whiches = self._forward(docs)
        flat_out = self.ops.flatten(outputs)
        whiches = self.ops.flatten(whiches)
        flat_out, bp_dropout = self.ops.dropout(flat_out, dropout,
                                                inplace=True)
        finish_update = self._get_finish_update(docs, flat_out, whiches)
        return flat_out, bp_dropout(finish_update)

    def add_vector(self, id_, shape, add_gradient=True):
        # Vectors come from the spaCy vocab, not this model: no-op.
        pass

    def get_vector(self, id_):
        raise NotImplementedError

    def _get_vector(self, word):
        # Key by orth id; L2-normalize, guarding against a zero norm.
        return word.orth, word.vector / (word.vector_norm or 1.)

    def get_gradient(self, id_):
        # Word vectors are not updated by this model.
        return None

    def average_params(self, optimizer):
        # Parameter averaging not supported/needed here: no-op.
        pass

    def _forward(self, batch):
        """Compute per-token maxout outputs and winner indices for each
        doc in the batch."""
        positions, vectors = self._get_positions(batch)
        dotted = self._dot_ids(positions, vectors, [len(seq) for seq in batch])
        out = [self.ops.allocate((len(x), self.nr_out)) for x in batch]
        whiches = []
        for i, cands in enumerate(dotted):
            cands += self.b
            # Maxout: keep, per output unit, the best-scoring piece.
            which = self.ops.argmax(cands)
            best = self.ops.take_which(cands, which)
            out[i][:] = best
            whiches.append(which)
        return out, whiches

    def _get_positions(self, batch):
        """Map each word key to its (doc, token) positions, and collect
        one normalized vector per distinct key (deduplicates work in
        _dot_ids for repeated word types)."""
        ids = defaultdict(list)
        vectors = {}
        for i, doc in enumerate(batch):
            for j, word in enumerate(doc):
                key, vector = self._get_vector(word)
                ids[key].append((i, j))
                vectors[key] = vector
        return ids, vectors

    def _get_finish_update(self, docs, outputs, whiches):
        """Build the backward-pass closure: accumulate d_b and d_W from
        the output gradients and the saved maxout winners."""
        def finish_update(gradients, optimizer=None, **kwargs):
            all_inputs = self._get_all_inputs(docs)
            all_gradients = self._get_all_gradients(gradients,
                                                    whiches)
            if all_inputs.shape[0] == 0 or all_gradients.shape[0] == 0:
                return None
            self.d_b += all_gradients.sum(axis=0)
            # Bop,Bfi->opfi
            self.d_W += self.ops.batch_outer(all_gradients, all_inputs)
            return None
        return finish_update

    def _dot_ids(self, ids, vectors, lengths):
        """Pre-multiply W with each distinct word vector once, then
        scatter the resulting 5-position contributions into every token
        window that contains the word (clipping at doc boundaries)."""
        out = [self.ops.allocate((length, self.nr_out, self.nr_piece))
               for length in lengths]
        for id_, egs in ids.items():
            vector = vectors[id_]
            if vector is None:
                continue
            # opFi,i->Fop
            hidden = self.ops.xp.tensordot(self.W, vector, axes=[[3], [0]])
            hidden = hidden.transpose((2, 0, 1))
            for i, j in egs:
                out_i = out[i]
                if j >= 2 and (j+2) < lengths[i]:
                    # Fully interior token: add all 5 window slices.
                    out_i[j-2:j+3] += hidden
                else:
                    # Near a doc edge: add only in-bounds positions.
                    if j >= 2:
                        out_i[j-2] += hidden[0]
                    if j >= 1:
                        out_i[j-1] += hidden[1]
                    out_i[j] += hidden[2]
                    if (j+1) < lengths[i]:
                        out_i[j+1] += hidden[3]
                    if (j+2) < lengths[i]:
                        out_i[j+2] += hidden[4]
        return out

    def _get_all_inputs(self, X):
        """Build the (total tokens, 5, nr_in) window-input matrix used by
        the backward pass."""
        # NOTE(review): `i` runs over the concatenated batch and bounds
        # are checked against total_length, so windows appear to leak
        # across document boundaries here -- _dot_ids, by contrast, clips
        # per-doc via lengths[i].  Confirm whether this asymmetry between
        # forward and backward is intentional.
        total_length = sum(len(x) for x in X)
        all_inputs = self.ops.allocate((total_length, 5, self.nr_in))
        i = 0
        for doc in X:
            vectors = [self._get_vector(word)[1] for word in doc]
            for vector in vectors:
                # Scatter this vector into the window slots of its
                # neighbours: slot 0 = "two to the right sees me at -2",
                # ... slot 4 = "two to the left sees me at +2".
                if i >= 2:
                    all_inputs[i-2, 0] = vector
                if i >= 1:
                    all_inputs[i-1, 1] = vector
                all_inputs[i, 2] = vector
                if (i+1) < total_length:
                    all_inputs[i+1, 3] = vector
                if (i+2) < total_length:
                    all_inputs[i+2, 4] = vector
                i += 1
        return all_inputs

    def _get_all_gradients(self, gradients, whiches):
        """Expand output gradients to (B, nr_out, nr_piece), routing each
        gradient to the maxout piece that won on the forward pass."""
        all_gradients = self.ops.allocate((len(gradients), self.nr_out,
                                          self.nr_piece))
        for i in range(self.nr_piece):
            all_gradients[:, :, i] += gradients * (whiches == i)
        return all_gradients
| Python | 0 | |
4d500d9abe2da28cdd9bd95019048de445aac265 | Add a history demo in documentation. | docs/source/tutorial/v5/history_demo.py | docs/source/tutorial/v5/history_demo.py | # coding: utf-8
from deprecated.history import deprecated
from deprecated.history import versionadded
from deprecated.history import versionchanged
@deprecated(
reason="""
This is deprecated, really. So you need to use another function.
But I don\'t know which one.
- The first,
- The second.
Just guess!
""",
version='0.3.0')
@versionchanged(
reason='Well, I add a new feature in this function. '
'It is very useful as you can see in the example below, so try it. '
'This is a very very very very very long sentence.',
version='0.2.0')
@versionadded(
reason='Here is my new function.',
version='0.1.0')
def successor(n):
"""
Calculate the successor of a number.
:param n: a number
:return: number + 1
"""
return n + 1
# Render successor's help text, showing how the stacked
# deprecated/versionchanged/versionadded decorators accumulate their
# history in the docstring.
help(successor)
| Python | 0 | |
361333f8b214097469389d0219f339fc59ea469b | Add permissions.py | teams/permisssions.py | teams/permisssions.py | from rest_framework.permissions import BasePermission
class IsOwnerPermission(BasePermission):
    """Authenticated users may access the view; only an object's owner
    may act on that object.

    ``has_permission`` gates every request behind authentication, while
    ``has_object_permission`` additionally restricts object-level
    operations to ``obj.owner``.
    """

    def has_permission(self, request, view):
        # Reject anonymous users outright (old-style callable check,
        # matching the DRF version in use).
        user = request.user
        return user.is_authenticated()

    def has_object_permission(self, request, view, obj):
        # Object-level rule: the requesting user must be the owner.
        return request.user == obj.owner
| Python | 0.000001 | |
20ac8a830ef59abc51afe13ac102521767d47c22 | test uffd bad socket path scenarios | tests/integration_tests/functional/test_uffd.py | tests/integration_tests/functional/test_uffd.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Test UFFD related functionality when resuming from snapshot."""
import logging
import os
from framework.artifacts import SnapshotMemBackendType
from framework.builder import MicrovmBuilder, SnapshotBuilder
from framework.utils import run_cmd
import host_tools.network as net_tools
def create_snapshot(bin_cloner_path):
    """Create a snapshot of a microVM."""
    # Boot a minimal ("nano") microVM from the builder fixtures.
    vm_builder = MicrovmBuilder(bin_cloner_path)
    vm_instance = vm_builder.build_vm_nano()
    basevm = vm_instance.vm
    root_disk = vm_instance.disks[0]
    ssh_key = vm_instance.ssh_key

    basevm.start()
    ssh_connection = net_tools.SSHConnection(basevm.ssh_config)

    # Verify if guest can run commands.
    exit_code, _, _ = ssh_connection.execute_command("sync")
    assert exit_code == 0

    # Create a snapshot builder from a microvm.
    snapshot_builder = SnapshotBuilder(basevm)

    # Create base snapshot.
    snapshot = snapshot_builder.create([root_disk.local_path()],
                                       ssh_key)

    # The source VM is no longer needed once the snapshot exists.
    basevm.kill()

    return snapshot
def test_bad_socket_path(bin_cloner_path, test_microvm_with_api):
    """
    Test error scenario when socket path does not exist.

    @type: negative
    """
    logger = logging.getLogger("uffd_bad_socket_path")

    logger.info("Create snapshot")
    snapshot = create_snapshot(bin_cloner_path)

    logger.info("Load snapshot, mem %s", snapshot.mem)
    vm = test_microvm_with_api
    vm.spawn()
    jailed_vmstate = vm.create_jailed_resource(snapshot.vmstate)

    # Deliberately pass a path that does not exist ('inexsistent') so
    # the UDS connect must fail with ENOENT.
    response = vm.snapshot.load(
        mem_backend={
            'type': SnapshotMemBackendType.UFFD,
            'path': 'inexsistent'
        },
        snapshot_path=jailed_vmstate
    )

    assert vm.api_session.is_status_bad_request(response.status_code)
    assert "Load microVM snapshot error: Cannot connect to UDS in order to " \
           "send information on handling guest memory page-faults due to: " \
           "No such file or directory (os error 2)" in response.text


def test_unbinded_socket(bin_cloner_path, test_microvm_with_api):
    """
    Test error scenario when PF handler has not yet called bind on socket.

    @type: negative
    """
    logger = logging.getLogger("uffd_unbinded_socket")

    logger.info("Create snapshot")
    snapshot = create_snapshot(bin_cloner_path)

    logger.info("Load snapshot, mem %s", snapshot.mem)
    vm = test_microvm_with_api
    vm.spawn()
    jailed_vmstate = vm.create_jailed_resource(snapshot.vmstate)

    # Create the socket path as a plain file (no listener bound to it):
    # connecting must then fail with ECONNREFUSED.
    socket_path = os.path.join(vm.path, "firecracker-uffd.sock")
    run_cmd("touch {}".format(socket_path))
    jailed_sock_path = vm.create_jailed_resource(socket_path)

    response = vm.snapshot.load(
        mem_backend={
            'type': SnapshotMemBackendType.UFFD,
            'path': jailed_sock_path
        },
        snapshot_path=jailed_vmstate
    )

    assert vm.api_session.is_status_bad_request(response.status_code)
    assert "Load microVM snapshot error: Cannot connect to UDS in order to" \
           " send information on handling guest memory page-faults due to: " \
           "Connection refused (os error 111)" in response.text
| Python | 0 | |
7cb839279bc62b95eb7367814ef71c046d4b2184 | Add 'examples' module which contains some examplary function examples. | tssim/examples.py | tssim/examples.py | """This module contains example time functions"""
import numpy as np
def rand_lin_noise():
    """Build a random linear trend with fresh uniform noise per call.

    The slope is drawn once (standard normal) when this factory runs;
    the returned function then evaluates ``slope * x`` plus new uniform
    noise on every invocation.
    """
    slope = np.random.normal()

    def trend(x):
        noise = np.random.random(size=len(x))
        return slope * x + noise

    return trend
def const_lin_noise(x):
    """Return a linear trend in ``x`` plus uniform noise.

    A new slope is drawn from a standard normal on every call, so
    repeated calls produce different lines.
    """
    slope = np.random.normal()
    trend = slope * x
    return trend + np.random.random(size=len(x))
def random_walk(x):
    """Return a Gaussian random walk with one step per sample of ``x``.

    ``x`` supplies only the length (its first-axis size); the values are
    the running sum of i.i.d. standard-normal increments.
    """
    steps = np.random.normal(size=x.shape[0])
    return steps.cumsum()
def random_walk_limit(limit=2):
    """Build a stateful Gaussian random walk reflected at ``+-limit``.

    The returned callable ignores its argument and advances the walk by
    one standard-normal step per call.  Once the walk reaches ``limit``
    the next step is forced downward (and upward at ``-limit``), keeping
    the trajectory roughly inside the band.
    """
    state = [0]

    def walk(value):
        step = np.random.normal()
        position = state[0]
        # Reflect: once a bound is reached, force the step back inward.
        if position >= limit:
            step = -abs(step)
        elif position <= -limit:
            step = abs(step)
        state[0] = position + step
        return state[0]

    return walk
| Python | 0 | |
c156ad1379d842924b928c6c80f668f9875e840a | Remove page-filter flag. (which is now user-filter) | tools/telemetry/telemetry/story/story_filter.py | tools/telemetry/telemetry/story/story_filter.py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import re
from telemetry.internal.util import command_line
class _StoryMatcher(object):
    """Matches stories whose name or display_name fits a regex pattern.

    An instance is falsy when no pattern was supplied or the pattern did
    not compile; a compile failure is additionally recorded through
    `has_compile_error` so the caller can report it.
    """
    def __init__(self, pattern):
        self._regex = None
        self.has_compile_error = False
        if pattern:
            try:
                self._regex = re.compile(pattern)
            except re.error:
                self.has_compile_error = True

    def __nonzero__(self):
        # NOTE(review): __nonzero__ is the Python 2 truthiness hook;
        # porting to Python 3 requires __bool__, otherwise instances are
        # always truthy and HasMatch changes behavior -- confirm the
        # targeted interpreter.
        return self._regex is not None

    def HasMatch(self, story):
        # `self and ...` short-circuits to a falsy value (the matcher
        # itself) when no pattern is configured.
        return self and bool(
            self._regex.search(story.display_name) or
            (story.name and self._regex.search(story.name)))
class _StoryLabelMatcher(object):
  """Matches stories carrying at least one label from a comma-separated list."""

  def __init__(self, labels_str):
    # None (flag not given) and '' both disable the filter; only a non-empty
    # string yields a label list and a truthy matcher.
    self._wanted = labels_str.split(',') if labels_str else None

  def __nonzero__(self):
    # Python 2 truth protocol: truthy only when labels were supplied.
    return self._wanted is not None

  def HasLabelIn(self, story):
    return self and bool(story.labels.intersection(self._wanted))
class StoryFilter(command_line.ArgumentHandlerMixIn):
  """Filters stories in the story set based on command-line flags.

  State is class-level: ProcessCommandLineArgs must run before IsSelected.
  """

  @classmethod
  def AddCommandLineArgs(cls, parser):
    """Registers the story-filtering flags on |parser|."""
    group = optparse.OptionGroup(parser, 'User story filtering options')
    group.add_option('--story-filter',
        help='Use only stories whose names match the given filter regexp.')
    group.add_option('--story-filter-exclude',
        help='Exclude stories whose names match the given filter regexp.')
    group.add_option('--story-label-filter',
        help='Use only stories that have any of these labels')
    group.add_option('--story-label-filter-exclude',
        help='Exclude stories that have any of these labels')
    parser.add_option_group(group)

  @classmethod
  def ProcessCommandLineArgs(cls, parser, args):
    """Builds matcher state from parsed |args|; exits on an invalid regex."""
    cls._include_regex = _StoryMatcher(args.story_filter)
    cls._exclude_regex = _StoryMatcher(args.story_filter_exclude)
    cls._include_labels = _StoryLabelMatcher(args.story_label_filter)
    cls._exclude_labels = _StoryLabelMatcher(args.story_label_filter_exclude)
    # optparse's parser.error() raises SystemExit and never returns, so the
    # previous `raise parser.error(...)` re-raise was dead code.
    if cls._include_regex.has_compile_error:
      parser.error('--story-filter: Invalid regex.')
    if cls._exclude_regex.has_compile_error:
      parser.error('--story-filter-exclude: Invalid regex.')

  @classmethod
  def IsSelected(cls, story):
    """Returns True if |story| survives every filter.

    Exclude filters take priority over include filters.
    """
    if cls._exclude_labels.HasLabelIn(story):
      return False
    if cls._exclude_regex.HasMatch(story):
      return False
    if cls._include_labels and not cls._include_labels.HasLabelIn(story):
      return False
    if cls._include_regex and not cls._include_regex.HasMatch(story):
      return False
    return True
| # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import re
from telemetry.internal.util import command_line
class _StoryMatcher(object):
  """Wraps an optional compiled regex used to select stories by name.

  Truthiness (Python 2 __nonzero__) signals a usable pattern exists;
  ``has_compile_error`` flags a pattern that failed to compile.
  """

  def __init__(self, pattern):
    self._regex = None
    self.has_compile_error = False
    if pattern:
      try:
        self._regex = re.compile(pattern)
      except re.error:
        self.has_compile_error = True

  def __nonzero__(self):
    return self._regex is not None

  def HasMatch(self, story):
    if not self:
      # Preserve the original `self and ...` result: the falsy matcher
      # object itself is returned (Python 2), which reads as False.
      return self
    return bool(self._regex.search(story.display_name) or
                (story.name and self._regex.search(story.name)))
class _StoryLabelMatcher(object):
  """Optional label filter built from a comma-separated flag value."""

  def __init__(self, labels_str):
    self._labels = labels_str.split(',') if labels_str else None

  def __nonzero__(self):
    # Python 2 truth protocol: truthy only when the flag was supplied.
    return self._labels is not None

  def HasLabelIn(self, story):
    if not self:
      # Preserve the original `self and ...` result (py2: the falsy matcher).
      return self
    return bool(story.labels.intersection(self._labels))
class StoryFilter(command_line.ArgumentHandlerMixIn):
  """Filters stories in the story set based on command-line flags.

  State is class-level: ProcessCommandLineArgs must run before IsSelected.
  """

  @classmethod
  def AddCommandLineArgs(cls, parser):
    """Registers the story-filtering flags on |parser|."""
    group = optparse.OptionGroup(parser, 'User story filtering options')
    group.add_option('--story-filter',
        help='Use only stories whose names match the given filter regexp.')
    # Kept for backwards compatibility; writes into the same destination.
    group.add_option('--page-filter', dest='story_filter',
        help='Deprecated. Use --story-filter instead.')
    group.add_option('--story-filter-exclude',
        help='Exclude stories whose names match the given filter regexp.')
    group.add_option('--story-label-filter',
        help='Use only stories that have any of these labels')
    group.add_option('--story-label-filter-exclude',
        help='Exclude stories that have any of these labels')
    parser.add_option_group(group)

  @classmethod
  def ProcessCommandLineArgs(cls, parser, args):
    """Builds matcher state from parsed |args|; exits on an invalid regex."""
    cls._include_regex = _StoryMatcher(args.story_filter)
    cls._exclude_regex = _StoryMatcher(args.story_filter_exclude)
    cls._include_labels = _StoryLabelMatcher(args.story_label_filter)
    cls._exclude_labels = _StoryLabelMatcher(args.story_label_filter_exclude)
    # optparse's parser.error() raises SystemExit and never returns, so the
    # previous `raise parser.error(...)` re-raise was dead code.
    if cls._include_regex.has_compile_error:
      parser.error('--story-filter: Invalid regex.')
    if cls._exclude_regex.has_compile_error:
      parser.error('--story-filter-exclude: Invalid regex.')

  @classmethod
  def IsSelected(cls, story):
    """Returns True if |story| survives every filter.

    Exclude filters take priority over include filters.
    """
    if cls._exclude_labels.HasLabelIn(story):
      return False
    if cls._exclude_regex.HasMatch(story):
      return False
    if cls._include_labels and not cls._include_labels.HasLabelIn(story):
      return False
    if cls._include_regex and not cls._include_regex.HasMatch(story):
      return False
    return True
| Python | 0 |
c39c086f51963678769c1066637ca573c721e827 | Create a simple static gallery script. | static_gallery.py | static_gallery.py | from . import flag
#from go import html
from go import os
from go import path/filepath
def ReadAlbumDirs(input_dir):
    """Yield the names of sub-directories (albums) directly under input_dir.

    NOTE(review): `with defer f.Close():` and the Go-style API (os.Open,
    Readdirnames, os.Stat, filepath.Join) are not standard CPython -- this
    file appears to target a Python-to-Go dialect; confirm the toolchain.
    """
    f = os.Open(input_dir)
    with defer f.Close():
        names = f.Readdirnames(-1)
        for name in names:
            # Keep only entries whose stat reports a directory.
            stat = os.Stat(filepath.Join(input_dir, name))
            if stat.IsDir():
                yield name
def RenderDir(album_names, output_dir):
    """Write output_dir/index.html with a link to each album.

    NOTE(review): '%q' is a Go format verb; CPython's % operator would raise
    "unsupported format character" -- confirm this runs under the intended
    dialect, or whether '%q' should be a quoted "%s".
    NOTE(review): album names are interpolated into HTML without escaping.
    """
    index = filepath.Join(output_dir, 'index.html')
    f = os.Create(index)
    with defer f.Close():
        f.Write('<html><body><h3>Gallery %s</h3> <ul>\n' % output_dir)
        for name in album_names:
            f.Write('<li><a href="%s">%q</a></li>\n' % (name, name))
def ReadPhotosInDir(input_dir):
f = os.Open(input_dir)
with defer f.Close():
names = f.Readdirnames(-1)
for name in names:
stat = os.Stat(filepath.Join(input_dir, name))
if stat.IsDir() == False:
yield name
def RenderAlbum(photo_names, output_dir):
    """Write output_dir/index.html showing each photo inline, linked to itself.

    NOTE(review): photo names are interpolated into HTML without escaping.
    """
    index = filepath.Join(output_dir, 'index.html')
    f = os.Create(index)
    with defer f.Close():
        f.Write('<html><body><h3>Album %s</h3> <ul>\n' % output_dir)
        for name in photo_names:
            f.Write('<li><a href="%s"><img src="%s" /></a></li>\n' % (name, name))
def LinkPhotos(photo_names, input_dir, output_dir):
    """Hard-link every named photo from input_dir into output_dir."""
    for name in photo_names:
        source = filepath.Join(input_dir, name)
        target = filepath.Join(output_dir, name)
        os.Link(source, target)
# Command-line flags: the source directory containing one sub-directory per
# album, and the destination directory for the generated static site.
input_dir = flag.String('input', '', 'The input directory.')
output_dir = flag.String('output', '', 'The output directory.')
def main(argv):
    """Build a static HTML gallery: one top index plus one page per album.

    Bug fix: the loop previously rebound the name ``output_dir`` (the
    module-level flag holder) to a plain string. Because assignment makes a
    name function-local in Python, every earlier read of ``output_dir.X`` in
    this function would raise UnboundLocalError, and even if it ran, the
    second iteration would call ``.X`` on a string. The per-album path now
    uses a distinct local name (which also stops shadowing builtin ``dir``).
    """
    argv = flag.Munch(argv)
    album_dirs = list(ReadAlbumDirs(input_dir.X))
    RenderDir(album_dirs, output_dir.X)
    for album in album_dirs:
        photo_dir = filepath.Join(input_dir.X, album)
        album_out = filepath.Join(output_dir.X, album)
        photos = list(ReadPhotosInDir(photo_dir))
        os.MkdirAll(album_out, os.ModePerm)
        RenderAlbum(photos, album_out)
        LinkPhotos(photos, photo_dir, album_out)
| Python | 0 | |
f083789e5615d15715f49a7dbdb25505aa5efae2 | Initialize P1_assignChores | books/AutomateTheBoringStuffWithPython/Chapter16/PracticeProjects/P1_assignChores.py | books/AutomateTheBoringStuffWithPython/Chapter16/PracticeProjects/P1_assignChores.py | # Write a program that takes a list of people’s email addresses and a list of chores
# that need to be done and randomly assigns chores to people. Email each person their
# assigned chores.
#
# If you’re feeling ambitious, keep a record of each person’s previously assigned
# chores so that you can make sure the program avoids assigning anyone the same chore
# they did last time.
#
# For another possible feature, schedule the program to run once a week automatically.
| Python | 0.000124 | |
54a9b637aad85a20f3e865185ffed0abfd4192cd | Create tutorial4.py | tutorial4.py | tutorial4.py | from ggame import App, RectangleAsset, ImageAsset, Sprite, LineStyle, Color, Frame
# Game window dimensions in pixels.
SCREEN_WIDTH = 640
SCREEN_HEIGHT = 480
class SpaceShip(Sprite):
    """
    Animated space ship

    Sprite built from a strip of 4 vertically stacked frames cropped out of
    the source image (each frame is 292-227 = 65 px wide and 125 px tall).
    """
    asset = ImageAsset("images/four_spaceship_by_albertov_with_thrust.png",
        Frame(227,0,292-227,125), 4, 'vertical')

    def __init__(self, position):
        # position: (x, y) pixel coordinates where the ship is placed.
        super().__init__(SpaceShip.asset, position)
class SpaceGame(App):
    """
    Tutorial4 space game example.

    On construction, paints a solid black, borderless rectangle covering the
    whole window as the background.
    """

    def __init__(self, width, height):
        super().__init__(width, height)
        # Background: full-screen black rectangle with a zero-width outline.
        fill_color = Color(0, 1)
        no_border = LineStyle(0, fill_color)
        backdrop = RectangleAsset(SCREEN_WIDTH, SCREEN_HEIGHT, no_border, fill_color)
        background = Sprite(backdrop, (0, 0))
# Instantiate the game and start its event loop at script execution time.
myapp = SpaceGame(SCREEN_WIDTH, SCREEN_HEIGHT)
myapp.run()
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.