commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
7b7aa0506d8a9c2f12a41a3bcff84645dfb1e9b5 | Create ScikitSVM.py | vidhyal/WitchMusic | Scikit/ScikitSVM.py | Scikit/ScikitSVM.py | import numpy as np
from sklearn import svm
from sklearn.metrics import *
from sklearn import cross_validation
from sklearn.cross_validation import KFold
from BalanceData import *
import matplotlib.pyplot as plt
cParam = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
kFold =10
def ShuffleTrainFeatures(feats, labs):
    """Shuffle feature rows and their labels together, preserving pairing.

    Returns (shuffled_feats, shuffled_labs) with the same shapes as the
    inputs; rows are permuted identically in both arrays.
    """
    n_rows = len(feats)
    width = feats.size // n_rows
    # Glue features and labels side by side so a single shuffle moves both.
    stacked = np.c_[feats.reshape(n_rows, -1), labs.reshape(len(labs), -1)]
    np.random.shuffle(stacked)
    shuffled_feats = stacked[:, :width].reshape(feats.shape)
    shuffled_labs = stacked[:, width:].reshape(labs.shape)
    return shuffled_feats, shuffled_labs
def runkFoldCrossValidation(features, labels, model):
    """Sweep the SVM C parameter across k folds and plot the CV scores.

    Fold k is trained with ``C = cParam[k]`` (module-level list), so
    ``kFold`` must equal ``len(cParam)``.  Plots the score curve with
    +/- standard-error bands and returns the index into cParam of the
    best-scoring fold.

    NOTE(review): fold quality and C quality are conflated here (one C per
    fold) -- confirm this is intentional rather than a grid search.
    """
    scores = []
    kf = KFold(len(features), kFold, shuffle=True)
    for k, (train, test) in enumerate(kf):
        model.C = cParam[k]
        model.fit(features[train], labels[train])
        score = model.score(features[test], labels[test])
        print (k, model.C, score)
        scores.append(score)
    # Bug fix: the original called .std() on the Python list ``scores``
    # (lists have no .std method, raising AttributeError); compute the
    # standard deviation on an ndarray instead.
    scores_arr = np.array(scores)
    std_err = scores_arr.std() / np.sqrt(len(features))
    plt.figure(figsize=(4, 3))
    plt.semilogx(cParam, scores)
    # plot error lines showing +/- std. errors of the scores
    plt.semilogx(cParam, scores_arr + std_err, 'b--')
    plt.semilogx(cParam, scores_arr - std_err, 'b--')
    plt.ylabel('CV score')
    plt.xlabel('alpha')
    plt.axhline(np.max(scores), linestyle='--', color='.5')
    return getMaxIndex(scores)
def getMaxIndex(scores):
    """Return the index of the largest value in ``scores``.

    Ties keep the earliest index; an empty list yields 0 (mirroring the
    original behaviour).  The index is also printed, preserving the original
    debug output -- print(x) with a single argument is valid in both
    Python 2 and Python 3, unlike the Py2-only statement it replaces.

    Bug fix: the original initialised the running maximum to 0, so an
    all-negative score list always returned index 0; max(..., key=...)
    returns the true argmax.
    """
    maxIndex = max(range(len(scores)), key=scores.__getitem__) if scores else 0
    print(maxIndex)
    return maxIndex
rootdir = os.getcwd()
if not os.path.exists('sklearnTry'):
os.makedirs('sklearnTry')
newdir = os.path.join(rootdir,'sklearnTry')
fout = open(os.path.join(newdir,'SVMOut'),'w+')
train_features, train_labels, test_features, test_labels, test_keys = GetData()
train_features, train_labels = ShuffleTrainFeatures(train_features, train_labels)
model = svm.SVC(decision_function_shape ='ovr', class_weight='balanced' )
c = runkFoldCrossValidation(train_features, train_labels, model)
#c =0.8
model.set_params( C = c)
model.fit(train_features, train_labels)
pred = model.predict(test_features)
predictProb = model.decision_function(test_features)
train_acc = (model.score(train_features, train_labels))
line = str(train_acc )+"\n"
print train_acc
fout.write(line)
for key in range(len(test_keys)):
line = test_keys[key]+"\t"
for f in range(len(predictProb[key])):
line +="%i:%f\t" % (f+1 , predictProb[key][f])
line += "\n"
fout.write(line)
fout.close()
accuracy = accuracy_score(test_labels, pred)
print confusion_matrix(test_labels, pred)
print accuracy
| mit | Python | |
94d7a3b01b7360001817ef3ed3ad2003f0722b14 | Add complex parse scenario - environment variables and init | melkamar/webstore-manager,melkamar/webstore-manager | tests/script_parser/test_parsing_complex.py | tests/script_parser/test_parsing_complex.py | from script_parser import parser
import os
def test_environ_init():
    """ Set up variables in environment and check parser uses them to init properly. """
    os.environ['client_id'] = 'x'
    os.environ['client_secret'] = 'y'
    os.environ['refresh_token'] = 'z'
    # ${env.NAME} placeholders should be expanded from os.environ before
    # chrome.init stores the three credentials as parser variables.
    p = parser.Parser(['chrome.init ${env.client_id} ${env.client_secret} ${env.refresh_token}'])
    p.execute()
    assert p.variables['client_id'] == 'x'
    assert p.variables['client_secret'] == 'y'
    assert p.variables['refresh_token'] == 'z'
| mit | Python | |
569a7cd00af6d7d0fe43427982b54f3cab81ca4f | add test-fallback-socks5-proxy.py | Ziemin/telepathy-gabble,community-ssu/telepathy-gabble,community-ssu/telepathy-gabble,mlundblad/telepathy-gabble,Ziemin/telepathy-gabble,Ziemin/telepathy-gabble,Ziemin/telepathy-gabble,mlundblad/telepathy-gabble,jku/telepathy-gabble,mlundblad/telepathy-gabble,community-ssu/telepathy-gabble,jku/telepathy-gabble,jku/telepathy-gabble,community-ssu/telepathy-gabble | tests/twisted/test-fallback-socks5-proxy.py | tests/twisted/test-fallback-socks5-proxy.py | import dbus
from gabbletest import exec_test, make_result_iq, elem, elem_iq, sync_stream
from servicetest import call_async, EventPattern
from twisted.words.xish import domish, xpath
import ns
import constants as cs
from bytestream import create_from_si_offer, BytestreamS5B
# FIXME: stolen from offer-private-dbus-tube
def make_caps_disco_reply(stream, req, features):
    """Build a disco#info result IQ advertising each entry of *features*
    as a <feature var='...'/> child of the query element."""
    iq = make_result_iq(stream, req)
    query = iq.firstChildElement()
    for f in features:
        el = domish.Element((None, 'feature'))
        el['var'] = f
        query.addChild(el)
    return iq
# FIXME: stolen from offer-private-dbus-tube
def make_presence(fromjid, tojid, caps=None):
    """Build a <presence/> stanza from *fromjid* to *tojid*.

    If *caps* is given, its items become attributes of an XEP-0115
    capabilities <c/> child element.
    """
    el = domish.Element(('jabber:client', 'presence',))
    el['from'] = fromjid
    el['to'] = tojid
    if caps:
        cel = domish.Element(('http://jabber.org/protocol/caps', 'c'))
        for key, value in caps.items():
            cel[key] = value
        el.addChild(cel)
    return el
def test(q, bus, conn, stream):
    """Check that the configured fallback SOCKS5 proxy is advertised as a
    streamhost when Gabble offers a SOCKS5 bytestream.

    Flow: answer the connection-time proxy discovery, offer a D-Bus tube to
    a fake contact, let the contact accept it, then inspect the resulting
    SOCKS5 offer for the fallback proxy's jid/host/port.
    """
    conn.Connect()
    # discard activities request and status change
    _, e = q.expect_many(
        EventPattern('dbus-signal', signal='StatusChanged', args=[0, 1]),
        EventPattern('stream-iq', to='fallback-proxy.localhost', iq_type='get', query_ns=ns.BYTESTREAMS))
    # Reply to the proxy discovery with a concrete streamhost address.
    reply = elem_iq(stream, 'result', id=e.stanza['id'])(
        elem(ns.BYTESTREAMS, 'query')(
            elem('streamhost', jid='fallback-proxy.localhost', host='127.0.0.1', port='12345')()))
    stream.send(reply)
    # Offer a private D-Bus tube just to check if the proxy is present in the
    # SOCKS5 offer
    requestotron = dbus.Interface(conn, cs.CONN_IFACE_REQUESTS)
    # Send Alice's presence
    caps = {'ext': '', 'ver': '0.0.0',
            'node': 'http://example.com/fake-client0'}
    presence = make_presence('alice@localhost/Test', 'test@localhost', caps)
    stream.send(presence)
    disco_event = q.expect('stream-iq', to='alice@localhost/Test',
                           query_ns=ns.DISCO_INFO)
    stream.send(make_caps_disco_reply(stream, disco_event.stanza, [ns.TUBES]))
    sync_stream(q, stream)
    path, props = requestotron.CreateChannel({cs.CHANNEL_TYPE: cs.CHANNEL_TYPE_DBUS_TUBE,
                                              cs.TARGET_HANDLE_TYPE: cs.HT_CONTACT,
                                              cs.TARGET_ID: 'alice@localhost',
                                              cs.DBUS_TUBE_SERVICE_NAME: 'com.example.TestCase'})
    tube_chan = bus.get_object(conn.bus_name, path)
    dbus_tube_iface = dbus.Interface(tube_chan, cs.CHANNEL_TYPE_DBUS_TUBE)
    dbus_tube_iface.OfferDBusTube({})
    e = q.expect('stream-iq', to='alice@localhost/Test')
    bytestream, profile = create_from_si_offer(stream, q, BytestreamS5B, e.stanza,
                                               'test@localhost/Resource')
    # Alice accepts the tube
    result, si = bytestream.create_si_reply(e.stanza)
    si.addElement((ns.TUBES, 'tube'))
    stream.send(result)
    # The SOCKS5 offer must list the fallback proxy among its streamhosts.
    e = q.expect('stream-iq', to='alice@localhost/Test')
    found = False
    nodes = xpath.queryForNodes('/iq/query/streamhost', e.stanza)
    for node in nodes:
        if node['jid'] == 'fallback-proxy.localhost':
            found = True
            assert node['host'] == '127.0.0.1'
            assert node['port'] == '12345'
            break
    assert found
    conn.Disconnect()
    q.expect('dbus-signal', signal='StatusChanged', args=[2, 1])
    return True
if __name__ == '__main__':
exec_test(test, params={'fallback-socks5-proxy': 'fallback-proxy.localhost'})
| lgpl-2.1 | Python | |
76d781b83e1ab124cf1ae405a6cd5ad5274eef2d | add 3d conv benchmark | diogo149/treeano,jagill/treeano,nsauder/treeano,jagill/treeano,diogo149/treeano,nsauder/treeano,diogo149/treeano,nsauder/treeano,jagill/treeano | benchmarks/conv_3d.py | benchmarks/conv_3d.py | import numpy as np
import theano
import theano.tensor as T
import treeano.nodes as tn
fX = theano.config.floatX
# TODO change me
conv3d_node = tn.Conv3DNode
# conv3d_node = tn.DnnConv3DNode
network = tn.SequentialNode(
"s",
[tn.InputNode("i", shape=(1, 1, 32, 32, 32)),
conv3d_node("conv", num_filters=32, filter_size=(3, 3, 3)),
tn.DnnMeanPoolNode("pool", pool_size=(30, 30, 30))]
).network()
fn = network.function(["i"], ["s"])
x = np.random.randn(1, 1, 32, 32, 32).astype(fX)
"""
20150916 results:
%timeit fn(x)
Conv3DNode => 86.2 ms
DnnConv3DNode => 1.85 ms
"""
| apache-2.0 | Python | |
76cc1118940850aed26f1d098ae8e23c2e17956d | create obfuscateion command | ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend | cla_backend/apps/legalaid/management/commands/obfuscate.py | cla_backend/apps/legalaid/management/commands/obfuscate.py | # -*- coding: utf-8 -*-
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
    """
    Obfuscate all sensitive data in db

    Personal details:
        full_name
        postcode
        street
        mobile_phone
        home_phone
        email
        date_of_birth
        ni_number
        diversity
        search_field
    Third Party details:
        pass_phrase
        reason
        personal_relationship
        personal_relationship_note
        organisation_name
    Eligibility check:
        notes
    Case:
        notes
        provider_notes
        search_field
    Case Note History:
        operator_notes
        provider_notes
    EOD details:
        notes
    Complaint:
        description
    """
    help = ('Obfuscate all sensitive data in the database')

    def handle_noargs(self, *args, **kwargs):
        # NOTE(review): obfuscation is not implemented yet -- this command
        # is currently a no-op placeholder; the fields to scrub are listed
        # in the class docstring above.
        pass
| mit | Python | |
0baf7e8863f7064dd0159c29616dd8b4751db15a | Test for issue #3. SQLite backend is not thread safe | femtotrader/requests-cache,YetAnotherNerd/requests-cache,reclosedev/requests-cache | tests/test_thread_safety.py | tests/test_thread_safety.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Path hack
import os, sys
sys.path.insert(0, os.path.abspath('..'))
from threading import Thread
import unittest
import requests
import requests_cache
CACHE_BACKEND = 'sqlite'
CACHE_NAME = 'requests_cache_test'
class ThreadSafetyTestCase(unittest.TestCase):
    """Regression test for issue #3: the SQLite cache backend must tolerate
    concurrent use from multiple threads."""

    def test_caching_with_threads(self):
        # Fresh, empty cache so has_url() below only sees this test's data.
        requests_cache.configure(CACHE_NAME, CACHE_BACKEND)
        requests_cache.clear()
        n = 5
        url = 'http://httpbin.org/get'

        def do_requests(url, params):
            # Each worker repeats its request; responses after the first
            # should be served from the cache.
            for i in range(10):
                requests.get(url, params=params)

        threads = [Thread(target=do_requests, args=(url, {'param': i})) for i in range(n)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        # Every distinct URL requested by the workers must have been cached.
        for i in range(n):
            self.assert_(requests_cache.has_url('%s?param=%s' % (url, i)))
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | Python | |
ceec6416f892a600adc41c0ee5bc75f7b340e200 | Deploy fabric task. | marcelor/xchange | fabfile.py | fabfile.py | import os
from fabric.api import run, cd
from fabric.contrib.console import confirm
from fabric.decorators import hosts
PROJECT_DIR = os.path.dirname(__file__)
@hosts('asimo@xchange.asimo.webfactional.com')
def deploy_production():
    """Deploy the latest code to the production WebFaction host.

    Prompts for confirmation, then pulls from git, installs requirements
    with pip-2.7 and restarts Apache.
    """
    if confirm('This action will deploy to the site xchange.asimo.webfactional.com, are you sure?', default=True):
        with cd('/home/asimo/webapps/xchange_flask/xchange/'):
            run('git pull')
            run('pip-2.7 install -r requirements.txt', pty=True)
        with cd('/home/asimo/webapps/xchange_flask/'):
            run('apache2/bin/restart')
d9720f1fcc3013324c9ea58620df9c458a2e314e | Add (broken) AWc test for debugging purposes | FInAT/FInAT | test/test_awc.py | test/test_awc.py | import pytest
import FIAT
import finat
import numpy as np
from gem.interpreter import evaluate
from fiat_mapping import MyMapping
def test_morley():
    """Compare FInAT Arnold-Winther basis values, mapped to a physical cell,
    against FIAT's direct tabulation on that cell.

    NOTE(review): the function is named test_morley but it exercises the
    Arnold-Winther element -- presumably copied from a Morley test; consider
    renaming.  The commit message also marks this test as (broken).
    """
    ref_cell = FIAT.ufc_simplex(2)
    ref_element = finat.ArnoldWinther(ref_cell, 3)
    ref_pts = finat.point_set.PointSet(ref_cell.make_points(2, 0, 3))
    # A deliberately skewed physical triangle so the mapping is non-trivial.
    phys_cell = FIAT.ufc_simplex(2)
    phys_cell.vertices = ((0.0, 0.1), (1.17, -0.09), (0.15, 1.84))
    mppng = MyMapping(ref_cell, phys_cell)
    z = (0, 0)
    finat_vals_gem = ref_element.basis_evaluation(0, ref_pts, coordinate_mapping=mppng)[z]
    finat_vals = evaluate([finat_vals_gem])[0].arr
    phys_cell_FIAT = FIAT.ArnoldWinther(phys_cell, 3)
    phys_points = phys_cell.make_points(2, 0, 3)
    phys_vals = phys_cell_FIAT.tabulate(0, phys_points)[z]
    # Keep the first 24 basis functions and move the point axis first.
    phys_vals = phys_vals[:24].transpose((3, 0, 1, 2))
    assert(np.allclose(finat_vals, phys_vals))
| mit | Python | |
e307dcd0a7e4ff6bdeeca65ae2fc09577fdecb1e | add anti hot-linking test | PegasusWang/wechannel,PegasusWang/wechannel,PegasusWang/wechannel,PegasusWang/wechannel | test/test_img.py | test/test_img.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""防盗链测试"""
from requests import get
url = 'http://read.html5.qq.com/image?src=forum&q=5&r=0&imgflag=7&imageUrl=http://mmbiz.qpic.cn/mmbiz/zYJiboYpSP4dxQ9bUDia7tXvc5xwAtibkff3wSPicGwdWAM1z9j8G5ajohicO5b46ePmv3ibxqRpnp7KfQtvKAR6zQlg/0?wx_fmt=jpeg'
def test_my():
    """Fetch the image with a Referer from our own site and save it --
    anti-hot-linking should allow this request."""
    #H = {'Referer': 'http://wechannel.io/'}
    H = {'Referer': 'http://104.238.149.32/'}
    r = get(url, headers=H)
    with open('my.jpeg', 'wb') as f:
        f.write(r.content)
def test_men():
    """Fetch the image with a foreign Referer and save it, to compare how
    the server treats a hot-linking third-party site."""
    H = {'Referer': 'http://chuansong.me:8888/'}
    r = get(url, headers=H)
    with open('men.jpeg', 'wb') as f:
        f.write(r.content)
if __name__ == '__main__':
test_my()
test_men()
| mit | Python | |
a146670645eb5d225b82c56a580896fea3b6d32a | add pyfund1_sol | NYUDataBootcamp/Materials | Code/Python/pract_fund1_sol.py | Code/Python/pract_fund1_sol.py | """
Practice problems, Python fundamentals 1 -- Solutions
@authors: Balint Szoke, Daniel Csaba
@date: 06/02/2017
"""
#-------------------------------------------------------
# 1) Solution
good_string = "Sarah's code"
#or
good_string = """Sarah's code"""
#-------------------------------------------------------
# 2) Solution
i = 1234
list(str(i))
#-------------------------------------------------------
# 3) Solution
year = '2016'
next_year = str(int(year) + 1)
#-------------------------------------------------------
# 4) Solution
x, y = 3, 'hello'
print(x, y)
z = x
x = y
y = z
print(x, y)
#-------------------------------------------------------
# 5) Solution
name = 'Jones'
print(name.upper())
#-------------------------------------------------------
# 6) Solution
name = 'Ulysses'
print(name.count('s'))
#-------------------------------------------------------
# 7) Solution
long_string = 'salamandroid'
long_string = long_string.replace('a', '*')
print(long_string)
#-------------------------------------------------------
# 8) Solution
ll = [1, 2, 3, 4, 5]
ll.reverse()
print(ll)
#ll.pop(1)
# or better
ll.pop(ll.index(4))
print(ll)
ll.append(1.5)
print(ll)
ll.sort()
print(ll)
#%% #-------------------------------------------------------
# 9) Solution
number = "32,054.23"
number_no_comma = number.replace(',', '')
number_float = float(number_no_comma)
print(number_float)
#or
print(float(number.replace(',', '')))
#-------------------------------------------------------
# 10) Solution
firstname_lastname = 'john_doe'
firstname, lastname = firstname_lastname.split('_')
Firstname = firstname.capitalize()
Lastname = lastname.capitalize()
print(Firstname, Lastname)
#-------------------------------------------------------
# 11-12) Solution
l = [0, 1, 2, 4, 5]
index = l.index(4)
l.insert(index, 3)
print(l)
#-------------------------------------------------------
# 13) Solution
s = 'www.example.com'
s = s.lstrip('w.')
s = s.rstrip('.c')
# or in a single line
(s.lstrip('w.')).rstrip('.com')
#-------------------------------------------------------
# 14) Solution
link = 'https://play.spotify.com/collection/albums'
splitted_link = link.rsplit('/', 1)
print(splitted_link[0])
#or
link.rsplit('/', 1)[0]
#-------------------------------------------------------
# 15) Solution
amount = "32.054,23"
ms = amount.maketrans(',.', '.,')
amount = amount.translate(ms)
print(amount)
| mit | Python | |
8266673b2059a6cc583c5b5393d235d1a55b3ed7 | Update server.py | crossbario/autobahn-python,hzruandd/AutobahnPython,jvdm/AutobahnPython,schoonc/AutobahnPython,bencharb/AutobahnPython,meejah/AutobahnPython,oberstet/autobahn-python,crossbario/autobahn-python,nucular/AutobahnPython,iffy/AutobahnPython,markope/AutobahnPython,Geoion/AutobahnPython,dash-dash/AutobahnPython,iffy/AutobahnPython,oberstet/autobahn-python,tomwire/AutobahnPython,iffy/AutobahnPython,dash-dash/AutobahnPython,crossbario/autobahn-python,RyanHope/AutobahnPython,tavendo/AutobahnPython,dash-dash/AutobahnPython,iffy/AutobahnPython,dash-dash/AutobahnPython,inirudebwoy/AutobahnPython,oberstet/autobahn-python,ttimon7/AutobahnPython,Jenselme/AutobahnPython | examples/asyncio/websocket/echo/server.py | examples/asyncio/websocket/echo/server.py | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from autobahn.asyncio.websocket import WebSocketServerProtocol, \
WebSocketServerFactory
class MyServerProtocol(WebSocketServerProtocol):
    """WebSocket echo server: every received message is sent straight back
    to the client, preserving its text/binary type."""

    def onConnect(self, request):
        print("Client connecting: {0}".format(request.peer))

    def onOpen(self):
        print("WebSocket connection open.")

    def onMessage(self, payload, isBinary):
        if isBinary:
            print("Binary message received: {0} bytes".format(len(payload)))
        else:
            print("Text message received: {0}".format(payload.decode('utf8')))
        # echo back message verbatim
        self.sendMessage(payload, isBinary)

    def onClose(self, wasClean, code, reason):
        print("WebSocket connection closed: {0}".format(reason))
if __name__ == '__main__':
try:
import asyncio
except ImportError:
# Trollius >= 0.3 was renamed
import trollius as asyncio
factory = WebSocketServerFactory("ws://localhost:9000", debug=False)
factory.protocol = MyServerProtocol
loop = asyncio.get_event_loop()
coro = loop.create_server(factory, '0.0.0.0', 9000)
server = loop.run_until_complete(coro)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
server.close()
loop.close()
| ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from autobahn.asyncio.websocket import WebSocketServerProtocol, \
WebSocketServerFactory
class MyServerProtocol(WebSocketServerProtocol):
    """WebSocket echo server: every received message is sent straight back
    to the client, preserving its text/binary type."""

    def onConnect(self, request):
        print("Client connecting: {0}".format(request.peer))

    def onOpen(self):
        print("WebSocket connection open.")

    def onMessage(self, payload, isBinary):
        if isBinary:
            print("Binary message received: {0} bytes".format(len(payload)))
        else:
            print("Text message received: {0}".format(payload.decode('utf8')))
        # echo back message verbatim
        self.sendMessage(payload, isBinary)

    def onClose(self, wasClean, code, reason):
        print("WebSocket connection closed: {0}".format(reason))
if __name__ == '__main__':
try:
import asyncio
except ImportError:
# Trollius >= 0.3 was renamed
import trollius as asyncio
factory = WebSocketServerFactory("ws://localhost:9000", debug=False)
factory.protocol = MyServerProtocol
loop = asyncio.get_event_loop()
coro = loop.create_server(factory, '127.0.0.1', 9000)
server = loop.run_until_complete(coro)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
server.close()
loop.close()
| mit | Python |
d5420bddc9d845a8b56d62cc798b9851f68e3713 | Add zimg test script | vapoursynth/vapoursynth,Kamekameha/vapoursynth,Kamekameha/vapoursynth,vapoursynth/vapoursynth,Kamekameha/vapoursynth,Kamekameha/vapoursynth,vapoursynth/vapoursynth,vapoursynth/vapoursynth | test/zimgtest.py | test/zimgtest.py | import vapoursynth as vs
core = vs.get_core()
colorfamilies = (vs.GRAY, vs.YUV, vs.RGB, vs.YCOCG)
intbitdepths = (8, 9, 11, 11, 12, 13, 14, 15, 16)
floatbitdepths = (16, 32)
yuvss = (0, 1, 2)
formatids = []
for cfs in colorfamilies:
for bps in intbitdepths:
if cfs in (vs.YUV, vs.YCOCG):
for wss in yuvss:
for hss in yuvss:
formatids.append(core.register_format(cfs, vs.INTEGER, bps, wss, hss).id)
else:
formatids.append(core.register_format(cfs, vs.INTEGER, bps, 0, 0).id)
for cfs in colorfamilies:
for bps in floatbitdepths:
if cfs in (vs.YUV, vs.YCOCG):
for wss in yuvss:
for hss in yuvss:
formatids.append(core.register_format(cfs, vs.FLOAT, bps, wss, hss).id)
else:
formatids.append(core.register_format(cfs, vs.FLOAT, bps, 0, 0).id)
print(len(formatids))
for informat in formatids:
for outformat in formatids:
clip = core.std.BlankClip(format=informat)
try:
if (clip.format.color_family == vs.YUV):
clip = core.resize.Bicubic(clip, format=outformat, matrix_in_s="709")
elif (core.get_format(outformat).color_family == vs.YUV):
clip = core.resize.Bicubic(clip, format=outformat, matrix_s="709")
else:
clip = core.resize.Bicubic(clip, format=outformat)
clip.get_frame(0)
except vs.Error as e:
print(core.get_format(informat).name + ' ' + core.get_format(outformat).name)
print(e)
| lgpl-2.1 | Python | |
77b4624d5e0f5fb77edfca18be6ccf11d50fa862 | add Python/Visualisation/OrientedGlyphs | lorensen/VTKExamples,lorensen/VTKExamples,lorensen/VTKExamples,lorensen/VTKExamples,lorensen/VTKExamples,lorensen/VTKExamples,lorensen/VTKExamples | src/Python/Visualization/OrientedGlyphs.py | src/Python/Visualization/OrientedGlyphs.py | import vtk
sphereSource = vtk.vtkSphereSource()
sphereSource.Update()
input_data = vtk.vtkPolyData()
input_data.ShallowCopy(sphereSource.GetOutput())
arrowSource = vtk.vtkArrowSource()
glyph3D = vtk.vtkGlyph3D()
glyph3D.SetSourceConnection(arrowSource.GetOutputPort())
glyph3D.SetVectorModeToUseNormal()
glyph3D.SetInputData(input_data)
glyph3D.SetScaleFactor(.2)
glyph3D.Update()
# Visualize
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(glyph3D.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
renderer = vtk.vtkRenderer()
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderer.AddActor(actor)
renderer.SetBackground(.3, .6, .3) # Background color green
renderWindow.Render()
renderWindowInteractor.Start()
| apache-2.0 | Python | |
a865ea3a9557ac3238615db020e742f10a787044 | Add test_settings module | nephila/aldryn-search,aldryn/aldryn-search,nephila/aldryn-search,aldryn/aldryn-search,nephila/aldryn-search,aldryn/aldryn-search | test_settings.py | test_settings.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
HELPER_SETTINGS = {
'ALLOWED_HOSTS': ['localhost'],
'CMS_LANGUAGES': {1: [{'code': 'en', 'name': 'English'}]},
'CMS_TEMPLATES': (("whee.html", "Whee Template"),),
'LANGUAGES': (('en', 'English'),),
'LANGUAGE_CODE': 'en',
'TEMPLATE_LOADERS': ('aldryn_search.tests.FakeTemplateLoader',),
}
def run():
    """Run the aldryn_search test suite via djangocms-helper, using the
    module-level HELPER_SETTINGS."""
    from djangocms_helper import runner
    runner.cms('aldryn_search')
if __name__ == '__main__':
run()
| bsd-3-clause | Python | |
a3a7e748c106e30b5b8cfeb50ef6722b6af5112c | Add initial test | pmav99/fxr | tests/test_sr.py | tests/test_sr.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# module:
# author: Panagiotis Mavrogiorgos <pmav99,gmail>
"""
"""
import shlex
import subprocess
import pytest
@pytest.fixture
def temp_file(tmpdir):
    """Return a py.path handle for a scratch file inside pytest's tmpdir
    (the file is not created until a test writes to it)."""
    sample_file = tmpdir.join("file.txt")
    return sample_file
@pytest.mark.parametrize("pattern, replacement, content", [
    # regex pattern: any 3-digit run surrounded by spaces
    (" \d{3} ", "ASD", "substitute number: 123 is this ok? asdf123qwer"),
    # literal pattern with the same surrounding spaces
    (" 123 ", "ASD", "substitute number: 123 is this ok? asdf123qwer"),
])
def test_sr_single(temp_file, pattern, replacement, content):
    """Run sr.py in 'single' mode and check that the space-padded ' 123 '
    is replaced while the unpadded '123' inside 'asdf123qwer' survives."""
    temp_file.write(content)
    original = temp_file.read()
    assert replacement not in original
    # run script
    cmd = "python3 sr.py single '{pattern}' '{replacement}' {filepath} "
    cmd = shlex.split(cmd.format(pattern=pattern, replacement=replacement, filepath=temp_file))
    subprocess.check_call(cmd)
    substituted = temp_file.read()
    # check that substitutions are OK
    assert "123" in substituted
    assert " 123 " not in substituted
    assert replacement in substituted
| bsd-2-clause | Python | |
1aceea20a05b5324ad20bcd3595667f64eedf973 | Add tests for `cms.views`. | dan-gamble/cms,lewiscollard/cms,jamesfoley/cms,jamesfoley/cms,danielsamuels/cms,jamesfoley/cms,lewiscollard/cms,lewiscollard/cms,dan-gamble/cms,danielsamuels/cms,dan-gamble/cms,danielsamuels/cms,jamesfoley/cms | cms/tests/test_views.py | cms/tests/test_views.py | from django.test import TestCase
from ..views import TextTemplateView
class TestViews(TestCase):
def test_texttemplateview_render_to_response(self):
view = TextTemplateView()
view.request = {}
view.template_name = 'templates/base.html'
rendered = view.render_to_response({})
self.assertEqual(rendered.template_name, ['templates/base.html'])
self.assertEqual(rendered.status_code, 200)
| bsd-3-clause | Python | |
c3ed60f1fc767e4444a4ee0abdf9a76cd21263d8 | Include example using noise reducer. | jdammers/jumeg,pravsripad/jumeg | examples/preprocessing/plot_noise_reducer.py | examples/preprocessing/plot_noise_reducer.py | #!/usr/bin/env python
'''
Script to show the application of noise reducer on jusample data.
'''
import os
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
from jumeg.jumeg_noise_reducer import noise_reducer
import mne
plt.ion()
# load the jumeg sample data (has to be BTI)
data_dir = os.environ('JUSAMPLE_MEG_PATH')
subject = '207184'
raw_fname = op.join(data_dir, 'recordings', subject,
'sample_207184_rest_EC-raw.fif')
raw = mne.io.Raw(raw_fname, preload=True)
fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, 1, sharex=True, sharey=True)
picks = mne.pick_types(raw.info, meg='mag', exclude='bads')
raw.plot_psd(fmin=0., fmax=100., tmin=None, tmax=60.,
n_fft=None, picks=picks, ax=ax1);
ax1.set_title('Original')
# notch filter
raw_notch = raw.copy().notch_filter(np.arange(50, 251, 50), picks=picks,
filter_length='auto',
notch_widths=None, n_jobs=4, method='fir',
phase='zero-double',
fir_window='hamming', fir_design='firwin')
raw_notch.plot_psd(fmin=0., fmax=100., tmin=None, tmax=60.,
n_fft=None, picks=picks, ax=ax2);
ax2.set_title('Notch filter 50Hz applied')
# powerline removal using noise_reducer
raw_nr_notch = noise_reducer(raw_fname, raw=raw.copy(), detrending=False,
reflp=None, refhp=None, refnotch=[50., 100., 150.],
return_raw=True, verbose=False)
raw_nr_notch.plot_psd(fmin=0., fmax=100., tmin=None, tmax=60.,
n_fft=None, picks=picks, ax=ax3);
ax3.set_title('Noise reducer notch filter 50Hz applied')
# remove high freq noise (>0.1Hz) from ref channels
raw_nr2 = noise_reducer(raw_fname, raw=raw_nr_notch, detrending=False,
reflp=None, refhp=0.1, refnotch=None,
return_raw=True, verbose=False)
raw_nr2.plot_psd(fmin=0., fmax=100., tmin=None, tmax=60.,
n_fft=None, picks=picks, ax=ax4);
ax4.set_title('Noise reducer high pass filtered 0.1Hz')
# remove low freq noise (<5Hz) from ref channels
raw_nr = noise_reducer(raw_fname, raw=raw_nr2, detrending=False,
reflp=5., refhp=None, refnotch=None,
return_raw=True, verbose=False)
raw_nr.plot_psd(fmin=0., fmax=100., tmin=None, tmax=60.,
n_fft=None, picks=picks, ax=ax5);
ax5.set_title('Noise reducer low pass filtered 5Hz')
plt.tight_layout()
plt.show()
| bsd-3-clause | Python | |
717174fcf7d876caba23cdde956f5884f6adc244 | Add module | jni/flatten | flatdir.py | flatdir.py | import os
import sys
import argparse
import re
BYTES_FINDER = re.compile(r'(\d+(?:\.\d+)?)\s?(k|K|m|M|g|G|t|T)?(B|b)?')
POWERS_D = {'k': 10**3, 'm': 10**6, 'g': 10**9, 't': 10**12}
POWERS_B = {'k': 2**10, 'm': 2**20, 'g': 2**30, 't': 2**40}
def r_scandir(path):
    '''List files and directories recursively.

    Parameters
    ----------
    path : string
        A path to a directory.

    Returns
    -------
    dir_iterator : iterator of DirEntry objects
        Iterator of DirEntry objects, same as `os.scandir`; each directory
        is yielded before its own contents (depth-first).
    '''
    for item in os.scandir(path):
        yield item
        if item.is_dir():
            # Descend immediately so the traversal stays depth-first.
            for nested in r_scandir(item.path):
                yield nested
def human2bytes(text, binary=True):
    '''Convert a human-readable file size spec to an integer number of bytes.

    Parameters
    ----------
    text : string
        The text to be converted, e.g. ``'4500'``, ``'1.5kb'``, ``'2MB'``.
    binary : bool, optional
        Whether to use binary multipliers (1024, 1024^2, etc) (default), or
        decimal ones (1000, 1000000, etc).

    Returns
    -------
    bytes_count : int
        The number of bytes matching the input text.

    Examples
    --------
    >>> human2bytes('4500')
    4500
    >>> human2bytes('1.5kb')
    1536
    >>> human2bytes('2MB', False)
    2000000
    '''
    parsed = re.match(r'(\d+(?:\.\d+)?)\s?([kKmMgGtT])?([Bb])?', text)
    if parsed is None:
        # Bug fix: the original never checked for a failed match and crashed
        # with AttributeError on .group() instead of raising this ValueError.
        raise ValueError('Not a valid size spec: %s. Examples of valid '
                         'specs include 4500, 512MB, 20kb, and 2TB.' % text)
    value = float(parsed.group(1))
    suffix = parsed.group(2)
    if suffix is None:
        # Bug fix: a bare number like '4500' has no multiplier suffix;
        # the original called str.lower(None) here and raised TypeError,
        # breaking its own first doctest.
        return round(value)
    # k/m/g/t -> 1024^1..4 (binary) or 1000^1..4 (decimal).
    step = 1024 if binary else 1000
    exponent = 1 + 'kmgt'.index(suffix.lower())
    return round(value * step ** exponent)
def flatten(indir, outdir, filetype='', minsize=0, maxsize=None):
    '''Place hardlinks in outdir to all files in nested directories in indir.

    Parameters
    ----------
    indir : string
        The input directory to flatten.
    outdir : string
        The output directory, where to place all the files in `indir`.
        Created (with parents) if it does not exist.
    filetype : string, optional
        Link only files with this extension (case-insensitive; the default
        empty string matches every file).
    minsize : int, optional
        Link only files larger than this size.
    maxsize : int, optional
        Link only files smaller than this size.
    '''
    filetype = str.lower(filetype)
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    files = r_scandir(indir)
    for entry in files:
        info = entry.stat()
        # Skip directories and files outside the requested size window.
        if (entry.is_dir() or
                info.st_size < minsize or
                (maxsize is not None and info.st_size > maxsize)):
            continue
        if not entry.name.lower().endswith(filetype):
            continue
        src = os.path.abspath(entry.path)
        dst = os.path.join(outdir, entry.name)
        # NOTE(review): os.link raises FileExistsError when two source files
        # share a basename -- confirm whether collisions should be handled.
        os.link(src, dst)
__version__ = '0.1'
def main():
    """Command-line entry point: parse arguments and flatten the directory.

    Bug fixes versus the original:
    - ``parse_args(sys.argv)`` included the program name as ``indir``;
      parse_args() with no argument uses sys.argv[1:] as intended.
    - Options were read off the *parser* object instead of the parsed
      namespace, so every value was wrong or missing.
    - ``filetype``/``minsize`` now default to ''/0, matching what
      ``flatten`` expects (its first line calls str.lower(filetype)).
    """
    parser = argparse.ArgumentParser(
        description='Place hardlinks to all files found under a nested '
                    'directory tree into a single flat directory.')
    parser.add_argument('indir',
        help='Input directory to flatten.')
    parser.add_argument('outdir',
        help='Output directory: all files recursively found in <indir> '
             'will be placed here. Created if it doesn\'t exist.')
    parser.add_argument('-t', '--filetype', default='',
        help='Only flatten files matching this extension.')
    parser.add_argument('-m', '--minsize', type=human2bytes, default=0,
        help='Find only files larger than this size. This can be a human-'
             'readable string, such as \'512kB\'.')
    parser.add_argument('-M', '--maxsize', type=human2bytes, default=None,
        help='Find only files smaller than this size. This can be a human-'
             'readable string, such as \'512kB\'.')
    args = parser.parse_args()
    flatten(args.indir, args.outdir, filetype=args.filetype,
            minsize=args.minsize, maxsize=args.maxsize)
| mit | Python | |
52f0cb0f06a7f7918cfbb2f8ead5e4e725f4072d | add script for correcting Bogotá data (when PDoCInteri is not null, it's the house number and PDoTexto is the unit, otherwise PDoTexto is the house number) | sergiyprotsiv/openaddresses,slibby/openaddresses,sergiyprotsiv/openaddresses,tyrasd/openaddresses,sabas/openaddresses,sabas/openaddresses,sergiyprotsiv/openaddresses,openaddresses/openaddresses,tyrasd/openaddresses,openaddresses/openaddresses,openaddresses/openaddresses,slibby/openaddresses,orangejulius/openaddresses,mmdolbow/openaddresses,slibby/openaddresses,sabas/openaddresses,orangejulius/openaddresses,mmdolbow/openaddresses,tyrasd/openaddresses,orangejulius/openaddresses,mmdolbow/openaddresses | scripts/co/bogota.py | scripts/co/bogota.py | import json
import json
import sys
import logging

from esridump.dumper import EsriDumper

logging.basicConfig(level=logging.DEBUG)

# Output path defaults to bogota.geojson unless given on the command line.
outfile_name = sys.argv[1] if len(sys.argv) > 1 else 'bogota.geojson'

d = EsriDumper('http://serviciosgis.catastrobogota.gov.co/arcgis/rest/services/Mapa_Referencia/Mapa_Referencia/MapServer/33')

outfile = open(outfile_name, 'w')
outfile.write('{"type":"FeatureCollection","features":[\n')

features = iter(d)
try:
    feature = next(features)
    while True:
        props = feature['properties']
        # When PDoCInteri is non-empty it actually holds the house number and
        # PDoTexto holds the unit, so swap them; otherwise PDoTexto already
        # carries the house number.
        interior = props['PDoCInteri']
        if interior and interior.strip():
            street = props['PDoTexto']
            props['PDoCInteri'] = street
            props['PDoTexto'] = interior
        outfile.write(json.dumps(feature))
        # The separator is only written after confirming another feature
        # exists, which keeps the JSON array free of a trailing comma.
        feature = next(features)
        outfile.write(',\n')
except StopIteration:
    outfile.write('\n')
# Fixed: this line read `args.outfile`, but no `args` object exists in this
# script — a guaranteed NameError at the very end of the dump.
outfile.write(']}')
outfile.close()
| bsd-3-clause | Python | |
e00a0430ceac9439cfc15f1d2c019e3da88f6cf7 | set up data generator | taoalpha/XMate,taoalpha/XMate,taoalpha/XMate,taoalpha/XMate | dataGenerator/dataGenerator.py | dataGenerator/dataGenerator.py | import requests
postUrl = "http://localhost:5000"
def sendUserProfile(profile):
    """POST a user profile and let checkStatus() report server-side failures."""
    r = requests.post(postUrl + '/user/', data=profile)
    # Fixed: `if (!checkStatus(r)): break` was C-style syntax (SyntaxError)
    # with an illegal `break` outside any loop. checkStatus already prints
    # the payload on failure, so calling it is all that is needed here.
    checkStatus(r)
def sendScheduleProfile(profile):
    """POST a schedule profile to the data-collection server."""
    endpoint = postUrl + '/schedule/'
    requests.post(endpoint, data=profile)
def sendMessageProfile(profile):
    """POST a message profile to the data-collection server."""
    endpoint = postUrl + '/message/'
    requests.post(endpoint, data=profile)
def checkStatus(r):
    """Return True when the JSON response reports status == 1.

    On failure the full payload is printed to help debugging, and False
    is returned.
    """
    data = r.json()
    if data["status"] != 1:
        # print(...) works on both Python 2 and 3 for a single value.
        print(data)
        # Fixed: `false`/`true` are NameErrors in Python; the booleans are
        # capitalized False/True.
        return False
    return True
| mit | Python | |
89c92a02062cf91c4e3e66fe86846e57bc3388b8 | Add new package:py-pytest-check-links (#16503) | LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack | var/spack/repos/builtin/packages/py-pytest-check-links/package.py | var/spack/repos/builtin/packages/py-pytest-check-links/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPytestCheckLinks(PythonPackage):
    """pytest plugin that checks URLs for HTML-containing files."""

    homepage = "https://github.com/jupyterlab/pytest-check-links"
    url      = "https://pypi.io/packages/source/p/pytest-check-links/pytest_check_links-0.3.4.tar.gz"

    version('0.3.4', sha256='4b3216548431bf9796557e8ee8fd8e5e77a69a4690b3b2f9bcf6fb5af16a502b')

    # Build-time dependencies only: the sdist's setup.py drives pbr through
    # setuptools.  NOTE(review): no runtime dependencies (pytest itself,
    # html parsers) are declared here — confirm against upstream setup.py.
    depends_on('py-setuptools@17.1:', type='build')
    depends_on('py-pbr@1.9:', type='build')
| lgpl-2.1 | Python | |
f98acc57f7ca18c21744d03cbad8f5239b4eceab | add script to create command list | kontron/python-ipmi | bin/supported_cmds.py | bin/supported_cmds.py | import sys
from collections import OrderedDict, namedtuple
from pyipmi.msgs.registry import DEFAULT_REGISTRY
def make_table(grid):
    """Render `grid` (a list of string rows) as a reST "simple table".

    The first and last rows are treated as headers and get '=' dividers;
    every other row divider uses '-'.
    """
    # Column width = widest cell in that column.
    widths = [max(len(cell) for cell in column) for column in zip(*grid)]

    out = table_div(widths, 1)
    last = len(grid) - 1
    for idx, row in enumerate(grid):
        is_header = idx == 0 or idx == last
        out += normalize_row(row, widths)
        out += table_div(widths, is_header)
    return out
def table_div(max_cols, header_flag=1):
    """Return one reST divider line: '=' under headers, '-' elsewhere.

    `max_cols` lists the width of each column; every run of markers is
    followed by a single separating space.
    """
    marker = "=" if header_flag == 1 else "-"
    return "".join(width * marker + " " for width in max_cols) + "\n"
def normalize_row(row, max_cols):
    """Pad each cell of `row` to its column width plus one trailing space."""
    padded = (row[i] + (width - len(row[i]) + 1) * " "
              for i, width in enumerate(max_cols))
    return "".join(padded) + "\n"
def get_command_list():
    """Collect (netfn, cmdid, group-extension, name) rows for all requests.

    Registry keys that are tuples describe messages; odd netfn codes are
    responses and are skipped, so only request messages are listed.
    """
    Command = namedtuple('Command', ['netfn', 'cmdid', 'grpext', 'name'])
    rows = []
    for key, val in sorted(DEFAULT_REGISTRY.registry.items()):
        if not isinstance(key, tuple):
            continue
        # skip response messages (odd netfn)
        if key[0] & 1:
            continue
        rows.append(Command(hex(key[0]), hex(key[1]),
                            str(key[2]), val.__name__[:-3]))
    return rows
def main():
    """Print the supported commands, as a reST table when 'rst' is requested."""
    rows = get_command_list()
    rows.insert(0, ('Netfn', 'CMD', 'Group Extension', 'Name'))

    as_rst = len(sys.argv) > 1 and sys.argv[1].lower() == 'rst'
    if as_rst:
        print(make_table(rows))
    else:
        print(rows)

if __name__ == '__main__':
    main()
| lgpl-2.1 | Python | |
9f0e49a7523bc48529155b8f22f89a8c893026ce | add display | will127534/AFE4400,will127534/AFE4400 | AFE4400_display.py | AFE4400_display.py |
import numpy as np
from matplotlib.lines import Line2D
import matplotlib.pyplot as plt
import serial
from serial.tools import list_ports
import matplotlib.animation as animation
import time
# List the available serial ports to help pick the right device.
print list(list_ports.comports())
#x = raw_input(">>> Input Com port name: ")
# NOTE(review): the port is hard-coded — confirm COM13 @ 115200 baud matches
# the attached AFE4400 board.
Serial = serial.Serial("COM13",115200)
# Ring buffer for the 100-sample long-run average of the raw signal.
long_ave = [0] * 100
count = 0
# Ring buffer averaging the last 3 computed heart rates (BPM).
heart_ave = [0] * 3
count_h = 0
# Long-run mean of the signal; recomputed by avelong() on every sample.
ave = 0
class Scope:
    """Rolling strip-chart: draws incoming samples on `ax` over a sliding
    window of `maxt` seconds, advancing `dt` seconds per sample."""

    def __init__(self, ax, maxt=3, dt=0.02):
        self.ax = ax
        self.dt = dt          # x advance per incoming sample (seconds)
        self.maxt = maxt      # visible window width (seconds)
        self.tdata = [0]
        self.ydata = [0]
        self.line = Line2D(self.tdata, self.ydata)
        self.ax.add_line(self.line)
        # `ave` is the module-level running mean; the y-limits are recentred
        # again on every sample inside emitter().
        self.ax.set_ylim(ave-0.02,ave+0.02)
        self.ax.set_xlim(0, self.maxt)

    def update(self, y):
        """FuncAnimation callback: append sample `y` and redraw the line."""
        lastt = self.tdata[-1]
        if lastt > self.tdata[0] + self.maxt: # reset the arrays
            # Window is full: restart from the newest point and shift the
            # visible x range forward by one window.
            self.tdata = [self.tdata[-1]]
            self.ydata = [self.ydata[-1]]
            self.ax.set_xlim(self.tdata[0], self.tdata[0] + self.maxt)
            self.ax.figure.canvas.draw()

        t = self.tdata[-1] + self.dt
        self.tdata.append(t)
        self.ydata.append(y)
        self.line.set_data(self.tdata, self.ydata)
        return self.line,
def avelong(value):
    """Push `value` into the 100-sample ring buffer and return its mean.

    Serves as the long-window moving average used to recentre the plot's
    y-axis around the signal baseline.
    """
    global long_ave
    global count
    # Fixed: `count is 100` compared identity, not value — it only worked by
    # accident of CPython's small-int caching. Use `==`.
    if count == 100:
        count = 0
    long_ave[count] = value
    count += 1
    return np.mean(long_ave)
def aveheart(value):
    """Push a BPM `value` into the 3-sample ring buffer and return its mean.

    Smooths the instantaneous heart-rate estimate over the last 3 beats.
    """
    global heart_ave
    global count_h
    # Fixed: `count_h is 3` compared identity, not value — it only worked by
    # accident of CPython's small-int caching. Use `==`.
    if count_h == 3:
        count_h = 0
    heart_ave[count_h] = value
    count_h += 1
    return np.mean(heart_ave)
# State shared with emitter(): the last samples (for slope detection) and the
# timestamps of the two most recent detected beats.
beat = 0
last = 0
last_data = 0
test2 = 0
last_last_data = 0
cycle1 = 0
cycle2 = 0
cycle3 = 0  # NOTE(review): declared global in emitter() but never assigned/used
def emitter(p=0.03):
    """Generator feeding FuncAnimation: yields the latest sample parsed from
    the serial stream, or 0. whenever a read/parse fails.

    Also performs beat detection on steep negative slopes, recolouring the
    trace and updating the running averages.  `p` is unused — left over from
    the matplotlib scope example this is based on.
    """
    global test2
    global last_data
    global last_last_data
    global cycle1
    global cycle2
    global cycle3
    global ave
    while True:
        try:
            data = Serial.readline()
            test = data.split(',')
            for i in range(0,len(test)):
                test[i] = float(test[i].strip())
                test[i] = test[i]
            #print len(test)
            # A valid frame has exactly 6 comma-separated values; only the
            # first value is plotted.
            if len(test)==6:
                #print test[0]
                # Shift the two-sample history used for slope detection.
                last_last_data = last_data
                last_data = test2
                test2 = test[0]
                ave = avelong(test[0])
                # Recentre the visible y-window around the long-run mean.
                scope.ax.set_ylim(ave-1000,ave+1000)
                slope1 = test2 - last_data
                slope2 = last_data - last_last_data
                # A sharp drop (slope <= -90) is treated as a heartbeat edge.
                if (slope1<=-90 or slope2<=-90 ):
                    # Debounce: ignore edges closer than 0.5 s to the last.
                    if ((time.time()-cycle1)>0.5):
                        cycle2 = cycle1
                        cycle1 = time.time()
                        scope.line.set_color('red')
                        try:
                            # Beat interval -> BPM, averaged over the last 3
                            # beats; cycle2-cycle1 is negative, hence the -1.
                            heart = aveheart(-1/(cycle2-cycle1)*60)
                            print heart,slope1,slope2
                        except:
                            print "heart_beat calc error"
                else:
                    scope.line.set_color('blue')
                #print test2
                yield test2
            else:
                # print test
                yield 0.
        except:
            yield 0.
fig, ax = plt.subplots()
scope = Scope(ax)

# pass a generator in "emitter" to produce data for the update func
# interval=0: redraw as fast as samples arrive from the serial port.
ani = animation.FuncAnimation(fig, scope.update, emitter, interval = 0,
                              blit=True)

plt.show()
| mit | Python | |
fda2eeb1b16ccd13b18d0badb5c3bcf93e08ea38 | Add initial code to obtain haplographs. | AbeelLab/phasm,AbeelLab/phasm | phasm/walker.py | phasm/walker.py | """
Obtain graph representations for chromosomes from the total assembly graph.
This module contains functions to obtain "fused" haplotype contigs. Or in other
words: it helps in obtaining subgraphs from the assembly graph that represent
a set of chromosome copies. This subgraph may contain bubbles and other
non-linear paths that could represents SNP's or larger structural variations
between different chromosome copies.
"""
import logging
from typing import Iterator
import networkx
from phasm.assembly_graph import AssemblyGraph
from phasm.bubbles import find_superbubbles, superbubble_nodes
logger = logging.getLogger(__name__)
def build_haplographs(g: AssemblyGraph,
                      min_nodes: int=1) -> Iterator[AssemblyGraph]:
    """Yield subgraphs of `g`, each representing a set of chromosome copies.

    Walks from every start point, extending along simple paths and jumping
    across whole superbubbles, and yields each collected subgraph containing
    at least `min_nodes` nodes.
    """
    # Build dictionary which maps the bubble source to the bubble sink
    logger.info("Searching for superbubbles in the assembly graph...")
    bubbles = {b[0]: b[1] for b in find_superbubbles(g)}
    bubble_sources = set(bubbles.keys())
    bubble_sinks = set(bubbles.values())
    logger.debug("Found superbubbles: %s", bubbles)
    logger.info("Graph has %d superbubbles", len(bubbles))

    # Obtain start nodes, nodes which have no incoming edges or at a junction
    # which is not part of a superbubble
    start_points = [
        n for n in g.nodes_iter() if g.in_degree(n) == 0 or (
            g.in_degree(n) > 1 and n not in bubble_sources and
            n not in bubble_sinks
        )
    ]

    logger.info("Number of start points : %d", len(start_points))

    # Nodes already assigned to some haplograph; walks never re-enter them.
    visited = set()
    for start in start_points:
        if start in visited:
            continue

        subgraph_nodes = set()
        curr_node = start
        while curr_node:
            if curr_node in bubbles:
                # Start of a superbubble, include all nodes of the superbubble
                bubble_nodes = superbubble_nodes(g, curr_node,
                                                 bubbles[curr_node])
                logger.debug("%s is bubble source, this bubble contains %d "
                             "nodes.", curr_node, len(bubble_nodes))
                visited.update(bubble_nodes)
                subgraph_nodes.update(bubble_nodes)
                # Continue the walk from the bubble's sink.
                curr_node = bubbles[curr_node]
            elif curr_node in visited:
                # A bubble sink can also be a bubble source, so that's why we
                # check for a bubble source first above, and afterwards this
                # check if we already visited a given node. If this is not
                # a bubble source and we've seen this node before, then we're
                # looping and we quit building the contig any further.
                logger.debug("%s already visited, stopping", curr_node)
                break
            elif g.out_degree(curr_node) == 1:
                # Simple path to extend
                neighbour = g.neighbors(curr_node)[0]
                visited.add(neighbour)
                subgraph_nodes.add(neighbour)
                curr_node = neighbour
            else:
                # We're either at a node that has no outgoing edges, or a node
                # that is not the source of a superbubble but has multiple
                # outgoing edges. We're not sure what to do now so we quit
                # here.
                logger.debug("Current node %s is a junction with out-degree "
                             "%d", curr_node, g.out_degree(curr_node))
                break

        if len(subgraph_nodes) >= min_nodes:
            yield networkx.subgraph(g, subgraph_nodes)
| mit | Python | |
31f4974482f1652b00324c9bc5760689ed4dd0fa | Create __init__.py | robertclf/FAFT,robertclf/FAFT | FAFT_64-points_R2C/__init__.py | FAFT_64-points_R2C/__init__.py | bsd-3-clause | Python | ||
4d465ba648ce4af7547818574bd2b08be835eb09 | Add initial template of zenodo-deposit.py | libscie/liberator,libscie/liberator,libscie/liberator,libscie/liberator | bin/zenodo-deposit.py | bin/zenodo-deposit.py | import requests
import json
import os

# Personal access token for the Zenodo API; read from the environment so it
# never ends up committed to the repository. (Fixed: ZENODO_KEY was simply
# undefined, a guaranteed NameError.)
ZENODO_KEY = os.environ['ZENODO_KEY']

headers = {"Content-Type": "application/json"}

# Create an empty deposition.
r = requests.post('https://zenodo.org/api/deposit/depositions',
                  params={'access_token': ZENODO_KEY}, json={},
                  headers=headers)

# Get the deposition id from the previous response
deposition_id = r.json()['id']

# Upload file
data = {'filename': 'fulltext.pdf'}
files = {'file': open('/path/to/myfirstfile.csv', 'rb')}  # placeholder path
r = requests.post('https://zenodo.org/api/deposit/depositions/%s/files' % deposition_id,
                  params={'access_token': ZENODO_KEY}, data=data,
                  files=files)

# Add metadata
data = {
    'metadata': {
        'title': 'My first upload',
        'upload_type': 'poster',
        'description': 'This is my first upload',
        'creators': [{'name': 'Doe, John',
                      'affiliation': 'Zenodo'}]
    }
}
# Fixed: json.dumps was used without importing json.
r = requests.put('https://zenodo.org/api/deposit/depositions/%s' % deposition_id,
                 params={'access_token': ZENODO_KEY}, data=json.dumps(data),
                 headers=headers)

# Publish the deposition.
r = requests.post('https://zenodo.org/api/deposit/depositions/%s/actions/publish' % deposition_id,
                  params={'access_token': ZENODO_KEY} )

# Fixed: a bare `r.status_code` expression is a no-op in a script.
print(r.status_code)
8411eb2ee14718141b7242e54b60c3e8da906e4d | Add grep plugin | tomleese/smartbot,Cyanogenoid/smartbot,thomasleese/smartbot-old,Muzer/smartbot | plugins/grep.py | plugins/grep.py | import io
import re
import unittest
class Plugin:
    """grep: filter stdin lines by a substring or regular-expression match."""

    def on_command(self, bot, msg, stdin, stdout, reply):
        # Everything after the command name forms the pattern.
        pattern_str = " ".join(msg["args"][1:])
        if not pattern_str:
            print(self.on_help(), file=stdout)
            return

        pattern = re.compile(pattern_str)
        for raw_line in stdin:
            line = raw_line.strip()
            # Keep lines that contain the literal text, or that match the
            # regex anchored at the start of the line.
            if pattern_str in line or pattern.match(line):
                print(line, file=stdout)

    def on_help(self):
        return "Usage: grep <pattern>"
class Test(unittest.TestCase):
    """Unit tests for the grep plugin."""

    def setUp(self):
        self.plugin = Plugin()

    def test_words(self):
        # Only the line containing the pattern should be echoed.
        captured = io.StringIO()
        source = io.StringIO("this is one line\nthis is another\n")
        self.plugin.on_command(None, {"args": [None, "another"]},
                               source, captured, None)
        self.assertEqual(captured.getvalue().strip(), "this is another")

    def test_help(self):
        self.assertTrue(self.plugin.on_help())
| mit | Python | |
299f08729c1a1c2d1893809a7ae7e53d51054d90 | Add realtime.py | toomore/goristock | realtime.py | realtime.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class twsk:
    """Fetch a real-time TWSE quote CSV for stock `no` from mis.tse.com.tw.

    NOTE(review): on any download error a *new random* stock number is tried
    instead of retrying the same one — the returned quote may therefore not
    be for the originally requested stock; confirm this is intentional.
    """

    def __init__(self,no = None):
        self.stock = ''
        if no is None:
            no = random.randrange(1000,8000)
        ok = 1
        ok_times = 0
        # Retry (with a fresh random stock number on failure) until a CSV
        # downloads successfully.
        while ok:
            ok = 0
            try:
                # The r= query parameter is a random cache buster.
                page = urllib2.urlopen('http://mis.tse.com.tw/data/%s.csv?r=%s' % (no,random.randrange(1,10000)))
                ok = 0
            except:
                no = random.randrange(1000,8000)
                ok = 1
                ok_times += 1
                logging.info('%s: %s' % (ok_times,no))
        # Number of failed attempts before a quote was obtained.
        self.oktimes = ok_times
        reader = csv.reader(page)
        # The feed is effectively a single CSV row; keep the last one read.
        for i in reader:
            self.stock = i

    @property
    def sread(self):
        """Decode the raw CSV row into a dict of named quote fields."""
        # `re` here is a plain dict, not the regex module (poor name choice,
        # kept as-is).
        re = {'name': unicode(self.stock[-1], 'cp950'),
              'no': self.stock[0],
              'range': self.stock[1],
              'time': self.stock[2],
              'top': self.stock[3],
              'down': self.stock[4],
              'open': self.stock[5],
              'h': self.stock[6],
              'l': self.stock[7],
              'c': self.stock[8],
              'value': self.stock[9],
              'pvalue': self.stock[10],
              'top5buy': [
                  (self.stock[11], self.stock[12]),
                  (self.stock[13], self.stock[14]),
                  (self.stock[15], self.stock[16]),
                  (self.stock[17], self.stock[18]),
                  (self.stock[19], self.stock[20])
              ],
              'top5sell': [
                  (self.stock[21], self.stock[22]),
                  (self.stock[23], self.stock[24]),
                  (self.stock[25], self.stock[26]),
                  (self.stock[27], self.stock[28]),
                  (self.stock[29], self.stock[30])
              ]
             }
        # A minus sign in the change field means the price is down.
        if '-' in self.stock[1]:
            re['ranges'] = False
        else:
            re['ranges'] = True
        # Candlestick thumbnail rendered via the Google Chart API from the
        # high/close/open/low values.
        re['crosspic'] = "http://chart.apis.google.com/chart?chs=20x40&cht=lc&chd=t1:0,0,0|0,%s,0|0,%s,0|0,%s,0|0,%s,0&chds=%s,%s&chm=F,,1,1:4,20" % (re['h'],re['c'],re['open'],re['l'],re['l'],re['h'])
        re['top5buy'].sort()
        re['top5sell'].sort()
        return re
| mit | Python | |
cca86b7dc339719863fb5cf66388a18b4f40b9c1 | Create plugin.py | architecture-building-systems/CityEnergyAnalyst,architecture-building-systems/CityEnergyAnalyst,architecture-building-systems/CityEnergyAnalyst | cea/plugin.py | cea/plugin.py | """
A base class for creating CEA plugins. Subclass this class in your own namespace to become a CEA plugin.
"""
### NOTE / FIXME: have this class read in the scripts.yml and schemas.yml. plots need to be python classes.
class CeaPlugin(object):
    """
    Base class for CEA plugins.  Subclass it and override the properties
    below to expose scripts, plots and input/output file schemas; the CEA
    uses these lists to populate the GUI and other interfaces.
    """

    @property
    def scripts(self):
        """Scripts provided by this plugin — subclasses of :py:class:`cea.scripts.CeaScript`."""
        return []

    @property
    def plots(self):
        """Plots provided by this plugin — subclasses of :py:class:`cea.plots.base`."""
        return []

    @property
    def schemas(self):
        """Schemas provided by this plugin — subclasses of :py:class:`cea.schema.Schema`."""
        return []
5084957b070e19334d7256a727ff995989f0e14e | add tar raw loader | shmilee/gdpy3,shmilee/gdpy3,shmilee/gdpy3,shmilee/gdpy3 | src/loaders/tarraw.py | src/loaders/tarraw.py | # -*- coding: utf-8 -*-
# Copyright (c) 2017 shmilee
'''
Contains TarFile raw loader class.
'''
import os
import tarfile
from ..glogger import getGLogger
from .base import BaseRawLoader
__all__ = ['TarRawLoader']
log = getGLogger('L')
class TarRawLoader(BaseRawLoader):
    # https://docs.python.org/3/library/tarfile.html
    '''
    Dictionary-like access to the raw files stored inside a tar archive.
    '''
    __slots__ = []

    def _special_check_path(self):
        '''Return True when *self.path* is an existing tar archive.'''
        # isfile() is tested first so is_tarfile() never sees a missing path.
        if not (os.path.isfile(self.path) and tarfile.is_tarfile(self.path)):
            log.error("'%s' is not a tar archive file!" % self.path)
            return False
        return True

    def _special_open(self):
        '''Open the archive for reading.'''
        return tarfile.open(self.path, mode='r')

    def _special_close(self, tmpobj):
        '''Close a previously opened archive.'''
        tmpobj.close()

    def _special_getkeys(self, tmpobj):
        '''Sorted names of the regular-file members of the archive.'''
        regular_files = (n for n in tmpobj.getnames()
                         if tmpobj.getmember(n).isfile())
        return sorted(regular_files)

    def _special_getfile(self, tmpobj, key):
        '''File object for the member named *key*.'''
        return tmpobj.extractfile(key)
| mit | Python | |
3eccb0323590c876735fe8c664cd2e4866bd3089 | Add SKeys function | funkybob/kopytka,funkybob/kopytka,funkybob/kopytka | kopytka/transforms.py | kopytka/transforms.py | from django.db.models import CharField, Func
from django.contrib.postgres.fields import ArrayField
class SKeys(Func):
function = 'skeys'
arity = 1
output_field = ArrayField(CharField())
| mit | Python | |
d56d2b2db736b9160c49ea7694732b047a398f3d | Add version.py | xlhtc007/blaze,caseyclements/blaze,maxalbert/blaze,dwillmer/blaze,ContinuumIO/blaze,ChinaQuants/blaze,ChinaQuants/blaze,jdmcbr/blaze,nkhuyu/blaze,ContinuumIO/blaze,nkhuyu/blaze,jcrist/blaze,cpcloud/blaze,alexmojaki/blaze,jcrist/blaze,caseyclements/blaze,mrocklin/blaze,scls19fr/blaze,LiaoPan/blaze,cowlicks/blaze,jdmcbr/blaze,cowlicks/blaze,alexmojaki/blaze,scls19fr/blaze,cpcloud/blaze,LiaoPan/blaze,maxalbert/blaze,dwillmer/blaze,xlhtc007/blaze,mrocklin/blaze | conda.recipe/version.py | conda.recipe/version.py | def main():
inp = sys.stdin.read().strip().split('-')
try:
inp[1] = 'post%03d' % int(inp[1])
except IndexError:
pass
print('.'.join(inp))
return 0
if __name__ == '__main__':
import sys
sys.exit(main())
| bsd-3-clause | Python | |
18c93f4c70a2247bcce8a853c30038097cb9f7b2 | Add test for optional page admin registrations for Country and CountryGroup. | jamesfoley/cms,jamesfoley/cms,danielsamuels/cms,lewiscollard/cms,jamesfoley/cms,dan-gamble/cms,lewiscollard/cms,danielsamuels/cms,danielsamuels/cms,lewiscollard/cms,dan-gamble/cms,jamesfoley/cms,dan-gamble/cms | cms/apps/pages/tests/test_admin_destructive.py | cms/apps/pages/tests/test_admin_destructive.py | from django.conf import settings
from django.contrib import admin
from django.test import TestCase
from ..models import Country, CountryGroup, Page
import sys
class TestArticleAdminBase(TestCase):
    """Country/CountryGroup admin registration is optional: it must happen
    only when LocalisationMiddleware is enabled."""

    def test_article_admin(self):
        NEW_MIDDLEWARE_CLASSES = settings.MIDDLEWARE_CLASSES + (
            'cms.middleware.LocalisationMiddleware',
        )

        # Without the middleware, the models must not be registered.
        self.assertNotIn(Country, admin.site._registry)
        self.assertNotIn(CountryGroup, admin.site._registry)

        with self.settings(MIDDLEWARE_CLASSES=NEW_MIDDLEWARE_CLASSES):
            # Drop the cached admin module so its registration code runs
            # again on the re-import below; unregister Page first to avoid
            # an AlreadyRegistered error.
            module = sys.modules['cms.apps.pages.admin']
            del sys.modules['cms.apps.pages.admin']
            admin.site.unregister(Page)

            from ..admin import page_admin
            assert page_admin

            # With the middleware enabled the extra models are registered.
            self.assertIn(Country, admin.site._registry)
            self.assertIn(CountryGroup, admin.site._registry)

            # Restore the original module object for subsequent tests.
            sys.modules['cms.apps.pages.admin'] = module
| bsd-3-clause | Python | |
06e0c4663dd9dcc99778b80ad288a5b845bb9d2f | add default string length for tutorial method (mysql requires it) | zejn/cubes,zejn/cubes,she11c0de/cubes,cesarmarinhorj/cubes,jell0720/cubes,cesarmarinhorj/cubes,jell0720/cubes,zejn/cubes,noyeitan/cubes,ubreddy/cubes,ubreddy/cubes,cesarmarinhorj/cubes,noyeitan/cubes,ubreddy/cubes,jell0720/cubes,pombredanne/cubes,she11c0de/cubes,she11c0de/cubes,pombredanne/cubes,pombredanne/cubes,noyeitan/cubes | cubes/tutorial/sql.py | cubes/tutorial/sql.py | import sqlalchemy
import csv
def create_table_from_csv(connectable, file_name, table_name, fields, create_id = False, schema = None):
    """Create a table with name `table_name` from a CSV file `file_name` with columns corresponding
    to `fields`. The `fields` is a list of two string tuples: (name, type) where type might be:
    ``integer``, ``float`` or ``string``.

    If `create_id` is ``True`` then a column with name ``id`` is created and will contain generated
    sequential record id.

    This is just small utility function for sandbox, play-around and testing purposes. It is not
    recommended to be used for serious CSV-to-table loadings. For more advanced CSV loadings use another
    framework, such as Brewery (http://databrewery.org).
    """
    metadata = sqlalchemy.MetaData(bind = connectable)

    table = sqlalchemy.Table(table_name, metadata, autoload=False, schema=schema)
    if table.exists():
        table.drop(checkfirst=False)

    # String carries an explicit length (MySQL rejects VARCHAR without one).
    type_map = { "integer": sqlalchemy.Integer,
                 "float": sqlalchemy.Float,
                 "string": sqlalchemy.String(256) }

    if create_id:
        col = sqlalchemy.schema.Column('id', sqlalchemy.Integer, primary_key=True)
        table.append_column(col)

    field_names = []
    for (field_name, field_type) in fields:
        col = sqlalchemy.schema.Column(field_name, type_map[field_type.lower()])
        table.append_column(col)
        field_names.append(field_name)

    table.create()

    insert_command = table.insert()
    # Fixed: the file handle was never closed, and `reader.next()` is
    # Python-2-only — next(reader) works on 2.6+ and 3.x alike.
    with open(file_name) as csv_file:
        reader = csv.reader(csv_file)
        # Skip header
        next(reader)

        for row in reader:
            record = dict(zip(field_names, row))
            insert_command.execute(record)
| import sqlalchemy
import csv
def create_table_from_csv(connectable, file_name, table_name, fields, create_id = False, schema = None):
    """Create a table with name `table_name` from a CSV file `file_name` with columns corresponding
    to `fields`. The `fields` is a list of two string tuples: (name, type) where type might be:
    ``integer``, ``float`` or ``string``.

    If `create_id` is ``True`` then a column with name ``id`` is created and will contain generated
    sequential record id.

    This is just small utility function for sandbox, play-around and testing purposes. It is not
    recommended to be used for serious CSV-to-table loadings. For more advanced CSV loadings use another
    framework, such as Brewery (http://databrewery.org).
    """
    metadata = sqlalchemy.MetaData(bind = connectable)

    table = sqlalchemy.Table(table_name, metadata, autoload=False, schema=schema)
    if table.exists():
        table.drop(checkfirst=False)

    # Fixed: a bare sqlalchemy.String has no length, which MySQL rejects
    # when creating the table — give it an explicit default length.
    type_map = { "integer": sqlalchemy.Integer,
                 "float": sqlalchemy.Float,
                 "string": sqlalchemy.String(256) }

    if create_id:
        col = sqlalchemy.schema.Column('id', sqlalchemy.Integer, primary_key=True)
        table.append_column(col)

    field_names = []
    for (field_name, field_type) in fields:
        col = sqlalchemy.schema.Column(field_name, type_map[field_type.lower()])
        table.append_column(col)
        field_names.append(field_name)

    table.create()

    insert_command = table.insert()
    # Fixed: the file handle was never closed, and `reader.next()` is
    # Python-2-only — next(reader) works on 2.6+ and 3.x alike.
    with open(file_name) as csv_file:
        reader = csv.reader(csv_file)
        # Skip header
        next(reader)

        for row in reader:
            record = dict(zip(field_names, row))
            insert_command.execute(record)
4607b954ea9c2abb58e084d12179d42868fc12d0 | Create hash-digest-rename.py | ammongit/scripts,ammongit/scripts,ammongit/scripts,ammongit/scripts | hash-digest-rename.py | hash-digest-rename.py | #!/usr/bin/env python3
import hashlib
import os
import sys
def normalize_algorithm(name):
    """Map a user-supplied algorithm name to hashlib's spelling.

    Accepts caps and dashes, e.g. "SHA-256" -> "sha256".
    """
    return name.replace("-", "").lower()
def plural(number):
    """Return the plural suffix ("s") for any count other than one."""
    if number == 1:
        return ""
    return "s"
def hash_rename(algorithm, path, errors):
    """Rename `path` to `<hexdigest><ext>` in its own directory.

    Any failure (read error, digest error, destination collision, rename
    error) is appended to `errors` instead of being raised.
    """
    try:
        with open(path, "rb") as fh:
            contents = fh.read()

        digest = hashlib.new(algorithm, contents).hexdigest()
        directory = os.path.dirname(path)
        _, ext = os.path.splitext(path)
        new_path = os.path.join(directory, digest + ext)

        # Refuse to overwrite an existing file (e.g. a duplicate input).
        if os.path.exists(new_path):
            raise RuntimeError(
                f"Destination path '{new_path}' (from '{path}') already exists",
            )

        print(f"Renaming {path} -> {new_path}")
        os.rename(path, new_path)
    except Exception as exc:
        errors.append(exc)
if __name__ == "__main__":
    # Fixed: the check was `< 2`, which accepted an algorithm with no paths
    # and then silently hashed zero files; at least one path is required.
    if len(sys.argv) < 3:
        print(f"Usage: {sys.argv[0]} <algorithm> <path...>")
        sys.exit(1)

    algorithm = normalize_algorithm(sys.argv[1])
    paths = sys.argv[2:]

    # Fixed: the f-string read "files{plural(...)}", which printed "filess"
    # for multiple paths and "files" for one.
    print(f"Hashing {len(paths)} file{plural(len(paths))} with {algorithm}")

    errors = []
    for path in paths:
        hash_rename(algorithm, path, errors)

    if errors:
        for error in errors:
            print(error)

        sys.exit(1)
06c9a86ab4fd12c319d35d64a135740077e1d93a | modify pkg_info | volterra-luo/django-oss-storage | storages/pkg_info.py | storages/pkg_info.py | package = "django-oss-storage"
version = "0.3.5"
url = "http://oss.aliyun.com"
license = "The MIT License (MIT)"
short_description = 'Django Aliyun OSS storage backend.' | mit | Python | |
cee4d852003a1ec8f19714ee1503888068cf9749 | Create svm.py | lingcheng99/kagge-digit-recognition | svm.py | svm.py | import numpy as np
import pandas as pd
from sklearn import metrics,cross_validation
from sklearn.cross_validation import train_test_split
from sklearn import svm
#Read training data and split into train and test data
data=pd.read_csv('train.csv')
data1=data.values
X=data1[:,1:]
y=data1[:,:1]
y=np.ravel(y)
Xtrain,Xtest,ytrain,ytest=train_test_split(X,y,test_size=0.25)
#Run linear kernel first
svmL1=svm.SVC(kernel='linear',C=0.01)
svmL1.fit(Xtrain,ytrain)
predL1=svmL1.predict(Xtest)
print("Classification report for classifier %s:\n%s\n"
% (svmL1, metrics.classification_report(ytest,predL1)))
#Run gaussian kernel but the result is poor and running time is long
svmR1=svm.SVC(kernel='rbf',gamma=0.001, C=10000)
svmR1.fit(Xtrain,ytrain)
predR1=svmR1.predict(Xtest)
print("Classification report for classifier %s:\n%s\n"
% (svmR1, metrics.classification_report(ytest,predR1)))
#Run polynomial kernel
svmP1=svm.SVC(kernel='poly',degree=3)
svmP1.fit(Xtrain,ytrain)
predP1=svmP1.predict(Xtest)
print("Classification report for classifier %s:\n%s\n"
% (svmP1, metrics.classification_report(ytest,predP1)))
#Run kfold cross-validation to check cost parameters for polynomial kernel
precision=[]
cprecision=[]
Crange=np.logspace(-6,2,9)
for crange in Crange:
kfold1=cross_validation.KFold(42000,n_folds=4)
precision=[]
for train,test in kfold1:
Xtrain,Xtest,ytrain,ytest=X[train],X[test],y[train],y[test]
svm1=svm.SVC(kernel='poly',degree=3,C=crange)
svm1.fit(Xtrain,ytrain)
ypred=svm1.predict(Xtest)
precision.append(metrics.precision_score(ytest,ypred))
cprecision.append(np.mean(precision))
#Use polynomial degree=3 for final model and submission
svm1=svm.SVC(kernel='poly',degree=3)
svm1.fit(X,y)
test=pd.read_csv('test.csv')
pred=svm1.predict(test)
pred = pd.DataFrame(pred)
pred['ImageId'] = pred.index + 1
pred = pred[['ImageId', 0]]
pred.columns = ['ImageId', 'Label']
pred.to_csv('pred.csv', index=False)
| mit | Python | |
ade401c1f988542344f62b73498d5a7a31554698 | update setup.py | dxq-git/nghttp2,wzyboy/nghttp2,serioussam/nghttp2,minhoryang/nghttp2,lukw00/nghttp2,kelbyludwig/nghttp2,syohex/nghttp2,tatsuhiro-t/spdylay,bxshi/nghttp2,serioussam/nghttp2,thinred/nghttp2,mixianghang/spdylay,dxq-git/nghttp2,bxshi/nghttp2,syohex/nghttp2,thinred/nghttp2,bxshi/nghttp2,icing/nghttp2,serioussam/nghttp2,ohyeah521/nghttp2,shines77/nghttp2,shines77/nghttp2,icing/nghttp2,tatsuhiro-t/spdylay,kelbyludwig/nghttp2,icing/nghttp2,icing/nghttp2,thinred/nghttp2,wzyboy/nghttp2,syohex/nghttp2,icing/nghttp2,shines77/nghttp2,shines77/nghttp2,lukw00/nghttp2,serioussam/nghttp2,thinred/nghttp2,syohex/nghttp2,dxq-git/nghttp2,minhoryang/nghttp2,wzyboy/nghttp2,icing/nghttp2,minhoryang/nghttp2,mixianghang/spdylay,ohyeah521/nghttp2,mixianghang/nghttp2,yuki-kodama/nghttp2,serioussam/nghttp2,mixianghang/nghttp2,wzyboy/nghttp2,dxq-git/nghttp2,wzyboy/nghttp2,syohex/nghttp2,lukw00/nghttp2,bxshi/nghttp2,lukw00/nghttp2,ohyeah521/nghttp2,wzyboy/nghttp2,ohyeah521/nghttp2,ohyeah521/nghttp2,dxq-git/nghttp2,ahnan4arch/spdylay,tatsuhiro-t/spdylay,kelbyludwig/nghttp2,mixianghang/nghttp2,yuki-kodama/nghttp2,wzyboy/nghttp2,mixianghang/spdylay,tatsuhiro-t/spdylay,lukw00/nghttp2,bxshi/nghttp2,thinred/nghttp2,ahnan4arch/spdylay,dxq-git/nghttp2,mixianghang/nghttp2,shines77/nghttp2,kelbyludwig/nghttp2,yuki-kodama/nghttp2,syohex/nghttp2,serioussam/nghttp2,yuki-kodama/nghttp2,lukw00/nghttp2,serioussam/nghttp2,tatsuhiro-t/spdylay,minhoryang/nghttp2,mixianghang/spdylay,kelbyludwig/nghttp2,ahnan4arch/spdylay,thinred/nghttp2,yuki-kodama/nghttp2,mixianghang/nghttp2,mixianghang/nghttp2,ahnan4arch/spdylay,mixianghang/spdylay,thinred/nghttp2,ahnan4arch/spdylay,shines77/nghttp2,kelbyludwig/nghttp2,yuki-kodama/nghttp2,ohyeah521/nghttp2,minhoryang/nghttp2,minhoryang/nghttp2,bxshi/nghttp2,icing/nghttp2,lukw00/nghttp2 | python/setup.py | python/setup.py | from distutils.core import setup
from distutils.extension import Extension
setup(
name = 'python-spdylay',
# Also update __version__ in spdylay.pyx
version = '0.1.0',
description = 'Python SPDY library on top of Spdylay C library',
author = 'Tatsuhiro Tsujikawa',
author_email = 'tatsuhiro.t@gmail.com',
url = 'http://spdylay.sourceforge.net/',
keywords = [],
ext_modules = [Extension("spdylay",
["spdylay.c"],
libraries=['spdylay'])],
long_description="""\
Python-spdylay is a Python SPDY library on top of Spdylay C
library. It supports SPDY/2 and SPDY/3 protocol.
It does not perform any I/O operations. When the library needs them,
it calls the callback functions provided by the application. It also
does not include any event polling mechanism, so the application can
freely choose the way of handling events.
It provides almost all API Spdylay provides with Pythonic fashion.
The core library API works with Python 2 and 3. But
``ThreadedSPDYServer`` requires Python 3.3 because it uses TLS NPN
extension.
Installation
============
First install Spdylay library. You can grab a source distribution from
`sf.net download page
<http://sourceforge.net/projects/spdylay/files/stable/>`_
or `clone git repository <https://github.com/tatsuhiro-t/spdylay>`_.
See `Spdylay documentation
<http://spdylay.sourceforge.net/package_README.html>`_ for the
required packages and how to build Spdylay from git repository.
After Spdylay is installed, run ``build_ext`` command to build
extension module::
$ python setup.py build_ext
If you installed Spdylay library in other than standard location, use
``--include-dirs`` and ``--library-dirs`` to specify header file and
library locations respectively.
Documentation
=============
See `python-spdylay documentation
<http://spdylay.sourceforge.net/python.html>`_.
Samples
=======
Here is a simple SPDY server::
#!/usr/bin/env python
# The example SPDY server. Python 3.3 or later is required because TLS
# NPN is used in spdylay.ThreadedSPDYServer. Put private key and
# certificate file in the current working directory.
import spdylay
# private key file
KEY_FILE='server.key'
# certificate file
CERT_FILE='server.crt'
class MySPDYRequestHandler(spdylay.BaseSPDYRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('content-type', 'text/html; charset=UTF-8')
content = '''\
<html>
<head><title>SPDY FTW</title></head>
<body>
<h1>SPDY FTW</h1>
<p>The age of HTTP/1.1 is over. The time of SPDY has come.</p>
</body>
</html>'''.encode('UTF-8')
self.wfile.write(content)
if __name__ == "__main__":
HOST, PORT = "localhost", 3000
server = spdylay.ThreadedSPDYServer((HOST, PORT),
MySPDYRequestHandler,
cert_file=CERT_FILE,
key_file=KEY_FILE)
server.start()
""",
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Cython',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| from distutils.core import setup
from distutils.extension import Extension
setup(
    name = 'python-spdylay',
    # Also update __version__ in spdylay.pyx
    version = '0.1.0',
    description = 'SPDY library',
    author = 'Tatsuhiro Tsujikawa',
    author_email = 'tatsuhiro.t@gmail.com',
    url = 'http://spdylay.sourceforge.net/',
    keywords = [],
    # Cython output spdylay.c is compiled and linked against the Spdylay C
    # library, which must be installed beforehand.
    ext_modules = [Extension("spdylay",
                             ["spdylay.c"],
                             libraries=['spdylay'])],
    # Trove classifiers shown on PyPI.
    classifiers = [
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Software Development :: Libraries :: Python Modules'
        ]
    )
| mit | Python |
d5364fe87c4491bc564ba1406604ee423788a8a8 | Add lc01161_maximum_level_sum_of_a_binary_tree.py | bowen0701/algorithms_data_structures | lc01161_maximum_level_sum_of_a_binary_tree.py | lc01161_maximum_level_sum_of_a_binary_tree.py | """Leetcode 1161. Maximum Level Sum of a Binary Tree
Medium
URL: https://leetcode.com/problems/maximum-level-sum-of-a-binary-tree/
Given the root of a binary tree, the level of its root is 1,
the level of its children is 2, and so on.
Return the smallest level X such that the sum of all the values of nodes
at level X is maximal.
Example 1:
Input: [1,7,0,7,-8,null,null]
1
/ \
7 0
/ \
7 -8
Output: 2
Explanation:
Level 1 sum = 1.
Level 2 sum = 7 + 0 = 7.
Level 3 sum = 7 + -8 = -1.
So we return the level with the maximum sum which is level 2.
Note:
- The number of nodes in the given tree is between 1 and 10^4.
- -10^5 <= node.val <= 10^5
"""
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, val):
self.val = val
self.left = None
self.right = None
class Solution(object):
def maxLevelSum(self, root):
"""
:type root: TreeNode
:rtype: int
"""
pass
def main():
pass
if __name__ == '__main__':
main()
| bsd-2-clause | Python | |
e6a972e4a5f0be7c78439f8437c693cfb3381e54 | create class Level - basic class methods | ana-balica/meow_letters_py | meow_letters/level.py | meow_letters/level.py | class Level(object):
"""Represents user level
"""
def __init__(self, level=1):
"""Level class initializer
:param level: int positive number that represents level
"""
self.level = level
def __repr__(self):
return "<Level '{0}' at {1}>".format(self.level, hex(id(self)))
| mit | Python | |
8053701daf9abdb35c870b1b70940bb60d60328c | Create annotate_gwas_results_from_vcf.py | christopher-gillies/VCFScripts,christopher-gillies/VCFScripts | annotate_gwas_results_from_vcf.py | annotate_gwas_results_from_vcf.py | #!/usr/bin/python
import argparse
import re
import pysam
import os
import string
from pysam import VariantFile
import itertools
"""
annotate_gwas_results_from_vcf.py This script will take a vcf file and gwas results and add additional columns from a
SnpEff annotated VCF
"""
| apache-2.0 | Python | |
ac720cc02d35c3baa8e1c09c2639126ae7173f2b | Add NLDN file reader | deeplycloudy/lmatools | NLDN.py | NLDN.py | from datetime import datetime
import numpy as np
from numpy.lib.recfunctions import drop_fields, append_fields
class NLDNdataFile(object):
stroke_DC3 = {'columns':[ ('date','S10'), ('time','S20'),
('lat','f4'), ('lon','f4'),
('peakCurrent','f4'), ('ellipse','f4'),
],
'date_dtype':[('year','i2'),('month','i1'),('day','i1')],
'time_dtype':[('hour','i1'),('minute','i1'),('second','float64')]
}
def __init__(self, filename, date_sep='-', time_sep=':', format='stroke_DC3'):
self.format=format
dtype_specs = getattr(self, format)
nldn_initial = np.genfromtxt(filename, dtype=dtype_specs['columns'])
date_part = np.genfromtxt(nldn_initial['date'],
delimiter=date_sep, dtype=dtype_specs['date_dtype'])
time_part = np.genfromtxt(nldn_initial['time'],
delimiter=time_sep, dtype=dtype_specs['time_dtype'])
dates = [datetime(a['year'], a['month'], a['day'], b['hour'], b['minute'])
for a, b in zip(date_part, time_part)]
min_date = min(dates)
t = np.fromiter( ((d-min_date).total_seconds() for d in dates), dtype='float64')
t += time_part['second']
self.basedate = min_date
data = drop_fields(nldn_initial, ('date', 'time'))
data = append_fields(data, 'time', t)
self.data = data
| bsd-2-clause | Python | |
5c834d19a113301e3bdc07dc03fb1486825825c5 | Add an initial moksha.api.hub API, with a dumb send_message method. | ralphbean/moksha,lmacken/moksha,mokshaproject/moksha,ralphbean/moksha,pombredanne/moksha,lmacken/moksha,ralphbean/moksha,pombredanne/moksha,mokshaproject/moksha,pombredanne/moksha,pombredanne/moksha,lmacken/moksha,mokshaproject/moksha,mokshaproject/moksha | moksha/api/hub/hub.py | moksha/api/hub/hub.py | from orbited import json
from moksha.hub import MokshaHub
def send_message(topic, message):
""" Send a `message` to a specific `topic` """
# Right now we're instantiating a new connection & channel each message..
# we could potentially do this in the DataStreamer...
# each could have their own Connection...
# or, the MokshaHub could instantiate all DataStreamers with their
# own channel?
hub = MokshaHub()
# Automatically encode non-strings to JSON
if not isinstance(message, basestring):
message = json.encode(message)
hub.send_message(message, routing_key=topic)
| apache-2.0 | Python | |
d34ad4b0b969dd6c10fc7c1646f934016ba8ddd7 | Add a script to list undocumented files and directories | makdharma/grpc,yugui/grpc,simonkuang/grpc,dklempner/grpc,matt-kwong/grpc,apolcyn/grpc,kumaralokgithub/grpc,geffzhang/grpc,ctiller/grpc,thinkerou/grpc,chrisdunelm/grpc,ejona86/grpc,donnadionne/grpc,yugui/grpc,nicolasnoble/grpc,dgquintas/grpc,adelez/grpc,PeterFaiman/ruby-grpc-minimal,kpayson64/grpc,grani/grpc,makdharma/grpc,greasypizza/grpc,thunderboltsid/grpc,muxi/grpc,simonkuang/grpc,simonkuang/grpc,kpayson64/grpc,vsco/grpc,geffzhang/grpc,vsco/grpc,infinit/grpc,philcleveland/grpc,daniel-j-born/grpc,jcanizales/grpc,PeterFaiman/ruby-grpc-minimal,yang-g/grpc,Crevil/grpc,carl-mastrangelo/grpc,thinkerou/grpc,jtattermusch/grpc,donnadionne/grpc,muxi/grpc,msmania/grpc,donnadionne/grpc,infinit/grpc,ctiller/grpc,greasypizza/grpc,LuminateWireless/grpc,donnadionne/grpc,kriswuollett/grpc,malexzx/grpc,Vizerai/grpc,dgquintas/grpc,wcevans/grpc,kpayson64/grpc,7anner/grpc,hstefan/grpc,muxi/grpc,stanley-cheung/grpc,royalharsh/grpc,thinkerou/grpc,jtattermusch/grpc,infinit/grpc,grani/grpc,kumaralokgithub/grpc,sreecha/grpc,firebase/grpc,wcevans/grpc,pszemus/grpc,deepaklukose/grpc,daniel-j-born/grpc,Vizerai/grpc,mehrdada/grpc,philcleveland/grpc,mehrdada/grpc,jtattermusch/grpc,soltanmm-google/grpc,zhimingxie/grpc,apolcyn/grpc,y-zeng/grpc,vjpai/grpc,thinkerou/grpc,makdharma/grpc,vjpai/grpc,vjpai/grpc,carl-mastrangelo/grpc,wcevans/grpc,thunderboltsid/grpc,y-zeng/grpc,andrewpollock/grpc,thinkerou/grpc,zhimingxie/grpc,y-zeng/grpc,ctiller/grpc,msmania/grpc,fuchsia-mirror/third_party-grpc,baylabs/grpc,kumaralokgithub/grpc,muxi/grpc,ncteisen/grpc,philcleveland/grpc,kskalski/grpc,ipylypiv/grpc,pszemus/grpc,firebase/grpc,nicolasnoble/grpc,jboeuf/grpc,dgquintas/grpc,fuchsia-mirror/third_party-grpc,malexzx/grpc,makdharma/grpc,jcanizales/grpc,ctiller/grpc,stanley-cheung/grpc,thinkerou/grpc,andrewpollock/grpc,matt-kwong/grpc,kriswuollett/grpc,nicolasnoble/grpc,kpayson64/grpc,muxi/grpc,yongni/grpc,peruma
algoog/grpc,vsco/grpc,greasypizza/grpc,ctiller/grpc,greasypizza/grpc,ppietrasa/grpc,MakMukhi/grpc,wcevans/grpc,PeterFaiman/ruby-grpc-minimal,kskalski/grpc,greasypizza/grpc,ncteisen/grpc,murgatroid99/grpc,pszemus/grpc,hstefan/grpc,jboeuf/grpc,mehrdada/grpc,a11r/grpc,philcleveland/grpc,Vizerai/grpc,vjpai/grpc,fuchsia-mirror/third_party-grpc,dgquintas/grpc,muxi/grpc,LuminateWireless/grpc,ejona86/grpc,ppietrasa/grpc,Crevil/grpc,kpayson64/grpc,rjshade/grpc,thinkerou/grpc,greasypizza/grpc,ctiller/grpc,simonkuang/grpc,Crevil/grpc,adelez/grpc,PeterFaiman/ruby-grpc-minimal,deepaklukose/grpc,ejona86/grpc,MakMukhi/grpc,perumaalgoog/grpc,ejona86/grpc,hstefan/grpc,stanley-cheung/grpc,royalharsh/grpc,ejona86/grpc,kpayson64/grpc,yongni/grpc,dgquintas/grpc,soltanmm/grpc,yang-g/grpc,kskalski/grpc,kriswuollett/grpc,perumaalgoog/grpc,dklempner/grpc,msmania/grpc,firebase/grpc,sreecha/grpc,pszemus/grpc,pmarks-net/grpc,matt-kwong/grpc,pszemus/grpc,malexzx/grpc,chrisdunelm/grpc,rjshade/grpc,vjpai/grpc,rjshade/grpc,jtattermusch/grpc,stanley-cheung/grpc,quizlet/grpc,PeterFaiman/ruby-grpc-minimal,ipylypiv/grpc,thinkerou/grpc,nicolasnoble/grpc,thunderboltsid/grpc,Vizerai/grpc,kumaralokgithub/grpc,sreecha/grpc,stanley-cheung/grpc,adelez/grpc,LuminateWireless/grpc,kumaralokgithub/grpc,matt-kwong/grpc,carl-mastrangelo/grpc,fuchsia-mirror/third_party-grpc,y-zeng/grpc,soltanmm/grpc,simonkuang/grpc,yongni/grpc,dgquintas/grpc,sreecha/grpc,firebase/grpc,msmania/grpc,ppietrasa/grpc,thinkerou/grpc,wcevans/grpc,Vizerai/grpc,thunderboltsid/grpc,MakMukhi/grpc,Crevil/grpc,nicolasnoble/grpc,fuchsia-mirror/third_party-grpc,PeterFaiman/ruby-grpc-minimal,pmarks-net/grpc,quizlet/grpc,jboeuf/grpc,sreecha/grpc,murgatroid99/grpc,mehrdada/grpc,a11r/grpc,deepaklukose/grpc,murgatroid99/grpc,jtattermusch/grpc,7anner/grpc,Crevil/grpc,ppietrasa/grpc,grpc/grpc,ncteisen/grpc,7anner/grpc,simonkuang/grpc,baylabs/grpc,jboeuf/grpc,ipylypiv/grpc,ipylypiv/grpc,firebase/grpc,perumaalgoog/grpc,LuminateWireless/grpc,sreecha/grpc,q
uizlet/grpc,geffzhang/grpc,firebase/grpc,pszemus/grpc,apolcyn/grpc,pmarks-net/grpc,firebase/grpc,hstefan/grpc,LuminateWireless/grpc,grpc/grpc,muxi/grpc,soltanmm/grpc,carl-mastrangelo/grpc,mehrdada/grpc,ipylypiv/grpc,andrewpollock/grpc,ncteisen/grpc,thunderboltsid/grpc,geffzhang/grpc,donnadionne/grpc,firebase/grpc,daniel-j-born/grpc,chrisdunelm/grpc,vjpai/grpc,adelez/grpc,a11r/grpc,perumaalgoog/grpc,malexzx/grpc,kriswuollett/grpc,yongni/grpc,carl-mastrangelo/grpc,vsco/grpc,geffzhang/grpc,grani/grpc,kskalski/grpc,rjshade/grpc,chrisdunelm/grpc,vjpai/grpc,hstefan/grpc,quizlet/grpc,zhimingxie/grpc,stanley-cheung/grpc,yugui/grpc,Crevil/grpc,daniel-j-born/grpc,LuminateWireless/grpc,pszemus/grpc,sreecha/grpc,chrisdunelm/grpc,msmania/grpc,grani/grpc,muxi/grpc,jcanizales/grpc,geffzhang/grpc,Vizerai/grpc,ctiller/grpc,ppietrasa/grpc,Crevil/grpc,y-zeng/grpc,vjpai/grpc,firebase/grpc,soltanmm/grpc,dklempner/grpc,chrisdunelm/grpc,vjpai/grpc,jcanizales/grpc,royalharsh/grpc,y-zeng/grpc,apolcyn/grpc,vsco/grpc,infinit/grpc,murgatroid99/grpc,wcevans/grpc,firebase/grpc,wcevans/grpc,Vizerai/grpc,ncteisen/grpc,kskalski/grpc,vjpai/grpc,baylabs/grpc,infinit/grpc,kpayson64/grpc,philcleveland/grpc,kpayson64/grpc,stanley-cheung/grpc,soltanmm-google/grpc,ncteisen/grpc,yang-g/grpc,msmania/grpc,hstefan/grpc,adelez/grpc,mehrdada/grpc,perumaalgoog/grpc,apolcyn/grpc,dklempner/grpc,donnadionne/grpc,andrewpollock/grpc,adelez/grpc,thinkerou/grpc,thunderboltsid/grpc,dgquintas/grpc,y-zeng/grpc,nicolasnoble/grpc,andrewpollock/grpc,ejona86/grpc,quizlet/grpc,greasypizza/grpc,deepaklukose/grpc,apolcyn/grpc,kpayson64/grpc,grani/grpc,apolcyn/grpc,Crevil/grpc,donnadionne/grpc,muxi/grpc,hstefan/grpc,apolcyn/grpc,jcanizales/grpc,ejona86/grpc,jtattermusch/grpc,baylabs/grpc,grpc/grpc,jtattermusch/grpc,infinit/grpc,donnadionne/grpc,muxi/grpc,sreecha/grpc,kumaralokgithub/grpc,wcevans/grpc,jboeuf/grpc,mehrdada/grpc,7anner/grpc,y-zeng/grpc,nicolasnoble/grpc,muxi/grpc,grpc/grpc,zhimingxie/grpc,andrewpollock/grpc,baylabs/
grpc,andrewpollock/grpc,murgatroid99/grpc,jboeuf/grpc,philcleveland/grpc,pszemus/grpc,ejona86/grpc,Vizerai/grpc,quizlet/grpc,jcanizales/grpc,mehrdada/grpc,perumaalgoog/grpc,kpayson64/grpc,murgatroid99/grpc,yang-g/grpc,vjpai/grpc,ctiller/grpc,royalharsh/grpc,royalharsh/grpc,malexzx/grpc,MakMukhi/grpc,ppietrasa/grpc,daniel-j-born/grpc,ncteisen/grpc,malexzx/grpc,mehrdada/grpc,stanley-cheung/grpc,hstefan/grpc,PeterFaiman/ruby-grpc-minimal,firebase/grpc,geffzhang/grpc,ctiller/grpc,rjshade/grpc,grpc/grpc,adelez/grpc,makdharma/grpc,ppietrasa/grpc,yugui/grpc,matt-kwong/grpc,donnadionne/grpc,jtattermusch/grpc,ipylypiv/grpc,dklempner/grpc,hstefan/grpc,pmarks-net/grpc,perumaalgoog/grpc,ipylypiv/grpc,stanley-cheung/grpc,dklempner/grpc,kskalski/grpc,mehrdada/grpc,zhimingxie/grpc,grani/grpc,daniel-j-born/grpc,deepaklukose/grpc,philcleveland/grpc,apolcyn/grpc,yugui/grpc,soltanmm-google/grpc,yongni/grpc,simonkuang/grpc,kskalski/grpc,fuchsia-mirror/third_party-grpc,grani/grpc,soltanmm/grpc,a11r/grpc,matt-kwong/grpc,grpc/grpc,ejona86/grpc,pmarks-net/grpc,thunderboltsid/grpc,a11r/grpc,ncteisen/grpc,philcleveland/grpc,baylabs/grpc,vsco/grpc,daniel-j-born/grpc,jboeuf/grpc,deepaklukose/grpc,kriswuollett/grpc,thinkerou/grpc,dgquintas/grpc,dgquintas/grpc,yongni/grpc,deepaklukose/grpc,malexzx/grpc,sreecha/grpc,yang-g/grpc,ppietrasa/grpc,7anner/grpc,carl-mastrangelo/grpc,yugui/grpc,soltanmm/grpc,zhimingxie/grpc,sreecha/grpc,zhimingxie/grpc,soltanmm/grpc,pszemus/grpc,soltanmm/grpc,jtattermusch/grpc,thunderboltsid/grpc,pmarks-net/grpc,Crevil/grpc,fuchsia-mirror/third_party-grpc,carl-mastrangelo/grpc,MakMukhi/grpc,makdharma/grpc,chrisdunelm/grpc,ejona86/grpc,jtattermusch/grpc,yugui/grpc,ncteisen/grpc,geffzhang/grpc,makdharma/grpc,nicolasnoble/grpc,jtattermusch/grpc,zhimingxie/grpc,infinit/grpc,ncteisen/grpc,dklempner/grpc,pmarks-net/grpc,msmania/grpc,carl-mastrangelo/grpc,soltanmm-google/grpc,Vizerai/grpc,pszemus/grpc,stanley-cheung/grpc,daniel-j-born/grpc,donnadionne/grpc,MakMukhi/grpc,royalha
rsh/grpc,ipylypiv/grpc,carl-mastrangelo/grpc,kumaralokgithub/grpc,royalharsh/grpc,soltanmm/grpc,thinkerou/grpc,grani/grpc,murgatroid99/grpc,chrisdunelm/grpc,perumaalgoog/grpc,jcanizales/grpc,sreecha/grpc,rjshade/grpc,7anner/grpc,simonkuang/grpc,baylabs/grpc,sreecha/grpc,vsco/grpc,MakMukhi/grpc,LuminateWireless/grpc,carl-mastrangelo/grpc,a11r/grpc,nicolasnoble/grpc,ppietrasa/grpc,a11r/grpc,PeterFaiman/ruby-grpc-minimal,baylabs/grpc,7anner/grpc,fuchsia-mirror/third_party-grpc,yang-g/grpc,ncteisen/grpc,a11r/grpc,quizlet/grpc,kriswuollett/grpc,yang-g/grpc,ncteisen/grpc,yang-g/grpc,LuminateWireless/grpc,royalharsh/grpc,yang-g/grpc,kriswuollett/grpc,quizlet/grpc,simonkuang/grpc,kpayson64/grpc,kriswuollett/grpc,kriswuollett/grpc,dgquintas/grpc,vsco/grpc,quizlet/grpc,Vizerai/grpc,soltanmm-google/grpc,murgatroid99/grpc,soltanmm-google/grpc,infinit/grpc,a11r/grpc,jtattermusch/grpc,donnadionne/grpc,matt-kwong/grpc,makdharma/grpc,jboeuf/grpc,zhimingxie/grpc,stanley-cheung/grpc,ctiller/grpc,geffzhang/grpc,nicolasnoble/grpc,nicolasnoble/grpc,rjshade/grpc,jcanizales/grpc,carl-mastrangelo/grpc,grpc/grpc,mehrdada/grpc,jboeuf/grpc,pmarks-net/grpc,Vizerai/grpc,yongni/grpc,fuchsia-mirror/third_party-grpc,greasypizza/grpc,grpc/grpc,jboeuf/grpc,7anner/grpc,PeterFaiman/ruby-grpc-minimal,grpc/grpc,rjshade/grpc,malexzx/grpc,PeterFaiman/ruby-grpc-minimal,mehrdada/grpc,soltanmm-google/grpc,greasypizza/grpc,andrewpollock/grpc,firebase/grpc,yongni/grpc,fuchsia-mirror/third_party-grpc,deepaklukose/grpc,ejona86/grpc,jcanizales/grpc,thunderboltsid/grpc,philcleveland/grpc,ejona86/grpc,chrisdunelm/grpc,jboeuf/grpc,wcevans/grpc,kumaralokgithub/grpc,yongni/grpc,andrewpollock/grpc,dgquintas/grpc,nicolasnoble/grpc,MakMukhi/grpc,matt-kwong/grpc,soltanmm-google/grpc,7anner/grpc,kskalski/grpc,matt-kwong/grpc,adelez/grpc,carl-mastrangelo/grpc,msmania/grpc,pszemus/grpc,chrisdunelm/grpc,deepaklukose/grpc,pszemus/grpc,kumaralokgithub/grpc,ctiller/grpc,ctiller/grpc,vjpai/grpc,msmania/grpc,stanley-cheung/grpc,gr
pc/grpc,infinit/grpc,y-zeng/grpc,murgatroid99/grpc,baylabs/grpc,jboeuf/grpc,malexzx/grpc,rjshade/grpc,grani/grpc,donnadionne/grpc,daniel-j-born/grpc,yugui/grpc,royalharsh/grpc,adelez/grpc,soltanmm-google/grpc,murgatroid99/grpc,grpc/grpc,LuminateWireless/grpc,pmarks-net/grpc,makdharma/grpc,muxi/grpc,chrisdunelm/grpc,yugui/grpc,grpc/grpc,vsco/grpc,dklempner/grpc,ipylypiv/grpc,dklempner/grpc,kskalski/grpc,MakMukhi/grpc | tools/distrib/c-ish/check_documentation.py | tools/distrib/c-ish/check_documentation.py | #!/usr/bin/env python2.7
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# check for directory level 'README.md' files
# check that all implementation and interface files have a \file doxygen comment
import os
import sys
# where do we run
_TARGET_DIRS = [
'include/grpc',
'include/grpc++',
'src/core',
'src/cpp',
'test/core',
'test/cpp'
]
# which file extensions do we care about
_INTERESTING_EXTENSIONS = [
'.c',
'.h',
'.cc'
]
# find our home
_ROOT = os.path.abspath(
os.path.join(os.path.dirname(sys.argv[0]), '../../..'))
os.chdir(_ROOT)
errors = 0
# walk directories, find things
for target_dir in _TARGET_DIRS:
for root, dirs, filenames in os.walk(target_dir):
if 'README.md' not in filenames:
print '%s: missing README.md' % root
errors += 1
for filename in filenames:
if os.path.splitext(filename)[1] not in _INTERESTING_EXTENSIONS:
continue
path = os.path.join(root, filename)
with open(path) as f:
contents = f.read()
if '\\file' not in contents:
print '%s: no \\file comment' % path
errors += 1
assert errors == 0, 'error count = %d' % errors
| apache-2.0 | Python | |
fd4074160a943fb8841780211395a58ff5719dc1 | Add `settings.py` | avinassh/kekday,avinassh/kekday | settings.py | settings.py | import os
user_agent = 'when is my cakeday by /u/avinassh'
scopes = ['identity']
app_key = os.environ['APP_KEY']
app_secret = os.environ['APP_SECRET']
refresh_token = os.environ['REFRESH_TOKEN']
access_token = os.environ['ACCESS_TOKEN']
| mit | Python | |
d3ce90054a85c8711b95f6cf1fb975d78b8a2101 | add first try to understand stew api | mencattini/ideal-pancake,mencattini/ideal-pancake | core.py | core.py | from stew.types.nat import Nat
print(Nat(10))
| apache-2.0 | Python | |
b363b20440e564b2909736c64cb543cd632b4ae4 | Print daily form submission counts | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | custom/covid/management/commands/fetch_form_case_counts.py | custom/covid/management/commands/fetch_form_case_counts.py | import itertools
from datetime import date, datetime, timedelta
from django.core.management.base import BaseCommand
from corehq.apps.enterprise.models import EnterprisePermissions
from corehq.apps.es import CaseES, FormES
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'domains', nargs="*",
help='Domains to check, will include enterprise-controlled child domains.'
)
parser.add_argument('--num-days', type=int, default=30, help='Number of days (UTC) to inspect')
def handle(self, domains, **options):
filename = "form_case_counts_{}".format(datetime.utcnow().strftime("%Y-%m-%d_%H.%M.%S"))
for row in self.get_rows(domains, options['num_days']):
if row['forms_submitted']:
print(row)
def get_rows(self, domains, num_days):
end = date.today()
start = end - timedelta(days=num_days)
for domain in _expand_domains(domains):
submissions_counts = _get_submissions_counts(domain, start, end)
day = start
while day <= end:
yield {
'domain': domain,
'date': day.isoformat(),
'forms_submitted': submissions_counts.get(day, 0),
}
day += timedelta(days=1)
def _expand_domains(domains):
return sorted(set(itertools.chain(
domains,
*(EnterprisePermissions.get_domains(domain) for domain in domains)
)))
def _get_datetime_range(num_days):
now = datetime.utcnow()
end = datetime(now.year, now.month, now.day) # 00:00:00 this morning UTC
start = end - timedelta(days=num_days)
return start, end
def _get_submissions_counts(domain, start, end):
res = (FormES()
.domain(domain)
.submitted(gte=start, lte=end)
.submitted_histogram()
.run().aggregations.date_histogram)
return {
date.fromisoformat(bucket['key_as_string']): bucket['doc_count']
for bucket in res.normalized_buckets
}
| bsd-3-clause | Python | |
447720e9f18447476bc3d4f8b16a9eed018b260e | Add lc074_search_a_2d_matrix.py | bowen0701/algorithms_data_structures | lc074_search_a_2d_matrix.py | lc074_search_a_2d_matrix.py | """Leetcode 74. Search a 2D Matrix.
Medium
URL: https://leetcode.com/problems/search-a-2d-matrix/
Write an efficient algorithm that searches for a value in an m x n matrix.
This matrix has the following properties:
Integers in each row are sorted from left to right.
The first integer of each row is greater than the last integer of the previous row.
Example 1:
Input:
matrix = [
[1, 3, 5, 7],
[10, 11, 16, 20],
[23, 30, 34, 50]
]
target = 3
Output: true
Example 2:
Input:
matrix = [
[1, 3, 5, 7],
[10, 11, 16, 20],
[23, 30, 34, 50]
]
target = 13
Output: false
"""
class Solution(object):
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
pass
def main():
pass
if __name__ == '__main__':
main()
| bsd-2-clause | Python | |
b6ac5540021dd7de2a97d4cb9f668059a16a7e94 | Write edit preparation test | osu-cass/whats-fresh-api,iCHAIT/whats-fresh-api,osu-cass/whats-fresh-api,iCHAIT/whats-fresh-api,osu-cass/whats-fresh-api,iCHAIT/whats-fresh-api,iCHAIT/whats-fresh-api,osu-cass/whats-fresh-api | whats_fresh/whats_fresh_api/tests/views/entry/test_edit_preparation.py | whats_fresh/whats_fresh_api/tests/views/entry/test_edit_preparation.py | from django.test import TestCase
from django.core.urlresolvers import reverse
from whats_fresh_api.models import *
from django.contrib.gis.db import models
from django.contrib.auth.models import User
import json
class EditPreparationTestCase(TestCase):
"""
Test that the Edit Preparation page works as expected.
Things tested:
URLs reverse correctly
The outputted page has the correct form fields
POSTing "correct" data will result in the update of the preparation
object with the specified ID
"""
fixtures = ['test_fixtures']
def setUp(self):
user = User.objects.create_user(
'temporary', 'temporary@gmail.com', 'temporary')
user.save()
self.client.post(
reverse('login'),
{'username': 'temporary', 'password': 'temporary'})
def test_url_endpoint(self):
url = reverse('edit-preparation', kwargs={'id': '1'})
self.assertEqual(url, '/entry/preparations/1')
def test_successful_preparation_update(self):
"""
POST a proper "update preparation" command to the server, and see if
the update appears in the database
"""
# Data that we'll post to the server to get the new preparation created
new_preparation = {
'name': 'Fried', 'description': '', 'additional_info': ''}
response = self.client.post(
reverse('edit-preparation', kwargs={'id': '1'}),
new_preparation)
preparation = Preparation.objects.get(id=1)
for field in new_preparation:
self.assertEqual(
getattr(preparation, field), new_preparation[field])
def test_form_fields(self):
"""
Tests to see if the form contains all of the right fields
"""
response = self.client.get(
reverse('edit-preparation', kwargs={'id': '1'}))
fields = {
'name': 'Fried', 'description': '', 'additional_info': ''}
form = response.context['preparation_form']
for field in fields:
self.assertEqual(fields[field], form[field].value())
| apache-2.0 | Python | |
ae8226b969a792f8a1018394ee193a186e13ba1e | Create azuretranslator.py | MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab | home/hairygael/GESTURES/azuretranslator.py | home/hairygael/GESTURES/azuretranslator.py | #CREDITS : PAPAOUTAI
# your keys here ( put in config file ) : https://datamarket.azure.com/dataset/bing/microsofttranslator
AzureTranslator=Runtime.createAndStart("AzureTranslator", "AzureTranslator")
sleep(0.1)
#initiate azure
#AzureTranslator.setCredentials(Azure_client_id,Azure_client_secret)
AzureTranslator.setCredentials("xxxxxxxxxxxxxxxxx","xxxxxxxxxxxxxxxxxx") # KEY and SECRET azure credentials
#Origin language
supported_languages = { # as defined here: http://msdn.microsoft.com/en-us/library/hh456380.aspx
'ar' : ' Arabic',
# 'bs-Latn' : 'Bosnian (Latin)',
# 'bg' : 'Bulgarian',
# 'ca' : 'Catalan',
# 'zh-CHS' : 'Chinese (Simplified)',
# 'zh-CHT' : 'Chinese (Traditional)',
# 'hr' : 'Croatian',
# 'cs' : 'Czech',
'da' : 'Danish',
'nl' : 'Dutch',
'en' : 'English',
# 'et' : 'Estonian',
# 'fi' : 'Finnish',
'fr' : 'French',
'de' : 'German',
'el' : 'Greek',
# 'ht' : 'Haitian Creole',
# 'he' : 'Hebrew',
# 'hi' : 'Hindi',
# 'mww' : 'Hmong Daw',
# 'hu' : 'Hungarian',
# 'id' : 'Indonesian',
'it' : 'Italian',
# 'ja' : 'Japanese',
# 'sw' : 'Kiswahili',
# 'tlh' : 'Klingon',
# 'ko' : 'Korean',
# 'lv' : 'Latvian',
# 'lt' : 'Lithuanian',
# 'ms' : 'Malay',
# 'mt' : 'Maltese',
'no' : 'Norwegian',
# 'fa' : 'Persian',
# 'pl' : 'Polish',
'pt' : 'Portuguese',
# 'ro' : 'Romanian',
'ru' : 'Russian',
# 'sr-Cyrl' : 'Serbian (Cyrillic)',
# 'sr-Latn' : 'Serbian (Latin)',
# 'sk' : 'Slovak',
# 'sl' : 'Slovenian',
'es' : 'Spanish',
'sv' : 'Swedish',
# 'th' : 'Thai',
# 'tr' : 'Turkish',
# 'uk' : 'Ukrainian',
# 'ur' : 'Urdu',
# 'vi' : 'Vietnamese',
# 'cy' : 'Welsh',
# 'yua' : 'Yucatec Maya',
}
#acapela voice name map
male_languages = {
'ar' : 'Nizar',
'da' : 'Rasmus',
'nl' : 'Jeroen',
'en' : Voice,
'fr' : 'Bruno',
'de' : 'Klaus',
'el' : 'Dimitris',
'it' : 'Vittorio',
'no' : 'Olav',
'es' : 'Antonio',
'sv' : 'Emil',
'ja' : 'Sakura',
'pt' : 'Celia',
'ru' : 'Alyona',
}
female_languages = {
'pt' : 'Celia',
'ru' : 'Alyona',
}
#Translate to :
en_languages = {
'arab' : 'ar',
'arabe' : 'ar',
'danish' : 'da',
'danois' : 'da',
'dutch' : 'nl',
'hollandais' : 'nl',
'english' : 'en',
'anglais' : 'en',
'french' : 'fr',
'francais' : 'fr',
'german' : 'de',
'allemand' : 'de',
'greek' : 'el',
'italian' : 'it',
'norwegian' : 'no',
'norvegien' : 'no',
'spanish' : 'es',
'espagnol' : 'es',
'swedish' : 'sv',
'suedois' : 'sv',
'japonese' : 'ja',
'portuguese' : 'pt',
'portuguais' : 'pt',
'russian' : 'ru',
'russe' : 'ru',
}
def translateText(text,language):
RealLang="0"
try:
RealLang=en_languages[language]
except:
inmoovSuper.getResponse("AZURE_ERROR_2 "+language)
print RealLang
try:
AzureTranslator.detectLanguage(text)
except:
inmoovSuper.getResponse("AZURE_ERROR_1")
RealLang="0"
if RealLang!="0":
AzureTranslator.toLanguage(RealLang)
sleep(0.1)
t_text=AzureTranslator.translate(text)
#small trick to prevent connection timeout :)
i=0
while 'Cannot find an active Azure Market Place' in t_text and i<50:
print(i,t_text)
i += 1
sleep(0.2)
AzureTranslator.detectLanguage(text)
t_text=AzureTranslator.translate(text+" ")
if 'Cannot find an active Azure Market Place' in t_text:
inmoovSuper.getResponse("AZURE_ERROR_3")
else:
mouth.setVoice(male_languages[RealLang])
print t_text
talk(t_text)
mouth.setVoice(Voice)
| apache-2.0 | Python | |
8354354be70851b91a66e11a784d19684345df4d | Add triangular distribution. | Effective-Quadratures/Effective-Quadratures | equadratures/distributions/triangular.py | equadratures/distributions/triangular.py | """The Triangular distrubution."""
from equadratures.distributions.template import Distribution
from equadratures.distributions.recurrence_utils import custom_recurrence_coefficients
import numpy as np
from scipy.stats import triang
RECURRENCE_PDF_SAMPLES = 8000
class Triangular(Distribution):
"""
The class defines a Triangular object.
:param double lower:
Lower bound of the support of the distribution.
:param double upper:
Upper bound of the support of the distribution.
:param double mode:
Mode of the distribution.
"""
def __init__(self, lower=None, upper=None, mode=None):
self.lower = lower # loc
self.upper = upper
self.mode = mode
self.bounds = np.array([0, 1.0])
self.scale = upper - lower # scale
self.shape = (self.mode - self.lower) / (self.upper - self.lower) # c
if (self.lower is not None) and (self.upper is not None) and (self.mode is not None) :
mean, var, skew, kurt = triang.stats(c=self.shape, loc=self.lower, scale=self.scale, moments='mvsk')
self.mean = mean
self.variance = var
self.skewness = skew
self.kurtosis = kurt
self.x_range_for_pdf = np.linspace(self.lower, self.upper, RECURRENCE_PDF_SAMPLES)
self.parent = triang(loc=self.lower, scale=self.scale, c=self.shape)
def get_description(self):
"""
Returns the description of the distribution.
:param Distribution self:
An instance of the distribution class.
"""
text = "is a triangular distribution with a mode of "+str(self.mode)+" over the support "+str(self.lower)+" to "+str(self.upper)+"."
return text
def get_cdf(self, points=None):
"""
Returns the CDF of the distribution.
:param Distribution self:
An instance of the distribution class.
"""
if points is not None:
return self.parent.cdf(points)
else:
raise ValueError( 'Please digit an input for getCDF method')
def get_pdf(self, points=None):
"""
Returns the PDF of the distribution.
:param Distribution self:
An instance of the distribution class.
"""
if points is not None:
return self.parent.pdf(points)
else:
raise ValueError( 'Please digit an input for get_pdf method')
def get_icdf(self, xx):
"""
An inverse cumulative density function.
:param Distribution self:
An instance of the distribution class.
:param xx:
A numpy array of uniformly distributed samples between [0,1].
:return:
Inverse CDF samples associated with the gamma distribution.
"""
return self.parent.ppf(xx)
def get_samples(self, m=None):
"""
Generates samples from the distribution.
:param Distribution self:
An instance of the distribution class.
:param integer m:
Number of random samples. If no value is provided, a default of 5e5 is assumed.
:return:
A N-by-1 vector that contains the samples.
"""
if m is not None:
number = m
else:
number = 500000
return self.parent.rvs(size=number) | lgpl-2.1 | Python | |
e451343922251b552e902b84aa925b0a04ae958f | Add test script | ianknowles/EarTimeWrangler | src/test.py | src/test.py | import wrangler
if __name__ == '__main__':
wrangler.test_task()
| mit | Python | |
b438141932dc32c2bb0749ad0bf1cafaed09c711 | Add dynamic resizing support. | peterbrittain/asciimatics,peterbrittain/asciimatics | asciimatics/exceptions.py | asciimatics/exceptions.py | class ResizeScreenError(Exception):
"""
Asciimatics raises this Exception if the terminal is resized while playing
a Scene (and the Screen has been told not to ignore a resizing event).
"""
def __init__(self, message):
"""
:param message: Error message for this exception.
"""
self._message = message
def __str__(self):
"""
Printable form of the exception.
"""
return self._message
| apache-2.0 | Python | |
d3e90e4dca1cce2f27ccf9c24c1bb944f9d708b9 | Add a unit test file for MessageDirectory | tofu-rocketry/ssm,apel/ssm,tofu-rocketry/ssm,stfc/ssm,apel/ssm,stfc/ssm | test/test_message_directory.py | test/test_message_directory.py | """
Created on 18 May 2018.
@author: Greg Corbett
"""
import shutil
import tempfile
import unittest
from ssm.message_directory import MessageDirectory
class TestMessageDirectory(unittest.TestCase):
    """Class used for testing the MessageDirectory class."""

    def setUp(self):
        """Create a MessageDirectory class on top of a temporary directory."""
        self.tmp_dir = tempfile.mkdtemp(prefix='message_directory')
        self.message_directory = MessageDirectory(self.tmp_dir)
        # Assert no files exist in the underlying file system.
        self.assertEqual(self.message_directory.count(), 0)

    def test_add_and_get(self):
        """
        Test the add and get methods of the MessageDirectory class.

        This test adds a file to a MessageDirectory, checks it has been
        written to the underlying directory and then checks the saved file
        for content equality.
        """
        test_content = "FOO"
        # Add the test content to the MessageDirectory.
        file_name = self.message_directory.add(test_content)
        # Assert there is exactly one message in the directory.
        self.assertEqual(self.message_directory.count(), 1)
        # Fetch the saved content using the get method.
        saved_content = self.message_directory.get(file_name)
        # Assert the saved content is equal to the original test content.
        self.assertEqual(saved_content, test_content)

    def test_count(self):
        """
        Test the count method of the MessageDirectory class.

        This test adds two files to a MessageDirectory and then checks
        the output of the count() function is as expected.
        """
        # Add some files to the MessageDirectory.
        self.message_directory.add("FOO")
        self.message_directory.add("BAR")
        # Check the count method returns the correct value.
        self.assertEqual(self.message_directory.count(), 2)

    def test_lock(self):
        """
        Test the lock method of the MessageDirectory class.

        This test checks the lock method returns true for any file.
        """
        self.assertTrue(self.message_directory.lock("any file"))

    def test_purge(self):
        """
        Test the purge method of the MessageDirectory class.

        This test only checks the purge method is callable without error,
        as the purge method only logs that it has been called.
        """
        self.message_directory.purge()

    def test_remove(self):
        """
        Test the remove method of the MessageDirectory class.

        This test adds a file, removes the file and then checks
        the number of files present.
        """
        # Add some files to the MessageDirectory.
        file_name = self.message_directory.add("FOO")
        # Use the remove method to delete the recently added file.
        self.message_directory.remove(file_name)
        # Check the count method returns the expected value.
        self.assertEqual(self.message_directory.count(), 0)

    def tearDown(self):
        """Remove test directory and all contents."""
        try:
            shutil.rmtree(self.tmp_dir)
        # 'except OSError, error' is Python-2-only syntax; the 'as' form
        # works on Python 2.6+ and Python 3.
        except OSError as error:
            print('Error removing temporary directory %s' % self.tmp_dir)
            print(error)
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
| apache-2.0 | Python | |
25bb3511e6b06f739e4ebe8aed79ec25d1ecae29 | use get_total_case_count | puttarajubr/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq | corehq/ex-submodules/casexml/apps/case/tests/test_multi_case_submits.py | corehq/ex-submodules/casexml/apps/case/tests/test_multi_case_submits.py | from django.test import TestCase
import os
from django.test.utils import override_settings
from casexml.apps.case.dbaccessors import get_total_case_count
from casexml.apps.case.models import CommCareCase
from casexml.apps.case.tests import delete_all_xforms, delete_all_cases
from couchforms.tests.testutils import post_xform_to_couch
from casexml.apps.case.xform import process_cases
@override_settings(CASEXML_FORCE_DOMAIN_CHECK=False)
class MultiCaseTest(TestCase):
    """Tests for single form submissions that touch several cases at once."""

    def setUp(self):
        delete_all_xforms()
        delete_all_cases()
        # Precondition for every test: a clean case database.
        self.assertEqual(0, get_total_case_count())

    def testParallel(self):
        self._submit_and_check("parallel_cases.xml", 4)

    def testMixed(self):
        self._submit_and_check("mixed_cases.xml", 4)

    def testCasesInRepeats(self):
        self._submit_and_check("case_in_repeats.xml", 3)

    def _submit_and_check(self, filename, expected_count):
        """Submit the named multicase fixture and verify the resulting cases.

        Loads the XML fixture from data/multicase, posts it, processes its
        cases, then asserts the number of cases created and that every case
        references exactly the submitted form.
        """
        file_path = os.path.join(os.path.dirname(__file__), "data",
                                 "multicase", filename)
        with open(file_path, "rb") as f:
            xml_data = f.read()
        form = post_xform_to_couch(xml_data)
        process_cases(form)
        cases = self._get_cases()
        self.assertEqual(expected_count, len(cases))
        self._check_ids(form, cases)

    def _get_cases(self):
        # Fetch all cases via the lightweight couch view.
        return CommCareCase.view("case/get_lite", reduce=False,
                                 include_docs=True).all()

    def _check_ids(self, form, cases):
        # Each case should point back to the single submitted form.
        for case in cases:
            ids = case.get_xform_ids_from_couch()
            self.assertEqual(1, len(ids))
            self.assertEqual(form._id, ids[0])
| from django.test import TestCase
import os
from django.test.utils import override_settings
from casexml.apps.case.models import CommCareCase
from casexml.apps.case.tests import delete_all_xforms, delete_all_cases
from couchforms.tests.testutils import post_xform_to_couch
from casexml.apps.case.xform import process_cases
@override_settings(CASEXML_FORCE_DOMAIN_CHECK=False)
class MultiCaseTest(TestCase):
    # NOTE(review): every test repeats the same "no cases yet" precondition
    # and the same load/post/process/assert sequence; kept verbatim here.

    def setUp(self):
        delete_all_xforms()
        delete_all_cases()

    def testParallel(self):
        # Precondition: the couch view reports no existing cases.
        self.assertEqual(0, len(CommCareCase.view("case/by_user", reduce=False).all()))
        file_path = os.path.join(os.path.dirname(__file__), "data", "multicase", "parallel_cases.xml")
        with open(file_path, "rb") as f:
            xml_data = f.read()
        form = post_xform_to_couch(xml_data)
        process_cases(form)
        cases = self._get_cases()
        self.assertEqual(4, len(cases))
        self._check_ids(form, cases)

    def testMixed(self):
        self.assertEqual(0, len(CommCareCase.view("case/by_user", reduce=False).all()))
        file_path = os.path.join(os.path.dirname(__file__), "data", "multicase", "mixed_cases.xml")
        with open(file_path, "rb") as f:
            xml_data = f.read()
        form = post_xform_to_couch(xml_data)
        process_cases(form)
        cases = self._get_cases()
        self.assertEqual(4, len(cases))
        self._check_ids(form, cases)

    def testCasesInRepeats(self):
        self.assertEqual(0, len(CommCareCase.view("case/by_user", reduce=False).all()))
        file_path = os.path.join(os.path.dirname(__file__), "data", "multicase", "case_in_repeats.xml")
        with open(file_path, "rb") as f:
            xml_data = f.read()
        form = post_xform_to_couch(xml_data)
        process_cases(form)
        cases = self._get_cases()
        self.assertEqual(3, len(cases))
        self._check_ids(form, cases)

    def _get_cases(self):
        # Fetch all cases via the lightweight couch view.
        return CommCareCase.view("case/get_lite", reduce=False, include_docs=True).all()

    def _check_ids(self, form, cases):
        # Each case should point back to the single submitted form.
        for case in cases:
            ids = case.get_xform_ids_from_couch()
            self.assertEqual(1, len(ids))
            self.assertEqual(form._id, ids[0])
| bsd-3-clause | Python |
2414789b86be5303e95ce8a4b98bca345d224bd9 | Add pythondoc HTMLParser code | brockuniera/PhoneBrowser,brockuniera/PhoneBrowser | html.py | html.py | import HTMLParser
import requests
class WikiParse(HTMLParser.HTMLParser):
    """Trivial HTML parser that prints every start tag, end tag and text node."""

    # Python-2 multi-arg print statements replaced with single-argument
    # print() calls producing identical output on Python 2 and 3.
    def handle_starttag(self, tag, attrs):
        print("start tag %s" % tag)

    def handle_endtag(self, tag):
        print("end tag %s" % tag)

    def handle_data(self, data):
        print("data %s" % data)
if __name__ == "__main__":
    # NOTE(review): the parser is instantiated but never fed any input
    # (no parser.feed(...) call) -- presumably a work-in-progress stub.
    parser = WikiParse()
| mit | Python | |
499127a1dcb4edce3a823fb3fd1787db3a5b96b2 | Add util.py to add helpers to mailchimp handlers | tforrest/soda-automation,tforrest/soda-automation | app/mailchimp/util.py | app/mailchimp/util.py | import re
def resp_match(status):
    """Return a match object if *status* looks like a 4xx/5xx HTTP status."""
    # The original character class "[4,5]" also matched a literal comma;
    # "[45]" matches only statuses beginning with 4 or 5.
    return re.match(r"^[45][0-9][0-9]$", status)


def handle_response(func):
    """Utility function that pre-processes a MailChimp response.

    Calls *func* (a zero-argument callable returning the decoded response
    dict) and returns the response, raising ChimpException for 4xx/5xx
    statuses or for responses with no usable "status" field.
    """
    r = func()
    try:
        status = r["status"]
    except (KeyError, TypeError):
        # Malformed response: no status field at all.
        raise ChimpException("404", "Not Found", "Status not found")
    if resp_match(str(status)):
        # Error response: surface MailChimp's own title/detail.
        # (The original raised inside a bare "except:" block, which caught
        # its own ChimpException and replaced it with a generic 404; it
        # also returned the undefined name "response" instead of r.)
        raise ChimpException(status, r.get("title", ""), r.get("detail", ""))
    return r


class ChimpException(Exception):
    """Exception for Bad Response from MailChimp"""
    error_string = 'Chimp Exception Status:{}, Title: {}, Detail: {}'

    def __init__(self, status, title, reason):
        # The original referenced the bare names "error_string" and
        # "detail" (both NameErrors); use the class attribute and the
        # actual parameter instead.
        super(ChimpException, self).__init__(
            self.error_string.format(status, title, reason))
70958a1573f2eb0987ce26fe6807896edb1c987a | Add script to allow download of images using new media API | mpi2/PhenotypeData,mpi2/PhenotypeData,mpi2/PhenotypeData,mpi2/PhenotypeData,mpi2/PhenotypeData,mpi2/PhenotypeData | external_tools/src/main/python/images/downloadimages_using_media_api.py | external_tools/src/main/python/images/downloadimages_using_media_api.py | #!/usr/bin/python
"""Download images from DCC using media API
Allows download of images using the media API provided by the DCC instead
of the solr core which the original downloadimages.py script uses. This is
necessary sometimes to get a headstart on the data-release.
See also downloadimages.py and downloadimages_using_xml.py
"""
import sys
import os
import requests
import json
import argparse
# Import helper functions from original downloadimages.py
from downloadimages import createDestinationFilePath, createNotDownloadedOutputPath, processFile
uniqueUris=set()
def main(argv):
"""Download images using DCC media API onto holding area on disk"""
parser = argparse.ArgumentParser(
description='Download images using DCC media API onto holding area on disk'
)
parser.add_argument('-d1', '--initialDestinationDir', required=True,
dest='initialDestinationDir',
help='Directory for root of holding destination to store images'
)
parser.add_argument('-d2', '--finalDestinationDir', required=True,
dest='finalDestinationDir',
help='Directory for root of final destination to store images'
)
parser.add_argument('--not-downloaded', dest='notDownloadedOutputPath',
help='path to save list of files that could not be downloaded'
)
args = parser.parse_args()
rootDestinationDir = args.initialDestinationDir
finalDestinationDir = args.finalDestinationDir
print "running python image download script for impc images"
print 'rootDestinationDir is "', rootDestinationDir
notDownloaded = runWithMediaApiAsDataSource(rootDestinationDir, finalDestinationDir)
print str(len(notDownloaded)) + " files could not be downloaded"
if len(notDownloaded) > 0:
notDownloadedOutputPath = args.notDownloadedOutputPath if args.notDownloadedOutputPath <> None else createNotDownloadedOutputPath(rootDestinationDir)
with open(notDownloadedOutputPath, 'wt') as fid:
fid.writelines(notDownloaded)
print "Written files that could not be downloaded to " + notDownloadedOutputPath
def runWithMediaApiAsDataSource(rootDestinationDir, finalDestinationDir):
    """
    Download images using Media API as the datasource.
    Return urls that cannot be downloaded
    """
    notDownloaded = []
    numFound=0
    # We get the files we are interested in for each site using the
    # media API.  Each entry is (site key for the API, phenotyping centre
    # name passed through to processFile).
    sites = [
        ('bcm', 'BCM',),
        ('gmc','HMGU',),
        ('h', 'MRC Harwell'),
        ('ics', 'ICS',),
        ('j', 'JAX',),
        ('tcp', 'TCP'),
        ('ning', 'NING',),
        ('rbrc', 'RBRC',),
        ('ucd', 'UC Davis',),
        ('wtsi', 'WTSI',),
        ('kmpc', 'KMPC',),
        ('ccpcz', 'CCP-IMG',),
    ]
    # Preset observation ID. This is passed to processFile in the
    # loop below but is not used by the function.
    # ToDo: remove from function definition
    observation_id=None
    for site, phenotyping_center in sites:
        query_string = "https://api.mousephenotype.org/media/dccUrl/" +\
            site # + "?start=0&resultsize=2"
        print query_string
        v = json.loads(requests.get(query_string).text)
        try:
            docs = v['mediaFiles']
        except KeyError as key_error:
            print "WARNING - no media files returned for site: " + site
            continue
        numFound += len(docs)
        for doc in docs:
            download_file_path=doc['dccUrl']
            download_file_path=download_file_path.lower()
            # Skip URLs that are off-site or in formats we do not handle.
            if download_file_path.find('mousephenotype.org') < 0 or \
                download_file_path.endswith('.mov') or \
                download_file_path.endswith('.bz2'):
                continue
            # On 13/11/2019 got a KeyError for phenotyping centre. This
            # should not happen, but code modified appropriately
            try:
                pipeline_stable_id=doc['pipelineKey']
                procedure_stable_id=doc['procedureKey']
                parameter_stable_id=doc['parameterKey']
            except KeyError as e:
                print "Key " + str(e)+ " not returned by solr - not downloading " + download_file_path
                notDownloaded.append(download_file_path+'\n')
                continue
            downloaded = processFile(observation_id, rootDestinationDir, finalDestinationDir, phenotyping_center,pipeline_stable_id, procedure_stable_id, parameter_stable_id, download_file_path)
            if not downloaded:
                notDownloaded.append(download_file_path+'\n')
    print 'number found in media API ='+str(numFound)+' number of images not downloaded = '+str(len(notDownloaded))
    return notDownloaded
if __name__ == "__main__":
    # argv is passed for symmetry only; main() re-reads sys.argv via argparse.
    main(sys.argv[1:])
| apache-2.0 | Python | |
11b8fee12d0c4ccaa6c922e00e601efdf1706a89 | Add proof-of-concept video tracking | sigvartmh/CamPal-TDT4140,sigvartmh/CamPal-TDT4140 | videoconcept.py | videoconcept.py | import numpy as np
import sys
sys.path.append('/usr/local/Cellar/opencv3/3.2.0/lib/python2.7/site-packages')
from imutils.object_detection import non_max_suppression
import imutils
import cv2
import time
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
camera = cv2.VideoCapture(0)
counter = 0
while True:
start = time.time()
(grabbed, frame) = camera.read()
if not grabbed:
break
frame = imutils.resize(frame, width=800)
orig = frame.copy()
cv2.putText(frame, "CamPal Test", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 3)
if(counter):
(rects, weights) = hog.detectMultiScale(frame, winStride=(4, 4),padding=(8, 8), scale=1.05)
for (x, y, w, h) in rects:
cv2.rectangle(orig, (x, y), (x + w, y + h), (0, 0, 255), 2)
rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)
for (xA, yA, xB, yB) in pick:
cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)
end = time.time()
cv2.putText(frame, "counter:{} ".format(1/(end-start)),(10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
cv2.imshow("Frame", frame)
cv2.imshow("Original", orig)
key = cv2.waitKey(1) & 0xFF
counter += 1
if key == ord("q"):
break
camera.release()
cv2.destroyAllWindows()
| mit | Python | |
5d128562c1f90537e6c4164b7651cae6b7f2838b | Add missing import | dmerejkowsky/qibuild,aldebaran/qibuild,dmerejkowsky/qibuild,aldebaran/qibuild,aldebaran/qibuild,dmerejkowsky/qibuild,dmerejkowsky/qibuild,aldebaran/qibuild,dmerejkowsky/qibuild | python/qitoolchain/actions/convert_package.py | python/qitoolchain/actions/convert_package.py | ## Copyright (c) 2012-2014 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.
"""Convert a binary archive into a qiBuild package.
"""
import os
from qisys import ui
import qisys
import qisys.parsers
from qitoolchain.convert import convert_package
def configure_parser(parser):
    """Configure parser for this action """
    # Install the common qisys options, then this action's own arguments.
    qisys.parsers.default_parser(parser)
    parser.add_argument("--name", required=True,
        help="The name of the package")
    parser.add_argument("package_path", metavar='PACKAGE_PATH',
        help="The path to the archive to be converted")
def do(args):
    """Convert a binary archive into a qiBuild package.

    :param args: parsed arguments carrying ``name`` and ``package_path``.
    """
    name = args.name
    package_path = args.package_path
    ui.info("Converting", package_path, "into a qiBuild package")
    res = convert_package(package_path, name, interactive=True)
    message = """\
Conversion succeeded.
qiBuild package:
{1}
You can add this qiBuild package to a toolchain using:
    qitoolchain add-package -c <toolchain name> {0} {1}\
""".format(name, res)
    # Use the 'ui' module imported at the top of the file, consistently
    # with the ui.info call above (was 'qisys.ui.info').
    ui.info(message)
| ## Copyright (c) 2012-2014 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.
"""Convert a binary archive into a qiBuild package.
"""
import os
import qisys
import qisys.parsers
from qitoolchain.convert import convert_package
def configure_parser(parser):
    """Configure parser for this action """
    # Install the common qisys options, then this action's own arguments.
    qisys.parsers.default_parser(parser)
    parser.add_argument("--name", required=True,
        help="The name of the package")
    parser.add_argument("package_path", metavar='PACKAGE_PATH',
        help="The path to the archive to be converted")
def do(args):
    """Convert a binary archive into a qiBuild package.
    """
    name = args.name
    package_path = args.package_path
    # NOTE(review): 'ui' is never imported in this version of the module
    # (only os, qisys, qisys.parsers and convert_package are) -- this line
    # raises NameError at runtime.
    ui.info("Converting", package_path, "into a qiBuild package")
    res = convert_package(package_path, name, interactive=True)
    message = """\
Conversion succeeded.
qiBuild package:
{1}
You can add this qiBuild package to a toolchain using:
    qitoolchain add-package -c <toolchain name> {0} {1}\
""".format(name, res)
    qisys.ui.info(message)
| bsd-3-clause | Python |
b4305e843ecfec33640762e5698cd36b38912707 | Add a utility script for encoding packet traces | vovojh/gem5,pombredanne/http-repo.gem5.org-gem5-,vovojh/gem5,hoangt/tpzsimul.gem5,pombredanne/http-repo.gem5.org-gem5-,hoangt/tpzsimul.gem5,hoangt/tpzsimul.gem5,vovojh/gem5,vovojh/gem5,vovojh/gem5,pombredanne/http-repo.gem5.org-gem5-,hoangt/tpzsimul.gem5,vovojh/gem5,pombredanne/http-repo.gem5.org-gem5-,pombredanne/http-repo.gem5.org-gem5-,hoangt/tpzsimul.gem5,hoangt/tpzsimul.gem5,pombredanne/http-repo.gem5.org-gem5-,pombredanne/http-repo.gem5.org-gem5-,vovojh/gem5,hoangt/tpzsimul.gem5 | util/encode_packet_trace.py | util/encode_packet_trace.py | #!/usr/bin/env python
# Copyright (c) 2013 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
#
# This script is used to migrate ASCII packet traces to the protobuf
# format currently used in gem5. It assumes that protoc has been
# executed and already generated the Python package for the packet
# messages. This can be done manually using:
# protoc --python_out=. --proto_path=src/proto src/proto/packet.proto
#
# The ASCII trace format uses one line per request on the format cmd,
# addr, size, tick. For example:
# r,128,64,4000
# w,232123,64,500000
# This trace reads 64 bytes from decimal address 128 at tick 4000,
# then writes 64 bytes to address 232123 at tick 500000.
#
# This script can of course also be used as a template to convert
# other trace formats into the gem5 protobuf format
import struct
import sys
import packet_pb2
def EncodeVarint(out_file, value):
    """
    Write *value* to *out_file* as a protobuf base-128 varint.

    Each output byte carries seven payload bits (least significant group
    first); the high bit of a byte flags that more bytes follow.  The
    encoding mirrors google.protobuf.internal.encoder and is repeated here
    only to avoid depending on the library's internal functions.
    """
    encoded = bytearray()
    while True:
        seven_bits = value & 0x7f
        value >>= 7
        if value:
            # More groups to come: set the continuation bit.
            encoded.append(0x80 | seven_bits)
        else:
            encoded.append(seven_bits)
            break
    out_file.write(bytes(encoded))
def encodeMessage(out_file, message):
    """Serialize *message* and write it to *out_file*, prefixed with its
    length encoded as a varint.
    """
    payload = message.SerializeToString()
    EncodeVarint(out_file, len(payload))
    out_file.write(payload)
def main():
    """Convert an ASCII packet trace (one 'cmd,addr,size,tick' line per
    request) into the gem5 protobuf packet-trace format.
    """
    if len(sys.argv) != 3:
        print("Usage: %s <ASCII input> <protobuf output>" % sys.argv[0])
        sys.exit(-1)

    try:
        ascii_in = open(sys.argv[1], 'r')
    except IOError:
        print("Failed to open %s for reading" % sys.argv[1])
        sys.exit(-1)

    try:
        proto_out = open(sys.argv[2], 'wb')
    except IOError:
        print("Failed to open %s for writing" % sys.argv[2])
        sys.exit(-1)

    # Write the magic number in 4-byte Little Endian, similar to what
    # is done in src/proto/protoio.cc
    proto_out.write("gem5")

    # Add the packet header
    header = packet_pb2.PacketHeader()
    header.obj_id = "Converted ASCII trace " + sys.argv[1]
    # Assume the default tick rate
    header.tick_freq = 1000000000
    encodeMessage(proto_out, header)

    # For each line in the ASCII trace, create a packet message and
    # write it to the encoded output
    for line in ascii_in:
        cmd, addr, size, tick = line.split(',')
        packet = packet_pb2.Packet()
        # int() auto-promotes on Python 2 and replaces the removed long()
        # builtin, keeping the script Python-3-ready.
        packet.tick = int(tick)
        # ReadReq is 1 and WriteReq is 4 in src/mem/packet.hh Command enum
        packet.cmd = 1 if cmd == 'r' else 4
        packet.addr = int(addr)
        packet.size = int(size)
        encodeMessage(proto_out, packet)

    # We're done
    ascii_in.close()
    proto_out.close()
# Run the conversion only when invoked as a script.
if __name__ == "__main__":
    main()
| bsd-3-clause | Python | |
7fd09bd791661ab0b12921dfd977591690d9c01a | Add form tests for LoginForm | randomic/aniauth-tdd,randomic/aniauth-tdd | accounts/tests/test_forms.py | accounts/tests/test_forms.py | """accounts app unittests for views
"""
from django.test import TestCase
from accounts.forms import LoginForm
class LoginFormTest(TestCase):
    """Tests the form which validates the email used for login.
    """

    def test_valid_email_accepted(self):
        # A well-formed address should pass form validation.
        form = LoginForm({'email': 'newvisitor@example.com'})
        self.assertTrue(form.is_valid())

    def test_invalid_email_declined(self):
        # A string that is not an email address must be rejected.
        form = LoginForm({'email': 'invalidemail'})
        self.assertFalse(form.is_valid())
| mit | Python | |
a719f3b15664555543266f0575210f750d8cb631 | Add hwmon instrument | lisatn/workload-automation,lisatn/workload-automation,setrofim/workload-automation,lisatn/workload-automation,ARM-software/workload-automation,ARM-software/workload-automation,setrofim/workload-automation,setrofim/workload-automation,setrofim/workload-automation,ARM-software/workload-automation,ARM-software/workload-automation,lisatn/workload-automation | wa/instrumentation/hwmon.py | wa/instrumentation/hwmon.py | # Copyright 2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from devlib import HwmonInstrument as _Instrument
from wa import Instrument
from wa.framework.instrumentation import fast
MOMENTARY_QUANTITIES = ['temperature', 'power', 'voltage', 'current', 'fps']
CUMULATIVE_QUANTITIES = ['energy', 'tx', 'tx/rx', 'frames']
class HwmonInstrument(Instrument):

    name = 'hwmon'
    description = """
    Hardware Monitor (hwmon) is a generic Linux kernel subsystem,
    providing access to hardware monitoring components like temperature or
    voltage/current sensors.

    Data from hwmon that are a snapshot of a fluctuating value, such as
    temperature and voltage, are reported once at the beginning and once at the
    end of the workload run. Data that are a cumulative total of a quantity,
    such as energy (which is the cumulative total of power consumption), are
    reported as the difference between the values at the beginning and at the
    end of the workload run.

    There is currently no functionality to filter sensors: all of the available
    hwmon data will be reported.
    """

    def initialize(self, context):
        # One devlib hwmon instrument bound to the current target.
        self.instrument = _Instrument(self.target)

    def setup(self, context):
        self.instrument.reset()

    @fast
    def start(self, context):
        self.before = self.instrument.take_measurement()

    @fast
    def stop(self, context):
        self.after = self.instrument.take_measurement()

    def update_result(self, context):
        measurements_before = {m.channel.label: m for m in self.before}
        measurements_after = {m.channel.label: m for m in self.after}

        # Compare as sets: dict key order must not trigger a false warning.
        if set(measurements_before) != set(measurements_after):
            self.logger.warning(
                'hwmon before/after measurements returned different entries!')

        # .items() instead of the Python-2-only .iteritems() keeps this
        # working on both Python 2 and 3.
        for label, measurement_after in measurements_after.items():
            if label not in measurements_before:
                continue  # We've already warned about this
            measurement_before = measurements_before[label]
            if measurement_after.channel.kind in MOMENTARY_QUANTITIES:
                # Snapshot values: report both endpoints.
                context.add_metric('{}_before'.format(label),
                                   measurement_before.value,
                                   measurement_before.channel.units)
                context.add_metric('{}_after'.format(label),
                                   measurement_after.value,
                                   measurement_after.channel.units)
            elif measurement_after.channel.kind in CUMULATIVE_QUANTITIES:
                # Cumulative totals: report the delta over the run.
                diff = measurement_after.value - measurement_before.value
                context.add_metric(label, diff, measurement_after.channel.units)
            else:
                self.logger.warning(
                    "Don't know what to do with hwmon channel '{}'"
                    .format(measurement_after.channel))

    def teardown(self, context):
        self.instrument.teardown()
| apache-2.0 | Python | |
dc6b7da10f8da0adad984a24b9c3f5904fe48de5 | Add simpler video playback example. | infowantstobeseen/pyglet-darwincore,infowantstobeseen/pyglet-darwincore,infowantstobeseen/pyglet-darwincore,infowantstobeseen/pyglet-darwincore,infowantstobeseen/pyglet-darwincore | examples/video.py | examples/video.py | #!/usr/bin/env python
'''Simple example of video playback.
Usage::
video.py <filename>
See the Programming Guide for a partial list of supported video formats.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import sys
import pyglet
# Bail out with the usage docstring if no media file was given.
if len(sys.argv) < 2:
    print(__doc__)
    sys.exit(1)

source = pyglet.media.load(sys.argv[1])
# 'video_format' avoids shadowing the builtin 'format'.
video_format = source.video_format
if not video_format:
    print('No video track in this source.')
    sys.exit(1)

player = pyglet.media.Player()
player.queue(source)
player.play()

# Size the window to match the video frame.
window = pyglet.window.Window(width=video_format.width,
                              height=video_format.height)

@window.event
def on_draw():
    # Blit the current video frame at the window origin.
    player.get_texture().blit(0, 0)

pyglet.app.run()
| bsd-3-clause | Python | |
28a7077b7f05f52d0bff7a849f8b50f82f73dbdb | Add a script to convert an index to a md5 file. | RKrahl/photo-tools | idx2md5.py | idx2md5.py | #! /usr/bin/python
from __future__ import print_function
import sys
import photo.index
# Load the photo index named on the command line and emit one
# "<md5> <filename>" line per entry.
idx = photo.index.Index(idxfile=sys.argv[1])
for i in idx:
    print("%s %s" % (i.md5, i.filename))
| apache-2.0 | Python | |
05e2b100512a9c9b06c5d7d2701867f155c5e3f0 | Add API tests for profile validation | openstack/senlin,openstack/senlin,stackforge/senlin,openstack/senlin,stackforge/senlin | senlin/tests/tempest/api/profiles/test_profile_validate.py | senlin/tests/tempest/api/profiles/test_profile_validate.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestProfileValidate(base.BaseSenlinAPITest):
    """API tests for validating a profile spec without creating it."""

    @decorators.idempotent_id('ff678e2d-60d0-43da-808f-cb70a3926112')
    def test_profile_validate(self):
        params = {
            'profile': {
                'spec': constants.spec_nova_server,
            }
        }
        res = self.client.validate_obj('profiles', params)

        # Verify resp of validate create API
        self.assertEqual(200, res['status'])
        self.assertIsNotNone(res['body'])
        profile = res['body']
        # The validated profile must expose the full set of profile fields.
        for key in ['created_at', 'domain', 'id', 'metadata', 'name',
                    'project', 'spec', 'type', 'updated_at', 'user']:
            self.assertIn(key, profile)
        self.assertEqual('validated_profile', profile['name'])
        self.assertEqual('os.nova.server-1.0', profile['type'])
        self.assertEqual(constants.spec_nova_server, profile['spec'])
| apache-2.0 | Python | |
98d215599a722ec2784d8795d521eb40bb35c1fe | Create iphone_chat_server.py | stephaneAG/Python_tests,stephaneAG/Python_tests,stephaneAG/Python_tests,stephaneAG/Python_tests | iphone_chat_server.py | iphone_chat_server.py | #StephaneAG - 2012
#simple chat server for iphone socket communication using python
# to run : sudo Python iphone_chat_server.py # > Using sudo as it requires administer access to listen on a port on the machine
# to test : telnet localhost 80
# Chat events: >we use a simple format to exchange messages : strings separated by a ":" . Before that char we have the command, wican be "iam" or "msg"
# the "iam" msg is used when someone joins the chat and is followed by a nickname of who joined the chat
# the "msg" comd sends a message to all clients. There is no need for "msg" to carry the name of the sender, because it is managed server side, in the self.factory.client.list
# > We do not need to use simple string-based protocols, we can use JSON, XML , custom binary format, or whatever we like ;p
#from twisted.internet.protocol import Factory, # factory mthd : creates a management machinery to handle connection established with clients
from twisted.internet.protocol import Factory, Protocol # import Protocol plus factory stuff
from twisted.internet import reactor #importing the reactor files
## defining a protocol ##
# > The protocol is exactly the logic of the server application. This is were we state what to do when a client connects, sends msg, and so on ..
# class IphoneChat(Protocol): # creation of new class "IphoneChat" that extends "Protocol"
# def connectionMade(self): # and extends the "connectionMade" hook that prints a msg when a connection is made
# print "a client connected"
##
## modified version to keep track of clients: Each client has a socket assigned, so we need t ostore that info in an array
##
class IphoneChat(Protocol):
    """Line-oriented chat protocol: messages are '<command>:<content>'
    strings, where command is 'iam' (register a nickname) or 'msg'
    (broadcast text to all connected clients).
    """

    def connectionMade(self):
        # Default nickname so a 'msg' arriving before any 'iam' cannot
        # crash the server with an AttributeError on self.name.
        self.name = "anonymous"
        self.factory.clients.append(self)
        print("clients are %s" % (self.factory.clients,))

    def connectionLost(self, reason):
        self.factory.clients.remove(self)

    def dataReceived(self, data):
        # Split the string to find out the command.
        a = data.split(':')
        print(a)
        if len(a) > 1:
            command = a[0]
            content = a[1]
            msg = ""
            if command == "iam":
                # Register the sender's nickname and announce the join.
                self.name = content
                msg = self.name + " has joined"
            elif command == "msg":
                # Prefix the message with the stored sender name.
                msg = self.name + ": " + content
                print(msg)
            # Broadcast to every connected client (including the sender).
            for c in self.factory.clients:
                c.message(msg)

    def message(self, message):
        # IMPORTANT: the trailing '\n' lets the client socket detect when
        # a message transmission has completed.
        self.transport.write(message + '\n')
# -- module-level server bootstrap --------------------------------------
factory = Factory()  # the factory creates one protocol instance per connection
##
## added to keep track of clients
factory.clients = []  # shared list of connected IphoneChat instances
##
# assign our class as the protocol of our factory
factory.protocol = IphoneChat
# Port 80 is used because it is open by default (standard http port),
# which allows on-device testing without touching router settings.
reactor.listenTCP(80, factory)
print "Iphone Chat Server Started"
reactor.run()  # start the Twisted event loop; blocks until the reactor stops
| mit | Python | |
baeaa2b2e9ed7b6d932adae9aee0517a0e73428f | Add __init__ file to iso_639_3/ | noumar/iso639,noumar/iso639 | iso_639_3/__init__.py | iso_639_3/__init__.py | from __future__ import absolute_import
from iso_639_3.iso_639_3 import iso_639_3

# Module-level singleton: the ISO 639-3 language table, built once at
# import time so callers can use `from iso_639_3 import languages`.
languages = iso_639_3()
| agpl-3.0 | Python | |
923b32c3ff66539db01fd4f3008ee23f61348507 | Add files via upload | guillochon/MOSFiT,guillochon/MOSFiT,bmockler/MOSFiT,guillochon/FriendlyFit,mnicholl/MOSFiT,mnicholl/MOSFiT,mnicholl/MOSFiT,villrv/MOSFiT,guillochon/MOSFiT,bmockler/MOSFiT,villrv/MOSFiT | friendlyfit/modules/seds/envelopecore.py | friendlyfit/modules/seds/envelopecore.py | from math import pi
import numpy as np
import numexpr as ne
from astropy import constants as c
from ...constants import DAY_CGS, FOUR_PI, KM_CGS, M_SUN_CGS
from .sed import SED
CLASS_NAME = 'EnvelopeCore'
class EnvelopeCore(SED):
    """Expanding/receding photosphere with a core+envelope
    morphology and a blackbody spectral energy
    distribution.
    """

    FLUX_CONST = FOUR_PI * (2.0 * c.h / (c.c**2) * pi).cgs.value
    X_CONST = (c.h / c.k_B).cgs.value  # h / k_B: factor in the Planck exponent
    STEF_CONST = (4.0 * pi * c.sigma_sb).cgs.value
    PL_ENV = 10.0  # power-law index of the envelope density profile

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def process(self, **kwargs):
        """Build one blackbody SED per output time.

        Reads texplosion, times, luminosities, temperature, bands,
        vejecta, mejecta, kappa and redshift from *kwargs* and returns a
        dict with 'bandwavelengths' and the list of 'seds'.
        """
        self._t_explosion = kwargs['texplosion']
        self._times = kwargs['times']
        self._luminosities = kwargs['luminosities']
        self._temperature = kwargs['temperature']
        self._bands = kwargs['bands']
        self._v_ejecta = kwargs['vejecta']
        self._m_ejecta = kwargs['mejecta']
        self._kappa = kwargs['kappa']
        xc = self.X_CONST
        fc = self.FLUX_CONST
        slope = self.PL_ENV
        zp1 = 1.0 + kwargs['redshift']
        seds = []
        for li, lum in enumerate(self._luminosities):
            cur_band = self._bands[li]
            bi = self._band_names.index(cur_band)
            # Shift the observer-frame band frequencies into the rest frame.
            rest_freqs = [x * zp1 for x in self._band_frequencies[bi]]

            # Radius grows by free expansion since the explosion epoch.
            radius = self._v_ejecta * KM_CGS * (
                self._times[li] - self._t_explosion) * DAY_CGS

            # Compute density in the (uniform) core.
            rho_core = 3.0 * self._m_ejecta * M_SUN_CGS / (4.0 * pi *
                                                           radius**3)
            # Optical depth of the power-law envelope of negligible mass.
            tau_e = self._kappa * rho_core * radius / (slope - 1.0)

            # Photosphere lies in the envelope when tau_e > 2/3, else in
            # the core.  NOTE(review): radius_phot is computed but never
            # used below -- presumably it should feed the blackbody
            # radius; confirm against the rest of the model.
            if tau_e > 0.667:
                radius_phot = (2.0 * (slope - 1.0) / (
                    3.0 * self._kappa * rho_core *
                    radius**slope))**(1.0 / (1.0 - slope))
            else:
                radius_phot = slope * radius / (slope - 1.0) - 2.0 / (
                    3.0 * self._kappa * rho_core)

            # Temperature floor: once the blackbody radius implied by lum
            # recedes inside the ejecta, hold the temperature fixed.
            rec_radius = np.sqrt(lum /
                                 (self.STEF_CONST * self._temperature**4))
            if radius < rec_radius:
                radius2 = radius**2
                temperature = (lum / (self.STEF_CONST * radius2))**0.25
            else:
                radius2 = rec_radius**2
                temperature = self._temperature

            if li == 0:
                # Planck function: B_nu ~ nu^3 / (exp(h*nu/k*T) - 1).
                # Bug fix: the denominator must be parenthesized.  The
                # previous string evaluated
                #   (fc * radius2 * nu**3 / exp(...)) - 1.0
                # which disagrees with the commented-out numpy reference
                # implementation this numexpr call replaced.
                sed = ne.evaluate(
                    'fc * radius2 * rest_freqs**3 / '
                    '(exp(xc * rest_freqs / temperature) - 1.0)')
            else:
                # Re-run the cached numexpr expression with updated locals.
                sed = ne.re_evaluate()
            seds.append(sed)
        return {'bandwavelengths': self._band_wavelengths, 'seds': seds}
| mit | Python | |
35662cf6a174f1924f20d6b78241e6095fa9fb3e | Fix syntax error in build/all_android.gyp | anirudhSK/chromium,ondra-novak/chromium.src,PeterWangIntel/chromium-crosswalk,jaruba/chromium.src,ltilve/chromium,dushu1203/chromium.src,ondra-novak/chromium.src,crosswalk-project/chromium-crosswalk-efl,patrickm/chromium.src,bright-sparks/chromium-spacewalk,Just-D/chromium-1,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,crosswalk-project/chromium-crosswalk-efl,bright-sparks/chromium-spacewalk,chuan9/chromium-crosswalk,markYoungH/chromium.src,hgl888/chromium-crosswalk-efl,ltilve/chromium,mohamed--abdel-maksoud/chromium.src,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,dushu1203/chromium.src,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,ltilve/chromium,ltilve/chromium,dednal/chromium.src,jaruba/chromium.src,Chilledheart/chromium,M4sse/chromium.src,dushu1203/chromium.src,bright-sparks/chromium-spacewalk,Jonekee/chromium.src,anirudhSK/chromium,jaruba/chromium.src,Jonekee/chromium.src,ChromiumWebApps/chromium,crosswalk-project/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,ChromiumWebApps/chromium,crosswalk-project/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,chuan9/chromium-crosswalk,littlstar/chromium.src,Pluto-tv/chromium-crosswalk,anirudhSK/chromium,dednal/chromium.src,axinging/chromium-crosswalk,ltilve/chromium,markYoungH/chromium.src,TheTypoMaster/chromium-crosswalk,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,jaruba/chromium.src,fujunwei/chromium-crosswalk,patrickm/chromium.src,patrickm/chromium.src,hgl888/chromium-crosswalk-efl,Jonekee/chromium.src,hgl888/chromium-crosswalk-efl,ChromiumWebApps/chromium,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,dushu1203/chromium.src,M4sse/chromium.src,axinging/chromium-crosswalk,M4sse/chromium.src,dednal/chromium.src,jaruba/chromium.src,dedna
l/chromium.src,markYoungH/chromium.src,dednal/chromium.src,Pluto-tv/chromium-crosswalk,Chilledheart/chromium,axinging/chromium-crosswalk,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,ltilve/chromium,jaruba/chromium.src,jaruba/chromium.src,hgl888/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,anirudhSK/chromium,Pluto-tv/chromium-crosswalk,ondra-novak/chromium.src,chuan9/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,fujunwei/chromium-crosswalk,markYoungH/chromium.src,PeterWangIntel/chromium-crosswalk,dednal/chromium.src,dushu1203/chromium.src,axinging/chromium-crosswalk,Chilledheart/chromium,bright-sparks/chromium-spacewalk,littlstar/chromium.src,ltilve/chromium,axinging/chromium-crosswalk,dednal/chromium.src,ChromiumWebApps/chromium,markYoungH/chromium.src,bright-sparks/chromium-spacewalk,ChromiumWebApps/chromium,crosswalk-project/chromium-crosswalk-efl,M4sse/chromium.src,axinging/chromium-crosswalk,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk,ondra-novak/chromium.src,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk,ChromiumWebApps/chromium,Pluto-tv/chromium-crosswalk,littlstar/chromium.src,dednal/chromium.src,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk,anirudhSK/chromium,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,anirudhSK/chromium,krieger-od/nwjs_chromium.src,ChromiumWebApps/chromium,Fireblend/chromium-crosswalk,Fireblend/chromium-crosswalk,dushu1203/chromium.src,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,anirudhSK/chromium,chuan9/chromium-crosswalk,jaruba/chromium.src,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,fujunwei/chromium-crosswalk,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,anirudhSK/chromium,hgl888/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,patrickm/chromium.src,M4sse/chromium.src,Just-D/chromium-1,patr
ickm/chromium.src,jaruba/chromium.src,Chilledheart/chromium,M4sse/chromium.src,krieger-od/nwjs_chromium.src,markYoungH/chromium.src,dednal/chromium.src,Fireblend/chromium-crosswalk,krieger-od/nwjs_chromium.src,patrickm/chromium.src,Just-D/chromium-1,bright-sparks/chromium-spacewalk,TheTypoMaster/chromium-crosswalk,anirudhSK/chromium,markYoungH/chromium.src,anirudhSK/chromium,hgl888/chromium-crosswalk-efl,patrickm/chromium.src,Jonekee/chromium.src,ltilve/chromium,hgl888/chromium-crosswalk,fujunwei/chromium-crosswalk,dushu1203/chromium.src,hgl888/chromium-crosswalk-efl,dednal/chromium.src,ondra-novak/chromium.src,Just-D/chromium-1,crosswalk-project/chromium-crosswalk-efl,markYoungH/chromium.src,crosswalk-project/chromium-crosswalk-efl,ondra-novak/chromium.src,Chilledheart/chromium,Fireblend/chromium-crosswalk,Jonekee/chromium.src,patrickm/chromium.src,fujunwei/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,hgl888/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,Jonekee/chromium.src,PeterWangIntel/chromium-crosswalk,krieger-od/nwjs_chromium.src,Jonekee/chromium.src,markYoungH/chromium.src,ChromiumWebApps/chromium,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,krieger-od/nwjs_chromium.src,bright-sparks/chromium-spacewalk,ondra-novak/chromium.src,jaruba/chromium.src,Just-D/chromium-1,dednal/chromium.src,M4sse/chromium.src,bright-sparks/chromium-spacewalk,Jonekee/chromium.src,markYoungH/chromium.src,chuan9/chromium-crosswalk,Pluto-tv/chromium-crosswalk,krieger-od/nwjs_chromium.src,chuan9/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,krieger-od/nwjs_chromium.src,mohamed--abdel-maksoud/chromium.src,littlstar/chromium.src,patrickm/chromium.src,ChromiumWebApps/chromium,fujunwei/chromium-crosswalk,Jonekee/chromium.src,chuan9/chromium-crosswalk,Fireblend/chromium-crosswalk,ltilve/chromium,Jonekee/chromium.src,dushu1203/chromium.src,krieger-od/nwjs_chromium.src,krieger-od/nw
js_chromium.src,Just-D/chromium-1,ChromiumWebApps/chromium,krieger-od/nwjs_chromium.src,M4sse/chromium.src,littlstar/chromium.src,ChromiumWebApps/chromium,TheTypoMaster/chromium-crosswalk,M4sse/chromium.src,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,dushu1203/chromium.src,krieger-od/nwjs_chromium.src,anirudhSK/chromium,TheTypoMaster/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,dushu1203/chromium.src,anirudhSK/chromium,Just-D/chromium-1,Jonekee/chromium.src,ondra-novak/chromium.src,ChromiumWebApps/chromium,ondra-novak/chromium.src,TheTypoMaster/chromium-crosswalk,fujunwei/chromium-crosswalk,axinging/chromium-crosswalk,M4sse/chromium.src,hgl888/chromium-crosswalk,littlstar/chromium.src,PeterWangIntel/chromium-crosswalk,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,bright-sparks/chromium-spacewalk,Fireblend/chromium-crosswalk,M4sse/chromium.src | build/all_android.gyp | build/all_android.gyp | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO(thakis): Remove this file after https://codereview.chromium.org/139743016
# has been in the tree for a while.
{
'targets': [
{
'target_name': 'temporary_android_dummy_target',
'dependencies': [
'all.gyp:*',
],
},
],
}
| # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO(thakis): Remove this file after https://codereview.chromium.org/139743016
# has been in the tree for a while.
{
'targets': [
{
'target_name': 'temporary_android_dummy_target',
'dependencies': [
'all.gyp:*',
],
],
],
}
| bsd-3-clause | Python |
8751397275a6de993b2c317e85aee26c0ce3d9a9 | add tests for middleware | avlach/univbris-ocf,avlach/univbris-ocf,avlach/univbris-ocf,avlach/univbris-ocf | src/python/expedient_geni/tests.py | src/python/expedient_geni/tests.py | '''
Created on Oct 8, 2010
@author: jnaous
'''
from expedient.common.tests.manager import SettingsTestCase
from django.core.urlresolvers import reverse
from django.conf import settings
import os
from expedient_geni.utils import get_user_cert_fname, get_user_key_fname
from django.contrib.auth.models import User
from expedient.common.tests.client import test_get_and_post_form
def try_unlink(fname):
    """Delete *fname*, ignoring the case where it does not exist.

    Any other OS-level failure (permission denied, is a directory, ...)
    is re-raised.
    """
    import errno
    try:
        os.unlink(fname)
    except OSError as e:
        # Compare errno instead of matching "No such file" in the message
        # string: error messages vary across platforms and locales.
        if e.errno != errno.ENOENT:
            raise
class Tests(SettingsTestCase):
    """Integration tests for the GENI login flow.

    Verifies that a user certificate/key pair is generated on login and
    only on login.  setUp/tearDown remove any pre-existing cert and key
    so each test starts from a clean slate.
    """
    def setUp(self):
        # Fresh user plus the filesystem paths where their GENI
        # credentials would be written.
        self.u = User.objects.create_user(
            "test_user", "email@email.com", "password")
        self.cert_fname = get_user_cert_fname(self.u)
        self.key_fname = get_user_key_fname(self.u)
        # Make sure no stale credentials survive from an earlier run.
        try_unlink(self.cert_fname)
        try_unlink(self.key_fname)

    def test_login_redirect(self):
        """Check that a user who is not logged in gets redirected with no cert created."""
        response = self.client.get(reverse("home"))
        expected_url = settings.LOGIN_URL \
            + '?next=%s' % reverse("home")
        self.assertRedirects(
            response,
            expected_url,
        )
        # No authentication happened, so no credentials may exist yet.
        self.assertFalse(
            os.access(self.cert_fname, os.F_OK))
        self.assertFalse(
            os.access(self.key_fname, os.F_OK))

    def test_login(self):
        """Check that users can login and get a cert created for them."""
        response = test_get_and_post_form(
            self.client,
            settings.LOGIN_URL,
            {"username": "test_user", "password": "password"},
        )
        expected_url = reverse("home")
        self.assertRedirects(
            response,
            expected_url,
        )
        # Logging in must have triggered cert/key generation.
        self.assertTrue(
            os.access(self.cert_fname, os.F_OK))
        self.assertTrue(
            os.access(self.key_fname, os.F_OK))

    def tearDown(self):
        # Remove credentials created during the test.
        try_unlink(self.cert_fname)
        try_unlink(self.key_fname)
| bsd-3-clause | Python | |
4053948ba58b266ef66e181776c2fe4bc37b4760 | Create Anagrams.py | UmassJin/Leetcode | Array/Anagrams.py | Array/Anagrams.py | Given an array of strings, return all groups of strings that are anagrams.
Note: All inputs will be in lower-case.
Reference: http://blog.csdn.net/linhuanmars/article/details/21664747
# Notes: anagrams definition: two strings, which have the same character, maybe different order
# 1) Save the SORTED string as the key in the dictionary
# 2) Save each string as the value (put into a list), then push them into the list
class Solution:
    # @param strs, a list of strings
    # @return a list of strings
    def anagrams(self, strs):
        """Return every string that belongs to an anagram group of two
        or more members, preserving the original relative order.

        Two strings are anagrams iff their sorted characters match, so
        the sorted string serves as the grouping key.
        """
        from collections import defaultdict
        # defaultdict removes the manual membership check; also avoids
        # the original's shadowing of the builtin name `dict`.
        groups = defaultdict(list)
        for word in strs:
            groups[''.join(sorted(word))].append(word)
        result = []
        for members in groups.values():
            if len(members) >= 2:
                result.extend(members)
        return result
| mit | Python | |
98e7f719b379ad4bdb87e6cb077b3d9d6340107b | Monitor for nexus | CiscoSystems/os-sqe,CiscoSystems/os-sqe,CiscoSystems/os-sqe | lab/monitors/nexus.py | lab/monitors/nexus.py |
def monitor(context, log, args):
    """Poll a Cisco Nexus switch over NX-API and log its state.

    Every args['period'] seconds, for args['duration'] seconds total,
    logs the allowed VLANs of each port-channel, the configured VLANs
    and the active user sessions.

    :param context: unused here; part of the common monitor interface.
    :param log: logger receiving the periodic status lines.
    :param args: dict with ip, username, password, period, duration and
                 optionally name.
    """
    import time
    import logging
    import json

    import requests

    # Bug fix: the format placeholder was missing ('Nexus_'.format(...)),
    # so the default name was always the literal 'Nexus_'.
    nexus_name = args.get('name', 'Nexus_{0}'.format(args['ip']))

    def _nxapi(commands):
        """Run CLI commands via NX-API JSON-RPC; best effort.

        Returns the parsed response, or {} if the HTTP request or any
        individual command failed (failures are logged, not raised).
        """
        request = [{"jsonrpc": "2.0", "method": "cli", "params": {"cmd": command, "version": 1}, "id": 1} for command in commands]
        try:
            results = requests.post('http://{0}/ins'.format(args['ip']), auth=(args['username'], args['password']),
                                    headers={'content-type': 'application/json-rpc'}, data=json.dumps(request)).json()
            for i, x in enumerate(results):
                if 'error' in x:
                    raise Exception('Error: {0} in command: {1}'.format(x['error']['data']['msg'].strip('\n'), commands[i]))
            return results
        except Exception:
            # Narrowed from a bare 'except:': that form also swallowed
            # SystemExit and KeyboardInterrupt.
            logging.exception("Exception while executing nexus command")
            return {}

    def _make_vlans_set(vlans_str):
        """Convert an allowed-vlans string into a set of ints.

        :param vlans_str: Ex: 1,177,2006,3000-3004
        :return: {1, 177, 2006, 3000, 3001, 3002, 3003, 3004}
        """
        vlans = set()
        for vlan_range in vlans_str.split(','):
            se = vlan_range.split('-')
            if len(se) == 2:
                vlans |= set(range(int(se[0]), int(se[1]) + 1))
            elif len(se) == 1:
                vlans.add(int(se[0]))
        return vlans

    def _get_item(dictionary, path, default=None):
        """Safely walk *path* (a list of keys) through nested dicts.

        :param dictionary: source mapping
        :param path: list of keys, outermost first
        :param default: value returned when the path is absent
        """
        d = dictionary
        for key in path[:-1]:
            d = d.get(key, {})
        return d.get(path[-1], default)

    log.info('Starting NXOS monitoring {0}'.format(nexus_name))
    start_time = time.time()
    port_channels = _get_item(_nxapi(['show port-channel summary']), ['result', 'body', 'TABLE_channel', 'ROW_channel'], [])
    while start_time + args['duration'] > time.time():
        # Allowed vlans per port-channel
        for port_channel in port_channels:
            pc_name = port_channel['port-channel']
            cmd = 'show interface {0} switchport'.format(pc_name)
            allowed_vlans = _nxapi([cmd])['result']['body']['TABLE_interface']['ROW_interface']['trunk_vlans']
            log.info('{0} {1} Allowed vlans: {2}'.format(nexus_name, pc_name, _make_vlans_set(allowed_vlans)))
        # Configured vlans
        vlans = _get_item(_nxapi(['show vlan']), ['result', 'body', 'TABLE_vlanbrief', 'ROW_vlanbrief'], [])
        log.info('{0} Vlans: {1}'.format(nexus_name, vlans))
        # User sessions (a single session comes back as a dict, not a list)
        users = _get_item(_nxapi(['show users']), ['result', 'body', 'TABLE_sessions', 'ROW_sessions'], [])
        users = [users] if isinstance(users, dict) else users
        # List comprehension instead of map(): under Python 3 a lazy map
        # object would be logged as '<map object ...>'.
        log.info('{0} User sessions: {1}'.format(nexus_name, [u['p_pid'] for u in users]))
        time.sleep(args['period'])
| apache-2.0 | Python | |
adc12504811dff0318f085887e207517d780862b | add merge migration | felliott/osf.io,aaxelb/osf.io,brianjgeiger/osf.io,erinspace/osf.io,erinspace/osf.io,Johnetordoff/osf.io,baylee-d/osf.io,adlius/osf.io,felliott/osf.io,Johnetordoff/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,erinspace/osf.io,mattclark/osf.io,caseyrollins/osf.io,HalcyonChimera/osf.io,mfraezz/osf.io,pattisdr/osf.io,saradbowman/osf.io,mfraezz/osf.io,aaxelb/osf.io,pattisdr/osf.io,caseyrollins/osf.io,HalcyonChimera/osf.io,mattclark/osf.io,CenterForOpenScience/osf.io,pattisdr/osf.io,Johnetordoff/osf.io,adlius/osf.io,HalcyonChimera/osf.io,baylee-d/osf.io,aaxelb/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,baylee-d/osf.io,adlius/osf.io,saradbowman/osf.io,brianjgeiger/osf.io,felliott/osf.io,CenterForOpenScience/osf.io,caseyrollins/osf.io,CenterForOpenScience/osf.io,felliott/osf.io,cslzchen/osf.io,mattclark/osf.io,HalcyonChimera/osf.io,adlius/osf.io,mfraezz/osf.io,aaxelb/osf.io,Johnetordoff/osf.io,cslzchen/osf.io,CenterForOpenScience/osf.io,mfraezz/osf.io | osf/migrations/0106_merge_20180531_0919.py | osf/migrations/0106_merge_20180531_0919.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-05-31 14:19
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated empty merge migration.

    Reconciles the 0104 and 0105 branches of the osf migration graph;
    performs no schema changes.
    """

    dependencies = [
        ('osf', '0105_merge_20180525_1529'),
        ('osf', '0104_merge_20180524_1257'),
    ]

    # Intentionally empty: a merge migration only joins graph branches.
    operations = [
    ]
| apache-2.0 | Python | |
cac9e9fa65a9b7f1055f193822a66e5edcad88ee | Create twitch.py | TingPing/plugins,TingPing/plugins | HexChat/twitch.py | HexChat/twitch.py | import hexchat
__module_name__ = 'Twitch'
__module_author__ = 'TingPing'
__module_version__ = '0'
__module_description__ = 'Better integration with Twitch.tv'

# Very much a work in progress...
# Best used with my 'mymsg.py' script so you can sync your messages with the web clients.
# Commands from http://help.twitch.tv/customer/portal/articles/659095-chat-moderation-commands
# /ban may conflict with other scripts; nothing we can do about that.
# /clear is an existing command, so hooking it simply overrides it.
# Moderation commands to forward to Twitch as dot-commands (see command_cb).
commands = ['timeout', 'slow', 'slowoff', 'subscribers', 'subscribersoff',
            'mod', 'unmod', 'mods', 'clear', 'ban', 'unban', 'commercial']
def is_twitch():
    """Return True when the current server is Twitch's IRC gateway."""
    # Direct boolean expression instead of if/else returning True/False.
    return 'twitch.tv' in hexchat.get_info('server')
def msg_cb(word, word_eol, userdata):
    """Show private messages from the 'jtv' service user in the
    currently focused tab instead of opening a separate query."""
    if not is_twitch():
        return
    sender, text = word[0], word[1]
    if hexchat.nickcmp(sender, 'jtv') != 0:
        return
    # Re-emit into whichever tab has focus, then eat the original event.
    hexchat.find_context().emit_print('Private Message', sender, text)
    return hexchat.EAT_ALL
def yourmsg_cb(word, word_eol, userdata):
    """Suppress the local echo of our own dot-prefixed lines on Twitch;
    the Twitch server eats all of them anyway."""
    # Robustness fix: startswith() is safe for an empty message, whereas
    # word[1][0] raised IndexError on "".
    if is_twitch() and word[1].startswith('.'):
        return hexchat.EAT_ALL
def command_cb(word, word_eol, userdata):
    """Forward a hooked /command to Twitch as the dot-prefixed chat
    command the web client uses (e.g. /ban foo -> '.ban foo')."""
    if not is_twitch():
        # NOTE(review): on non-Twitch networks the event is left for the
        # normal handlers, matching the original behaviour.
        return
    hexchat.command('say .{}'.format(word_eol[0]))
    return hexchat.EAT_ALL
# Register one /command hook per moderation command, plus print hooks for
# incoming jtv private messages and our own outgoing messages.
for cmd in commands:
    hexchat.hook_command(cmd, command_cb)
hexchat.hook_print('Private Message to Dialog', msg_cb)
hexchat.hook_print('Your Message', yourmsg_cb)
| mit | Python | |
53d1e2ca188fbcae1899b6190ad3e59bdf5911bb | Create base64_enc_dec.py | agusmakmun/Some-Examples-of-Simple-Python-Script,agusmakmun/Some-Examples-of-Simple-Python-Script | Encryption/base64_enc_dec.py | Encryption/base64_enc_dec.py | """
Name : Base64 encoder and decoder
Created By : Agus Makmun (Summon Agus)
Blog : bloggersmart.net - python.web.id
License : GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007
Documentation : https://github.com/agusmakmun/Some-Examples-of-Simple-Python-Script/
Powered : Python-2.7, mpg321
"""
import base64
class Encode():
    """Thin wrappers around the standard library's base16/base32/base64
    encoders; each method simply delegates to the matching base64 call."""

    def _base16_encode(self, encode_string):
        # Hex (RFC 4648 base16) encoding.
        return base64.b16encode(encode_string)

    def _base32_encode(self, encode_string):
        return base64.b32encode(encode_string)

    def _base64_encode(self, encode_string):
        return base64.b64encode(encode_string)

    def _base64_encode_string(self, encode_string):
        # Legacy API: inserts a newline after every 76 output characters.
        return base64.encodestring(encode_string)

    def _base64_standard_encode(self, encode_string):
        return base64.standard_b64encode(encode_string)

    def _base64_urlsafe_encode(self, encode_string):
        # sample: 'http://bloggersmart.net'
        return base64.urlsafe_b64encode(encode_string)
class Decode():
    """Thin wrappers around the standard library's base16/base32/base64
    decoders."""

    def _base16_decode(self, decode_string):
        # Bug fix: the keyword is 'casefold', not 'casefol' (the typo
        # raised TypeError on every call).
        return base64.b16decode(decode_string, casefold=False)

    def _base32_decode(self, decode_string):
        # Bug fix: same 'casefol' -> 'casefold' typo as above.
        return base64.b32decode(decode_string, casefold=False, map01=None)

    def _base64_decode(self, decode_string):
        return base64.b64decode(decode_string)

    def _base64_decode_string(self, decode_string):
        # sample string: 'c3Nz\n'
        return base64.decodestring(decode_string)

    def _base64_standard_decode(self, decode_string):
        # sample string: 'c3Nz'
        return base64.standard_b64decode(decode_string)

    def _base64_urlsafe_decode(self, decode_string):
        # sample string: 'aHR0cDovL2Jsb2dnZXJzbWFydC5uZXQ='
        # Bug fix: this decoder previously called urlsafe_b64encode,
        # encoding its input instead of decoding it.
        return base64.urlsafe_b64decode(decode_string)
#mome = Decode()
#mome._base64_urlsafe_decode('aHR0cDovL2Jsb2dnZXJzbWFydC5uZXQ=')
#print mome._base64_decode_string('c3Nz\n')
| agpl-3.0 | Python | |
94500a279da6f03362c82961e9a97d0223175bd7 | Create LabelPerimeter.py | DigitalSlideArchive/HistomicsTK,DigitalSlideArchive/HistomicsTK | LabelPerimeter.py | LabelPerimeter.py | import numpy as np
def LabelPerimeter(L, Connectivity=4):
"""Converts a label or binary mask image to a binary perimeter image.
Uses 4-neighbor or 8-neighbor shifts to detect pixels whose values do
not agree with their neighbors.
Parameters
----------
L : array_like
A label or binary mask image.
Connectivity : double or int
Neighborhood connectivity to evaluate. Valid values are 4 or 8.
Default value = 4.
Returns
-------
Mask : array_like
A binary image where object perimeter pixels have value 1, and
non-perimeter pixels have value 0.
See Also
--------
EmbedBounds
"""
# initialize temporary variable
Mask = np.zeros(L.shape)
Temp = np.zeros(L.shape)
# check left-right neighbors
Temp[:, 0:-2] = np.not_equal(L[:, 0:-2], L[:, 1:-1])
Temp[:, 1:-1] = np.logical_or(Temp[:, 1:-1], Temp[:, 0:-2])
Mask = np.logical_or(Mask, Temp)
# check up-down neighbors
Temp[0:-2, :] = np.not_equal(L[0:-2, :], L[1:-1, :])
Temp[1:-1, :] = np.logical_or(Temp[1:-1, :], Temp[0:-2, :])
Mask = np.logical_or(Mask, Temp)
# additional calculations if Connectivity == 8
if(Connectivity == 8):
# slope 1 diagonal shift
Temp[1:-1, 0:-2] = np.not_equal(L[0:-2, 1:-2], L[1:-1, 0:-2])
Temp[0:-2, 1:-1] = np.logical_or(Temp[0:-2, 1:-1], Temp[1:-1, 0:-2])
Mask = np.logical_or(Mask, Temp)
# slope -1 diagonal shift
Temp[1:-1, 1:-1] = np.not_equal(L[0:-2, 0:-2], L[1:-1, 1:-1])
Temp[0:-2, 0:-2] = np.logical_or(Temp[0:-2, 0:-2], Temp[1:-1, 1:-1])
Mask = np.logical_or(Mask, Temp)
# generate label-valued output
return Mask.astype(np.float) * L
| apache-2.0 | Python | |
cf6cf3433cbc816a14534f12bad36d20c908d869 | Create Subset.py | OLAPLINE/TM1py,MariusWirtz/TM1py | Samples/Subset.py | Samples/Subset.py | import uuid
from TM1py import Subset, TM1Queries

# Random subset names so repeated runs do not collide with existing subsets.
random_string1 = str(uuid.uuid4())
random_string2 = str(uuid.uuid4())

# NOTE(review): credentials are hard-coded (admin/apple) -- acceptable
# for a sample against a local demo instance, never for production.
q = TM1Queries(ip='', port=8008, user='admin', password='apple', ssl=True)

# create dynamic subset (MDX-expression driven)
s = Subset(dimension_name='plan_business_unit', subset_name=random_string1,
           expression='{ HIERARCHIZE( {TM1SUBSETALL( [plan_business_unit] )} ) }')
response = q.create_subset(s)

# create static subset (explicit element list; duplicates are permitted)
s = Subset(dimension_name='plan_business_unit', subset_name=random_string2,
           elements=['10000', '10000', '10000', '10000', '10000', '10000', '10000', '10000', '10000'])
response = q.create_subset(s)

# delete both subsets again so the sample leaves no trace
q.delete_subset(name_dimension='plan_business_unit', name_subset=random_string1)
q.delete_subset(name_dimension='plan_business_unit', name_subset=random_string2)

# logout
q. logout()
| mit | Python | |
bf6737d8ddd8c0ff21a550da44d23b8ec39c70eb | Add 1-4.py, simple hashing in set 1 | SingingTree/WarGames | set1/1-4.py | set1/1-4.py | # This is a hack to make sure Python 2.x and 3.x behave the same
# Compatibility shim: on Python 2, rebind input() to raw_input() so it
# returns the typed string instead of eval()'ing it.  On Python 3
# raw_input does not exist, so the NameError leaves builtin input() as-is.
try:
    input = raw_input
except NameError:
    pass
# Real program starts here
def hash_password(string):
    """Toy "hash": the password's length modulo 10, as a string.

    (Deliberately weak -- this wargame exercise demonstrates how little
    a length-based digest reveals about the password.)
    """
    digest = len(string) % 10
    return str(digest)
hashed_password = "6"  # stored "hash" of the secret: its length mod 10

print("Please enter the password")
password_input = input()

# Any guess whose length mod 10 equals 6 passes -- the point of the
# exercise: length is not a real hash.
if(hash_password(password_input) == hashed_password):
    print("Success, huzzah")
else:
    print("Not success, boo")
| mit | Python | |
861a5fcda82fefbe10c844fda4075688dc6baf8e | Save submissions, and redirect user to the submission detail page after completing form. | patricmutwiri/pombola,hzj123/56th,mysociety/pombola,ken-muturi/pombola,mysociety/pombola,mysociety/pombola,ken-muturi/pombola,patricmutwiri/pombola,hzj123/56th,geoffkilpin/pombola,geoffkilpin/pombola,mysociety/pombola,patricmutwiri/pombola,patricmutwiri/pombola,ken-muturi/pombola,patricmutwiri/pombola,geoffkilpin/pombola,geoffkilpin/pombola,ken-muturi/pombola,hzj123/56th,hzj123/56th,mysociety/pombola,hzj123/56th,patricmutwiri/pombola,ken-muturi/pombola,geoffkilpin/pombola,mysociety/pombola,ken-muturi/pombola,hzj123/56th,geoffkilpin/pombola | mzalendo/votematch/views.py | mzalendo/votematch/views.py | import models
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template import RequestContext
def quiz_detail (request, slug):
    """Show a quiz; on POST, store the submitted answers.

    When at least one valid answer is posted, a Submission (plus one
    Answer row per statement) is created and the user is redirected to
    its detail page; otherwise the quiz form is (re)rendered.
    """
    quiz = get_object_or_404(
        models.Quiz,
        slug=slug
    )

    # If this is a POST then extract all the answers
    if request.method == 'POST':

        # Get the answers, using the quiz's current set of statements to
        # look for submitted values.  Anything else in the POST is ignored.
        answers = {}
        statements = {}
        for statement in quiz.statement_set.all():
            statements[statement.id] = statement
            val = request.POST.get( 'statement-' + str(statement.id) )
            # Bug fix: .get() returns None when the field is absent, and
            # len(None) raised TypeError.  A truthiness test covers both
            # the missing field and the "" used for 'don't know' defaults.
            if val:
                answers[statement.id] = int(val)

        if len(answers):
            submission = models.Submission.objects.create(quiz=quiz)
            for statement_id, answer in answers.iteritems():
                submission.answer_set.create(
                    statement = statements[statement_id],
                    agreement = answer
                )
            return redirect(submission)

    return render_to_response(
        'votematch/quiz_detail.html',
        {
            'object': quiz,
            'choices': models.agreement_choices,
        },
        context_instance=RequestContext(request)
    )
def submission_detail (request, slug, token):
    """Display one quiz submission, looked up by its unique token.

    TODO - the quiz slug is not checked against the submission's quiz.
    We don't really care, but it should be validated for correctness.
    """
    found = get_object_or_404(
        models.Submission,
        token = token
    )
    template_context = {
        'object': found,
    }
    return render_to_response(
        'votematch/submission_detail.html',
        template_context,
        context_instance=RequestContext(request)
    )
| import models
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template import RequestContext
def quiz_detail (request, slug):
    """Render the quiz page (read-only; answer submission not handled
    in this version)."""
    quiz = get_object_or_404(
        models.Quiz,
        slug=slug
    )
    return render_to_response(
        'votematch/quiz_detail.html',
        {
            'object': quiz,
            # agreement_choices drives the answer options in the template.
            'choices': models.agreement_choices,
        },
        context_instance=RequestContext(request)
    )
def submission_detail (request, slug, token):
    """Display one quiz submission, looked up by its unique token."""
    # TODO - we're not checking that the quiz slug is correct. We don't
    # really care - but should probably check just to be correct.
    submission = get_object_or_404(
        models.Submission,
        token = token
    )
    return render_to_response(
        'votematch/submission_detail.html',
        {
            'object': submission,
        },
        context_instance=RequestContext(request)
    )
| agpl-3.0 | Python |
4653596825bf00fe403e77c4b11a0def6bac2649 | add settings.py | koox00/scalable-py,koox00/scalable-py,koox00/scalable-py | settings.py | settings.py | #!/usr/bin/env python
"""settings.py

Udacity conference server-side Python App Engine app user settings

$Id$

created/forked from conference.py by wesc on 2014 may 24
"""

# Replace the following lines with client IDs obtained from the APIs
# Console or Cloud Console.
WEB_CLIENT_ID = ''  # OAuth2 client ID of the web front end
ANDROID_CLIENT_ID = 'replace with Android client ID'
IOS_CLIENT_ID = 'replace with iOS client ID'
# Endpoints convention: the Android audience is the web client ID.
ANDROID_AUDIENCE = WEB_CLIENT_ID
| apache-2.0 | Python | |
dc6ac64817d9eb1022c4b8e8a0bba399104c2bbc | Set HOME too | ceibal-tatu/sugar-toolkit-gtk3,puneetgkaur/backup_sugar_sugartoolkit,samdroid-apps/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit,quozl/sugar-toolkit-gtk3,puneetgkaur/backup_sugar_sugartoolkit,i5o/sugar-toolkit-gtk3,i5o/sugar-toolkit-gtk3,manuq/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit-gtk3,Daksh/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit-gtk3,gusDuarte/sugar-toolkit-gtk3,puneetgkaur/sugar-toolkit-gtk3,manuq/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit,quozl/sugar-toolkit-gtk3,tchx84/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit,tchx84/debian-pkg-sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit,tchx84/debian-pkg-sugar-toolkit-gtk3,puneetgkaur/sugar-toolkit-gtk3,puneetgkaur/backup_sugar_sugartoolkit,samdroid-apps/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit-gtk3,i5o/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,ceibal-tatu/sugar-toolkit,tchx84/sugar-toolkit-gtk3,samdroid-apps/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit,quozl/sugar-toolkit-gtk3,gusDuarte/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,godiard/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,godiard/sugar-toolkit-gtk3,gusDuarte/sugar-toolkit-gtk3,samdroid-apps/sugar-toolkit-gtk3,tchx84/sugar-toolkit-gtk3,i5o/sugar-toolkit-gtk3,gusDuarte/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit-gtk3,puneetgkaur/sugar-toolkit-gtk3,Daksh/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit-gtk3,manuq/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,godiard/sugar-toolkit-gtk3,Daksh/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit-gtk3 | spawn-test-env.py | spawn-test-env.py | #!/usr/bin/python -t
import sys, os
import gtk, gobject
import pwd
import types
def change_user(user):
    """Drop this process's uid to *user*; return (login_shell, home_dir).

    Raises Exception for an unknown username.  os.setuid() itself raises
    OSError if the caller lacks permission to change uid.
    """
    try:
        pwrec = pwd.getpwnam(user)
    except KeyError:
        raise Exception("Username '%s' does not exist." % user)
    # Named struct fields instead of magic tuple indices (2, 6, 5).
    os.setuid(pwrec.pw_uid)
    return (pwrec.pw_shell, pwrec.pw_dir)
def shell_watch_cb(pid, condition, user_data=None):
    # The spawned shell exited: leave the gtk main loop so main() can
    # clean up the helper daemons.
    gtk.main_quit()
def main():
if len(sys.argv) < 2:
print "Usage: %s <test user>" % sys.argv[0]
user = sys.argv[1]
# Start Xephyr
DISPLAY = ":10"
args = "/usr/bin/Xephyr -ac -host-cursor -screen 800x600 %s" % DISPLAY
args = args.split()
(xephyr_pid, ign1, ign2, ign3) = gobject.spawn_async(args, flags=gobject.SPAWN_STDERR_TO_DEV_NULL | gobject.SPAWN_STDOUT_TO_DEV_NULL)
print "Xepyhr pid is %d" % xephyr_pid
(shell, home) = change_user(user)
args = "/bin/dbus-daemon --session --print-address".split()
(dbus_pid, ign1, dbus_stdout, ign3) = gobject.spawn_async(args, flags=gobject.SPAWN_STDERR_TO_DEV_NULL, standard_output=True)
dbus_file = os.fdopen(dbus_stdout)
addr = dbus_file.readline()
addr = addr.strip()
print "dbus-daemon pid is %d, session bus address is %s" % (dbus_pid, addr)
dbus_file.close()
os.environ["DISPLAY"] = DISPLAY
os.environ["DBUS_SESSION_BUS_ADDRESS"] = addr
os.environ["HOME"] = home
args = "/usr/bin/metacity"
(metacity_pid, ign1, ign2, ign3) = gobject.spawn_async([args], flags=gobject.SPAWN_STDERR_TO_DEV_NULL | gobject.SPAWN_STDOUT_TO_DEV_NULL)
print "\n"
(shell_pid, ign1, ign2, ign3) = gobject.spawn_async([shell], flags=gobject.SPAWN_LEAVE_DESCRIPTORS_OPEN | gobject.SPAWN_CHILD_INHERITS_STDIN | gobject.SPAWN_DO_NOT_REAP_CHILD)
gobject.child_watch_add(shell_pid, shell_watch_cb)
try:
gtk.main()
except KeyboardInterrupt:
pass
try:
os.kill(dbus_pid, 9)
except OSError:
pass
try:
os.kill(metacity_pid, 9)
except OSError:
pass
if __name__ == "__main__":
main()
| #!/usr/bin/python -t
import sys, os
import gtk, gobject
import pwd
import types
def change_user(user):
try:
pwrec = pwd.getpwnam(user)
except KeyError:
raise Exception("Username '%s' does not exist." % user)
uid = pwrec[2]
os.setuid(uid)
return pwrec[6]
def shell_watch_cb(pid, condition, user_data=None):
gtk.main_quit()
def main():
if len(sys.argv) < 2:
print "Usage: %s <test user>" % sys.argv[0]
user = sys.argv[1]
# Start Xephyr
DISPLAY = ":10"
args = "/usr/bin/Xephyr -ac -host-cursor -screen 800x600 %s" % DISPLAY
args = args.split()
(xephyr_pid, ign1, ign2, ign3) = gobject.spawn_async(args, flags=gobject.SPAWN_STDERR_TO_DEV_NULL | gobject.SPAWN_STDOUT_TO_DEV_NULL)
print "Xepyhr pid is %d" % xephyr_pid
shell = change_user(user)
args = "/bin/dbus-daemon --session --print-address".split()
(dbus_pid, ign1, dbus_stdout, ign3) = gobject.spawn_async(args, flags=gobject.SPAWN_STDERR_TO_DEV_NULL, standard_output=True)
dbus_file = os.fdopen(dbus_stdout)
addr = dbus_file.readline()
addr = addr.strip()
print "dbus-daemon pid is %d, session bus address is %s" % (dbus_pid, addr)
dbus_file.close()
os.environ["DISPLAY"] = DISPLAY
os.environ["DBUS_SESSION_BUS_ADDRESS"] = addr
args = "/usr/bin/metacity"
(metacity_pid, ign1, ign2, ign3) = gobject.spawn_async([args], flags=gobject.SPAWN_STDERR_TO_DEV_NULL | gobject.SPAWN_STDOUT_TO_DEV_NULL)
print "\n"
(shell_pid, ign1, ign2, ign3) = gobject.spawn_async([shell], flags=gobject.SPAWN_LEAVE_DESCRIPTORS_OPEN | gobject.SPAWN_CHILD_INHERITS_STDIN | gobject.SPAWN_DO_NOT_REAP_CHILD)
gobject.child_watch_add(shell_pid, shell_watch_cb)
try:
gtk.main()
except KeyboardInterrupt:
pass
try:
os.kill(dbus_pid, 9)
except OSError:
pass
try:
os.kill(metacity_pid, 9)
except OSError:
pass
if __name__ == "__main__":
main()
| lgpl-2.1 | Python |
08ec17c3ad674ff249307d03be8decb9c241a5d6 | add simple test | twdb/sdi | test/test_normalized_intensities.py | test/test_normalized_intensities.py | import os
import unittest
import numpy as np
from sdi.binary import Dataset
class TestRead(unittest.TestCase):
""" Test basic reading of binary files
"""
def setUp(self):
self.test_dir = os.path.dirname(__file__)
def test_normalized_intensities(self):
""" Test that normalized intensities lie in the interval [0,1] """
for root, dirs, files in os.walk(os.path.join(self.test_dir, 'files')):
for filename in files:
if filename.endswith('.bin'):
d = Dataset(os.path.join(root, filename))
d.parse()
self.assertLessEqual(np.nanmax(d.intensity_image), np.float64(1))
self.assertGreaterEqual(np.nanmin(d.intensity_image), np.float64(0))
if __name__ == '__main__':
unittest.main() | bsd-3-clause | Python | |
9bad5f9233519f55a8fcdf74f8ce67a280529594 | Add a storage class for ProfileData | SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange | app/soc/profiling/storage.py | app/soc/profiling/storage.py | #!/usr/bin/python2.5
#
# Copyright 2010 the Melange authors.
# Copyright 2009 Jake McGuire.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing a storage model for stats data.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from google.appengine.ext import db
class ProfileData(db.Model):
"""Profile data for one request.
"""
#: the profile data in gzipped pickled string form
profile = db.BlobProperty(required=True)
#: the path of the request this profile data belongs to
path = db.StringProperty(required=True)
#: the user that made the request this profile data belongs to, if any
user = db.UserProperty()
def from_key(key):
"""Returns profile data for the specified key.
"""
return ProfileData.get_by_key_name(key)
def store(key, path, profile, user):
"""Stores the profile data with the specified attributes.
"""
import logging
logging.info(dir(user))
ProfileData.get_or_insert(key, path=path, profile=profile, user=user)
| apache-2.0 | Python | |
5681936147081229eba1b4c0dc7faff89a281b21 | Create solution.py | lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges | leetcode/easy/maxium_subarray/py/solution.py | leetcode/easy/maxium_subarray/py/solution.py | class Solution(object):
def maxSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums) == 0:
return 0
maxSub = nums[0]
maxSoFar = nums[0]
for index in range(1, len(nums)):
maxSoFar = max(nums[index], maxSoFar + nums[index])
maxSub = max(maxSub, maxSoFar)
return maxSub
| mit | Python | |
71a1a8efe93312373f2ec83536b265be0246c895 | Add "xcsoar.flightpath.FlightPathFix" constructor tests | Harry-R/skylines,skylines-project/skylines,Turbo87/skylines,RBE-Avionik/skylines,skylines-project/skylines,Turbo87/skylines,shadowoneau/skylines,RBE-Avionik/skylines,Turbo87/skylines,skylines-project/skylines,Harry-R/skylines,Harry-R/skylines,shadowoneau/skylines,shadowoneau/skylines,RBE-Avionik/skylines,shadowoneau/skylines,Turbo87/skylines,Harry-R/skylines,RBE-Avionik/skylines,skylines-project/skylines | tests/lib/xcsoar/flightpath_test.py | tests/lib/xcsoar/flightpath_test.py | from datetime import datetime
from skylines.lib.xcsoar_ import FlightPathFix
from pytest import approx
def test_list_to_fix():
values = [datetime(2016, 5, 4, 8, 10, 50), 29450,
dict(latitude=50.82191666668235, longitude=6.181650000001908),
230, 48, None, None, 0, None, None, 8, None, 0]
fix = FlightPathFix(*values)
assert fix.datetime.isoformat() == '2016-05-04T08:10:50'
assert fix.seconds_of_day == 29450
assert fix.location['latitude'] == approx(50.82191666668235)
assert fix.location['longitude'] == approx(6.181650000001908)
assert fix.gps_altitude == 230
assert fix.pressure_altitude == 48
assert fix.enl == None
assert fix.track == None
assert fix.groundspeed == 0
assert fix.tas == None
assert fix.ias == None
assert fix.siu == 8
assert fix.elevation == None
def test_kwargs():
fix = FlightPathFix(
datetime=datetime(2016, 5, 4, 8, 10, 50), seconds_of_day=29450,
location=dict(latitude=50.82191666668235, longitude=6.181650000001908),
gps_altitude=230, pressure_altitude=48, groundspeed=0, siu=8
)
assert fix.datetime.isoformat() == '2016-05-04T08:10:50'
assert fix.seconds_of_day == 29450
assert fix.location['latitude'] == approx(50.82191666668235)
assert fix.location['longitude'] == approx(6.181650000001908)
assert fix.gps_altitude == 230
assert fix.pressure_altitude == 48
assert fix.enl == None
assert fix.track == None
assert fix.groundspeed == 0
assert fix.tas == None
assert fix.ias == None
assert fix.siu == 8
assert fix.elevation == None
| agpl-3.0 | Python | |
3b3294f337c1658b952ea05a385252bd618d4def | Create ThrSumClose_001.py | Chasego/codirit,Chasego/codirit,Chasego/cod,Chasego/cod,Chasego/codi,cc13ny/algo,cc13ny/algo,Chasego/codi,cc13ny/Allin,Chasego/codi,Chasego/codirit,Chasego/codi,Chasego/cod,cc13ny/algo,Chasego/cod,cc13ny/algo,Chasego/codirit,cc13ny/Allin,Chasego/cod,Chasego/codi,cc13ny/algo,cc13ny/Allin,Chasego/codirit,cc13ny/Allin,cc13ny/Allin | leetcode/016-3Sum-Closest/ThrSumClose_001.py | leetcode/016-3Sum-Closest/ThrSumClose_001.py | #@author: cchen
#It will be updated later
class Solution:
# @return an integer
def threeSumClosest(self, num, target):
num.sort()
mindiff=100000
res=0
for i in range(len(num)):
left=i+1; right=len(num)-1
while left<right:
sum=num[i]+num[left]+num[right]
diff=abs(sum-target)
if diff<mindiff: mindiff=diff; res=sum
if sum==target: return sum
elif sum<target: left+=1
else: right-=1
return res
| mit | Python | |
1d9dac555897f408fc2216a315b6f48dea94583a | add github auth | feilaoda/dojang | auth/github.py | auth/github.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
import logging
import tornado.auth
from tornado import httpclient
from tornado import escape
from tornado.options import options
class GithubMixin(tornado.auth.OAuth2Mixin):
_OAUTH_ACCESS_TOKEN_URL = "http://github.com/login/oauth/access_token"
_OAUTH_AUTHORIZE_URL = "http://github.com/login/oauth/authorize"
_OAUTH_NO_CALLBACKS = False
def get_authenticated_user(self, redirect_uri, client_id,
client_secret, code, callback):
http = httpclient.AsyncHTTPClient()
args = {
"redirect_uri": redirect_uri,
"code": code,
"client_id": client_id,
"client_secret": client_secret,
}
fields = set(['login'])
http.fetch(self._oauth_request_token_url(**args),
self.async_callback(self._on_access_token, redirect_uri, client_id,
client_secret, callback, fields))
def _on_access_token(self, redirect_uri, client_id, client_secret,
callback, fields, response):
if response.error:
logging.warning('Github auth error: %s' % str(response))
callback(None)
return
args = escape.parse_qs_bytes(escape.native_str(response.body))
session = {
"access_token": args["access_token"][-1],
}
self.github_request(
path="/user",
callback=self.async_callback(
self._on_get_user_info, callback, session, fields),
access_token=session["access_token"],
fields=",".join(fields)
)
def _on_get_user_info(self, callback, session, fields, user):
if user is None:
callback(None)
return
fieldmap = {}
for field in fields:
fieldmap[field] = user.get(field)
fieldmap.update({"access_token": session["access_token"]})
callback(fieldmap)
def github_request(self, path, callback, access_token=None,
post_args=None, **args):
url = "https://api.github.com" + path
all_args = {}
if access_token:
all_args["access_token"] = access_token
all_args.update(args)
all_args.update(post_args or {})
if all_args:
url += "?" + urllib.urlencode(all_args)
callback = self.async_callback(self._on_github_request, callback)
http = httpclient.AsyncHTTPClient()
if post_args is not None:
http.fetch(url, method="POST", body=urllib.urlencode(post_args),
callback=callback)
else:
http.fetch(url, callback=callback)
def _on_github_request(self, callback, response):
if response.error:
logging.warning("Error response %s fetching %s", response.error,
response.request.url)
callback(None)
return
callback(escape.json_decode(response.body))
| mit | Python | |
8ae846689b03d11cd9c231cffa7113339948df99 | Create simple_join_Spark.py | sammath/Hadoop-Platform,sammath/Hadoop-Platform | MapReduce_Examples/simple_join_Spark.py | MapReduce_Examples/simple_join_Spark.py | apache-2.0 | Python | ||
0571c0828f8a39d080d804e32e937b65bfe897ef | add development.py configuration | aacanakin/glim | glim/prototype/app/config/development.py | glim/prototype/app/config/development.py | """
Here, "development" is the environment. You can run configuration
using the following;
$ glim start --env development # loads the development configuration
"""
import os
import glim.paths
config = {
# the configurations of extensions
'extensions': {
# 'gredis' : {
# 'default' : {
# 'host' : 'localhost',
# 'port' : '6379',
# 'db' : 0
# }
# }
},
# database configuration
'db': {
# 'default' : {
# 'driver' : 'mysql',
# 'host' : 'localhost',
# 'schema' : 'test',
# 'user' : 'root',
# 'password' : '',
# },
},
# the orm switch for orm to be disabled or not
'orm': True,
# logging configuration, it has a default configuration
# if you don't provide one.
'log': {
# 'level' : 'info',
# 'format' : '[%(levelname)s] : %(message)s',
# 'file' : 'app/storage/logs/debug.log'
},
# view configuration
'views': {
# package to be loaded by jinja2
'package': 'app.views'
},
# werkzeug sessions configuration
'sessions': {
# session id prefix
'id_header': 'glim_session',
'path': glim.paths.STORAGE_PATH,
},
# the glim.app.App configuration
'app': {
'reloader': True,
'debugger': True,
'static': {
'path': glim.paths.STATIC_PATH,
'url': '/static'
}
}
}
| mit | Python | |
c4e6b2b68e6acd8f83091fc055897628b8df05bb | Add migration for django-simple-history == 1.9.0 | softwaresaved/fat,softwaresaved/fat,softwaresaved/fat,softwaresaved/fat | lowfat/migrations/0105_auto_20170615_1400.py | lowfat/migrations/0105_auto_20170615_1400.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-15 14:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lowfat', '0104_auto_20170607_1428'),
]
operations = [
migrations.AddField(
model_name='historicalblog',
name='history_change_reason',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='historicalclaimant',
name='history_change_reason',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='historicalexpense',
name='history_change_reason',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='historicalfund',
name='history_change_reason',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='historicalgeneralsentmail',
name='history_change_reason',
field=models.CharField(max_length=100, null=True),
),
]
| bsd-3-clause | Python | |
9b91aa78fcb541f03deb137dc9c5c02918e7ffe0 | Add widget | GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek | geotrek/maintenance/widgets.py | geotrek/maintenance/widgets.py | from mapentity.widgets import LeafletWidget
from .models import Topology
class InterventionWidget(LeafletWidget):
""" A widget allowing to create topologies on a map.
"""
is_point_topology = True
def serialize(self, value):
if value:
return value.geom.transform(4326, clone=True).geojson
def render(self, name, value, attrs=None):
"""Renders the fields. Parent class calls `serialize()` with the value.
"""
if isinstance(value, int):
value = Topology.objects.get(pk=value)
attrs = attrs or {}
attrs.update(is_point_topology=self.is_point_topology)
return super(InterventionWidget, self).render(name, value, attrs) | bsd-2-clause | Python | |
11b2bc33fe08e116347c21cc8ae5baa6e9352bb8 | add scheduler module __init__ | wwitzel3/awx,wwitzel3/awx,wwitzel3/awx,wwitzel3/awx | awx/main/scheduler/__init__.py | awx/main/scheduler/__init__.py | # Copyright (c) 2017 Ansible, Inc.
#
from awx.main.scheduler.task_manager import TaskManager # noqa
| apache-2.0 | Python | |
97781668a6ae80d4fa6330ccd70a6d01fe9a44a1 | Add C11 executor support; #276 | DMOJ/judge,DMOJ/judge,DMOJ/judge | dmoj/executors/C11.py | dmoj/executors/C11.py | from .gcc_executor import GCCExecutor
class Executor(GCCExecutor):
command = 'gcc11'
flags = ['-std=c11']
ext = '.c'
name = 'C11'
test_program = '''
#include <stdio.h>
int main() {
int ch;
while ((ch = getchar()) != EOF)
putchar(ch);
return 0;
}
'''
| agpl-3.0 | Python | |
22946180e9e5e660be14d453ddf5bb37564ddf33 | Add a script to build a map of all the tiles | simonsonc/mn-glo-mosaic,simonsonc/mn-glo-mosaic,simonsonc/mn-glo-mosaic | build-tile-map.py | build-tile-map.py | #!/usr/bin/env python
from osgeo import ogr
from osgeo import osr
from glob import glob
import os
driver = ogr.GetDriverByName("ESRI Shapefile")
ds = driver.CreateDataSource("tile-map.shp")
srs = osr.SpatialReference()
srs.ImportFromEPSG(26915)
layer = ds.CreateLayer("tiles", srs, ogr.wkbPolygon)
field_name = ogr.FieldDefn("Name", ogr.OFTString)
field_name.SetWidth(16)
layer.CreateField(field_name)
tile_ids = [ os.path.splitext(os.path.basename(x))[0] for x in glob('tile-entries/*.txt') ]
for tile_id in tile_ids:
x1_s, y1_s = tile_id.split('x')
x1 = int(x1_s)
y1 = int(y1_s)
x2 = x1 + 10000
y2 = y1 + 10000
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint(x1, y1)
ring.AddPoint(x2, y1)
ring.AddPoint(x2, y2)
ring.AddPoint(x1, y2)
ring.CloseRings()
poly = ogr.Geometry(ogr.wkbPolygon)
poly.AddGeometry(ring)
feature = ogr.Feature(layer.GetLayerDefn())
feature.SetField("Name", tile_id)
feature.SetGeometry(poly)
layer.CreateFeature(feature)
feature.Destroy()
ds.Destroy()
| mit | Python | |
e56dcf595a646d03dbd9d49ab27a37adfe87f3e5 | Add shell command | ramaxlo/cerbero,sdroege/cerbero,shoreflyer/cerbero,BigBrother-International/gst-cerbero,EricssonResearch/cerbero,davibe/cerbero,EricssonResearch/cerbero,atsushieno/cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,ikonst/cerbero,brion/cerbero,multipath-rtp/cerbero,BigBrother-International/gst-cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,GStreamer/cerbero,EricssonResearch/cerbero,OptoFidelity/cerbero,flexVDI/cerbero,nicolewu/cerbero,atsushieno/cerbero,ford-prefect/cerbero,lubosz/cerbero,brion/cerbero,AlertMe/cerbero,jackjansen/cerbero,atsushieno/cerbero,superdump/cerbero,nirbheek/cerbero,jackjansen/cerbero-2013,brion/cerbero,jackjansen/cerbero,ikonst/cerbero,fluendo/cerbero,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,brion/cerbero,lubosz/cerbero,ylatuya/cerbero,nirbheek/cerbero-old,multipath-rtp/cerbero,jackjansen/cerbero,cee1/cerbero-mac,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,BigBrother-International/gst-cerbero,AlertMe/cerbero,cee1/cerbero-mac,nirbheek/cerbero,multipath-rtp/cerbero,nzjrs/cerbero,superdump/cerbero,nzjrs/cerbero,OptoFidelity/cerbero,sdroege/cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,AlertMe/cerbero,centricular/cerbero,ford-prefect/cerbero,nirbheek/cerbero-old,shoreflyer/cerbero,EricssonResearch/cerbero,jackjansen/cerbero,atsushieno/cerbero,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,jackjansen/cerbero-2013,jackjansen/cerbero-2013,ford-prefect/cerbero,ramaxlo/cerbero,GStreamer/cerbero,nicolewu/cerbero,ikonst/cerbero,lubosz/cerbero,fluendo/cerbero,justinjoy/cerbero,centricular/cerbero,cee1/cerbero-mac,jackjansen/cerbero-2013,centricular/cerbero,GStreamer/cerbero,justinjoy/cerbero,sdroege/cerbero,EricssonResearch/cerbero,nirbheek/cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,AlertMe/cerbero,ford-prefect/cer
bero,nzjrs/cerbero,multipath-rtp/cerbero,justinjoy/cerbero,jackjansen/cerbero-2013,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,ramaxlo/cerbero,flexVDI/cerbero,sdroege/cerbero,OptoFidelity/cerbero,superdump/cerbero,OptoFidelity/cerbero,ylatuya/cerbero,sdroege/cerbero,nirbheek/cerbero,flexVDI/cerbero,multipath-rtp/cerbero,shoreflyer/cerbero,ramaxlo/cerbero,AlertMe/cerbero,nirbheek/cerbero-old,davibe/cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,ikonst/cerbero,nzjrs/cerbero,ramaxlo/cerbero,justinjoy/cerbero,fluendo/cerbero,shoreflyer/cerbero,BigBrother-International/gst-cerbero,centricular/cerbero,brion/cerbero,nicolewu/cerbero,superdump/cerbero,ylatuya/cerbero,fluendo/cerbero,lubosz/cerbero,davibe/cerbero,ylatuya/cerbero,flexVDI/cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,GStreamer/cerbero,shoreflyer/cerbero,BigBrother-International/gst-cerbero,centricular/cerbero,atsushieno/cerbero,cee1/cerbero-mac,davibe/cerbero,nirbheek/cerbero-old,flexVDI/cerbero,fluendo/cerbero,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,nzjrs/cerbero,ikonst/cerbero,GStreamer/cerbero | cerbero/commands/shell.py | cerbero/commands/shell.py | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
from cerbero.commands import Command, register_command
from cerbero.utils import N_
class Shell(Command):
doc = N_('Starts a shell with the build environment')
name = 'shell'
def __init__(self):
Command.__init__(self, [])
def run(self, config, args):
shell = os.environ.get('SHELL', '/bin/sh')
os.execlp(shell, shell)
register_command(Shell)
| lgpl-2.1 | Python | |
ef0410257af6787724e2e31471e465052dd8e4e3 | add sample to get entropy | echizentm/CompactDataStructures,echizentm/CompactDataStructures,echizentm/CompactDataStructures | chapter_02/get_entropy.py | chapter_02/get_entropy.py | # coding: utf-8
import sys
import math
def shannon_ent(probs):
return sum(
map(lambda p: -p*math.log2(p if p > 0 else 1), probs)
)
def seq2dic(seq):
dic = {}
for ch in seq:
if ch in dic:
dic[ch] += 1
else:
dic[ch] = 1
return dic
def worst_case_ent(seq):
return math.log2(len(seq2dic(seq)))
def zero_order_ent(seq):
return shannon_ent(
list(map(lambda v: v / len(seq), seq2dic(seq).values()))
)
def high_order_ent(seq, k):
dic = {}
for i in range(0, len(seq) - k + 1):
context = seq[i:i+k]
ch = seq[i+k] if i+k < len(seq) else '$'
if context in dic:
dic[context] += ch
else:
dic[context] = ch
return sum(
map(lambda s: len(s) * zero_order_ent(s) / len(seq), dic.values())
)
for line in sys.stdin:
line = line.rstrip()
print('String: {}'.format(line))
print('Worst-Case Entropy: {}'.format(
worst_case_ent(line)
))
print('Zero-Order Empirical Entropy: {}'.format(
zero_order_ent(line)
))
print('First-Order Empirical Entropy: {}'.format(
high_order_ent(line, 1)
))
print('Second-Order Empirical Entropy: {}'.format(
high_order_ent(line, 2)
))
| apache-2.0 | Python | |
96163bd46069142b69748ecc658d52497108a745 | add example client implementation using requests | elliotpeele/pyramid_oauth2_provider | example/client.py | example/client.py | #
# Copyright (c) Elliot Peele <elliot@bentlogic.net>
#
# This program is distributed under the terms of the MIT License as found
# in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/mit-license.php.
#
# This program is distributed in the hope that it will be useful, but
# without any waranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the MIT License for full details.
#
import sys
import copy
import base64
import logging
import requests
from collections import namedtuple
log = logging.getLogger('example_client')
class Token(namedtuple('Token', 'token_type access_token expires_in '
'refresh_token user_id')):
__slots__ = ()
@classmethod
def fromdict(cls, d):
return cls(
d['token_type'],
d['access_token'],
d['expires_in'],
d['refresh_token'],
d['user_id']
)
class Client(object):
def __init__(self, client_id, client_secret, token_endpoint,
verifySSL=True):
self.client_id = client_id
self.client_secret = client_secret
self.token_endpoint = token_endpoint
self.verifySSL = verifySSL
self.token = None
def _get_client_auth_header(self):
return {
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': 'Basic %s' % base64.b64encode('%s:%s'
% (self.client_id, self.client_secret)),
}
def login(self, username, password):
data = {
'grant_type': 'password',
'username': username,
'password': password,
}
resp = requests.post(self.token_endpoint, data=data,
headers=self._get_client_auth_header,
verify=self.verifySSL, config=dict(verbose=log.debug))
self.token = Token.fromdict(resp.json)
def refresh_login(self):
data = {
'grant_type': 'refresh_token',
'refresh_token': self.token.refresh_token,
'user_id': self.token.user_id,
}
resp = requests.post(self.token_endpoint, data=data,
headers=self._get_client_auth_header,
verify=self.verifySSL, config=dict(verbose=log.debug))
self.token = Token.fromdict(resp.json)
def _get_token_auth_header(self):
return {
'Authorization': '%s %s' % (self.token.token_type,
base64.b64encode(self.token.access_token))
}
def _handle_request(self, method, uri, data=None, headers=None):
if not headers:
headers = {}
else:
headers = copy.copy(headers)
headers.update(self._get_token_auth_header())
handler = getattr(requests, method)
resp = handler(uri, data=data, headers=headers, verify=self.verifySSL,
config=dict(verbose=log.debug))
return resp
def get(self, *args, **kwargs):
return self._handle_request('get', *args, **kwargs)
def post(self, *args, **kwargs):
return self._handle_request('post', *args, **kwargs)
def put(self, *args, **kwargs):
return self._handle_request('put', *args, **kwargs)
def delete(self, *args, **kwargs):
return self._handle_request('delete', *args, **kwargs)
def usage(args):
print >>sys.stderr, ('usage: %s <client_id> <client_secret> <token_uri> '
'<username> <password>' % args[0])
return 1
def main(args):
if len(args) != 6:
return usage(args)
client_id = args[1]
client_secret = args[2]
token_uri = args[3]
username = args[4]
password = args[5]
client = Client(client_id, client_secret, token_uri, verifySSL=False)
client.login(username, password)
client.refresh_login()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| mit | Python | |
006e5a686a594efd4c0f7c19cd70395a8534d894 | index set tests done, working on sparse quads | joshua-cogliati-inl/raven,idaholab/raven,joshua-cogliati-inl/raven,joshua-cogliati-inl/raven,joshua-cogliati-inl/raven,idaholab/raven,idaholab/raven,idaholab/raven,joshua-cogliati-inl/raven,joshua-cogliati-inl/raven,idaholab/raven,idaholab/raven,idaholab/raven,joshua-cogliati-inl/raven | framework/TestSparseGrid.py | framework/TestSparseGrid.py | from __future__ import division, print_function, unicode_literals, absolute_import
import warnings
warnings.simplefilter('default',DeprecationWarning)
import xml.etree.ElementTree as ET
import numpy as np
import sys, os
from copy import copy as copy
from utils import find_crow
find_crow(os.path.dirname(os.path.abspath(sys.argv[0])))
import Distributions
import Quadrature
import OrthoPolynomials
import IndexSets
debug = False
def createElement(tag,attrib={},text={}):
element = ET.Element(tag,attrib)
element.text = text
return element
def checkObject(comment,value,expected):
if value!=expected:
print(comment,value,"!=",expected)
results['fail']+=1
else: results['pass']+=1
def checkIndexSet(comment, value, expected):
    """Element-wise compare two index-point sequences, tallying pass/fail.

    On mismatch, prints a side-by-side listing of actual vs. expected points,
    padding with '-' where one sequence is shorter than the other.
    """
    #NOTE this test requires identical ordering for value and expected
    matches = (len(expected) == len(value) and
               all(got == want for got, want in zip(value, expected)))
    if matches:
        results['pass'] += 1
        return
    print(comment)
    results['fail'] += 1
    for idx, got in enumerate(value):
        try:
            print(' ', got, '|', expected[idx])
        except IndexError:
            # actual sequence is longer than expected here
            print(' ', got, '| -')
    # expected entries beyond the end of the actual sequence
    for extra in range(len(value), len(expected)):
        print(' ', '- |', expected[extra])
# Global pass/fail tally mutated by the check* helpers; the script's final
# exit status is the number of failures.
results = {'pass':0,'fail':0}
# Generate distributions
# Build a uniform distribution on [-1, 1] from a hand-constructed XML node,
# mirroring how the framework would configure it from an input file.
distros = {}
uniformElement = ET.Element("uniform")
uniformElement.append(createElement("low",text="-1"))
uniformElement.append(createElement("hi" ,text=" 1"))
uniform = Distributions.Uniform()
uniform._readMoreXML(uniformElement)
uniform.initializeDistribution()
distros['uniform']=uniform
# Generate quadrature
# Quadrature rules are built from empty XML nodes, again mimicking the
# framework's input-driven construction path.
quads={}
legendreElement = ET.Element("legendre")
legendre = Quadrature.Legendre()
legendre._readMoreXML(legendreElement)
legendre.initialize()
quads['Legendre']=legendre
ccElement = ET.Element("clenshawcurtis")
cc = Quadrature.ClenshawCurtis()
cc._readMoreXML(ccElement)
cc.initialize()
quads['ClenshawCurtis']=cc
# Generate polynomials
# Only Legendre orthogonal polynomials are exercised here, paired with the
# uniform distribution built above.
polys={}
plegendreElement = ET.Element("plegendre")
plegendre = OrthoPolynomials.Legendre()
plegendre._readMoreXML(plegendreElement)
plegendre.initialize()
polys['Legendre']=plegendre
# Test index set generation, N=2, L=4
if debug: print('Testing Index Set generation...')
# N is the nominal dimensionality (two variables below), L the polynomial
# order cap for each variable.
# NOTE(review): N itself is never read afterwards -- the dimensionality is
# implied by the two entries placed in myDists.
N=2; L=4
myDists={}
# Two identical uniform/Legendre variables make the expected index sets easy
# to enumerate by hand.
y1 = copy(distros['uniform'])
y1.setQuadrature(quads['Legendre'])
y1.setPolynomials(polys['Legendre'],L)
y2 = copy(distros['uniform'])
y2.setQuadrature(quads['Legendre'])
y2.setPolynomials(polys['Legendre'],L)
myDists['y1']=y1
myDists['y2']=y2
# Tensor product: expected to be the full Cartesian grid {0..L} x {0..L}.
tpSet = IndexSets.TensorProduct()
tpSet.initialize(myDists)
correct = [(0,0),(0,1),(0,2),(0,3),(0,4),
           (1,0),(1,1),(1,2),(1,3),(1,4),
           (2,0),(2,1),(2,2),(2,3),(2,4),
           (3,0),(3,1),(3,2),(3,3),(3,4),
           (4,0),(4,1),(4,2),(4,3),(4,4)]
checkIndexSet('Tensor Product set points',tpSet.points,correct)
# Total degree: the triangular set of indices with i+j <= L.
correct = [(0,0),(0,1),(0,2),(0,3),(0,4),
           (1,0),(1,1),(1,2),(1,3),
           (2,0),(2,1),(2,2),
           (3,0),(3,1),
           (4,0)]
tdSet = IndexSets.TotalDegree()
tdSet.initialize(myDists)
checkIndexSet('Total Degree set points',tdSet.points,correct)
# Hyperbolic cross: sparser still; the listed points match (i+1)*(j+1) <= L+1.
correct = [(0,0),(0,1),(0,2),(0,3),(0,4),
           (1,0),(1,1),
           (2,0),
           (3,0),
           (4,0)]
hcSet = IndexSets.HyperbolicCross()
hcSet.initialize(myDists)
checkIndexSet('Hyperbolic Cross set points',hcSet.points,correct)
# Test Anisotropic index set
# Per-dimension importance weights: the second variable (weight 2) is
# penalized more heavily, shrinking its admissible orders in the weighted
# total-degree and hyperbolic-cross sets below.
wts=[1,2]
# Tensor product is expected to ignore the weights entirely (full grid again).
correct = [(0,0),(0,1),(0,2),(0,3),(0,4),
           (1,0),(1,1),(1,2),(1,3),(1,4),
           (2,0),(2,1),(2,2),(2,3),(2,4),
           (3,0),(3,1),(3,2),(3,3),(3,4),
           (4,0),(4,1),(4,2),(4,3),(4,4)]
tpSet.initialize(myDists,wts)
checkIndexSet('Tensor Product anisotropic',tpSet.points,correct) #TODO should I implement it so this changes?
correct = [(0,0),(0,1),(0,2),(0,3),(0,4),(0,5),(0,6),
           (1,0),(1,1),(1,2),(1,3),(1,4),
           (2,0),(2,1),(2,2),
           (3,0)]
tdSet.initialize(myDists,wts)
checkIndexSet('Total Degree anisotropic',tdSet.points,correct)
correct = [(0,0),(0,1),(0,2),(0,3),(0,4),(0,5),(0,6),(0,7),(0,8),(0,9),(0,10),
           (1,0),(1,1),
           (2,0)]
hcSet.initialize(myDists,wts)
checkIndexSet('Hyperbolic Cross anisotropic',hcSet.points,correct)
# Test sparse grids #
# NOTE(review): the debug message below still says 'Index Set generation' --
# looks copy-pasted; the sparse-grid tests themselves are not written yet.
if debug: print('Testing Index Set generation...')
# Report tallies and exit nonzero if any check failed (test-harness convention).
print(results)
sys.exit(results["fail"])
| apache-2.0 | Python | |
f0762de9a941745e199ba6ed3e02a475e9838d13 | Package for integration tests | huntxu/fuel-web,dancn/fuel-main-dev,zhaochao/fuel-main,zhaochao/fuel-main,SmartInfrastructures/fuel-web-dev,huntxu/fuel-web,Fiware/ops.Fuel-main-dev,nebril/fuel-web,koder-ua/nailgun-fcert,SmartInfrastructures/fuel-web-dev,zhaochao/fuel-main,AnselZhangGit/fuel-main,SmartInfrastructures/fuel-web-dev,ddepaoli3/fuel-main-dev,AnselZhangGit/fuel-main,eayunstack/fuel-main,eayunstack/fuel-web,eayunstack/fuel-web,nebril/fuel-web,nebril/fuel-web,dancn/fuel-main-dev,SmartInfrastructures/fuel-web-dev,huntxu/fuel-web,zhaochao/fuel-web,zhaochao/fuel-web,zhaochao/fuel-web,stackforge/fuel-web,SmartInfrastructures/fuel-main-dev,ddepaoli3/fuel-main-dev,teselkin/fuel-main,teselkin/fuel-main,eayunstack/fuel-web,stackforge/fuel-web,SmartInfrastructures/fuel-main-dev,koder-ua/nailgun-fcert,koder-ua/nailgun-fcert,prmtl/fuel-web,zhaochao/fuel-main,eayunstack/fuel-web,SmartInfrastructures/fuel-main-dev,zhaochao/fuel-main,eayunstack/fuel-main,dancn/fuel-main-dev,dancn/fuel-main-dev,ddepaoli3/fuel-main-dev,teselkin/fuel-main,SergK/fuel-main,prmtl/fuel-web,AnselZhangGit/fuel-main,ddepaoli3/fuel-main-dev,nebril/fuel-web,huntxu/fuel-main,Fiware/ops.Fuel-main-dev,stackforge/fuel-main,SergK/fuel-main,eayunstack/fuel-main,stackforge/fuel-main,prmtl/fuel-web,huntxu/fuel-web,SmartInfrastructures/fuel-web-dev,SergK/fuel-main,Fiware/ops.Fuel-main-dev,prmtl/fuel-web,nebril/fuel-web,AnselZhangGit/fuel-main,SmartInfrastructures/fuel-main-dev,teselkin/fuel-main,stackforge/fuel-web,stackforge/fuel-main,huntxu/fuel-main,Fiware/ops.Fuel-main-dev,eayunstack/fuel-web,koder-ua/nailgun-fcert,zhaochao/fuel-web,huntxu/fuel-web,zhaochao/fuel-web,prmtl/fuel-web,huntxu/fuel-main | nailgun/nailgun/test/integration/__init__.py | nailgun/nailgun/test/integration/__init__.py | # -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
| apache-2.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.