commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
c9d852041ed99964560c9f071a9c927911583a62 | Add Mediator pattern | jackaljack/design-patterns | mediator.py | mediator.py | """Mediator pattern
"""
import random
import time
class ControlTower(object):
    """Mediator: coordinates runway allocation between airplanes.

    Airplanes never touch runways directly; they ask the tower, which
    tracks which runways are free and which are in use.
    """

    def __init__(self):
        # Runways that are currently free vs. currently occupied.
        self.available_runways = list()
        self.engaged_runways = list()

    def authorize_landing(self):
        """Grant a landing slot if any runway is free; return True on success."""
        if not self.available_runways:
            print('Request denied. No available runways')
            return False
        granted = self.available_runways.pop()
        self.engaged_runways.append(granted)
        print('Request granted. Please land on runway {}'.format(granted))
        self.status()
        return True

    def authorize_takeoff(self):
        # for simplicity, all takeoff requests are granted
        time.sleep(random.randint(0, 2))
        freed = self.engaged_runways.pop()
        self.available_runways.append(freed)
        self.status()

    def status(self):
        """Report how many runways are currently free."""
        free_count = len(self.available_runways)
        print('The control tower has {} available runway/s'.format(free_count))
class Airplane(object):
    """Colleague object: interacts with runways only via the control tower."""

    def __init__(self):
        # Assigned once register() is called.
        self.control_tower = None

    @property
    def registered(self):
        """True once this airplane has registered with a control tower."""
        return self.control_tower is not None

    def register(self, control_tower):
        """Remember the mediator this airplane talks to."""
        self.control_tower = control_tower
        print('An airplane registers with the control tower')

    def request_landing(self):
        # Land only if the mediator grants the request.
        if self.control_tower.authorize_landing():
            self.land()

    def land(self):
        print('The airplane {} lands'.format(self))

    def takeoff(self):
        print('The airplane {} takes off'.format(self))
        self.control_tower.authorize_takeoff()
class Runway(object):
    """A runway adds itself to the mediator's available pool on registration."""

    def register(self, control_tower):
        """Register this runway with *control_tower* and report tower status."""
        pool = control_tower.available_runways
        print('A runway has been registered with the control tower')
        pool.append(self)
        control_tower.status()
def main():
    """Demo scenario: two runways, three airplanes, all mediated by the tower."""
    print('There is an airport with 2 runways and a control tower\n')
    # Set up the mediator and register both runways with it.
    r1 = Runway()
    r2 = Runway()
    ct = ControlTower()
    r1.register(ct)
    r2.register(ct)
    print('\n3 airplanes approach the airport and register with the tower')
    a1 = Airplane()
    a2 = Airplane()
    a3 = Airplane()
    a1.register(ct)
    a2.register(ct)
    a3.register(ct)
    print('\nTwo airplanes request for landing. There are enough runways, so '
          'the requests are granted')
    a1.request_landing()
    a2.request_landing()
    # Both runways are now engaged, so this request must be denied.
    print('\nThe third airplane also makes a request for landing. There are no'
          ' runways available, so the request is denied')
    a3.request_landing()
    print('\nAfter a while, the first airplane takes off, so now the third '
          'airplane can land')
    a1.takeoff()
    a3.request_landing()


if __name__ == '__main__':
    main()
| mit | Python | |
31b81e3f901da0a06f5a949dbc55a00aa8b0a407 | Add auditor repo's events | polyaxon/polyaxon,polyaxon/polyaxon,polyaxon/polyaxon | polyaxon/auditor/events/repo.py | polyaxon/auditor/events/repo.py | import auditor
from libs.event_manager import event_types
from libs.event_manager.event import Event
class RepoCreatedEvent(Event):
    """Auditor event recorded when a repo is created."""
    type = event_types.REPO_CREATED


class RepoNewCommitEvent(Event):
    """Auditor event recorded when a repo receives a new commit."""
    type = event_types.REPO_NEW_COMMIT


# Register both event classes with the auditor.
auditor.register(RepoCreatedEvent)
# BUG FIX: previously registered the undefined name `RepoNewCommit`,
# which raised NameError as soon as this module was imported.
auditor.register(RepoNewCommitEvent)
| apache-2.0 | Python | |
ffd3b3367f7bb932505a6312c6370a7d30c3d1fe | Implement account-notify | Heufneutje/txircd,ElementalAlchemist/txircd | txircd/modules/ircv3/accountnotify.py | txircd/modules/ircv3/accountnotify.py | from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class AccountNotify(ModuleData):
    """Implements the IRCv3 account-notify capability.

    When a user's "account" metadata changes, an ACCOUNT message is sent to
    every local user sharing a channel with them that negotiated the
    capability ("*" is sent when the account value is cleared, i.e. logout).
    """
    implements(IPlugin, IModuleData)

    name = "AccountNotify"

    def actions(self):
        # Hook metadata updates (to broadcast notices) and capability listing.
        return [ ("usermetadataupdate", 10, self.sendAccountNotice),
        ("capabilitylist", 10, self.addCapability) ]

    def load(self):
        # If the unload flag is present, this load is part of a reload:
        # clear the flag and skip re-announcing the capability
        # (unload() intentionally did not remove it).
        if "unloading-account-notify" in self.ircd.dataCache:
            del self.ircd.dataCache["unloading-account-notify"]
            return
        if "cap-add" in self.ircd.functionCache:
            self.ircd.functionCache["cap-add"]("account-notify")

    def unload(self):
        # Mark that we are unloading so a following load() knows it's a reload.
        self.ircd.dataCache["unloading-account-notify"] = True

    def fullUnload(self):
        # Permanent unload: drop the reload flag and withdraw the capability.
        del self.ircd.dataCache["unloading-account-notify"]
        if "cap-del" in self.ircd.functionCache:
            self.ircd.functionCache["cap-del"]("account-notify")

    def addCapability(self, capList):
        capList.append("account-notify")

    def sendAccountNotice(self, user, key, oldValue, value, visibility, setByUser, fromServer):
        """Broadcast an ACCOUNT message when *user*'s account metadata changes."""
        if key != "account":
            return
        noticeUsers = set()
        noticePrefix = user.hostmask()
        for channel in user.channels:
            for noticeUser in channel.users.iterkeys():
                # Only local users (uuid prefix == our server ID) who have
                # negotiated account-notify receive the message; never echo
                # back to the user whose account changed.
                if noticeUser.uuid[:3] == self.ircd.serverID and noticeUser != user and "capabilities" in noticeUser.cache and "account-notify" in noticeUser.cache["capabilities"]:
                    noticeUsers.add(noticeUser)
        if value:
            for noticeUser in noticeUsers:
                noticeUser.sendMessage("ACCOUNT", value, prefix=noticePrefix)
        else:
            # Falsy account value means logged out: spec says send "*".
            for noticeUser in noticeUsers:
                noticeUser.sendMessage("ACCOUNT", "*", prefix=noticePrefix)
accountNotify = AccountNotify() | bsd-3-clause | Python | |
1ea465856739169aadd90b27fc8ad1cf42dcb665 | add 120 | zeyuanxy/project-euler,EdisonAlgorithms/ProjectEuler,EdisonAlgorithms/ProjectEuler,EdisonAlgorithms/ProjectEuler,zeyuanxy/project-euler,zeyuanxy/project-euler,EdisonAlgorithms/ProjectEuler,zeyuanxy/project-euler | vol3/120.py | vol3/120.py | def calc(a):
s = set([2])
for n in range(1, 2 * a + 1, 2):
r = (2 * n * a) % (a * a)
s.add(r)
return max(s)
if __name__ == "__main__":
print sum([calc(a) for a in range(3, 1001)])
| mit | Python | |
94f621255ffb7b90f6233e45b658d9e73113cec4 | add 121 | EdisonAlgorithms/ProjectEuler,EdisonAlgorithms/ProjectEuler,EdisonAlgorithms/ProjectEuler,zeyuanxy/project-euler,EdisonAlgorithms/ProjectEuler,zeyuanxy/project-euler,zeyuanxy/project-euler,zeyuanxy/project-euler | vol3/121.py | vol3/121.py | if __name__ == "__main__":
LIMIT = 15
res = [0] * (LIMIT + 1)
res[LIMIT - 1] = res[LIMIT] = 1
for i in range(2, LIMIT + 1):
for j in range(0, LIMIT):
res[j] = res[j + 1]
res[LIMIT] = 0
for j in range(LIMIT, 0, -1):
res[j] += res[j - 1] * i
pos = 0
for i in range(0, LIMIT / 2 + 1):
pos += res[i]
total = 1
for i in range(2, LIMIT + 2):
total *= i
print total / pos
| mit | Python | |
ac9f76d210776a322953ce838e4a8f3a0ff49795 | add basic mongo testing connection | meyersj/geotweet,meyersj/geotweet,meyersj/geotweet | tests/mongo/mongo_tests.py | tests/mongo/mongo_tests.py | import unittest
import os
from os.path import dirname
import sys
root = dirname(dirname(dirname(os.path.abspath(__file__))))
sys.path.append(root)
from geotweet.mongo import MongoGeo, MongoQuery
from pymongo import MongoClient
from pymongo.errors import ServerSelectionTimeoutError
MONGODB_URI = os.getenv('GEOTWEET_MONGODB_URI', 'mongodb://127.0.0.1:27017')
PORTLAND = [-122.675983, 45.524764]
PROVIDENCE = [-71.404823, 41.827730]
JOHN_HAY = "John Hay Library"
def check_connection():
timeout = 1 * 1000
args = dict(
connectTimeoutMS=timeout,
socketTimeoutMS=timeout,
serverSelectionTimeoutMS=timeout
)
try:
print MongoClient(MONGODB_URI, **args)
return True
except ServerSelectionTimeoutError:
return False
return False
class ConnectionTestCase(unittest.TestCase):
    """Smoke test: fail early if the MongoDB instance cannot be reached."""
    def test_connection(self):
        error = "Failed to connect to Mongo Instance < {0} >".format(MONGODB_URI)
        self.assertTrue(check_connection(), error)
"""
class BoundaryQueryTests(unittest.TestCase):
def setUp(self):
self.db = "boundary"
def connect(self):
return MongoQuery(db=self.db, collection=self.collection, uri=MONGODB_URI)
def check_field(self, expected, actual, error):
error = error.format(expected, actual)
self.assertEqual(expected, actual, error)
def test_portland_states(self):
self.collection = "states"
self.mongo = self.connect()
query = self.mongo.intersects(PORTLAND)
state = self.mongo.find(query=query).next()
error = 'STATE is was expected to be : {0}, actual: {1}'
actual = state['properties']['STATE']
self.check_field('41', actual, error)
error = 'NAME is was expected to be : {0}, actual: {1}'
actual = state['properties']['NAME']
self.check_field('Oregon', actual, error)
def test_portland_counties(self):
self.collection = "counties"
self.mongo = self.connect()
query = self.mongo.intersects(PORTLAND)
county = self.mongo.find(query=query).next()
error = 'STATE is was expected to be : {0}, actual: {1}'
actual = county['properties']['STATE']
self.check_field('41', actual, error)
error = 'NAME is was expected to be : {0}, actual: {1}'
actual = county['properties']['NAME']
self.check_field('Multnomah', actual, error)
class OSMQueryTests(unittest.TestCase):
def setUp(self):
self.db = "osm"
def connect(self):
return MongoQuery(db=self.db, collection=self.collection, uri=MONGODB_URI)
def check_field(self, expected, actual, error):
error = error.format(expected, actual)
self.assertEqual(expected, actual, error)
def test_rhode_island_poi(self):
self.collection = "poi"
self.mongo = self.connect()
query = self.mongo.near(PROVIDENCE, 150)
match = False
for poi in self.mongo.find(query=query):
if poi['properties']['name'] == JOHN_HAY:
match = True
break
self.assertTrue(match, "No POI matching {0}".format(JOHN_HAY))
"""
if __name__ == "__main__":
unittest.main()
| mit | Python | |
6361e766de14508342cc87f1b26dd99f43b02fc9 | Add script to calculate statistics about liwc categories | NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts | liwc2csv.py | liwc2csv.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to generate statistics on LIWC entities.
The script calculates how many LIWC words are found. This script can be used to
compare the differences in numbers of words found for the modern and historic
versions of LIWC.
"""
from bs4 import BeautifulSoup
from lxml import etree
from collections import Counter
import argparse
import string
import glob
import pandas as pd
from emotools.bs4_helpers import sentence, note, word
from emotools.liwc_helpers import load_liwc
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dir_name', help='the name of the FoLiA XML file to '
'be processed')
parser.add_argument('dic', help='the liwc dictionary to be used')
parser.add_argument('out_file', help='the liwc dictionary to be used')
args = parser.parse_args()
if args.dic == 'LIWC_Dutch_dictionary.dic':
encoding = 'latin1'
else:
encoding = 'utf8'
liwc_dict, liwc_categories = load_liwc(args.dic, encoding)
act_tag = '{http://ilk.uvt.nl/folia}div'
event_tag = '{http://ilk.uvt.nl/folia}event'
sentence_tag = '{http://ilk.uvt.nl/folia}s'
word_tag = '{http://ilk.uvt.nl/folia}w'
text_content_tag = '{http://ilk.uvt.nl/folia}t'
result = pd.DataFrame(columns=liwc_categories.values()+['#words'])
xml_files = glob.glob('{}/*.xml'.format(args.dir_name))
for i, f in enumerate(xml_files):
print '{} ({} of {})'.format(f, i+1, len(xml_files))
num_words = 0
liwc_count = Counter()
# make sure all categories have a value in the DataFrame
for cat in liwc_categories.values():
liwc_count[cat] = 0
text_id = f[-20:-7]
fi = open(f)
context = etree.iterparse(fi, events=('end',), tag=act_tag, huge_tree=True)
for event, elem in context:
#print elem.attrib
if elem.get('class') == 'act':
# load div into memory
div_xml = BeautifulSoup(etree.tostring(elem), 'xml')
sentences = div_xml.find_all(sentence)
s = None
for sent in sentences:
if not note(sent.parent):
sent_id = sent.attrs.get('xml:id')
sent_words = [w.t.string.lower()
for w in sent.find_all(word)]
for w in sent_words:
if w not in string.punctuation:
num_words += 1
if w in liwc_dict.keys():
#print w
for cat in liwc_dict[w]:
liwc_count[liwc_categories[cat]] += 1
result.loc[text_id] = pd.Series(liwc_count)
result.set_value(text_id, '#words', num_words)
print result
result.to_csv(args.out_file)
| apache-2.0 | Python | |
c788ff97a48d54d2b2c2cb181a1a0a95d100b850 | Add flip tests module | danforthcenter/plantcv,danforthcenter/plantcv,danforthcenter/plantcv | tests/plantcv/test_flip.py | tests/plantcv/test_flip.py | import pytest
import cv2
from plantcv.plantcv import flip
def test_flip(test_data):
    """Flipping an RGB image horizontally must preserve its shape."""
    # Read in test data
    img = cv2.imread(test_data.small_rgb_img)
    flipped_img = flip(img=img, direction="horizontal")
    assert img.shape == flipped_img.shape
def test_flip_grayscale(test_data):
    """Flipping a grayscale image vertically must preserve its shape."""
    # Read in test data (-1 keeps the image unchanged, i.e. single channel)
    gray_img = cv2.imread(test_data.small_gray_img, -1)
    flipped_img = flip(img=gray_img, direction="vertical")
    assert gray_img.shape == flipped_img.shape
def test_flip_bad_input(test_data):
    """An unrecognized direction string must raise RuntimeError."""
    img = cv2.imread(test_data.small_rgb_img)
    with pytest.raises(RuntimeError):
        _ = flip(img=img, direction="vert")
| mit | Python | |
7b7ead5fb9814ba1d25d73c9dea50db86a2a827f | add a script to update jar only. So we don't need to reingest data. | uwescience/myria,bsalimi/myria,bsalimi/myria,uwescience/myria,uwescience/myria,jamesmarva/myria,jamesmarva/myria,bsalimi/myria,jamesmarva/myria | myriadeploy/update_myria_jar_only.py | myriadeploy/update_myria_jar_only.py | #!/usr/bin/env python
import myriadeploy
import subprocess
import sys
def host_port_list(workers):
    """Render (host, port) pairs as a list of 'host:port' strings."""
    return ['{0}:{1}'.format(host, port) for host, port in workers]
def copy_distribution(config):
    """Copy the Myria jar to every node listed in *config* via rsync.

    Raises Exception if rsync reports a non-zero exit status for any node.
    """
    nodes = config['nodes']
    description = config['description']
    path = config['path']
    username = config['username']
    for (hostname, _) in nodes:
        # Remote nodes get an ssh-style rsync target; localhost copies locally.
        if hostname != 'localhost':
            remote_path = "%s@%s:%s/%s-files" % (username, hostname, path, description)
        else:
            remote_path = "%s/%s-files" % (path, description)
        to_copy = ["myriad-0.1.jar"]
        # -aLvz: archive mode, follow symlinks, verbose, compress.
        args = ["rsync", "-aLvz"] + to_copy + [remote_path]
        if subprocess.call(args):
            raise Exception("Error copying distribution to %s" % (hostname,))
def main(argv):
    """Entry point: push only the Myria jar to every deployment node.

    Expects exactly one argument: the path to a deployment.cfg file. This
    avoids re-ingesting data when only the jar has changed.
    """
    # Usage
    if len(argv) != 2:
        print >> sys.stderr, "Usage: %s <deployment.cfg>" % (argv[0])
        print >> sys.stderr, " deployment.cfg: a configuration file modeled after deployment.cfg.sample"
        sys.exit(1)
    config = myriadeploy.read_config_file(argv[1])
    # Step 1: Copy over java, libs, myriad
    copy_distribution(config)

if __name__ == "__main__":
    main(sys.argv)
| bsd-3-clause | Python | |
cdbd7b4ebb2190dc7e7067378d6b009d773c6537 | Create reverseCompliment.py | mikejthomas/biote100_pset2 | reverseCompliment.py | reverseCompliment.py | #Python Problem 1
#reverseComplement.py
#Introduction to Bioinformatics Assignment 2
#Purpose: Reverse Compliment
#Your Name: Michael Thomas
#Date: 10/10/15
#s1 is the string you should use to generate a reverse complement sequence
#Note that your result needs to be presented in the 5' to 3' direction
s1 = "AAAAACCCCCTCGGCTAATCGACTACTACTACTACTACTTCATCATCATCAGGGGGGGGCTCTCTCTAAAAACCCCTTTTGGGGG"
#Your Code Here
# Accumulates the complement of each base in s1, in original order.
rs1 = []
# Map each base to its Watson-Crick complement
# (A<->T, C<->G).
compdic = {'A':'T', 'T':'A', 'C':'G', 'G':'C'}
# Replace every base in s1 with its complement
# via the compdic lookup table.
for i in s1:
    rs1.append(compdic[i])
# Reverse the complemented list and print it as a string: the reverse
# complement, presented 5' to 3'.
print ''.join(rs1[::-1])
| mit | Python | |
54c5f4f476cebec063652f5e4c6acd30bf2dee2e | Add test for nova-compute and nova-network main database blocks | klmitch/nova,gooddata/openstack-nova,hanlind/nova,mahak/nova,Juniper/nova,phenoxim/nova,vmturbo/nova,phenoxim/nova,mahak/nova,vmturbo/nova,rajalokan/nova,rajalokan/nova,openstack/nova,mikalstill/nova,gooddata/openstack-nova,mikalstill/nova,alaski/nova,sebrandon1/nova,sebrandon1/nova,openstack/nova,alaski/nova,mahak/nova,gooddata/openstack-nova,rahulunair/nova,klmitch/nova,cloudbase/nova,mikalstill/nova,Juniper/nova,Juniper/nova,klmitch/nova,jianghuaw/nova,rajalokan/nova,Juniper/nova,openstack/nova,hanlind/nova,rajalokan/nova,cloudbase/nova,hanlind/nova,jianghuaw/nova,rahulunair/nova,jianghuaw/nova,sebrandon1/nova,vmturbo/nova,cloudbase/nova,vmturbo/nova,gooddata/openstack-nova,rahulunair/nova,klmitch/nova,jianghuaw/nova | nova/tests/unit/cmd/test_cmd_db_blocks.py | nova/tests/unit/cmd/test_cmd_db_blocks.py | # Copyright 2016 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import mock
from nova.cmd import compute
from nova.cmd import network
from nova import db
from nova import exception
from nova import test
@contextlib.contextmanager
def restore_db():
    """Save db.api.IMPL and restore it on exit.

    The command mains under test are expected to swap in a DB-blocking
    proxy; this guarantees the original implementation is put back so
    state does not leak between tests.
    """
    orig = db.api.IMPL
    try:
        yield
    finally:
        db.api.IMPL = orig
class ComputeMainTest(test.NoDBTestCase):
    """Verify nova-compute and nova-network block main-DB access at startup."""

    @mock.patch('nova.utils.monkey_patch')
    @mock.patch('nova.conductor.api.API.wait_until_ready')
    @mock.patch('oslo_reports.guru_meditation_report')
    def _call_main(self, mod, gmr, cond, patch):
        # Run the given command module's main() with its config and service
        # machinery mocked out, so only the DB-blocking side effect remains.
        @mock.patch.object(mod, 'config')
        @mock.patch.object(mod, 'service')
        def run_main(serv, conf):
            mod.main()
        run_main()

    def test_compute_main_blocks_db(self):
        with restore_db():
            self._call_main(compute)
            # After main() runs, direct DB calls must raise DBNotAllowed.
            self.assertRaises(exception.DBNotAllowed,
                              db.api.instance_get, 1, 2)

    def test_network_main_blocks_db(self):
        with restore_db():
            self._call_main(network)
            self.assertRaises(exception.DBNotAllowed,
                              db.api.instance_get, 1, 2)
| apache-2.0 | Python | |
86c13905a616fe74ea1264b3e462ada3ca7b4e04 | Add a test for clickthrough | mpkato/openliveq | tests/test_clickthrough.py | tests/test_clickthrough.py | import openliveq as olq
import os
class TestClickthrough(object):
    """Tests for parsing clickthrough TSV lines into Clickthrough objects."""

    def test_load(self):
        # Parse every line of the bundled sample fixture.
        filepath = os.path.join(os.path.dirname(__file__),
                                "fixtures", "sample_clickthrough.tsv")
        cs = []
        with open(filepath) as f:
            for line in f:
                c = olq.Clickthrough.readline(line)
                cs.append(c)
        # First record: ids, rank, CTR, gender ratios and age-bucket shares.
        assert cs[0].query_id == 'OLQ-9998'
        assert cs[0].question_id == '1167627151'
        assert cs[0].rank == 1
        assert cs[0].ctr == 0.5
        assert cs[0].male == 0.4
        assert cs[0].female == 0.6
        assert cs[0].a00 == 0.1
        assert cs[0].a10 == 0.1
        assert cs[0].a20 == 0.1
        assert cs[0].a30 == 0.1
        assert cs[0].a40 == 0.1
        assert cs[0].a50 == 0.1
        assert cs[0].a60 == 0.4
        # Third record belongs to a different query.
        assert cs[2].query_id == 'OLQ-9999'
        assert cs[2].question_id == '1414846259'
        assert cs[2].rank == 2
        assert cs[2].ctr == 0.2
        assert cs[2].male == 0.5
        assert cs[2].female == 0.5
        assert cs[2].a00 == 0.1
        assert cs[2].a10 == 0.1
        assert cs[2].a20 == 0.1
        assert cs[2].a30 == 0.1
        assert cs[2].a40 == 0.2
        assert cs[2].a50 == 0.2
        assert cs[2].a60 == 0.2
| mit | Python | |
731e102ed1faee07128979655f4eaca9bd4818ba | Create analyse.py | KitWallace/terrain | analyse.py | analyse.py | """
analysis of an array
"""
import sys, math
from numpy import *
missing_value = -9999
elev = loadtxt(sys.stdin)
maxi = elev.shape[0]
maxj = elev.shape[1]
missing = 0
# Smallest/largest non-missing value seen so far (renamed from min/max so the
# Python builtins are not shadowed).
min_val = 999999999999
max_val = -99999999999
# Scan every cell, counting missing values and tracking the value range.
# BUG FIX: the loops previously iterated range(0, maxi-1) / range(0, maxj-1),
# silently skipping the last row and last column of the grid, so the missing
# count and min/max ignored those cells while "Points" reported the full size.
for i in range(0, maxi):
    for j in range(0, maxj):
        e = elev[i, j]
        if (e == missing_value):
            missing = missing + 1
        else:
            if (e < min_val):
                min_val = e
            if (e > max_val):
                max_val = e
print("Shape " + ', '.join(map(str, elev.shape)))
print("Points " + str(elev.shape[0] * elev.shape[1]))
print("missing (" + str(missing_value) + ") " + str(missing))
print("min " + str(min_val) + " max " + str(max_val) + ' range ' + str(max_val - min_val))
| cc0-1.0 | Python | |
3f1b477c7e51fdafa44f090104fbf0f2ae10ccdf | Create Train.py | LauritsSkov/Introgression-detection | Train.py | Train.py | from templates import *
# Parameters (path to observations file, output file, model, weights file)
_, infile, outprefix, model, weights_file = sys.argv
# Load data
state_names, transitions, emissions, starting_probabilities, weights = make_hmm_from_file(model, weights_file)
obs = read_observations_from_file(infile)
# Train model
epsilon = 0.0001
starting_probabilities, transitions, emissions, old_prob = train_on_obs_pure_baum(starting_probabilities, transitions, emissions, weights, obs)
with open(outprefix + '.log','w') as out:
out.write('name\titeration\tstate\tvalue\tcomment\tmodel\n')
for i in range(1000):
transitions = log_with_inf_array(transitions)
starting_probabilities, transitions, emissions, new_prob = train_on_obs_pure_baum(starting_probabilities, transitions, emissions, weights, obs)
print 'doing iteration {0} with old prob {1} and new prob {2}'.format(i, old_prob, new_prob)
# Report emission values, transition values and likelihood of sequence
out.write('{0}\t{1}\t{2}\t{3}\t{4}\t{5}\n'.format(infile, i, 1,new_prob, 'forward.probability', model))
for state in range(len(state_names)):
out.write('{0}\t{1}\t{2}\t{3}\t{4}\t{5}\n'.format(infile, i, state, emissions[state],'emission.state.{}'.format(state+1), model))
for from_state in range(len(state_names)):
for to_state in range(len(state_names)):
out.write('{0}\t{1}\t{2}\t{3}\t{4}\t{5}\n'.format(infile, i, state, transitions[from_state][to_state],'transition.state.{0}.to.{1}'.format(from_state+1,to_state+1), model))
out.flush()
if new_prob - old_prob < epsilon:
break
old_prob = new_prob
# Write the optimal parameters
with open(outprefix + '.hmm','w') as out:
out.write('# State names (only used for decoding)\n')
out.write("states = [{states}]\n\n".format(states = ','.join(["'{}'".format(x) for x in state_names])))
out.write('# Initialization parameters (prob of staring in states)\n')
out.write("starting_probabilities = {values}\n\n".format(values = [x for x in starting_probabilities]))
out.write('# transition matrix\n')
out.write("transitions = [{values}]\n\n".format(values = ','.join(['[{}]'.format(','.join([str(y) for y in x])) for x in transitions])))
out.write('# emission matrix (poisson parameter)\n')
out.write("emissions = {values}\n".format(values = [x for x in emissions]))
| mit | Python | |
39ceccd8fe3d3a91c2975dac33e3c70dd07ce10e | add example deafult_reprocess_summaries_func | mit-probabilistic-computing-project/crosscat,probcomp/crosscat,fivejjs/crosscat,fivejjs/crosscat,probcomp/crosscat,fivejjs/crosscat,fivejjs/crosscat,probcomp/crosscat,probcomp/crosscat,fivejjs/crosscat,mit-probabilistic-computing-project/crosscat,probcomp/crosscat,probcomp/crosscat,probcomp/crosscat,fivejjs/crosscat,fivejjs/crosscat,probcomp/crosscat,mit-probabilistic-computing-project/crosscat,mit-probabilistic-computing-project/crosscat,mit-probabilistic-computing-project/crosscat,mit-probabilistic-computing-project/crosscat,mit-probabilistic-computing-project/crosscat | crosscat/utils/summary_utils.py | crosscat/utils/summary_utils.py | import numpy
#
import crosscat.utils.convergence_test_utils
def get_logscore(p_State):
    """Return the marginal log probability of the sampler state."""
    marginal = p_State.get_marginal_logp()
    return marginal
def get_num_views(p_State):
    """Number of views in the state's column partition (length of X_D)."""
    return len(p_State.get_X_D())
def get_column_crp_alpha(p_State):
    """CRP concentration parameter of the state's column partition."""
    return p_State.get_column_crp_alpha()
def get_ari(p_State):
    """ARI of the column partition against `view_assignment_truth`.

    NOTE(review): reads the free name `view_assignment_truth`, which must be
    injected by the calling environment (see comments below).
    """
    # requires environment: {view_assignment_truth}
    # requires import: {crosscat.utils.convergence_test_utils}
    X_L = p_State.get_X_L()
    ctu = crosscat.utils.convergence_test_utils
    return ctu.get_column_ARI(X_L, view_assignment_truth)
def get_mean_test_ll(p_State):
    """Mean held-out log likelihood of the current latent state.

    NOTE(review): reads the free names M_c, T and T_test from the calling
    environment (see comments below).
    """
    # requires environment {M_c, T, T_test}
    # requires import: {crosscat.utils.convergence_test_utils}
    X_L = p_State.get_X_L()
    X_D = p_State.get_X_D()
    ctu = crosscat.utils.convergence_test_utils
    return ctu.calc_mean_test_log_likelihood(M_c, T, X_L, X_D, T_test)
def get_column_partition_assignments(p_State):
    """Raw column-to-view assignment vector from the state's X_L."""
    return p_State.get_X_L()['column_partition']['assignments']
def column_partition_assignments_to_f_z_statistic(column_partition_assignments):
    """Count distinct view labels per iteration for each sampler chain.

    NOTE(review): relies on Python 2 `map` returning lists; under Python 3
    numpy.array would receive map objects — confirm before porting.
    """
    # FIXME: actually implement this
    get_num_views_over_iters = lambda vector: map(len, map(set, vector))
    intermediate = map(get_num_views_over_iters,
            column_partition_assignments.T)
    return numpy.array(intermediate).T
def default_reprocess_summaries_func(summaries_arr_dict):
    """Replace raw column partition assignments with the f(z) statistic.

    Pops 'column_partition_assignments' and stores the derived
    'f_z_statistic' entry instead; mutates and returns the same dict.
    """
    column_partition_assignments = summaries_arr_dict.pop('column_partition_assignments')
    f_z_statistic = column_partition_assignments_to_f_z_statistic(column_partition_assignments)
    summaries_arr_dict['f_z_statistic'] = f_z_statistic
    return summaries_arr_dict
| apache-2.0 | Python | |
866daca6e25e41ef35aee0539e737e33e8275645 | Create photoTweet.py | Semyonic/RaspberryPi-Projects,Semyonic/RaspberryPi-Projects,Semyonic/RaspberryPi-Projects | Twitter/photoTweet.py | Twitter/photoTweet.py | # -*- coding: utf-8 -*-
#!/usr/bin/env python2.7
import tweepy
from subprocess import call
from datetime import datetime

# Date-Time stamps: tm is the capture moment, now its filename-safe form.
tm = datetime.now()
now = tm.strftime('%Y%m%d-%H%M%S')
photo_name = now + '.jpg'

# Capture a photo with the Raspberry Pi camera, named by timestamp.
cmd = 'sudo raspistill -v -vf -mm spot -q 100 -w 1024 -h 768 -o /home/pi/' + photo_name
call ([cmd], shell=True)

# OAuth keys (fill in with your Twitter application credentials).
CONSUMER_KEY = 'yourKey'
CONSUMER_SECRET = 'yourSecret'
ACCESS_KEY = 'yourKey'
ACCESS_SECRET = 'yourSecret'
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth)

# Send the tweet with photo
photo_path = '/home/pi/' + photo_name
# BUG FIX: previously formatted the status with undefined name `i`
# (NameError at runtime); `tm` holds the timestamp taken at capture.
status = 'Photo auto-tweet from RaspberryPi: ' + tm.strftime('%Y/%m/%d %H:%M:%S')
api.update_with_media(photo_path, status=status)
| mit | Python | |
a9a4befe88b35a50470c5c48062c8f56c643203d | Create metatest.py | yan123/QABox,yan123/BitBox,yan123/QABox | metatest.py | metatest.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from unittest import TestCase
import itertools
import sys
def mix_params(args_kwargs):
args, kwargs = args_kwargs
args_len = len(args)
for i in itertools.product(*itertools.chain(args, kwargs.values())):
yield tuple(i[:args_len]), dict(zip(kwargs.keys(), i[args_len:]))
def with_combinations(*args, **kwargs):
def hook_args_kwargs(method):
method.metatest_params = (args, kwargs)
return method
return hook_args_kwargs
class MetaTest(type):
def __new__(cls, name, bases, attrs):
for method in attrs.values():
if callable(method) and hasattr(method, 'metatest_params'):
for arg, kw in mix_params(method.metatest_params):
print arg, kw # Closure here!!!!
def test_steps(self, a=arg, k=kw): return method(self, *a, **k)
test_steps.__name__ = 'test_case ' + ', '.join(arg) + ' ' + ', '.join(str(k)+'='+str(v) for k,v in kw.items())
attrs[test_steps.__name__] = test_steps
print (cls, name, bases, attrs)
return super(MetaTest, cls).__new__(cls, name, bases, attrs)
class SuiteOne(TestCase):
__metaclass__ = MetaTest
def setUp(self):
print 'running setup'
@with_combinations(row='123', col='abc')
def one(self, row, col):
self.assertFalse(row)
runTest = lambda *args: True
class T2(TestCase):
def test_case_two(self):
assert True
| bsd-2-clause | Python | |
e351b88ecbc0a75f6945abf5e1f745760b22ea2b | Delete non-primary images | DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative | ynr/apps/people/migrations/0036_delete_non_primary_images.py | ynr/apps/people/migrations/0036_delete_non_primary_images.py | # Generated by Django 3.2.4 on 2022-02-15 10:51
from django.db import migrations
def delete_non_primary_images(apps, schema_editor):
    """Forward migration: delete every PersonImage not flagged as primary."""
    # Use the historical model, as required inside migrations.
    PersonImage = apps.get_model("people", "PersonImage")
    PersonImage.objects.filter(is_primary=False).delete()
class Migration(migrations.Migration):
    """Data migration that drops non-primary person images.

    Irreversible in effect: the reverse operation is a no-op and does not
    restore the deleted rows.
    """

    dependencies = [("people", "0035_alter_person_birth_date")]

    operations = [
        migrations.RunPython(
            code=delete_non_primary_images,
            # Deletion cannot be undone; reversing simply does nothing.
            reverse_code=migrations.RunPython.noop,
        )
    ]
| agpl-3.0 | Python | |
c0271aa4812ba0c3a77d415aefed4a52b50952c3 | Convert to/from marble strings | dbrattli/RxPY,ReactiveX/RxPY,ReactiveX/RxPY | rx/testing/stringify.py | rx/testing/stringify.py | from six import add_metaclass
from rx import AnonymousObservable, Observable
from rx.concurrency import timeout_scheduler
from rx.internal import ExtensionMethod
from .coldobservable import ColdObservable
from .reactivetest import ReactiveTest
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
@add_metaclass(ExtensionMethod)
class ObservableAggregate(Observable):
"""Uses a meta class to extend Observable with the methods in this class"""
@classmethod
def from_string(cls, string, scheduler=None):
"""Converts a marble diagram string to an observable sequence, using an
optional scheduler to enumerate the events.
Special characters:
- = Timespan of 100 ms
x = on_error()
| = on_completed()
All other characters are treated as an on_next() event at the given
moment they are found on the string.
Examples:
1 - res = rx.Observable.from_string("1-2-3-|")
2 - res = rx.Observable.from_string("1-2-3-x", rx.Scheduler.timeout)
Keyword arguments:
string -- String with marble diagram
scheduler -- [Optional] Scheduler to run the the input sequence on.
Returns the observable sequence whose elements are pulled from the
given marble diagram string."""
scheduler = scheduler or timeout_scheduler
completed = [False]
messages = []
timespan = [0]
def handle_timespan(value):
timespan[0] += 100
def handle_on_next(value):
messages.append(on_next(timespan[0], value))
def handle_on_completed(value):
messages.append(on_completed(timespan[0]))
completed[0] = True
def handle_on_error(value):
messages.append(on_error(timespan[0], value))
completed[0] = True
specials = {
'-' : handle_timespan,
'x' : handle_on_error,
'|' : handle_on_completed
}
for char in string:
func = specials.get(char, handle_on_next)
func(char)
if not completed[0]:
messages.append(on_completed(timespan[0]))
return ColdObservable(scheduler, messages)
def to_string(self, scheduler=None):
"""Converts an observable sequence into a marble diagram string
Keyword arguments:
scheduler -- [Optional] The scheduler that was used to run the the input
sequence on.
Returns marble string"""
scheduler = scheduler or timeout_scheduler
source = self
def subscribe(observer):
result = []
previously = [scheduler.now()]
def add_timespan():
now = scheduler.now()
diff = now - previously[0]
previously[0] = now
msecs = scheduler.to_relative(diff)
dashes = "-" * int((msecs+50)/100)
result.append(dashes)
def on_next(value):
add_timespan()
result.append(value)
def on_error(exception):
add_timespan()
result.append(exception)
observer.on_next("".join(str(n) for n in result))
def on_completed():
add_timespan()
result.append("|")
observer.on_next("".join(str(n) for n in result))
return source.subscribe(on_next, observer.on_error, on_completed)
return AnonymousObservable(subscribe)
| mit | Python | |
5607b6fe168f4142dc2c1f343e782f232206ddee | Create apps.py | etianen/django-reversion,etianen/django-reversion | reversion/apps.py | reversion/apps.py | from django.apps import AppConfig
class ReversionConfig(AppConfig):
    """Django application configuration for the reversion app."""
    name = 'reversion'
    # Default primary-key type for models that don't set one explicitly.
    default_auto_field = 'django.db.models.BigAutoField'
| bsd-3-clause | Python | |
905aba6e79e5ec0d2087b30b60002f475234cfe4 | Make `sat.ext` a package | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/ext/__init__.py | salt/ext/__init__.py | # coding: utf-8 -*-
| apache-2.0 | Python | |
8c4e58fac4d1d020ac2da38441067959100690a5 | Add expectation that all Python code is correct | yunity/yunity-core,yunity/foodsaving-backend,yunity/foodsaving-backend,yunity/foodsaving-backend,yunity/yunity-core | yunity/tests/integration/test_python.py | yunity/tests/integration/test_python.py | from importlib import import_module, reload
from os.path import join as join_path, dirname
from os import walk
from sys import modules
from yunity.utils.tests.abc import BaseTestCase
import yunity
def _path_to_module(path, root_module_path, pysuffix='.py'):
path = path[len(dirname(root_module_path)) + 1:-len(pysuffix)]
path = path.replace('/', '.')
return path
def iter_modules(root_module_path, excludes=None, pysuffix='.py'):
    """Yield dotted module names for every .py file under *root_module_path*.

    Package __init__ modules are skipped, as is any module whose dotted name
    starts with one of the prefixes in *excludes*.
    """
    def is_module(_):
        # A module is a .py file that is not a package __init__.
        return _.endswith(pysuffix) and not _.startswith('__init__')
    def is_blacklisted(_):
        return excludes and any(_.startswith(exclude) for exclude in excludes)
    for root, _, leaves in walk(root_module_path):
        for leaf in filter(is_module, leaves):
            module = _path_to_module(join_path(root, leaf), root_module_path)
            if not is_blacklisted(module):
                yield module
def import_or_reload(resource):
    """Import *resource* by dotted name, reloading it if already imported."""
    cached = modules.get(resource)
    if cached:
        return reload(cached)
    return import_module(resource)
class PytonIsValidTestCase(BaseTestCase):
    """Checks that every module in the yunity package imports without error."""
    # NOTE(review): the class name looks like a typo for "Python...";
    # renaming would change the public interface, so it is only flagged here.

    def test_all_modules_import_cleanly(self):
        self.given_data(root_module_path=yunity.__path__[0])
        self.given_data(excludes={
            'yunity.resources',  # integration test data files have side-effects
            'yunity.tests.integration.test_integration',  # integration test runner has side-effects
            'yunity.management.commands.create_sample_data',  # sample data command has side-effects
        })
        self.when_importing_modules()
        self.then_all_modules_import_cleanly()

    def when_importing_modules(self):
        # Collect (module, exception) pairs instead of failing fast, so the
        # report lists every broken module at once.
        self.exception = []
        for module in iter_modules(*self.args, **self.kwargs):
            try:
                import_or_reload(module)
            except Exception as e:
                self.exception.append((module, e))

    def then_all_modules_import_cleanly(self):
        for module, exception in self.exception:
            self.fail('{} did not import cleanly: {}'.format(module, exception.args[0]))
| agpl-3.0 | Python | |
a7cc097497164b36513874c74828d00fc1d42b0b | Add stack example | b-ritter/python-notes,b-ritter/python-notes | data-structures/stacks/stack.py | data-structures/stacks/stack.py | ex_strs = [
"(())",
"([[]])",
"(]",
"(((())))[",
"()"
]
def check_balanced(s):
    """Return True when every '(' / '[' in *s* is closed by the matching
    bracket in the right order, False otherwise."""
    pairs = {')': '(', ']': '['}
    pending = []
    for symbol in s:
        if symbol in ('(', '['):
            pending.append(symbol)
        elif not pending:
            # A closer (or stray character) with nothing open.
            return False
        elif pairs.get(symbol) != pending.pop():
            # Wrong kind of closer for the most recent opener.
            return False
    return not pending
# Print the balance verdict for each sample string above.
for s in ex_strs:
    print(check_balanced(s))
e349f976de289219978afa807e2f177e404a1182 | add mdbsi distributions | dit/dit,Autoplectic/dit,dit/dit,dit/dit,dit/dit,Autoplectic/dit,Autoplectic/dit,Autoplectic/dit,dit/dit,Autoplectic/dit | dit/example_dists/mdbsi.py | dit/example_dists/mdbsi.py | """
The two distributions studied in Multivariate Dependencies Beyond Shannon Information.
"""
from ..distconst import uniform
__all__ = ['dyadic', 'triadic']

# "Dyadic" example from the paper: uniform distribution over eight
# three-symbol joint outcomes, random variables named X, Y, Z.
dyadic = uniform(['000', '021', '102', '123', '210', '231', '312', '333'])
dyadic.set_rv_names('XYZ')

# "Triadic" counterpart: same support size and naming, different outcomes.
triadic = uniform(['000', '111', '022', '133', '202', '313', '220', '331'])
triadic.set_rv_names('XYZ')
| bsd-3-clause | Python | |
95d23d7c68ff03acc7d854cd91fa3eaabfaf1a4f | add a unit tests | tcc-unb-fga/debile,mdimjasevic/debile,lucaskanashiro/debile,opencollab/debile,lucaskanashiro/debile,mdimjasevic/debile,opencollab/debile,tcc-unb-fga/debile | tests/test_utils.py | tests/test_utils.py | from debile.utils.commands import run_command
from debile.utils.commands import safe_run
from debile.utils.deb822 import Changes
def test_run_command():
    """Smoke-test run_command(): runs real binaries, and a missing
    executable yields a non-zero exit status."""
    run_command("ls")
    run_command("cat","foo")
    # "ls2" does not exist, so the call must report failure.
    (output, output_stderr, exit_status) = run_command("ls2")
    assert exit_status != 0

def test_safe_run():
    """safe_run() behaves like run_command() but checks the exit status
    against *expected*."""
    safe_run("ls",expected=0)
    safe_run("ls",expected=0)
    safe_run("cat","foo")
    # expected=-1 presumably disables the status check — TODO confirm.
    (output, output_stderr, exit_status) = safe_run("ls2",expected=-1)
    assert exit_status != 0

def test_deb822():
    """Parsing a sample .changes file yields its "Files" field.

    NOTE(review): there is no assertion on the result and the file handle
    is never closed; this only checks that parsing does not raise.
    """
    files = Changes(open("tests/samples/morse-simulator_1.2.1-2_amd64.changes", "r")).get("Files", [])
| mit | Python | |
50803718c2629c53503ca6831dc97e2b263fe526 | add login script | parrt/msan692,parrt/msan692,parrt/msan692 | notes/code/selenium/login.py | notes/code/selenium/login.py | from Tkinter import *
master = Tk()

# Two-row login form: labels in column 0, entry widgets in column 1.
Label(master, text="Username").grid(row=0)
Label(master, text="Password").grid(row=1)

user = Entry(master)
password = Entry(master, show="*")  # mask typed characters

user.grid(row=0, column=1)
password.grid(row=1, column=1)

def login():
    # Quit the main loop; the credentials are read after mainloop() returns
    # (Python 2 print statement below).
    print "hi"
    master.quit()

Button(master, text='Login', command=login).grid(row=3, column=0, sticky=W, pady=4)

mainloop()
print user.get(), password.get() | mit | Python | |
87fdc8ab59baa989d57c482085d67fb139573313 | Test - Trigger AttributeError with an get_name call in daemonlinks | titilambert/alignak,Alignak-monitoring/alignak,gst/alignak,titilambert/alignak,Alignak-monitoring/alignak,gst/alignak,gst/alignak,titilambert/alignak,titilambert/alignak,gst/alignak | test/test_get_name.py | test/test_get_name.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
import unittest2 as unittest
from alignak.objects.arbiterlink import ArbiterLink
from alignak.objects.schedulerlink import SchedulerLink
from alignak.objects.brokerlink import BrokerLink
from alignak.objects.reactionnerlink import ReactionnerLink
from alignak.objects.receiverlink import ReceiverLink
from alignak.objects.pollerlink import PollerLink
class template_DaemonLink_get_name():
    """Mixin shared by the per-daemon test cases below.

    Subclasses set ``daemon_link`` to a *Link class and also inherit from
    unittest.TestCase; the mixin itself deliberately has no TestCase base
    so it is not collected on its own.
    """

    def get_link(self):
        """Build a link object from an empty configuration dict."""
        cls = self.daemon_link
        return cls({})

    def test_get_name(self):
        """get_name() on an unconfigured link must fall back to an
        'Unnamed <type>' label instead of raising AttributeError."""
        link = self.get_link()
        try:
            # assertEqual replaces the deprecated assertEquals alias.
            self.assertEqual("Unnamed {0}".format(self.daemon_link.my_type), link.get_name())
        except AttributeError:
            # self.fail() states the intent directly instead of
            # assertTrue(False, ...).
            self.fail("get_name should not raise AttributeError")
# Concrete test cases: one per daemon link type, each reusing the mixin's
# test_get_name with its own daemon_link class.

class Test_ArbiterLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
    daemon_link = ArbiterLink

class Test_SchedulerLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
    daemon_link = SchedulerLink

class Test_BrokerLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
    daemon_link = BrokerLink

class Test_ReactionnerLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
    daemon_link = ReactionnerLink

class Test_ReceiverLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
    daemon_link = ReceiverLink

class Test_PollerLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
    daemon_link = PollerLink

if __name__ == '__main__':
    unittest.main()
| agpl-3.0 | Python | |
5fd7164238281c23ea89999687bde7b6b2d179ce | Create bisect_search.py | Navuchodonosor/octo-turtle,Navuchodonosor/octo-turtle,Navuchodonosor/octo-turtle | bisect_search.py | bisect_search.py | x=int(raw_input('Enter your integer number(!not negative!):'))
EPSILON = 0.01


def bisect_sqrt(x, epsilon=EPSILON):
    """Approximate the square root of non-negative *x* by bisection.

    The search stops once ans**2 is within *epsilon* of *x*.
    Raises ValueError for negative input (the original loop never
    terminated in that case).
    """
    if x < 0:
        raise ValueError('cannot take the square root of a negative number')
    if x == 0:
        return 0.0
    low = 0.0
    # For 0 < x < 1 the root is larger than x itself, so search up to 1.0
    # (the original high = x never converged for fractional input).
    high = max(x, 1.0)
    ans = (high + low) / 2.0
    while abs(ans ** 2 - x) >= epsilon:
        if ans ** 2 < x:
            low = ans
        else:
            high = ans
        ans = (high + low) / 2.0
    return ans


if __name__ == '__main__':
    print (str(bisect_sqrt(x)) + ' is close to square root of ' + str(x))
| mit | Python | |
0e642c373a67997041664e51d0c867b1af4583fb | add missing file from last commit | blablacar/exabgp,earies/exabgp,benagricola/exabgp,earies/exabgp,blablacar/exabgp,fugitifduck/exabgp,fugitifduck/exabgp,dneiter/exabgp,blablacar/exabgp,PowerDNS/exabgp,fugitifduck/exabgp,dneiter/exabgp,chrisy/exabgp,earies/exabgp,chrisy/exabgp,lochiiconnectivity/exabgp,PowerDNS/exabgp,dneiter/exabgp,benagricola/exabgp,benagricola/exabgp,lochiiconnectivity/exabgp,chrisy/exabgp,PowerDNS/exabgp,lochiiconnectivity/exabgp | dev/apitest/operational-send.py | dev/apitest/operational-send.py | #!/usr/bin/env python
import os
import sys
import time
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 1
# Emit one ExaBGP "operational" API line per second, alternating between an
# adm message (odd ticks) and an asm message (even ticks); each line is
# duplicated on stderr for logging (Python 2 print syntax).  Ctrl-C is
# swallowed; a broken pipe (parent gone) ends the loop.
while True:
    try:
        time.sleep(1)
        if counter % 2:
            print 'operational adm "this is dynamic message #%d"' % counter
            sys.stdout.flush()
            print >> sys.stderr, 'operational adm "this is dynamic message #%d"' % counter
            sys.stderr.flush()
        else:
            print 'operational asm "we SHOULD not send asm from the API"'
            sys.stdout.flush()
            print >> sys.stderr, 'operational asm "we SHOULD not send asm from the API"'
            sys.stderr.flush()
        counter += 1
    except KeyboardInterrupt:
        pass
    except IOError:
        break
| bsd-3-clause | Python | |
51947adcc02d6a5ae20494f577e6402f2f263fcf | Add the arrayfns compatibility library -- not finished. | teoliphant/numpy-refactor,Ademan/NumPy-GSoC,chadnetzer/numpy-gaurdro,Ademan/NumPy-GSoC,efiring/numpy-work,chadnetzer/numpy-gaurdro,Ademan/NumPy-GSoC,jasonmccampbell/numpy-refactor-sprint,Ademan/NumPy-GSoC,teoliphant/numpy-refactor,illume/numpy3k,illume/numpy3k,efiring/numpy-work,jasonmccampbell/numpy-refactor-sprint,chadnetzer/numpy-gaurdro,illume/numpy3k,teoliphant/numpy-refactor,teoliphant/numpy-refactor,efiring/numpy-work,illume/numpy3k,teoliphant/numpy-refactor,chadnetzer/numpy-gaurdro,efiring/numpy-work,jasonmccampbell/numpy-refactor-sprint,jasonmccampbell/numpy-refactor-sprint | numpy/oldnumeric/arrayfns.py | numpy/oldnumeric/arrayfns.py | """Backward compatible with arrayfns from Numeric
"""
__all__ = ['array_set', 'construct3', 'digitize', 'error', 'find_mask', 'histogram', 'index_sort',
'interp', 'nz', 'reverse', 'span', 'to_corners', 'zmin_zmax']
import numpy as nx
from numpy import asarray
class error(Exception):
    """Exception type kept for compatibility with Numeric's arrayfns."""
    pass
def array_set(vals1, indices, vals2):
    """Set ``vals1[indices] = vals2`` in place.

    *vals1* must already be an ndarray (it is modified in place) and
    *indices* must be one-dimensional.  Raises ``error`` when the two
    value arrays disagree in dimensionality.
    """
    indices = asarray(indices)
    if indices.ndim != 1:
        # raise-as-call works on both Python 2 and 3, unlike the original
        # "raise ValueError, msg" form.
        raise ValueError("index array must be 1-d")
    if not isinstance(vals1, nx.ndarray):
        # The original tested against a bare, undefined name ``ndarray``.
        raise TypeError("vals1 must be an ndarray")
    vals2 = asarray(vals2)
    if vals1.ndim != vals2.ndim or vals1.ndim < 1:
        raise error("vals1 and vals2 must have same number of dimensions (>=1)")
    vals1[indices] = vals2
def construct3(mask, itype):
    """Not ported from Numeric's arrayfns; always raises."""
    raise NotImplementedError
from numpy import digitize
def find_mask(fs, node_edges):
    """Not ported from Numeric's arrayfns; always raises."""
    raise NotImplementedError

def histogram(lst, weight=None):
    """Not ported from Numeric's arrayfns; always raises."""
    raise NotImplementedError
def index_sort(arr):
    """Return the indices that would heap-sort *arr* (old Numeric API)."""
    # 'heap' relied on NumPy matching sort kinds by first letter; spell the
    # kind out so newer NumPy versions accept it without deprecation.
    return asarray(arr).argsort(kind='heapsort')
def interp(y, x, z, typ=None):
    """y(z) interpolated by treating y(x) as piecewise linear function.

    *typ* may be None or 'd' for float64 output, or 'f' for float32;
    anything else raises ``error``.
    """
    # The module is imported as ``nx``; the original referred to it as
    # ``numpy``, which raised NameError.
    res = nx.interp(z, x, y)
    if typ is None or typ == 'd':
        return res
    if typ == 'f':
        return res.astype('f')
    raise error("incompatible typecode")
def nz(x):
    """Return 1 + the index of the last non-zero byte of 1-d *x*
    (i.e. the length of the shortest prefix containing every non-zero
    entry), or 0 when all entries are zero.
    """
    x = asarray(x, dtype=nx.ubyte)
    if x.ndim != 1:
        # Py2/Py3-compatible raise; also fixes the "intput" typo.
        raise TypeError("input must have 1 dimension.")
    indxs = nx.flatnonzero(x != 0)
    if len(indxs) == 0:
        # The original crashed with IndexError on all-zero input.
        return 0
    return indxs[-1].item() + 1
def reverse(x, n):
    """Return a float64 copy of 2-d array *x* reversed along axis *n*.

    *n* must be 0 (flip rows) or 1 (flip columns); anything else raises
    ValueError (the original silently returned uninitialized memory).
    """
    x = asarray(x, dtype='d')
    if x.ndim != 2:
        raise ValueError("input must be 2-d")
    if n == 0:
        return x[::-1, :].copy()
    if n == 1:
        return x[:, ::-1].copy()
    raise ValueError("n must be 0 or 1")
def span(lo, hi, num, d2=0):
    """Return *num* evenly spaced values from *lo* to *hi*.

    With ``d2 == 0`` (default) a 1-d array is returned; with ``d2 > 0``
    the row is replicated into a (d2, num) array.
    """
    # Original had a syntax error ("if d2 <= 0" without a colon) and used
    # unqualified linspace/empty that were never imported.
    x = nx.linspace(lo, hi, num)
    if d2 <= 0:
        return x
    out = nx.empty((d2, num), x.dtype)
    out[...] = x
    return out
def to_corners(arr, nv, nvsum):
    """Not ported from Numeric's arrayfns; always raises."""
    raise NotImplementedError

def zmin_zmax(z, ireg):
    """Not ported from Numeric's arrayfns; always raises."""
    raise NotImplementedError
| bsd-3-clause | Python | |
f52e033c433cba6de6611b123b1c075f34fe96bf | add tools/simple_gen.py | buganini/bsdconv,buganini/bsdconv,buganini/bsdconv,buganini/bsdconv | tools/simple_gen.py | tools/simple_gen.py | # simple_gen.py from_column to_column file
import sys
import re
def bsdconv01(dt):
    """Format a hex code point as a bsdconv "01xx" token.

    Surrounding whitespace and leading zeros are dropped, the digits are
    upper-cased, and the "01" codec prefix gains an extra '0' when the
    digit count is odd so the result always has an even number of digits.
    """
    digits = dt.strip().lstrip("0").upper()
    prefix = "010" if len(digits) % 2 else "01"
    return prefix + digits
# Strip common code point prefixes ("U+" / "0X" after upper-casing).
stp = re.compile(r"^(U\+|0X)")
# Whitespace column separator.
sep = re.compile(r"\s+")
# A valid field: hex digits, possibly several values joined by commas.
vld = re.compile(r"^[a-fA-F0-9,]+$")

from_column = int(sys.argv[1])
to_column = int(sys.argv[2])
f=open(sys.argv[3])
for l in f:
    l = l.strip().upper()
    # Skip blank lines and comments.
    if l == "":
        continue
    if l.startswith("#"):
        continue
    a = sep.split(l)
    fr = stp.sub("", a[from_column])
    to = stp.sub("", a[to_column])
    # Emit only rows where both fields look like hex code points.
    if not vld.match(fr):
        continue
    if not vld.match(to):
        continue
    print("%s\t%s" % (fr, bsdconv01(to)))
| bsd-2-clause | Python | |
003378314b8e11f6b67d155348708964ec184292 | add XML validation utility | benhowell/pycsw,ingenieroariel/pycsw,tomkralidis/pycsw,ricardogsilva/pycsw,kalxas/pycsw,geopython/pycsw,kevinpdavies/pycsw,mwengren/pycsw,ocefpaf/pycsw,PublicaMundi/pycsw,geopython/pycsw,ckan-fcd/pycsw-fcd,kevinpdavies/pycsw,ckan-fcd/pycsw-fcd,tomkralidis/pycsw,ocefpaf/pycsw,bukun/pycsw,ingenieroariel/pycsw,ricardogsilva/pycsw,ricardogsilva/pycsw,tomkralidis/pycsw,bukun/pycsw,PublicaMundi/pycsw,geopython/pycsw,rouault/pycsw,kalxas/pycsw,benhowell/pycsw,rouault/pycsw,mwengren/pycsw,bukun/pycsw,kalxas/pycsw | sbin/validate_xml.py | sbin/validate_xml.py | #!/usr/bin/python
# -*- coding: ISO-8859-15 -*-
# =================================================================
#
# $Id$
#
# Authors: Angelos Tzotsos <tzotsos@gmail.com>
#
# Copyright (c) 2011 Angelos Tzotsos
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import os
import sys
from lxml import etree
# Expect two arguments: the XML document and the XSD schema to check it
# against (Python 2 print/except syntax throughout).
if len(sys.argv) < 3:
    print 'Usage: %s <xml> <xsd>' % sys.argv[0]
    sys.exit(1)

print 'Validating %s against schema %s' % (sys.argv[1], sys.argv[2])

# Attaching the schema to the parser validates during parsing.
schema = etree.XMLSchema(etree.parse(sys.argv[2]))
parser = etree.XMLParser(schema=schema)

try:
    valid = etree.parse(sys.argv[1], parser)
    print 'Valid XML document'
except Exception, err:
    # Parse or validation failure: report it (exit status stays 0).
    print 'ERROR: %s' % str(err)
| mit | Python | |
1aaf19e369033c9baf198b0ab1ee43067dd2669d | Add Base Bot | fisadev/choppycamp | bots/base_bot.py | bots/base_bot.py | import choppycamp.constants as constants
class BaseBot(object):
    """Minimal bot: stores its setup and always answers with DANCE."""

    def __init__(self, name, map_, enemy):
        # Keep the configuration handed in by the game engine.
        self.name = name
        self.map = map_
        self.enemy = enemy

    def act(self, map_):
        """Choose an action for the current turn; *map_* is ignored here."""
        return constants.DANCE
| mit | Python | |
fcf567669ac3cee053928ef7b2da0eead2e017a1 | Add initial tests for the helpers module. | wulczer/flvlib | test/test_helpers.py | test/test_helpers.py | import unittest
import datetime
from flvlib import helpers
class TestFixedOffsetTimezone(unittest.TestCase):
    """Behaviour of flvlib.helpers.FixedOffset, a fixed-offset tzinfo."""

    def test_utcoffset(self):
        # The offset is constant regardless of the flag argument.
        fo = helpers.FixedOffset(30, "Fixed")
        self.assertEquals(fo.utcoffset(True), datetime.timedelta(minutes=30))
        self.assertEquals(fo.utcoffset(False), datetime.timedelta(minutes=30))
        fo = helpers.FixedOffset(-15, "Fixed")
        self.assertEquals(fo.utcoffset(True), datetime.timedelta(minutes=-15))
        fo = helpers.FixedOffset(0, "Fixed")
        self.assertEquals(fo.utcoffset(True), datetime.timedelta(minutes=0))

    def test_tzname(self):
        fo = helpers.FixedOffset(15, "Fixed")
        self.assertEquals(fo.tzname(True), "Fixed")
        self.assertEquals(fo.tzname(False), "Fixed")

    def test_dst(self):
        # A fixed-offset zone never applies DST.
        fo = helpers.FixedOffset(15, "Fixed")
        self.assertEquals(fo.dst(False), datetime.timedelta(0))
        self.assertEquals(fo.dst(True), datetime.timedelta(0))

    def test_repr(self):
        fo = helpers.FixedOffset(15, "Fixed")
        self.assertEquals(repr(fo),
                          "<FixedOffset %s>" % datetime.timedelta(minutes=15))
class TestOrderedAttrDict(unittest.TestCase):
    """flvlib.helpers.OrderedAttrDict: a dict that keeps insertion order
    (Python 2 API — has_key/iteritems are exercised below)."""

    def test_creating(self):
        o1 = helpers.OrderedAttrDict()
        o2 = helpers.OrderedAttrDict(dict(a=1, b='c'))
        o3 = helpers.OrderedAttrDict(a=1, b='c')
        self.assertNotEquals(o1, o2)
        self.assertNotEquals(o1, o3)
        self.assertEquals(o2, o3)

    def test_mapping(self):
        # Walk the whole mapping protocol on one instance.
        o = helpers.OrderedAttrDict({'a': 1, 'b': 'c'})
        self.assertEquals(o['a'], 1)
        self.assertEquals(o['b'], 'c')
        self.assertRaises(KeyError, o.__getitem__, 'c')
        self.assertTrue('a' in o)
        self.assertFalse('c' in o)
        o['c'] = 1.5
        self.assertTrue('c' in o)
        self.assertTrue(o.has_key('c'))
        self.assertEquals(o['c'], 1.5)
        o['a'] = 2
        self.assertEquals(o['a'], 2)
        del o['c']
        self.assertTrue('c' not in o)
        self.assertFalse(o.has_key('c'))
        self.assertEquals(o.get('a', None), 2)
        self.assertEquals(o.get('c', None), None)
        # items()/keys() must come back in insertion order.
        self.assertEquals(o.items(), [('a', 2), ('b', 'c')])
        i = o.iteritems()
        self.assertTrue(iter(i), i)
        self.assertEquals(list(i), [('a', 2), ('b', 'c')])
        self.assertEquals(o.keys(), ['a', 'b'])
        i = o.iterkeys()
        self.assertTrue(iter(i), i)
        self.assertEquals(list(i), ['a', 'b'])
        self.assertEquals(o.pop('b'), 'c')
        self.assertFalse('b' in o)
        self.assertEquals(o.pop('c', None), None)
        self.assertRaises(KeyError, o.pop, 'c')
        self.assertEquals(o.values(), [2])
        self.assertEquals(o.setdefault('a', 1), 2)
        self.assertEquals(o.setdefault('b', 'c'), 'c')
        self.assertEquals(o['b'], 'c')
        self.assertTrue(bool(o))
        del o['b']
        self.assertEquals(o.popitem(), ('a', 2))
        self.assertRaises(KeyError, o.popitem)
        self.assertFalse(bool(o))
| mit | Python | |
d0f92caf504e78a3fd7257ac9fab1fbd9c039212 | Add simple tests for DAP validator wrappers. | enthought/distarray,RaoUmer/distarray,enthought/distarray,RaoUmer/distarray | distarray/tests/test_testing.py | distarray/tests/test_testing.py | # encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
import unittest
from distarray import testing
class TestRaiseTypeError(unittest.TestCase):
    """The DAP validator wrappers accept well-formed structures and raise
    TypeError on malformed ones."""

    def test_good_dim_dict(self):
        dim_dict = {}
        success, msg = testing.validate_dim_dict(3, dim_dict)
        self.assertTrue(success)

    def test_good_bad_dim_dict(self):
        # A bare dist_type without its companion keys is invalid.
        dim_dict = {'dist_type': 'b'}
        with self.assertRaises(TypeError):
            testing.validate_dim_dict(3, dim_dict)

    def test_good_dim_data(self):
        dim_data = ({}, {}, {})
        success, msg = testing.validate_dim_data(dim_data)
        self.assertTrue(success)

    def test_good_bad_dim_data(self):
        dim_data = ({'dist_type': 'b'}, {}, {})
        with self.assertRaises(TypeError):
            testing.validate_dim_data(dim_data)

    def test_good_distbuffer(self):
        dim_data = ({},)
        distbuffer = dict(__version__='0.10.0',
                          buffer=bytearray([1,2,3,4]),
                          dim_data=dim_data)
        success, msg = testing.validate_distbuffer(distbuffer)
        self.assertTrue(success)

    def test_bad_distbuffer(self):
        # Deliberately misspelled keys must be rejected.
        dim_data = ({},)
        distbuffer = dict(__venison__='0.10.0',
                          biffer=bytearray([1,2,3,4]),
                          dim_doodle=dim_data)
        with self.assertRaises(TypeError):
            testing.validate_distbuffer(distbuffer)
| bsd-3-clause | Python | |
c977c382d49403f7856f1d3cb8398412a8212034 | Add script to update bio processes | clulab/bioresources | scripts/update_go.py | scripts/update_go.py | """This script updates bio_processes.tsv based on names and synonyms from GO.
It also incorporates old, presumably manually collected GO and MeSH entries
some of which provide synonyms that the official GO download doesn't.
This script therefore adds these, as long as they are not redundant.
"""
import os
import re
import csv
import obonet
from collections import defaultdict
def get_synonyms(syns_entry, allowed=frozenset({'EXACT', 'RELATED'})):
    """Return the synonym strings from a GO OBO ``synonym`` list.

    Each entry looks like ``"some name" EXACT []``; only entries whose
    status is in *allowed* are kept.  The allowed-status set used to be a
    global defined only inside the ``__main__`` block (so the function
    crashed with NameError when imported); it is now a parameter whose
    default matches the old value.

    Raises AttributeError (via ``match.groups()``) when an entry does not
    match the expected format, as the original did.
    """
    synonyms = []
    for synonym in syns_entry:
        match = re.match(r'^\"(.+)\" (EXACT|RELATED|NARROW|BROAD)', synonym)
        syn, status = match.groups()
        if status in allowed:
            synonyms.append(syn)
    return synonyms
def length_filter(txt):
    """Return True for names worth keeping as lexicon entries."""
    # We filter out single-character names and names that are very long
    # and so are unlikely to be ever found in text.
    # NOTE(review): the __main__ block calls this as length_filter(name, txt)
    # with two arguments, which raises TypeError — that call site needs fixing.
    return 2 <= len(txt) <= 50
def read_manual_entries():
    """Load the old manual entries."""
    # kb_dir is defined in the __main__ block below; this function can only
    # run after that setup has executed.
    fname = os.path.join(kb_dir, 'bio_process_manual.tsv')
    with open(fname, 'r') as fh:
        reader = csv.reader(fh, delimiter='\t')
        return [row for row in reader]
def get_entries_by_id(entries):
    """Group (text, namespace) pairs by identifier.

    *entries* holds (text, id, extra, namespace) rows; the result maps
    each id to the set of (text, namespace) pairs seen for it.
    """
    grouped = defaultdict(set)
    for text, ident, _unused, namespace in entries:
        grouped[ident].add((text, namespace))
    return grouped
if __name__ == '__main__':
    # Basic positioning
    here = os.path.dirname(os.path.abspath(__file__))
    kb_dir = os.path.join(here, os.pardir, 'src', 'main', 'resources', 'org',
                          'clulab', 'reach', 'kb')
    resource_fname = os.path.join(kb_dir, 'bio_process.tsv')

    # Download GO resource file
    url = 'http://purl.obolibrary.org/obo/go.obo'
    g = obonet.read_obo(url)
    allowed_synonyms = {'EXACT', 'RELATED'}
    entries = []
    for node, data in g.nodes(data=True):
        name = data['name']
        if data['namespace'] != 'biological_process':
            continue
        synonyms = get_synonyms(data.get('synonym', []))
        # Fix: length_filter takes a single name; it was previously called
        # with two arguments here, which raised TypeError.
        entries += [(txt, node, '', 'go') for txt in ([name] + synonyms)
                    if length_filter(txt)]
    # Here we sort out redundancies between old and new entries and add old
    # ones only if they are not redundant
    new_entries_by_id = get_entries_by_id(entries)
    manual_entries = read_manual_entries()
    manual_entries_by_id = get_entries_by_id(manual_entries)
    for id, txt_ns in manual_entries_by_id.items():
        for txt, ns in txt_ns:
            # new_entries_by_id maps id -> {(text, namespace)}, so compare
            # against the text parts; the old "txt not in ..." checked a
            # string against a set of tuples and was always True.
            known_texts = {t for t, _ in new_entries_by_id[id]}
            # Make sure we don't already have that synonym
            if ns == 'go' and (txt not in known_texts):
                print('Adding %s: %s' % (txt, id))
                entries.append((txt, id, '', ns))
            # Make sure the same synonym isn't in GO if this is from MeSH
            elif ns == 'mesh' and (txt not in {e[0] for e in entries}):
                print('Adding %s: %s' % (txt, id))
                entries.append((txt, id, '', ns))
    # We sort the entries first by the synonym but in a way that special
    # characters and capitalization is ignored, then sort by ID
    entries = sorted(set(entries), key=(lambda x:
                                        (re.sub('[^A-Za-z0-9]', '', x[0]).lower(),
                                         x[1])))
    # Now dump the entries into an updated TSV file
    with open(resource_fname, 'w') as fh:
        writer = csv.writer(fh, delimiter='\t')
        for entry in entries:
            writer.writerow(entry)
| apache-2.0 | Python | |
987f8325b32c468ab2aa134bed35314e0170def1 | implement most of repl module | sammdot/circa | modules/repl.py | modules/repl.py | import code
import sys
from util.nick import nickeq, nicklower
class Repl(code.InteractiveConsole):
    """An interactive Python console whose output is relayed to an IRC
    channel instead of standard output."""

    def __init__(self, circa, channel):
        # Expose the bot instance inside the interpreter namespace.
        code.InteractiveConsole.__init__(self, {"circa": circa})
        self.circa = circa
        self.channel = channel
        self.buf = ""

    def write(self, data):
        """Collect interpreter output until the next flush()."""
        self.buf = self.buf + data

    def flush(self):
        """Send buffered output to the channel and reset the buffer."""
        message = self.buf.rstrip("\n")
        if message:
            self.circa.say(self.channel, message)
        self.buf = ""

    def run(self, code):
        """Execute one line of code, capturing stdout for the channel."""
        sys.stdout = sys.interp = self
        self.push(code)
        sys.stdout = sys.__stdout__
        self.flush()
class ReplModule:
    """Bot module that evaluates messages prefixed with '>>> ' in a Repl."""

    def __init__(self, circa):
        import logging
        logging.info("Loading repl")
        self.circa = circa
        # One Repl per target (channel or nick), created lazily.
        self.repls = {}

    def onload(self):
        self.circa.add_listener("message", self.handle_repl)

    def onunload(self):
        self.circa.remove_listener("message", self.handle_repl)

    def handle_repl(self, fr, to, text):
        # Private messages (addressed to the bot's own nick) are answered
        # to the sender; channel messages are answered in the channel.
        if text.startswith(">>> "):
            self.repl(nicklower(fr), nicklower(fr) if nickeq(to, self.circa.nick) \
                else nicklower(to), text[len(">>> "):])

    def repl(self, fr, to, command):
        # Only admins may execute code.
        if self.circa.is_admin(fr):
            if to not in self.repls:
                self.repls[to] = Repl(self.circa, to)
            self.repls[to].run(command)

module = ReplModule
| bsd-3-clause | Python | |
89d25a460fd805a53bdfbced459c78a24f3b7da0 | Add yamtbx.ipython command | keitaroyam/yamtbx,keitaroyam/yamtbx,keitaroyam/yamtbx,keitaroyam/yamtbx | yamtbx/command_line/yamtbx_ipython.py | yamtbx/command_line/yamtbx_ipython.py | # LIBTBX_SET_DISPATCHER_NAME yamtbx.ipython
import sys
from IPython import start_ipython
if __name__ == '__main__':
    # Launch an interactive IPython shell; propagate its exit status.
    sys.exit(start_ipython())
| bsd-3-clause | Python | |
3f7e08443e6c5a00b9df9831fad0a13f7c516dd0 | add simple test for login | opmuse/opmuse,opmuse/opmuse,opmuse/opmuse,opmuse/opmuse | opmuse/test/test_security.py | opmuse/test/test_security.py | from . import setup_db, teardown_db
from nose.tools import with_setup
from opmuse.security import User, hash_password
@with_setup(setup_db, teardown_db)
class TestSecurity:
    """Password hashing round-trip against the seeded admin user.

    NOTE(review): nose's with_setup is documented for test *functions*;
    applying it to a class may not run the setup — confirm.
    """

    def test_login(self):
        # self.session is presumably provided by the db setup — TODO confirm.
        user = self.session.query(User).filter_by(login="admin").one()
        # Hashing the correct password with the stored salt must reproduce
        # the stored hash; a wrong password must not.
        hashed = hash_password("admin", user.salt)
        assert hashed == user.password
        hashed = hash_password("wrong", user.salt)
        assert hashed != user.password
| agpl-3.0 | Python | |
6dfa189bdab536ecfa2c14e4893017363923ee6a | Implement Naive Bayes Classifier builder method | ah450/ObjectRecognizer | bayes.py | bayes.py | import numpy as np
import cv2
# pos and neg are positive and negative instances
# each is a list of files of nparray dumps,
# nparray of BoW histograms; shape = (n, 101)
# of the class to be trained for
def build_trained_classifier(pos_files, neg_files):
    """Train an OpenCV normal Bayes classifier from serialized histograms.

    Each file in *pos_files* / *neg_files* is a NumPy dump of a
    bag-of-words histogram row (shape (101,)); positives are labelled
    1.0 and negatives 0.0.
    """
    num_pos = len(pos_files)
    total = num_pos + len(neg_files)
    samples = np.empty((total, 101), np.float32)
    row = 0
    for path in pos_files:
        samples[row] = np.load(path)
        row += 1
    for path in neg_files:
        samples[row] = np.load(path)
        row += 1
    labels = np.empty((total, 1), np.float32)
    labels[:num_pos, 0] = 1.0
    labels[num_pos:, 0] = 0.0
    return cv2.NormalBayesClassifier(samples, labels)
| mit | Python | |
e90fb0e3ca17f15a5058a3f1d1e08be376b1863b | Add Asset Instance model | cgwire/zou | zou/app/models/asset_instance.py | zou/app/models/asset_instance.py | from sqlalchemy_utils import UUIDType
from zou.app import db
from zou.app.models.serializer import SerializerMixin
from zou.app.models.base import BaseMixin
class AssetInstance(db.Model, BaseMixin, SerializerMixin):
    """One occurrence of an asset inside a shot.

    The (asset, shot, number) triple is unique, so the same asset can
    appear several times in a shot under distinct instance numbers.
    """

    # Both foreign keys point at the generic entity table.
    asset_id = db.Column(UUIDType(binary=False), db.ForeignKey('entity.id'))
    shot_id = db.Column(UUIDType(binary=False), db.ForeignKey('entity.id'))
    number = db.Column(db.Integer())
    description = db.Column(db.String(200))

    __table_args__ = (
        db.UniqueConstraint(
            'asset_id',
            'shot_id',
            'number',
            name='asset_instance_uc'
        ),
    )
| agpl-3.0 | Python | |
6b215d2dd3c6915b4d1c5a46b2a890b14cae7d75 | Add test for Issue #1537 | aikramer2/spaCy,recognai/spaCy,spacy-io/spaCy,spacy-io/spaCy,aikramer2/spaCy,aikramer2/spaCy,recognai/spaCy,recognai/spaCy,honnibal/spaCy,explosion/spaCy,explosion/spaCy,aikramer2/spaCy,recognai/spaCy,spacy-io/spaCy,honnibal/spaCy,recognai/spaCy,honnibal/spaCy,explosion/spaCy,spacy-io/spaCy,spacy-io/spaCy,honnibal/spaCy,recognai/spaCy,explosion/spaCy,spacy-io/spaCy,aikramer2/spaCy,aikramer2/spaCy,explosion/spaCy,explosion/spaCy | spacy/tests/regression/test_issue1537.py | spacy/tests/regression/test_issue1537.py | '''Test Span.as_doc() doesn't segfault'''
from ...tokens import Doc
from ...vocab import Vocab
from ... import load as load_spacy
def test_issue1537():
    """Regression test for spaCy issue #1537: Span.as_doc() must not
    segfault and must return Doc objects."""
    # Build a three-sentence doc by hand, marking a sentence start on the
    # first token and after every '.' token.
    string = 'The sky is blue . The man is pink . The dog is purple .'
    doc = Doc(Vocab(), words=string.split())
    doc[0].sent_start = True
    for word in doc[1:]:
        if word.nbor(-1).text == '.':
            word.sent_start = True
        else:
            word.sent_start = False

    sents = list(doc.sents)
    sent0 = sents[0].as_doc()
    sent1 = sents[1].as_doc()
    assert isinstance(sent0, Doc)
    assert isinstance(sent1, Doc)
# Currently segfaulting, due to l_edge and r_edge misalignment
#def test_issue1537_model():
# nlp = load_spacy('en')
# doc = nlp(u'The sky is blue. The man is pink. The dog is purple.')
# sents = [s.as_doc() for s in doc.sents]
# print(list(sents[0].noun_chunks))
# print(list(sents[1].noun_chunks))
| mit | Python | |
358ea3780ea5cbb165357f8089c4aa23aab7de73 | test route53. | jonhadfield/acli,jonhadfield/acli | tests/test_route53.py | tests/test_route53.py | from __future__ import (absolute_import, print_function, unicode_literals)
from acli.output.route53 import (output_route53_list, output_route53_info)
from acli.services.route53 import (route53_list, route53_info)
from acli.config import Config
from moto import mock_route53
import pytest
from boto3.session import Session
session = Session(region_name="eu-west-1")
@pytest.yield_fixture(scope='function')
def route53_zone():
    """Route53 mock service with one hosted zone (runs under moto)."""
    mock = mock_route53()
    mock.start()
    client = session.client('route53')
    client.create_hosted_zone(Name="testdns.aws.com", CallerReference='auniqueref', HostedZoneConfig={'Comment': 'string', 'PrivateZone': False})
    yield client.list_hosted_zones()
    mock.stop()

# AWS's documented example credentials — not real secrets.
config = Config(cli_args={'--region': 'eu-west-1',
                          '--access_key_id': 'AKIAIOSFODNN7EXAMPLE',
                          '--secret_access_key': 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY'})

def test_elb_list_service(route53_zone):
    """route53_list exits (SystemExit) after printing the zone listing.

    NOTE(review): the function name says "elb" but it exercises route53;
    consider renaming.
    """
    with pytest.raises(SystemExit):
        assert route53_list(aws_config=config)
| mit | Python | |
b6b239cf16849890434a31755f3f3d2b8e510a95 | add benchmark comparing to SQLAlchemy, only 12x faster for simple select :) | mahmoud/_norm,mahmoud/_norm | benchmark.py | benchmark.py | import time
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey
from sqlalchemy.sql import select
from norm import SELECT
# Minimal two-table schema shared by both benchmark functions.
metadata = MetaData()

users = Table(
    'users', metadata,
    Column('id', Integer, primary_key=True),
    Column('name', String),
    Column('fullname', String))

addresses = Table(
    'addresses', metadata,
    Column('id', Integer, primary_key=True),
    Column('user_id', None, ForeignKey('users.id')),
    Column('email_address', String, nullable=False))

def sqlalchemy_bench():
    """Render a users/addresses join with SQLAlchemy and return the SQL."""
    s = select([users, addresses], users.c.id == addresses.c.user_id)
    return str(s)
def norm_bench():
    """Render the equivalent join with norm's SELECT builder."""
    s = (SELECT('users.id',
                'users.name',
                'users.fullname',
                'addresses.id',
                'addresses.user_id',
                'addresses.email_address')
         .FROM('users')
         .JOIN('addresses', ON='users.id = addresses.user_id'))
    return s.query
def time_it(f, iterations=50000):
    """Return the wall-clock seconds taken to call *f* *iterations* times.

    The iteration count is a parameter (defaulting to the original 50000)
    so callers can trade precision for speed.
    """
    start = time.time()
    # range works on both Python 2 and 3 (the original xrange is Py2-only).
    for _ in range(iterations):
        f()
    return time.time() - start
def run_benchmark():
    # Print the time for 50000 renders of each query builder
    # (Python 2 print statements).
    print 'SQLAlchemy', time_it(sqlalchemy_bench)
    print 'Norm', time_it(norm_bench)

if __name__ == '__main__':
    run_benchmark()
| bsd-3-clause | Python | |
28801ac8f38e8560c17b2da57559762ecd5e06ca | copy babel.plural doctests as unit tests | jmagnusson/babel,lepistone/babel,iamshubh22/babel,mitsuhiko/babel,gutsy/babel,masklinn/babel,hanteng/babel,iamshubh22/babel,mbirtwell/babel,mitsuhiko/babel,nickretallack/babel,srisankethu/babel,jespino/babel,javacruft/babel,python-babel/babel,xlevus/babel,srisankethu/babel,prmtl/babel,xlevus/babel,yoloseem/babel,felixonmars/babel,st4lk/babel,python-babel/babel,iamshubh22/babel,upman/babel,moreati/babel,javacruft/babel,mbirtwell/babel,julen/babel,felixonmars/babel,julen/babel,lepistone/babel,skybon/babel,nickretallack/babel,moreati/babel,nandoflorestan/babel,KIT-XXI/babel,gutsy/babel,nickretallack/babel,mbirtwell/babel,upman/babel,mgax/babel,gutsy/babel,hanteng/babel,yoloseem/babel,masklinn/babel,mitsuhiko/babel,upman/babel,julen/babel,javacruft/babel,prmtl/babel,st4lk/babel,felixonmars/babel,KIT-XXI/babel,jespino/babel,prmtl/babel,nandoflorestan/babel,yoloseem/babel,srisankethu/babel,nandoflorestan/babel,mgax/babel,hanteng/babel,KIT-XXI/babel,skybon/babel,st4lk/babel,xlevus/babel,skybon/babel,jmagnusson/babel,jmagnusson/babel,masklinn/babel,lepistone/babel,moreati/babel | tests/test_plural.py | tests/test_plural.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
import doctest
import unittest
from babel import plural
def test_plural_rule():
    """PluralRule categorizes numbers and exposes its source rules.

    This was declared as a ``class``, so the asserts ran once at import
    time instead of as a collected test; ``def`` restores the intended
    behaviour.
    """
    rule = plural.PluralRule({'one': 'n is 1'})
    assert rule(1) == 'one'
    assert rule(2) == 'other'

    rule = plural.PluralRule({'one': 'n is 1'})
    assert rule.rules == {'one': 'n is 1'}
def test_to_javascript():
    """A plural rule compiles to the expected JavaScript function."""
    assert (plural.to_javascript({'one': 'n is 1'})
            == "(function(n) { return (n == 1) ? 'one' : 'other'; })")

def test_to_python():
    """Compiled Python rule functions honor 'is' and 'in' conditions."""
    func = plural.to_python({'one': 'n is 1', 'few': 'n in 2..4'})
    assert func(1) == 'one'
    assert func(3) == 'few'
    func = plural.to_python({'one': 'n in 1,11', 'few': 'n in 3..10,13..19'})
    assert func(11) == 'one'
    assert func(15) == 'few'

def test_to_gettext():
    """Rules compile to a gettext Plural-Forms expression."""
    assert (plural.to_gettext({'one': 'n is 1', 'two': 'n is 2'})
            == 'nplurals=3; plural=((n == 2) ? 1 : (n == 1) ? 0 : 2)')

def test_in_range_list():
    """in_range_list: integer membership in closed ranges only."""
    assert plural.in_range_list(1, [(1, 3)])
    assert plural.in_range_list(3, [(1, 3)])
    assert plural.in_range_list(3, [(1, 3), (5, 8)])
    assert not plural.in_range_list(1.2, [(1, 4)])
    assert not plural.in_range_list(10, [(1, 4)])
    assert not plural.in_range_list(10, [(1, 4), (6, 8)])

def test_within_range_list():
    """within_range_list: also accepts non-integers inside the ranges."""
    assert plural.within_range_list(1, [(1, 3)])
    assert plural.within_range_list(1.0, [(1, 3)])
    assert plural.within_range_list(1.2, [(1, 4)])
    assert plural.within_range_list(8.8, [(1, 4), (7, 15)])
    assert not plural.within_range_list(10, [(1, 4)])
    assert not plural.within_range_list(10.5, [(1, 4), (20, 30)])

def test_cldr_modulo():
    """CLDR modulo keeps the sign of the dividend."""
    assert plural.cldr_modulo(-3, 5) == -3
    assert plural.cldr_modulo(-3, -5) == -3
    assert plural.cldr_modulo(3, 5) == 3
def suite():
suite = unittest.TestSuite()
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| # -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
import doctest
import unittest
from babel import plural
def suite():
suite = unittest.TestSuite()
suite.addTest(doctest.DocTestSuite(plural))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| bsd-3-clause | Python |
e3636bd897fbd7af4aab5b501269a55a89de29a0 | Add public view tests | nickfrostatx/corral,nickfrostatx/corral,nickfrostatx/corral | tests/test_public.py | tests/test_public.py | # -*- coding: utf-8 -*-
"""Test the public routes."""
from corral.app import create_app
def test_root():
app = create_app()
app.config['AUTH_KEY'] = 'secretpassword'
with app.test_client() as c:
rv = c.get('/')
assert b'<h1>Login</h1>' in rv.data
assert rv.status_code == 200
rv = c.get('/', headers={'Cookie': 'key=abc'})
assert b'<h1>Login</h1>' in rv.data
assert rv.status_code == 200
rv = c.get('/', headers={'Cookie': 'key=secretpassword'})
assert b'>Authenticated</h1>' in rv.data
assert rv.status_code == 200
| mit | Python | |
71b6246dda3e4812490a5c2936eac44e063806c0 | Add tests for sonify submodule | faroit/mir_eval,faroit/mir_eval,bmcfee/mir_eval,craffel/mir_eval,bmcfee/mir_eval,rabitt/mir_eval,craffel/mir_eval,rabitt/mir_eval | tests/test_sonify.py | tests/test_sonify.py | """ Unit tests for sonification methods """
import mir_eval
import numpy as np
def test_clicks():
# Test output length for a variety of parameter settings
for times in [np.array([1.]), np.arange(10)*1.]:
for fs in [8000, 44100]:
click_signal = mir_eval.sonify.clicks(times, fs)
assert len(click_signal) == times.max()*fs + int(fs*.1) + 1
click_signal = mir_eval.sonify.clicks(times, fs, length=1000)
assert len(click_signal) == 1000
click_signal = mir_eval.sonify.clicks(
times, fs, click=np.zeros(1000))
assert len(click_signal) == times.max()*fs + 1000 + 1
def test_time_frequency():
# Test length for different inputs
for fs in [8000, 44100]:
signal = mir_eval.sonify.time_frequency(
np.random.standard_normal((100, 1000)), np.arange(1, 101),
np.linspace(0, 10, 1000), fs)
assert len(signal) == 10*fs
signal = mir_eval.sonify.time_frequency(
np.random.standard_normal((100, 1000)), np.arange(1, 101),
np.linspace(0, 10, 1000), fs, length=fs*11)
assert len(signal) == 11*fs
def test_chroma():
for fs in [8000, 44100]:
signal = mir_eval.sonify.chroma(
np.random.standard_normal((12, 1000)),
np.linspace(0, 10, 1000), fs)
assert len(signal) == 10*fs
def test_chords():
for fs in [8000, 44100]:
intervals = np.array([np.arange(10), np.arange(1, 11)]).T
signal = mir_eval.sonify.chords(
['C', 'C:maj', 'D:min7', 'E:min', 'C#', 'C', 'C', 'C', 'C', 'C'],
intervals, fs)
assert len(signal) == 10*fs
| mit | Python | |
894aeb1485614f5676410f99b02ef4cb8e9ef8e3 | create NLP toolset | agbs2k8/toolbelt_dev | toolbelt/nlp_tools.py | toolbelt/nlp_tools.py | # -*- coding: utf-8 -*-
import string
import re
import nltk
from nltk.corpus import stopwords
from .utils import validate_str
my_stopwords = stopwords.words('english')
stemmer = nltk.stem.porter.SnowballStemmer()
def remove_punctuation(text):
"""
Simple function that will take a string or list of strings, and return them in the
same format with all punctuation marks removed
"""
p_translator = str.maketrans('', '', string.punctuation+'’–')
if isinstance(text, str):
return text.translate(p_translator)
elif isinstance(text, list):
return [x.translate(p_translator) if isinstance(x, str) else x for x in text]
else:
return text
def remove_digits(text):
"""
Simple function that will take a string or list of strings, and return them in the
same format with all numerical digits removed
"""
d_translator = str.maketrans('', '', string.digits)
if isinstance(text, str):
return text.translate(d_translator)
elif isinstance(text, list):
return [x.translate(d_translator) if isinstance(x, str) else x for x in text]
else:
return text
@validate_str
def tokenize_and_stem(text):
"""
Given a string, it returns a list of stemmed tokens i.e. the
derivative of each word, as a list of strings
"""
# first tokenize by sentence, then by word to ensure that punctuation is caught as it's own token
tokens = [word.lower() for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]
filtered_tokens = []
# filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)
for token in tokens:
if re.search('[a-zA-Z]', token):
filtered_tokens.append(token)
stems = [stemmer.stem(t) for t in filtered_tokens]
return stems
@validate_str
def tokenize_only(text):
"""
Given a string, it returns a list of tokens i.e. the words, as a list of strings
"""
# first tokenize by sentence, then by word to ensure that punctuation is caught as it's own token
tokens = [word.lower() for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]
filtered_tokens = []
# filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)
for token in tokens:
if re.search('[a-zA-Z]', token):
filtered_tokens.append(token)
return filtered_tokens
@validate_str
def n_grams(text, ngram=2):
"""
Uses NLTK functions to return a dictionary of all n-grams from a set of text
and a count of the iterations of those ngrams
N-gram = a set of words, of n length, that appear in the given text.
The ngram words, as a tuple, are the dict keys, and the values are the number of iterations
of that ngram found in the text
"""
return dict(nltk.FreqDist(nltk.ngrams(nltk.word_tokenize(remove_punctuation(remove_digits(text.lower()))), ngram)))
@validate_str
def sentences(text):
"""
Uses NLTK functions to return a dictionary of all sentences from a text and the
count of iterations of those sentences.
The dict key is the sentence, and the value is the number of iterations of that sentence
"""
return dict(nltk.FreqDist(remove_punctuation(nltk.sent_tokenize(remove_digits(text.lower())))))
| mit | Python | |
0050d1ca6f0fa15462231cf5531d494277c1f8ca | move events from claw to claw-scripts | dankilman/claw-scripts,ChenRoth/claw-scripts | scripts/events.py | scripts/events.py | #! /usr/bin/env claw
########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import sys
import json
from cloudify_cli.execution_events_fetcher import ExecutionEventsFetcher
from claw import cosmo
def script(execution_id,
output=None,
batch_size=1000,
include_logs=False,
timeout=3600):
"""Dump events of an execution in json format."""
fetcher = ExecutionEventsFetcher(execution_id=execution_id,
client=cosmo.client,
batch_size=batch_size,
include_logs=include_logs)
class Handler(object):
def __init__(self):
self.events = []
def handle(self, batch):
self.events += batch
cosmo.logger.debug('Fetched: {0}'.format(len(self.events)))
handler = Handler()
fetcher.fetch_and_process_events(events_handler=handler.handle,
timeout=timeout)
events_json = json.dumps(handler.events)
if not output:
sys.stdout.write(events_json)
else:
with open(output, 'w') as f:
f.write(events_json)
| apache-2.0 | Python | |
c3fd15307b20a891ef194221c51ec1897355fd29 | Use built-in print() instead of print statement | openstack/tempest,citrix-openstack-build/tempest,vedujoshi/tempest,itskewpie/tempest,Tesora/tesora-tempest,izadorozhna/tempest,Vaidyanath/tempest,cisco-openstack/tempest,BeenzSyed/tempest,Juraci/tempest,armando-migliaccio/tempest,eggmaster/tempest,jaspreetw/tempest,redhat-cip/tempest,afaheem88/tempest,manasi24/jiocloud-tempest-qatempest,sebrandon1/tempest,manasi24/jiocloud-tempest-qatempest,neerja28/Tempest,tonyli71/tempest,NexusIS/tempest,neerja28/Tempest,JioCloud/tempest,adkerr/tempest,cisco-openstack/tempest,akash1808/tempest,nunogt/tempest,armando-migliaccio/tempest,Juraci/tempest,CiscoSystems/tempest,vedujoshi/os_tempest,ntymtsiv/tempest,izadorozhna/tempest,openstack/tempest,tudorvio/tempest,jamielennox/tempest,vmahuli/tempest,vmahuli/tempest,zsoltdudas/lis-tempest,ebagdasa/tempest,afaheem88/tempest,alinbalutoiu/tempest,JioCloud/tempest,manasi24/tempest,dkalashnik/tempest,Juniper/tempest,rzarzynski/tempest,FujitsuEnablingSoftwareTechnologyGmbH/tempest,cloudbase/lis-tempest,Lilywei123/tempest,rakeshmi/tempest,rakeshmi/tempest,roopali8/tempest,queria/my-tempest,ntymtsiv/tempest,hpcloud-mon/tempest,danielmellado/tempest,bigswitch/tempest,masayukig/tempest,vedujoshi/tempest,roopali8/tempest,bigswitch/tempest,akash1808/tempest,danielmellado/tempest,flyingfish007/tempest,queria/my-tempest,itskewpie/tempest,hpcloud-mon/tempest,adkerr/tempest,zsoltdudas/lis-tempest,tudorvio/tempest,LIS/lis-tempest,pczerkas/tempest,sebrandon1/tempest,xbezdick/tempest,BeenzSyed/tempest,masayukig/tempest,Juniper/tempest,FujitsuEnablingSoftwareTechnologyGmbH/tempest,dkalashnik/tempest,rzarzynski/tempest,yamt/tempest,eggmaster/tempest,Tesora/tesora-tempest,Mirantis/tempest,pczerkas/tempest,flyingfish007/tempest,vedujoshi/os_tempest,NexusIS/tempest,afaheem88/tempest_neutron,Lilywei123/tempest,alinbalutoiu/tempest,hayderimran7/tempest,citrix-openstack-build/tempest,jaspreetw/tempest,xbezdick/tempest,a
faheem88/tempest_neutron,citrix-openstack/build-tempest,nunogt/tempest,yamt/tempest,pandeyop/tempest,varunarya10/tempest,citrix-openstack/build-tempest,redhat-cip/tempest,manasi24/tempest,tonyli71/tempest,LIS/lis-tempest,Vaidyanath/tempest,pandeyop/tempest,jamielennox/tempest,Mirantis/tempest,hayderimran7/tempest,CiscoSystems/tempest,cloudbase/lis-tempest,ebagdasa/tempest,varunarya10/tempest | tools/install_venv.py | tools/install_venv.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# flake8: noqa
# Copyright 2010 OpenStack, LLC
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Installation script for Tempest's development virtualenv."""
import os
import sys
import install_venv_common as install_venv
class CentOS(install_venv.Fedora):
"""This covers CentOS."""
def post_process(self):
if not self.check_pkg('openssl-devel'):
self.yum.install('openssl-devel', check_exit_code=False)
def print_help():
"""This prints Help."""
help = """
Tempest development environment setup is complete.
Tempest development uses virtualenv to track and manage Python dependencies
while in development and testing.
To activate the Tempest virtualenv for the extent of your current shell
session you can run:
$ source .venv/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
$ tools/with_venv.sh <your command>
Also, make test will automatically use the virtualenv.
"""
print(help)
def main(argv):
root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
venv = os.path.join(root, '.venv')
pip_requires = os.path.join(root, 'requirements.txt')
test_requires = os.path.join(root, 'test-requirements.txt')
py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
project = 'Tempest'
install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
py_version, project)
if os.path.exists('/etc/redhat-release'):
with open('/etc/redhat-release') as rh_release:
if 'CentOS' in rh_release.read():
install_venv.Fedora = CentOS
options = install.parse_args(argv)
install.check_python_version()
install.check_dependencies()
install.create_virtualenv(no_site_packages=options.no_site_packages)
install.install_dependencies()
install.post_process()
print_help()
if __name__ == '__main__':
main(sys.argv)
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# flake8: noqa
# Copyright 2010 OpenStack, LLC
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Installation script for Tempest's development virtualenv."""
import os
import sys
import install_venv_common as install_venv
class CentOS(install_venv.Fedora):
"""This covers CentOS."""
def post_process(self):
if not self.check_pkg('openssl-devel'):
self.yum.install('openssl-devel', check_exit_code=False)
def print_help():
"""This prints Help."""
help = """
Tempest development environment setup is complete.
Tempest development uses virtualenv to track and manage Python dependencies
while in development and testing.
To activate the Tempest virtualenv for the extent of your current shell
session you can run:
$ source .venv/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
$ tools/with_venv.sh <your command>
Also, make test will automatically use the virtualenv.
"""
print help
def main(argv):
root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
venv = os.path.join(root, '.venv')
pip_requires = os.path.join(root, 'requirements.txt')
test_requires = os.path.join(root, 'test-requirements.txt')
py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
project = 'Tempest'
install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
py_version, project)
if os.path.exists('/etc/redhat-release'):
with open('/etc/redhat-release') as rh_release:
if 'CentOS' in rh_release.read():
install_venv.Fedora = CentOS
options = install.parse_args(argv)
install.check_python_version()
install.check_dependencies()
install.create_virtualenv(no_site_packages=options.no_site_packages)
install.install_dependencies()
install.post_process()
print_help()
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 | Python |
74e3e5a8fdbc5f9a6ee71f2ad1de4fd8a8807b5a | Add migration for default last polled date. | nirmeshk/oh-mainline,eeshangarg/oh-mainline,willingc/oh-mainline,Changaco/oh-mainline,openhatch/oh-mainline,ojengwa/oh-mainline,sudheesh001/oh-mainline,nirmeshk/oh-mainline,vipul-sharma20/oh-mainline,heeraj123/oh-mainline,ojengwa/oh-mainline,nirmeshk/oh-mainline,heeraj123/oh-mainline,eeshangarg/oh-mainline,onceuponatimeforever/oh-mainline,SnappleCap/oh-mainline,willingc/oh-mainline,eeshangarg/oh-mainline,mzdaniel/oh-mainline,ojengwa/oh-mainline,onceuponatimeforever/oh-mainline,vipul-sharma20/oh-mainline,sudheesh001/oh-mainline,vipul-sharma20/oh-mainline,Changaco/oh-mainline,moijes12/oh-mainline,mzdaniel/oh-mainline,onceuponatimeforever/oh-mainline,SnappleCap/oh-mainline,mzdaniel/oh-mainline,ojengwa/oh-mainline,ehashman/oh-mainline,campbe13/openhatch,ehashman/oh-mainline,ojengwa/oh-mainline,eeshangarg/oh-mainline,mzdaniel/oh-mainline,sudheesh001/oh-mainline,waseem18/oh-mainline,heeraj123/oh-mainline,campbe13/openhatch,openhatch/oh-mainline,eeshangarg/oh-mainline,openhatch/oh-mainline,vipul-sharma20/oh-mainline,waseem18/oh-mainline,Changaco/oh-mainline,campbe13/openhatch,openhatch/oh-mainline,vipul-sharma20/oh-mainline,campbe13/openhatch,waseem18/oh-mainline,jledbetter/openhatch,jledbetter/openhatch,SnappleCap/oh-mainline,jledbetter/openhatch,heeraj123/oh-mainline,ehashman/oh-mainline,waseem18/oh-mainline,mzdaniel/oh-mainline,moijes12/oh-mainline,willingc/oh-mainline,jledbetter/openhatch,openhatch/oh-mainline,onceuponatimeforever/oh-mainline,ehashman/oh-mainline,SnappleCap/oh-mainline,waseem18/oh-mainline,mzdaniel/oh-mainline,nirmeshk/oh-mainline,onceuponatimeforever/oh-mainline,moijes12/oh-mainline,willingc/oh-mainline,Changaco/oh-mainline,heeraj123/oh-mainline,Changaco/oh-mainline,ehashman/oh-mainline,campbe13/openhatch,sudheesh001/oh-mainline,SnappleCap/oh-mainline,mzdaniel/oh-mainline,nirmeshk/oh-mainline,jledbetter/openhatch,willingc/oh-mainline,moijes12/oh-mainline,moijes12/
oh-mainline,sudheesh001/oh-mainline | mysite/search/migrations/0034_default_last_polled_date.py | mysite/search/migrations/0034_default_last_polled_date.py |
from south.db import db
from django.db import models
from mysite.search.models import *
class Migration:
def forwards(self, orm):
# Changing field 'Bug.last_polled'
# (to signature: django.db.models.fields.DateTimeField(default=datetime.datetime(1970, 1, 1, 0, 0)))
db.alter_column('search_bug', 'last_polled', orm['search.bug:last_polled'])
def backwards(self, orm):
# Changing field 'Bug.last_polled'
# (to signature: django.db.models.fields.DateTimeField())
db.alter_column('search_bug', 'last_polled', orm['search.bug:last_polled'])
models = {
'search.bug': {
'as_appears_in_distribution': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'bize_size_tag_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'canonical_bug_link': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'concerns_just_documentation': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'date_reported': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'good_for_newcomers': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_polled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'}),
'last_touched': ('django.db.models.fields.DateTimeField', [], {}),
'looks_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'people_involved': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'submitter_realname': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'submitter_username': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'search.hitcountcache': {
'hashed_query': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'hit_count': ('django.db.models.fields.IntegerField', [], {})
},
'search.project': {
'cached_contributor_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'date_icon_was_fetched_from_ohloh': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'icon_for_profile': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_for_search_result': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_raw': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_smaller_for_badge': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'logo_contains_name': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True'})
}
}
complete_apps = ['search']
| agpl-3.0 | Python | |
c76775b244ccd07a73ec0d894f5e940ae673dd73 | implement ordereddict | DragonRoman/ovirt-engine-sdk,DragonRoman/ovirt-engine-sdk,DragonRoman/ovirt-engine-sdk | src/ovirtsdk/utils/ordereddict.py | src/ovirtsdk/utils/ordereddict.py |
from UserDict import UserDict
import thread
class OrderedDict(UserDict):
"""A dictionary preserving insert order"""
def __init__(self, dict=None):
self._keys = []
UserDict.__init__(self, dict)
self.__plock = thread.allocate_lock()
self.__rlock = thread.allocate_lock()
def clear(self):
"""Clears the dictionary"""
with self.__plock:
UserDict.clear(self)
self._keys = []
def copy(self):
"""Copying dictionary"""
dict = UserDict.copy(self)
dict._keys = self._keys[:]
return dict
def popitem(self):
"""Pops last item from the dictionary"""
with self.__plock:
if len(self._keys) == 0:
raise KeyError('Empty')
key = self._keys[-1]
val = self[key]
del self[key]
return (key, val)
def setdefault(self, key, failobj=None):
"""Sets default for dict items"""
with self.__plock:
if key not in self._keys:
self._keys.append(key)
return UserDict.setdefault(self, key, failobj)
def update(self, dict):
"""Updates dictionary with new items"""
with self.__rlock:
UserDict.update(self, dict)
for key in dict.keys():
if key not in self._keys:
self._keys.append(key)
def __delitem__(self, key):
with self.__plock:
UserDict.__delitem__(self, key)
self._keys.remove(key)
def __setitem__(self, key, item):
with self.__plock:
UserDict.__setitem__(self, key, item)
if key not in self._keys:
self._keys.append(key)
def values(self):
"""Returns values in a same order they where inserted"""
with self.__plock:
return map(self.get, self._keys)
def items(self):
"""Returns items in a same order they where inserted"""
with self.__plock:
return map(lambda key: (key, self[key]), self._keys)
def keys(self):
"""Returns keys in a same order they where inserted"""
with self.__plock:
return self._keys[:]
| apache-2.0 | Python | |
1f225b06eed8e7c3266c9d9c48e7b5e86ee677a0 | Test ssh | cindithompson/Clique-Finding-With-Patterns | run_time2.py | run_time2.py | import cProfile
cProfile.run("core_alg.process_from_file('/Users/cat/data/maxmal-cliques/turan30_10')") | mit | Python | |
f46361d1009665e87543b56d69212b04b9b14993 | Add scripts which Define a function to compute color histogram features | aguijarro/SelfDrivingCar | VehicleDetectionTracking/histo_colors.py | VehicleDetectionTracking/histo_colors.py | # Code given by Udacity, complete by Andres Guijarro
# Purpose: Define a function to compute color histogram features
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
image = mpimg.imread('cutout1.jpg')
# Define a function to compute color histogram features
def color_hist(img, nbins=32, bins_range=(0, 256)):
# Compute the histogram of the RGB channels separately
rhist = np.histogram(image[:, :, 0], bins=32, range=(0, 256))
ghist = np.histogram(image[:, :, 1], bins=32, range=(0, 256))
bhist = np.histogram(image[:, :, 2], bins=32, range=(0, 256))
# Generating bin centers
bin_edges = rhist[1]
bin_centers = (bin_edges[1:] + bin_edges[0:len(bin_edges) - 1]) / 2
# Concatenate the histograms into a single feature vector
hist_features = np.concatenate((rhist[0], ghist[0], bhist[0]))
# Return the individual histograms, bin_centers and feature vector
return rhist, ghist, bhist, bin_centers, hist_features
def main():
rh, gh, bh, bincen, feature_vec = color_hist(image,
nbins=32,
bins_range=(0, 256))
# Plot a figure with all three bar charts
if rh is not None:
fig = plt.figure(figsize=(12, 3))
plt.subplot(131)
plt.bar(bincen, rh[0])
plt.xlim(0, 256)
plt.title('R Histogram')
plt.subplot(132)
plt.bar(bincen, gh[0])
plt.xlim(0, 256)
plt.title('G Histogram')
plt.subplot(133)
plt.bar(bincen, bh[0])
plt.xlim(0, 256)
plt.title('B Histogram')
fig.tight_layout()
plt.show()
else:
print('Your function is returning None for at least one variable...')
if __name__ == '__main__':
main()
| mit | Python | |
ea8bafeb9a48ebaf8da9d69e107089cd26383e66 | patch handler redesigned | MaxMorais/frappe,rohitwaghchaure/vestasi-frappe,deveninfotech/deven-frappe,gangadharkadam/smrtfrappe,gangadharkadam/letzfrappe,gangadharkadam/smrtfrappe,rmehta/frappe,mbauskar/frappe,indictranstech/ebuy-now-frappe,pombredanne/frappe,indictranstech/reciphergroup-frappe,indictranstech/omnitech-frappe,StrellaGroup/frappe,gangadhar-kadam/prjlib,pawaranand/phr-frappe,letzerp/framework,chdecultot/frappe,indictranstech/reciphergroup-frappe,rohitw1991/adbwnf,mbauskar/omnitech-frappe,saurabh6790/medlib,gangadharkadam/letzfrappe,ashokrajbathu/secondrep,letzerp/framework,saurabh6790/omnit-lib,indictranstech/phr-frappe,adityahase/frappe,saurabh6790/test-frappe,deveninfotech/deven-frappe,saurabh6790/test-frappe,gangadhar-kadam/verve_test_frappe,rohitw1991/smarttailorfrappe,mbauskar/omnitech-demo-frappe,gangadhar-kadam/church-wnframework,rohitwaghchaure/New_Theme_frappe,webnotes/wnframework,saurabh6790/med_lib_rels,rohitwaghchaure/frappe,saurabh6790/med_lib_test,gangadharkadam/vervefrappe,indictranstech/omnitech-frappe,bcornwellmott/frappe,saurabh6790/ON-RISLIB,adityahase/frappe,mbauskar/omnitech-frappe,gangadhar-kadam/hrfrappe,saurabh6790/omnisys-lib,drukhil/frappe,Tejal011089/digitales_frappe,ShashaQin/frappe,cadencewatches/frappe,gangadhar-kadam/helpdesk-frappe,mhbu50/frappe,yashodhank/frappe,rohitwaghchaure/frappe-alec,tundebabzy/frappe,indictranstech/omnitech-frappe,gangadhar-kadam/verve_live_frappe,praba230890/frappe,rmehta/frappe,vCentre/vFRP-6233,gangadhar-kadam/sapphite_lib,drukhil/frappe,bcornwellmott/frappe,indictranstech/Das_frappe,saurabh6790/omnitech-libs,almeidapaulopt/frappe,aboganas/frappe,indictranstech/frappe-digitales,saurabh6790/-aimobilize-lib,rohitwaghchaure/vestasi-frappe,pawaranand/phr_frappe,adityahase/frappe,nerevu/frappe,saurabh6790/omnit-lib,saurabh6790/test-med-lib,erpletzerp/letzerpcore,pdvyas/frappe,neilLasrado/frappe,MaxMorais/frappe,sbktechnology/trufil-frappe,pranalik/parjanal
ib,suyashphadtare/propshikhari-frappe,jevonearth/frappe,gangadharkadam/saloon_frappe,rohitwaghchaure/frappe,gangadharkadam/v5_frappe,gangadharkadam/saloon_frappe,bohlian/frappe,rohitw1991/smarttailorfrappe,saurabh6790/tru_lib_back,saurabh6790/phr-frappe,letzerp/framework,gangadhar-kadam/lgnlvefrape,bohlian/frappe,saurabh6790/alert-med-lib,gangadharkadam/saloon_frappe,anandpdoshi/frappe,indictranstech/tele-frappe,chdecultot/frappe,gangadhar-kadam/sms-wnframework,saurabh6790/OFF-RISLIB,gangadhar-kadam/helpdesk-frappe,jevonearth/frappe,indictranstech/frappe-digitales,sbktechnology/sap_frappe,Amber-Creative/amber-frappe,MaxMorais/frappe,gangadharkadam/saloon_frappe_install,saurabh6790/omnitech-libs,BhupeshGupta/frappe,indictranstech/reciphergroup-frappe,ShashaQin/frappe,indictranstech/tele-frappe,hatwar/buyback-frappe,maxtorete/frappe,aboganas/frappe,ESS-LLP/frappe,saurabh6790/med_lib_rels,gangadharkadam/letzfrappe,PriyaShitole/MedViger-lib,praba230890/frappe,suyashphadtare/sajil-final-frappe,indictranstech/fbd_frappe,gangadharkadam/vervefrappe,yashodhank/frappe,gangadhar-kadam/laganfrappe,gangadharkadam/shfr,deveninfotech/deven-frappe,bohlian/frappe,saurabh6790/omnitech-lib,mbauskar/Das_frappe,saurabh6790/ON-RISLIB,rmehta/frappe,webnotes/wnframework,elba7r/frameworking,gangadharkadam/stfrappe,gangadharkadam/frappecontribution,deveninfotech/deven-frappe,rohitw1991/smartfrappe,saguas/frappe,gangadharkadam/v6_frappe,elba7r/builder,pdvyas/frappe,indictranstech/ebuy-now-frappe,mbauskar/phr-frappe,PriyaShitole/MedViger-lib,gangadharkadam/v4_frappe,rmehta/frappe,reachalpineswift/frappe-bench,gangadharkadam/vervefrappe,gangadhar-kadam/verve_frappe,sbkolate/sap_frappe_v6,mhbu50/frappe,rohitwaghchaure/frappe,rohitw1991/smartfrappe,suyashphadtare/sajil-frappe,shitolepriya/test-frappe,saurabh6790/med_test_lib,pombredanne/frappe,Tejal011089/medsyn2_lib,rohitwaghchaure/frappe-digitales,gangadharkadam/v6_frappe,saurabh6790/omnisys-lib,bcornwellmott/frappe,saurabh6790/phr-frappe,saura
bh6790/medsynaptic-lib,sbktechnology/trufil-frappe,Tejal011089/digitales_frappe,indictranstech/frappe,saurabh6790/frappe,RicardoJohann/frappe,gangadharkadam/v5_frappe,vqw/frappe,rohitwaghchaure/frappe_smart,rohitwaghchaure/frappe-digitales,indautgrp/frappe,saurabh6790/omn-lib,elba7r/frameworking,mbauskar/helpdesk-frappe,saurabh6790/omn-lib,cadencewatches/frappe,aboganas/frappe,gangadhar-kadam/smrterpfrappe,BhupeshGupta/frappe,indictranstech/osmosis-frappe,nerevu/frappe,elba7r/builder,pombredanne/frappe,gangadharkadam/frappecontribution,vCentre/vFRP-6233,mbauskar/omnitech-demo-frappe,elba7r/frameworking,saurabh6790/frappe,saurabh6790/test_final_med_lib,mbauskar/omnitech-demo-frappe,mbauskar/tele-frappe,RicardoJohann/frappe,saurabh6790/frappe,gangadharkadam/v5_frappe,reachalpineswift/frappe-bench,drukhil/frappe,nerevu/frappe,rohitw1991/innoworth-lib,tundebabzy/frappe,gangadhar-kadam/church-wnframework,indictranstech/internal-frappe,gangadhar-kadam/smrterpfrappe,saurabh6790/frappe,indictranstech/reciphergroup-frappe,indictranstech/frappe-digitales,mhbu50/frappe,gangadharkadam/vervefrappe,indictranstech/osmosis-frappe,StrellaGroup/frappe,rohitw1991/frappe,gangadhar-kadam/nassimlib,indictranstech/Das_frappe,saguas/frappe,saurabh6790/omni-libs,mbauskar/Das_frappe,frappe/frappe,MaxMorais/frappe,rohitwaghchaure/vestasi-frappe,saurabh6790/medsyn-lib1,saurabh6790/medsyn-lib,ESS-LLP/frappe,saurabh6790/omni-libs,gangadharkadam/vlinkfrappe,vCentre/vFRP-6233,indictranstech/phr-frappe,nerevu/frappe,maxtorete/frappe,mbauskar/frappe,reachalpineswift/frappe-bench,gangadharkadam/frappecontribution,hernad/frappe,tundebabzy/frappe,rohitw1991/latestadbwnf,saurabh6790/test_final_med_lib,indictranstech/ebuy-now-frappe,jevonearth/frappe,mbauskar/Das_frappe,saurabh6790/pow-lib,Amber-Creative/amber-frappe,saurabh6790/medsynaptic1-lib,vqw/frappe,saurabh6790/medsyn-lib,sbkolate/sap_frappe_v6,gangadharkadam/vlinkfrappe,hatwar/buyback-frappe,mhbu50/frappe,maxtorete/frappe,saurabh6790/test-med-lib
,indictranstech/frappe,drukhil/frappe,indictranstech/tele-frappe,reachalpineswift/frappe-bench,hatwar/buyback-frappe,rkawale/Internalhr-frappe,hernad/frappe,gangadhar-kadam/adb-wnf,mbauskar/Das_frappe,webnotes/wnframework,indictranstech/trufil-frappe,saurabh6790/med_new_lib,indictranstech/ebuy-now-frappe,saurabh6790/phr-frappe,shitolepriya/test-frappe,mbauskar/helpdesk-frappe,indautgrp/frappe,pranalik/parjanalib,neilLasrado/frappe,ShashaQin/frappe,gangadhar-kadam/laganfrappe,gangadharkadam/v6_frappe,saurabh6790/med_lib_test,pawaranand/phr_frappe,rohitw1991/frappe,bohlian/frappe,indictranstech/phr-frappe,neilLasrado/frappe,letzerp/framework,saurabh6790/test-frappe,anandpdoshi/frappe,gangadhar-kadam/adb-wnf,geo-poland/frappe,gangadharkadam/tailorfrappe,indictranstech/Das_frappe,rohitwaghchaure/vestasi-frappe,mbauskar/frappe,gangadharkadam/v4_frappe,saurabh6790/medsyn-lib1,gangadharkadam/v4_frappe,gangadhar-kadam/laganfrappe,gangadhar-kadam/lgnlvefrape,gangadhar-kadam/nassimlib,indictranstech/frappe,gangadhar-kadam/mtn-wnframework,pranalik/frappe-bb,vjFaLk/frappe,saurabh6790/trufil_lib,adityahase/frappe,hatwar/buyback-frappe,vjFaLk/frappe,gangadharkadam/johnfrappe,rohitw1991/latestadbwnf,gangadharkadam/office_frappe,paurosello/frappe,vjFaLk/frappe,indictranstech/frappe,sbktechnology/trufil-frappe,BhupeshGupta/frappe,rohitwaghchaure/frappe-digitales,elba7r/builder,indictranstech/trufil-frappe,rohitwaghchaure/frappe-alec,Amber-Creative/amber-frappe,praba230890/frappe,mbauskar/frappe,paurosello/frappe,gangadhar-kadam/verve_frappe,vqw/frappe,manassolanki/frappe,gangadharkadam/saloon_frappe,gangadharkadam/saloon_frappe_install,frappe/frappe,gangadharkadam/johnfrappe,saguas/frappe,paurosello/frappe,saurabh6790/phr-frappe,gangadharkadam/office_frappe,gangadhar-kadam/lgnlvefrape,indautgrp/frappe,elba7r/frameworking,mbauskar/phr-frappe,vjFaLk/frappe,Tejal011089/digitales_frappe,mbauskar/helpdesk-frappe,aboganas/frappe,suyashphadtare/propshikhari-frappe,geo-poland/frappe,pombred
anne/frappe,indictranstech/osmosis-frappe,pranalik/frappe-bb,paurosello/frappe,RicardoJohann/frappe,gangadhar-kadam/verve_frappe,geo-poland/frappe,gangadhar-kadam/prjlib,maxtorete/frappe,gangadhar-kadam/mtn-wnframework,saurabh6790/OFF-RISLIB,suyashphadtare/sajil-frappe,BhupeshGupta/frappe,erpletzerp/letzerpcore,RicardoJohann/frappe,indictranstech/omnitech-frappe,neilLasrado/frappe,rohitwaghchaure/New_Theme_frappe,praba230890/frappe,yashodhank/frappe,gangadhar-kadam/verve_live_frappe,gangadharkadam/office_frappe,almeidapaulopt/frappe,suyashphadtare/sajil-final-frappe,elba7r/builder,pawaranand/phr_frappe,gangadharkadam/stfrappe,manassolanki/frappe,saurabh6790/medsynaptic-lib,saurabh6790/aimobilize-lib-backup,gangadharkadam/v4_frappe,saguas/frappe,saurabh6790/trufil_lib,mbauskar/omnitech-demo-frappe,suyashphadtare/propshikhari-frappe,shitolepriya/test-frappe,erpletzerp/letzerpcore,hernad/frappe,tundebabzy/frappe,erpletzerp/letzerpcore,tmimori/frappe,gangadhar-kadam/sapphite_lib,gangadharkadam/v5_frappe,Tejal011089/digitales_frappe,indictranstech/internal-frappe,pawaranand/phr_frappe,hernad/frappe,gangadharkadam/saloon_frappe_install,sbkolate/sap_frappe_v6,gangadharkadam/v6_frappe,almeidapaulopt/frappe,sbktechnology/sap_frappe,ESS-LLP/frappe,mbauskar/phr-frappe,vCentre/vFRP-6233,indictranstech/frappe-digitales,gangadharkadam/frappecontribution,gangadhar-kadam/sms-wnframework,nabinhait/frappe,saurabh6790/med_new_lib,pranalik/frappe-bb,bcornwellmott/frappe,gangadharkadam/vlinkfrappe,rohitw1991/adbwnf,gangadharkadam/letzfrappe,tmimori/frappe,indictranstech/internal-frappe,saurabh6790/alert-med-lib,saurabh6790/-aimobilize-lib,saurabh6790/omnisys-lib,pranalik/frappe-bb,gangadharkadam/tailorfrappe,Amber-Creative/amber-frappe,StrellaGroup/frappe,pawaranand/phr-frappe,suyashphadtare/propshikhari-frappe,jevonearth/frappe,indictranstech/phr-frappe,gangadhar-kadam/mic-wnframework,mbauskar/tele-frappe,saurabh6790/med_test_lib,vqw/frappe,sbktechnology/sap_frappe,indictranstech/tele-
frappe,gangadharkadam/saloon_frappe_install,indictranstech/internal-frappe,sbktechnology/trufil-frappe,manassolanki/frappe,Tejal011089/medsyn2_lib,rkawale/Internalhr-frappe,gangadhar-kadam/verve_live_frappe,saurabh6790/pow-lib,rohitwaghchaure/frappe,gangadhar-kadam/helpdesk-frappe,gangadhar-kadam/helpdesk-frappe,rohitwaghchaure/frappe_smart,gangadhar-kadam/verve_live_frappe,indautgrp/frappe,tmimori/frappe,anandpdoshi/frappe,nabinhait/frappe,sbktechnology/sap_frappe,saurabh6790/tru_lib_back,ashokrajbathu/secondrep,ashokrajbathu/secondrep,mbauskar/omnitech-frappe,gangadharkadam/shfr,indictranstech/fbd_frappe,saurabh6790/medlib,ESS-LLP/frappe,indictranstech/Das_frappe,saurabh6790/aimobilize-lib-backup,yashodhank/frappe,indictranstech/trufil-frappe,gangadhar-kadam/verve_frappe,gangadhar-kadam/sapphite_lib,saurabh6790/test-frappe,rohitw1991/innoworth-lib,frappe/frappe,indictranstech/osmosis-frappe,suyashphadtare/sajil-frappe,gangadhar-kadam/verve_test_frappe,shitolepriya/test-frappe,saurabh6790/omnitech-lib,mbauskar/phr-frappe,pawaranand/phr-frappe,indictranstech/trufil-frappe,gangadhar-kadam/verve_test_frappe,mbauskar/omnitech-frappe,sbkolate/sap_frappe_v6,chdecultot/frappe,mbauskar/tele-frappe,ashokrajbathu/secondrep,chdecultot/frappe,anandpdoshi/frappe,almeidapaulopt/frappe,rohitwaghchaure/frappe-digitales,gangadhar-kadam/verve_test_frappe,ShashaQin/frappe,rohitwaghchaure/New_Theme_frappe,mbauskar/helpdesk-frappe,saurabh6790/medsynaptic1-lib,rohitwaghchaure/frappe-alec,indictranstech/fbd_frappe,suyashphadtare/sajil-final-frappe,gangadhar-kadam/hrfrappe,gangadhar-kadam/mic-wnframework,mbauskar/tele-frappe,manassolanki/frappe,saurabh6790/trufil_lib,tmimori/frappe,gangadharkadam/vlinkfrappe,indictranstech/fbd_frappe | py/webnotes/modules/patch_handler.py | py/webnotes/modules/patch_handler.py | # patch manager
#---------------
import webnotes
def run(patch_list, overwrite = 0, log_exception=1, conn = '', db_name = '', root_pwd = ''):
	"""Apply each patch module named in `patch_list` to the database.

	overwrite: when truthy, re-run patches even if already logged.
	log_exception: forwarded to execute_patch; errors are logged instead
	of propagating.
	conn: an existing webnotes db connection to reuse; otherwise a new
	root connection to `db_name` is opened with `root_pwd`.

	NOTE(review): execute_patch swallows failures (rollback + return), yet
	the loop below still inserts the patch into `__PatchLog` and commits --
	a failed patch is recorded as applied. Confirm whether intentional.
	"""
	# db connection
	if not conn:
		connect_db(db_name, root_pwd)
	else:
		webnotes.conn = conn
	# session: patches run as the Administrator user
	if not webnotes.session:
		webnotes.session = {'user':'Administrator'}
	# the special 'accounts' db never receives patches
	if webnotes.conn.cur_db_name=='accounts':
		return
	# skip patches already recorded in __PatchLog unless overwriting
	if not overwrite:
		patch_list = check_already_applied_patch(patch_list)
	for p in patch_list:
		webnotes.conn.begin()
		# execute patch
		execute_patch(p, log_exception)
		# update patch log table
		webnotes.conn.sql("insert into `__PatchLog` (patch, applied_on) values (%s, now())", p)
		webnotes.conn.commit()
		print "Patch: %s applied successfully..." % p
#-----------------------------------------------------
def execute_patch(p, log_exception):
	"""Import patch module `p` from the `patches` package and run its execute().

	When `log_exception` is truthy, any error is appended to the patch log
	and the transaction rolled back instead of propagating; otherwise
	exceptions bubble up to the caller.

	NOTE(review): exec/eval on the patch name assumes `p` comes from a
	trusted, internal patch list -- never pass user input here.
	"""
	if log_exception:
		try:
			exec('from patches import ' + p)
			eval(p).execute()
		except Exception, e:
			write_log()
			webnotes.conn.rollback()
			return
	else:
		exec('from patches import ' + p)
		eval(p).execute()
#-----------------------------------------------------
def check_already_applied_patch(patch_list):
	"""Return the subset of `patch_list` not yet recorded in `__PatchLog`."""
	# Make sure the log table exists before querying it.
	webnotes.conn.sql("create table if not exists `__PatchLog` (patch TEXT, applied_on DATETIME)")
	applied = set(
		row[0] for row in webnotes.conn.sql("select distinct patch from `__PatchLog`"))
	# Preserve the original ordering of the pending patches.
	return [patch for patch in patch_list if patch not in applied]
#-----------------------------------------------------
def connect_db(db_name, pwd):
	"""Open a root MySQL connection (password `pwd`) and select `db_name`.

	Stores the connection globally on `webnotes.conn`.
	"""
	import webnotes.db
	webnotes.conn = webnotes.db.Database(user='root', password=pwd)
	webnotes.conn.use(db_name)
#-----------------------------------------------------
def write_log():
	"""Append the current traceback to patches/patch.log.

	When `admin_email_notification` is set in webnotes.defs, an admin
	notification message is also built (the actual sendmail call is
	currently commented out; the message is only printed).
	"""
	import os
	import webnotes.defs
	import webnotes
	# Open in append mode so earlier failures are preserved across runs.
	patch_log = open(os.path.join(webnotes.defs.modules_path, 'patches', 'patch.log'), 'a')
	patch_log.write(('\n\nError in %s:\n' % webnotes.conn.cur_db_name) + webnotes.getTraceback())
	patch_log.close()
	if getattr(webnotes.defs,'admin_email_notification',0):
		from webnotes.utils import sendmail
		subj = 'Patch Error. <br>Account: %s' % webnotes.conn.cur_db_name
		msg = subj + '<br><br>' + webnotes.getTraceback()
		print msg
		#sendmail(['nabin@erpnext.com'], sender='automail@erpnext.com', subject= subj, parts=[['text/plain', msg]])
| mit | Python | |
428670421c60305c7c89579fe81419bfc0a920fa | Create facebook.py | Asyncode/ACR2.0,GluuIO/ACR2.0 | ACR/components/facebook.py | ACR/components/facebook.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Asyncode Runtime - enabling non-tech people to develop internet software.
# Copyright (C) 2014-2015 Asyncode Ltd.
# PROPRIETARY component
from ACR.components import *
from ACR.utils import generateID, replaceVars
from ACR.utils.interpreter import makeTree
from ACR import acconfig
from ACR.errors import Error
from ACR.session.mongoSession import MongoSession
import re,json
#http://www.gevent.org/intro.html
#from gevent import monkey; monkey.patch_socket()
from urllib2 import urlopen
from urlparse import parse_qs
V="v2.2"
API_URL="https://graph.facebook.com/"+V+"/"
class Facebook(Component):
	"""ACR component that proxies read requests to the Facebook Graph API."""
	#defaults
	def __init__(self,config):
		# Flatten the parsed config tuples into a plain {name: value} dict.
		cfg={}
		for i in config:
			cfg[i[0]]=i[2][0]
		config=cfg
		if not config:
			config={}
		# NOTE(review): attribute names look copy-pasted from another
		# component -- appID is stored as `server` and appSecret as `port`.
		# Confirm before relying on these names.
		self.server=config.get("appID")
		self.port=config.get("appSecret")
	def generate(self,acenv,conf):
		"""Fetch the configured Graph API URL and return the decoded body.

		Tries JSON first; on a JSON parse failure falls back to parsing a
		query-string style body (`key=value&...`), flattening each value
		list to its first element.
		"""
		r=urlopen(API_URL+conf["params"]["url"].execute(acenv)).read()
		try:
			return json.loads(r)
		# Narrowed from a bare `except:` -- only a JSON decode failure
		# (ValueError) should trigger the query-string fallback.
		except ValueError:
			res=parse_qs(r, keep_blank_values=True)
			for i in res:
				res[i]=res[i][0]
			return res
	def parseAction(self,config):
		"""Normalize an action definition into {command, content, params}.

		Tuple children become params (`where` is compiled with makeTree
		immediately); plain strings are stripped and concatenated into
		`content`. All params are finally compiled with makeTree.

		NOTE(review): `str2obj` is not imported by name in this module --
		verify it is provided by a star import from ACR.utils.
		"""
		s=[]
		fields={}
		pars=config["params"]
		for elem in config["content"]:
			if type(elem) is tuple:
				if elem[0]=="where":
					pars["where"]=makeTree("".join(elem[2]))
				#deprecated
				elif elem[0]=="field":
					fields[elem[1]["name"]]=bool(str2obj(elem[1]["show"]))
				else:
					pars[elem[0]]=(elem[1],elem[2])
			elif type(elem) is str:
				s.append(elem.strip())
		for i in pars:
			pars[i]=makeTree(pars[i])
		return {
			"command":config["command"],
			"content":"".join(s),
			"params":pars
		}
def getObject(config):
	"""Factory entry point used by the ACR component loader."""
	return Facebook(config)
| agpl-3.0 | Python | |
ff19fd40ea90da1b47bdaa26522f0a30ca18e73f | make all mysql tables explicitly innodb | isyippee/nova,Triv90/Nova,tianweizhang/nova,belmiromoreira/nova,noironetworks/nova,devendermishrajio/nova,usc-isi/extra-specs,JianyuWang/nova,CiscoSystems/nova,openstack/nova,redhat-openstack/nova,sacharya/nova,tudorvio/nova,houshengbo/nova_vmware_compute_driver,cernops/nova,silenceli/nova,takeshineshiro/nova,savi-dev/nova,thomasem/nova,iuliat/nova,NeCTAR-RC/nova,dstroppa/openstack-smartos-nova-grizzly,alvarolopez/nova,jianghuaw/nova,dims/nova,cyx1231st/nova,virtualopensystems/nova,CCI-MOC/nova,fajoy/nova,affo/nova,eharney/nova,devendermishrajio/nova,fajoy/nova,Metaswitch/calico-nova,JioCloud/nova,rahulunair/nova,gooddata/openstack-nova,TwinkleChawla/nova,mahak/nova,vladikr/nova_drafts,joker946/nova,Juniper/nova,mandeepdhami/nova,eonpatapon/nova,zhimin711/nova,dstroppa/openstack-smartos-nova-grizzly,projectcalico/calico-nova,jianghuaw/nova,apporc/nova,NeCTAR-RC/nova,yosshy/nova,affo/nova,ruslanloman/nova,maelnor/nova,cernops/nova,petrutlucian94/nova,mikalstill/nova,yrobla/nova,mgagne/nova,whitepages/nova,plumgrid/plumgrid-nova,savi-dev/nova,paulmathews/nova,DirectXMan12/nova-hacking,badock/nova,Stavitsky/nova,mgagne/nova,shootstar/novatest,josephsuh/extra-specs,isyippee/nova,bgxavier/nova,petrutlucian94/nova_dev,silenceli/nova,fnordahl/nova,Tehsmash/nova,tangfeixiong/nova,kimjaejoong/nova,jianghuaw/nova,cloudbase/nova,bgxavier/nova,Francis-Liu/animated-broccoli,jeffrey4l/nova,eayunstack/nova,NewpTone/stacklab-nova,gooddata/openstack-nova,klmitch/nova,LoHChina/nova,klmitch/nova,berrange/nova,SUSE-Cloud/nova,BeyondTheClouds/nova,adelina-t/nova,edulramirez/nova,mmnelemane/nova,NoBodyCam/TftpPxeBootBareMetal,luogangyi/bcec-nova,adelina-t/nova,tealover/nova,rrader/nova-docker-plugin,ntt-sic/nova,aristanetworks/arista-ovs-nova,petrutlucian94/nova_dev,varunarya10/nova_test_latest,cloudbau/nova,Yusuke1987/openstack_template,mmnelemane/nova,nikesh-mahalka/nova,imsplitbit/nova,joker946/nova,D
irectXMan12/nova-hacking,imsplitbit/nova,usc-isi/nova,scripnichenko/nova,belmiromoreira/nova,cloudbase/nova,thomasem/nova,ruslanloman/nova,mahak/nova,mikalstill/nova,citrix-openstack-build/nova,OpenAcademy-OpenStack/nova-scheduler,barnsnake351/nova,SUSE-Cloud/nova,rajalokan/nova,Stavitsky/nova,ted-gould/nova,vmturbo/nova,fajoy/nova,Juniper/nova,sebrandon1/nova,gspilio/nova,mahak/nova,CCI-MOC/nova,Triv90/Nova,zzicewind/nova,Juniper/nova,badock/nova,rajalokan/nova,orbitfp7/nova,devoid/nova,aristanetworks/arista-ovs-nova,citrix-openstack-build/nova,dawnpower/nova,leilihh/nova,josephsuh/extra-specs,jianghuaw/nova,tanglei528/nova,alexandrucoman/vbox-nova-driver,iuliat/nova,BeyondTheClouds/nova,hanlind/nova,mandeepdhami/nova,orbitfp7/nova,TieWei/nova,blueboxgroup/nova,bigswitch/nova,j-carpentier/nova,bclau/nova,bclau/nova,yrobla/nova,zhimin711/nova,akash1808/nova_test_latest,spring-week-topos/nova-week,sridevikoushik31/openstack,MountainWei/nova,Yuriy-Leonov/nova,savi-dev/nova,maheshp/novatest,tangfeixiong/nova,sacharya/nova,Brocade-OpenSource/OpenStack-DNRM-Nova,alvarolopez/nova,Metaswitch/calico-nova,fnordahl/nova,JioCloud/nova_test_latest,devoid/nova,rrader/nova-docker-plugin,plumgrid/plumgrid-nova,devendermishrajio/nova_test_latest,watonyweng/nova,leilihh/novaha,JianyuWang/nova,gspilio/nova,usc-isi/nova,petrutlucian94/nova,kimjaejoong/nova,akash1808/nova,dawnpower/nova,saleemjaveds/https-github.com-openstack-nova,NewpTone/stacklab-nova,NewpTone/stacklab-nova,watonyweng/nova,rahulunair/nova,shootstar/novatest,redhat-openstack/nova,hanlind/nova,yatinkumbhare/openstack-nova,leilihh/novaha,maelnor/nova,akash1808/nova,rickerc/nova_audit,CiscoSystems/nova,maheshp/novatest,cloudbase/nova-virtualbox,Triv90/Nova,noironetworks/nova,psiwczak/openstack,cloudbau/nova,sridevikoushik31/openstack,rahulunair/nova,cyx1231st/nova,ewindisch/nova,usc-isi/extra-specs,berrange/nova,mikalstill/nova,virtualopensystems/nova,eayunstack/nova,rickerc/nova_audit,scripnichenko/nova,dims/nova,vmturb
o/nova,angdraug/nova,shail2810/nova,cloudbase/nova,NoBodyCam/TftpPxeBootBareMetal,nikesh-mahalka/nova,BeyondTheClouds/nova,dstroppa/openstack-smartos-nova-grizzly,CEG-FYP-OpenStack/scheduler,shahar-stratoscale/nova,gooddata/openstack-nova,usc-isi/extra-specs,felixma/nova,double12gzh/nova,vmturbo/nova,eonpatapon/nova,Yusuke1987/openstack_template,jeffrey4l/nova,double12gzh/nova,devendermishrajio/nova_test_latest,CloudServer/nova,projectcalico/calico-nova,viggates/nova,openstack/nova,usc-isi/nova,ntt-sic/nova,houshengbo/nova_vmware_compute_driver,TwinkleChawla/nova,OpenAcademy-OpenStack/nova-scheduler,Tehsmash/nova,bigswitch/nova,openstack/nova,blueboxgroup/nova,vladikr/nova_drafts,j-carpentier/nova,maoy/zknova,barnsnake351/nova,aristanetworks/arista-ovs-nova,qwefi/nova,angdraug/nova,CloudServer/nova,shahar-stratoscale/nova,psiwczak/openstack,sridevikoushik31/nova,takeshineshiro/nova,tealover/nova,maoy/zknova,apporc/nova,CEG-FYP-OpenStack/scheduler,hanlind/nova,varunarya10/nova_test_latest,whitepages/nova,MountainWei/nova,alexandrucoman/vbox-nova-driver,yosshy/nova,psiwczak/openstack,Francis-Liu/animated-broccoli,cloudbase/nova-virtualbox,raildo/nova,paulmathews/nova,shail2810/nova,NoBodyCam/TftpPxeBootBareMetal,ewindisch/nova,klmitch/nova,LoHChina/nova,alaski/nova,klmitch/nova,eharney/nova,vmturbo/nova,sridevikoushik31/nova,maoy/zknova,JioCloud/nova_test_latest,akash1808/nova_test_latest,sebrandon1/nova,leilihh/nova,zzicewind/nova,Juniper/nova,yrobla/nova,DirectXMan12/nova-hacking,sridevikoushik31/nova,josephsuh/extra-specs,ted-gould/nova,sridevikoushik31/nova,houshengbo/nova_vmware_compute_driver,zaina/nova,yatinkumbhare/openstack-nova,phenoxim/nova,maheshp/novatest,tanglei528/nova,gooddata/openstack-nova,Yuriy-Leonov/nova,gspilio/nova,sebrandon1/nova,rajalokan/nova,tudorvio/nova,alaski/nova,TieWei/nova,tianweizhang/nova,sridevikoushik31/openstack,luogangyi/bcec-nova,paulmathews/nova,zaina/nova,saleemjaveds/https-github.com-openstack-nova,cernops/nova,spring-week-to
pos/nova-week,rajalokan/nova,JioCloud/nova,felixma/nova,Brocade-OpenSource/OpenStack-DNRM-Nova,raildo/nova,viggates/nova,edulramirez/nova,qwefi/nova,phenoxim/nova | nova/db/sqlalchemy/migrate_repo/versions/086_set_engine_mysql_innodb.py | nova/db/sqlalchemy/migrate_repo/versions/086_set_engine_mysql_innodb.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData
def upgrade(migrate_engine):
    """Force every listed table to use the InnoDB storage engine.

    Only acts when the target database is MySQL; other backends have no
    pluggable storage engines, so this migration is a no-op for them.
    (The original built an unused, unbound sqlalchemy MetaData object;
    that dead code has been removed -- the ALTERs run as raw SQL.)
    """
    if migrate_engine.name != "mysql":
        return
    # Tables created by earlier migrations without an explicit engine.
    tables = [
        "agent_builds",
        "aggregate_hosts",
        "aggregate_metadata",
        "aggregates",
        "block_device_mapping",
        "bw_usage_cache",
        "dns_domains",
        "instance_faults",
        "instance_type_extra_specs",
        "provider_fw_rules",
        "quota_classes",
        "s3_images",
        "sm_backend_config",
        "sm_flavors",
        "sm_volume",
        "virtual_storage_arrays",
        "volume_metadata",
        "volume_type_extra_specs",
        "volume_types",
    ]
    for table in tables:
        migrate_engine.execute("ALTER TABLE %s Engine=InnoDB" % table)
def downgrade(migrate_engine):
    # Irreversible: the previous storage engine of each table is unknown,
    # so the engine change is simply left in place.
    pass
| apache-2.0 | Python | |
19297efacbc15434e13854aff9c204520ce45179 | Create example_uncertainty.py | gwtsa/gwtsa,pastas/pasta,pastas/pastas | examples/example_uncertainty.py | examples/example_uncertainty.py | import matplotlib.pyplot as plt
import pandas as pd
import pastas as ps

# Keep the example output quiet; only errors are reported.
ps.set_log_level("ERROR")

# read observations and create the time series model
obs = pd.read_csv("data/head_nb1.csv", index_col=0, parse_dates=True,
                  squeeze=True)
ml = ps.Model(obs, name="groundwater head")

# read weather data and create stressmodel
rain = pd.read_csv("data/rain_nb1.csv", index_col=0, parse_dates=True,
                   squeeze=True)
evap = pd.read_csv("data/evap_nb1.csv", index_col=0, parse_dates=True,
                   squeeze=True)
sm = ps.RechargeModel(prec=rain, evap=evap, rfunc=ps.Exponential,
                      recharge="Linear", name='recharge')
ml.add_stressmodel(sm)

# Solve
ml.solve()

# Sample 1000 block responses from the fitted parameters and overlay them
# (dashed) on the point-estimate response (orange, "C1").
# NOTE(review): df presumably holds sampled response quantiles -- confirm
# against the pastas uncertainty API.
df = ml.uncertainty.block_response("recharge", n=1000)
ax = ml.get_block_response("recharge").plot(color="C1")
df.plot(color="k", linestyle="--", ax=ax)

# Repeat with a stricter significance level (alpha=0.01).
df = ml.uncertainty.block_response("recharge", n=1000, alpha=0.01)
df.plot(color="gray", linestyle="--", ax=ax)

# Same comparison for the step response.
df = ml.uncertainty.step_response("recharge", n=1000)
ax = ml.get_step_response("recharge").plot(color="C1")
df.plot(color="k", linestyle="--", ax=ax)

df = ml.uncertainty.step_response("recharge", n=1000, alpha=0.01)
df.plot(color="gray", linestyle="--", ax=ax) | mit | Python | |
9b099f9b9fb51d5bc1983e68f8267b2fca0ddfec | Create close.py | kellogg76/ArduinoTelescopeDustCover | close.py | close.py | ## Open a serial connection with Arduino.
import time
import serial

# COM9 at 9600 baud must match the Arduino sketch's Serial.begin().
ser = serial.Serial("COM9", 9600) # Open serial port that Arduino is using
time.sleep(3) # Wait 3 seconds for Arduino to reset
print ser # Print serial config
print "Sending serial command to CLOSE the dust cover."
# Single-byte command understood by the Arduino sketch: "C" = close.
ser.write("C")
print "Closing serial connection."
ser.close()
# Reminder to close the connection when finished
if(ser.isOpen()):
    print "Serial connection is still open."
| mit | Python | |
b303eccd4b3ec902f90c0e4bfb18314b37dd763b | print using future example | ET-CS/python-patterns | examples/python2/basic/print.py | examples/python2/basic/print.py | #!/usr/bin/env python
# to change to python 3 print syntax (recommended!)
from __future__ import print_function

# Adjacent string literals are concatenated at compile time:
# 'o' 'n' "e" becomes the single string "one".
print('o' 'n' "e")
# >>> one
| apache-2.0 | Python | |
7eb21d4cb05dd26835e10cb17ff1ef399b228067 | Add api tests | njbbaer/unicorn-remote,njbbaer/unicorn-remote,njbbaer/unicorn-remote | app/tests/test_api.py | app/tests/test_api.py | import unittest
from app import app, state
class TestAPI(unittest.TestCase):
    """Integration tests for the /api/program endpoints."""

    def setUp(self):
        self.app = app.test_client()

    def tearDown(self):
        # Make sure no program keeps running between tests.
        state.stop_program()

    def _assert_put_status(self, path, expected):
        # Issue a PUT request and check the resulting HTTP status code.
        response = self.app.put(path)
        self.assertEqual(response.status_code, expected)

    def test_start_all(self):
        program_names = [
            "ascii_text", "cheertree", "cross", "demo", "dna",
            "game_of_life", "matrix", "psychedelia", "rain", "rainbow",
            "random_blinky", "random_sparkles", "simple", "snow", "trig",
        ]
        for name in program_names:
            with self.subTest(program=name):
                self._assert_put_status("/api/program/" + name, 200)

    def test_start_not_found(self):
        self._assert_put_status("/api/program/does_not_exist", 404)

    def test_start_with_good_params(self):
        self._assert_put_status("/api/program/demo?brightness=0.5&rotation=0", 200)

    def test_start_with_bad_brightness(self):
        self._assert_put_status("/api/program/demo?brightness=1.1", 400)

    def test_start_with_bad_rotation(self):
        self._assert_put_status("/api/program/demo?rotation=91", 400)

    def test_stop_program(self):
        response = self.app.delete("/api/program")
        self.assertEqual(response.status_code, 200)
a2e18f9b10e5e6bbcad6c13cdc5c76047d319fc2 | Add fake limited composite example | Kitware/tonic-data-generator,Kitware/tonic-data-generator | python/tests/test_pv_composite_wavelet.py | python/tests/test_pv_composite_wavelet.py | # -----------------------------------------------------------------------------
# User configuration
# -----------------------------------------------------------------------------

outputDir = '/Users/seb/Desktop/float-image/'

# -----------------------------------------------------------------------------

from paraview import simple
from tonic.paraview.dataset_builder import *

# -----------------------------------------------------------------------------
# VTK Pipeline creation
# -----------------------------------------------------------------------------

wavelet = simple.Wavelet()

calc = simple.Calculator()
calc.Function = 'coordsX'
calc.ResultArrayName = 'x'

contour = simple.Contour(
    PointMergeMethod="Uniform Binning",
    ComputeScalars = 1,
    ComputeNormals = 1,
    Isosurfaces = 157.09,
    ContourBy = ['POINTS', 'RTData'])

clip = simple.Clip()
clip.ClipType.Normal = [0.0, 0.0, -1.0]

# -----------------------------------------------------------------------------
# Data To Export
# -----------------------------------------------------------------------------

# Per-core flag passed as the third argument of setActiveLayer below.
# NOTE(review): appears to toggle whether the layer's mesh is written;
# confirm against tonic's LayerDataSetBuilder API.
layerMesh = {
    'core 1': False,
    'core 2': True,
    'core 3': True,
    'core 4': True,
    'core 5': True
}
fields = ['RTData', 'x']
cores = ['core 1', 'core 2', 'core 3', 'core 4', 'core 5']
# One iso-contour value per core, consumed in order by the loop below.
isoValues = [ 77.26, 117.18, 157.09, 197.0, 236.92 ]

# -----------------------------------------------------------------------------
# Data Generation
# -----------------------------------------------------------------------------

# Spherical camera: phi/theta sweeps of -10..10 degrees in 10-degree steps,
# rendered at 400x400 pixels.
db = LayerDataSetBuilder(clip, outputDir, {'type': 'spherical', 'phi': range(-10, 11, 10), 'theta': range(-10, 11, 10)}, [400,400])

# Setup view with camera position
view = db.getView()
simple.Show(wavelet, view)
simple.Render(view)
simple.ResetCamera(view)
simple.Hide(wavelet, view)

db.start()

layerIdx = 0
for layer in cores:
    # Select only one layer
    contour.Isosurfaces = isoValues[layerIdx]

    # Capture each field of each layer
    for field in fields:
        db.setActiveLayer(layer, field, layerMesh[layer])
        db.writeLayerData()

    # Move to the next layer
    layerIdx += 1

db.stop()
| bsd-3-clause | Python | |
3e85c471765f03151d0f6d11680b16c6eccedbec | Add Django admin for browsers, browser versions | mdn/browsercompat,jwhitlock/web-platform-compat,renoirb/browsercompat,renoirb/browsercompat,jwhitlock/web-platform-compat,mdn/browsercompat,renoirb/browsercompat,jwhitlock/web-platform-compat,mdn/browsercompat | webplatformcompat/admin.py | webplatformcompat/admin.py | from django.contrib import admin
from simple_history.admin import SimpleHistoryAdmin
from .models import Browser, BrowserVersion
class BrowserAdmin(SimpleHistoryAdmin):
    # No customization yet; SimpleHistoryAdmin adds the per-object
    # history view to the standard ModelAdmin.
    pass


class BrowserVersionAdmin(SimpleHistoryAdmin):
    # Same default behavior as BrowserAdmin.
    pass


admin.site.register(Browser, BrowserAdmin)
admin.site.register(BrowserVersion, BrowserVersionAdmin)
| mpl-2.0 | Python | |
503179f2ed6416a719c1caea90a7882519bedfa9 | Add py23 compat layer | fonttools/fonttools,googlefonts/fonttools | Lib/fontTools/misc/py23.py | Lib/fontTools/misc/py23.py | """Python 2/3 compat layer."""
# Each block probes for a Python 2 name and installs the Python 3
# equivalent when it is missing, so client code can use the py2 names
# on either interpreter.
try:
	basestring
except NameError:
	# Python 3: str is the only text string type.
	basestring = str

try:
	unicode
except NameError:
	unicode = str

try:
	unichr
	bytechr = chr
# Narrowed from a bare `except:` -- only the missing-name case should
# trigger the Python 3 fallbacks.
except NameError:
	unichr = chr

	def bytechr(n):
		# Python 3: chr() returns text, so build a one-byte bytes object.
		return bytes([n])

try:
	from cStringIO import StringIO
except ImportError:
	# Python 3: cStringIO is gone; io.StringIO is the text equivalent.
	from io import StringIO
| mit | Python | |
96897b01ef1fa8ff33d6831ba01e7d1c6fc02b67 | Add ufo2ft.preProcessor module | jamesgk/ufo2ft,googlei18n/ufo2ft,moyogo/ufo2ft,googlefonts/ufo2ft,jamesgk/ufo2fdk | Lib/ufo2ft/preProcessor.py | Lib/ufo2ft/preProcessor.py | from __future__ import (
print_function, division, absolute_import, unicode_literals)
from ufo2ft.filters import loadFilters
from ufo2ft.filters.decomposeComponents import DecomposeComponentsFilter
from copy import deepcopy
class BasePreProcessor(object):
    """Abstract base for pre-processing a UFO's glyphs before compilation
    (decomposing composites, removing overlaps, custom filters, ...).

    The input UFO is **not** modified: ``process`` returns a dictionary of
    modified glyphs keyed by glyph name.

    Filters run in a fixed order: user pre-filters, the subclass's default
    filters, then user post-filters. Subclasses override
    ``initDefaultFilters`` to supply the built-in filters; extra keyword
    arguments passed to the constructor are forwarded to it so defaults can
    be customized. User filters are read from the UFO lib.plist under the
    private key "com.github.googlei18n.ufo2ft.filters".
    """

    def __init__(self, ufo, **kwargs):
        self.ufo = ufo
        # Work on copies so the caller's UFO stays untouched.
        self.glyphSet = {glyph.name: _copyGlyph(glyph) for glyph in ufo}
        self.defaultFilters = self.initDefaultFilters(**kwargs)
        pre, post = loadFilters(self.ufo)
        self.preFilters = pre
        self.postFilters = post

    def initDefaultFilters(self, **kwargs):
        # Subclasses return their built-in filter list here.
        return []

    def process(self):
        pipeline = self.preFilters + self.defaultFilters + self.postFilters
        glyphSet = self.glyphSet
        for filterFunc in pipeline:
            filterFunc(glyphSet)
        return glyphSet
class OTFPreProcessor(BasePreProcessor):
    """Preprocessor for building CFF-flavored OpenType fonts.

    All components are decomposed. When ``removeOverlaps`` is True, a
    union boolean operation is additionally applied to every glyph's
    contours.
    """

    def initDefaultFilters(self, removeOverlaps=False):
        filters = [DecomposeComponentsFilter()]
        if not removeOverlaps:
            return filters
        # Imported lazily: the overlap filter pulls in optional deps.
        from ufo2ft.filters.removeOverlaps import RemoveOverlapsFilter
        filters.append(RemoveOverlapsFilter())
        return filters
class TTFPreProcessor(OTFPreProcessor):
    """Preprocessor for building TrueType-flavored OpenType fonts.

    Only glyphs that mix contours and components are decomposed. When
    ``removeOverlaps`` is True, a union boolean operation is applied to
    every glyph's contours.

    Unless ``convertCubics`` is False, PostScript cubic Bezier curves are
    converted to TrueType quadratic splines. ``conversionError`` bounds
    the maximum distance between original and converted curves, measured
    relative to the font's units-per-em (default: 1/1000, i.e. 0.001).

    Contours are assumed to follow the PostScript counter-clockwise
    winding convention, so their direction is reversed by default to
    match the opposite, clockwise TrueType convention; pass
    ``reverseDirection=False`` to keep the original direction.
    """

    def initDefaultFilters(self, removeOverlaps=False, convertCubics=True,
                           conversionError=None, reverseDirection=True):
        # len(g) counts contours, so this `include` selects exactly the
        # glyphs that have components *and* at least one contour.
        filters = [DecomposeComponentsFilter(include=lambda g: len(g))]
        if removeOverlaps:
            from ufo2ft.filters.removeOverlaps import RemoveOverlapsFilter
            filters.append(RemoveOverlapsFilter())
        if convertCubics:
            from ufo2ft.filters.cubicToQuadratic import CubicToQuadraticFilter
            cu2qu = CubicToQuadraticFilter(
                conversionError=conversionError,
                unitsPerEm=self.ufo.info.unitsPerEm,
                reverseDirection=reverseDirection)
            filters.append(cu2qu)
        return filters
def _copyGlyph(glyph):
    """Return a copy of `glyph`, skipping the attributes ufo2ft never
    uses ('guidelines', 'note', 'image')."""
    duplicate = glyph.__class__()
    duplicate.name = glyph.name
    duplicate.width = glyph.width
    duplicate.height = glyph.height
    duplicate.unicodes = list(glyph.unicodes)
    duplicate.anchors = [dict(anchor) for anchor in glyph.anchors]
    duplicate.lib = deepcopy(glyph.lib)
    # Replay the outline into the copy's point pen.
    glyph.drawPoints(duplicate.getPointPen())
    return duplicate
| mit | Python | |
5fa27287c37dd77ebcabf7759e4bb96693b86d8d | Create Africa2010_A.py | Pouf/CodingCompetition,Pouf/CodingCompetition | GoogleCodeJam/Africa2010_A.py | GoogleCodeJam/Africa2010_A.py | def solve(i, d):
C, I, prices = d
C = int(C)
prices = list(map(int, prices.split()))
for p1, P1 in enumerate(prices[:-1]):
remains = C-P1
left = prices[p1+1:]
if remains in left:
result = [p1+1, p1+left.index(remains)+2]
return i, ' '.join(map(str, result))
| mit | Python | |
e1e7922efac2b7fdfab7555baaf784edb345c222 | Add circle sort implementation (#5548) | TheAlgorithms/Python | sorts/circle_sort.py | sorts/circle_sort.py | """
This is a Python implementation of the circle sort algorithm
For doctests run following command:
python3 -m doctest -v circle_sort.py
For manual testing run:
python3 circle_sort.py
"""
def circle_sort(collection: list) -> list:
    """A pure Python implementation of the circle sort algorithm.

    :param collection: a mutable collection of comparable items in any order
    :return: the same collection, sorted in ascending order (in place)

    Examples:
    >>> circle_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]

    >>> circle_sort([])
    []

    >>> circle_sort([-2, 5, 0, -45])
    [-45, -2, 0, 5]

    >>> collections = ([], [0, 5, 3, 2, 2], [-2, 5, 0, -45])
    >>> all(sorted(collection) == circle_sort(collection) for collection in collections)
    True
    """
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        """Compare/swap mirrored pairs in collection[low:high + 1], recurse
        on both halves, and report whether any swap occurred.

        (The original version carried a broken doctest here that referenced
        an undefined name; nested-function doctests are never collected, so
        it has been removed.)
        """
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        # Odd-length segment: the middle element meets its right neighbor.
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + (high - low) // 2
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    # Keep making full passes until one completes with no swaps.
    while circle_sort_util(collection, 0, len(collection) - 1):
        pass

    return collection
if __name__ == "__main__":
    # Read a comma-separated list of integers and print it sorted.
    raw = input("Enter numbers separated by a comma:\n")
    numbers = [int(token) for token in raw.strip().split(",")]
    print(circle_sort(numbers))
| mit | Python | |
fff27fd33a4139c7d22b086364ab483819081adf | Create costs.py | evanscottgray/parse_rax_email_invoices,evanscottgray/parse_rax_email_invoices | costs.py | costs.py | #!/usr/bin/env python
import re
import sys
import json
def get_stdin():
    """Read all of standard input and return it as a single string."""
    # Equivalent to joining every line: read() keeps the newlines.
    return sys.stdin.read()
def get_domains(text):
    """Return the set of domains (final label + TLD) mentioned in `text`.

    The last two capture groups of the pattern hold the final label and
    the TLD; they are joined with a dot to rebuild each domain.
    """
    pattern = re.compile(r'(([a-zA-Z]{1})|([a-zA-Z]{1}[a-zA-Z]{1})|([a-zA-Z]{1}[0-9]{1})|([0-9]{1}[a-zA-Z]{1})|([a-zA-Z0-9][a-zA-Z0-9-_]{1,61}[a-zA-Z0-9]))\.([a-zA-Z]{2,6}|[a-zA-Z0-9-]{2,30}\.[a-zA-Z]{2,3})')
    found = set()
    for groups in pattern.findall(text):
        found.add('.'.join(groups[-2:]))
    return found
def read_invoice(filename='inv.txt'):
    """Return the full text of the invoice file `filename`.

    Uses a context manager so the file handle is closed deterministically
    (the original left closing to the garbage collector).
    """
    with open(filename, 'r') as f:
        return f.read()
def dom_cost(txt, dom):
    """Sum the dollar amounts (last column) of invoice lines mentioning `dom`.

    Only lines with more than four whitespace-separated columns are
    considered, and `dom` must appear as a whole column, not a substring.
    """
    total = 0
    for line in txt.splitlines():
        columns = line.split()
        if len(columns) > 4 and dom in columns:
            total += float(columns[-1].strip('$'))
    return total
def calculate_totals(doms, txt):
    """Map each domain in `doms` to its summed cost found in `txt`."""
    totals = {}
    for dom in doms:
        totals[dom] = dom_cost(txt, dom)
    return totals
# Script entry: read an invoice from stdin, extract every domain it
# mentions, and print the per-domain totals as a JSON object.
text = get_stdin()
domains = get_domains(text)
totals = calculate_totals(list(domains), text)
print json.dumps(totals)
| mit | Python | |
0899700549466f27748b8ac907c0d582aaad5556 | add the python file to test cntk and GPU | LingyuMa/kaggle_planet | test_cntk.py | test_cntk.py | # -*- coding: utf-8 -*-
"""
Created on Wed May 31 22:46:43 2017
@author: Lingyu
"""
from cntk.device import try_set_default_device, gpu
print('Use GPU: {}'.format(try_set_default_device(gpu(0))))
import numpy as np
import cntk as C
from cntk.learners import sgd, learning_rate_schedule, UnitType
from cntk.logging import ProgressPrinter
from cntk.layers import Dense, Sequential
def generate_random_data(sample_size, feature_dim, num_classes):
    """Build a synthetic, roughly linearly separable classification set.

    Returns (X, Y): float32 features of shape (sample_size, feature_dim)
    and one-hot float32 labels of shape (sample_size, num_classes).
    """
    labels = np.random.randint(size=(sample_size, 1), low=0, high=num_classes)
    # Shift/scale each gaussian cloud by its class id so classes separate.
    features = (np.random.randn(sample_size, feature_dim) + 3) * (labels + 1)
    features = features.astype(np.float32)
    # One-hot encode: column c is 1.0 exactly where the label equals c.
    one_hot = np.asarray(
        np.hstack([labels == c for c in range(num_classes)]),
        dtype=np.float32)
    return features, one_hot
def ffnet():
    """Train a two-layer feed-forward classifier on synthetic data with CNTK.

    Returns (last_avg_error, avg_error): the running average training loss
    and the classification error on one unseen minibatch.
    """
    inputs = 2
    outputs = 2
    layers = 2  # NOTE(review): unused; the model below hard-codes two Dense layers
    hidden_dimension = 50

    # input variables denoting the features and label data
    features = C.input_variable((inputs), np.float32)
    label = C.input_variable((outputs), np.float32)

    # Instantiate the feedforward classification model
    my_model = Sequential ([
                    Dense(hidden_dimension, activation=C.sigmoid),
                    Dense(outputs)])
    z = my_model(features)

    ce = C.cross_entropy_with_softmax(z, label)  # training criterion
    pe = C.classification_error(z, label)        # evaluation metric

    # Instantiate the trainer object to drive the model training
    lr_per_minibatch = learning_rate_schedule(0.125, UnitType.minibatch)
    progress_printer = ProgressPrinter(0)
    trainer = C.Trainer(z, (ce, pe), [sgd(z.parameters, lr=lr_per_minibatch)], [progress_printer])

    # Get minibatches of training data and perform model training
    minibatch_size = 25
    num_minibatches_to_train = 1024

    aggregate_loss = 0.0
    for i in range(num_minibatches_to_train):
        train_features, labels = generate_random_data(minibatch_size, inputs, outputs)
        # Specify the mapping of input variables in the model to actual minibatch data to be trained with
        trainer.train_minibatch({features : train_features, label : labels})
        sample_count = trainer.previous_minibatch_sample_count
        # Weight each minibatch's average loss by its sample count.
        aggregate_loss += trainer.previous_minibatch_loss_average * sample_count

    last_avg_error = aggregate_loss / trainer.total_number_of_samples_seen

    test_features, test_labels = generate_random_data(minibatch_size, inputs, outputs)
    avg_error = trainer.test_minibatch({features : test_features, label : test_labels})
    print(' error rate on an unseen minibatch: {}'.format(avg_error))
    return last_avg_error, avg_error
# Fixed seed so the synthetic data -- and therefore the training run -- is
# reproducible.
np.random.seed(98052)
ffnet()
| mit | Python | |
6a2bd578cc22231bce66a4d110b4ff1536743097 | Add index to the `created_at` column in `published_award_financial_assistance` and the `updated_at` column in `detached_award_procurement` | fedspendingtransparency/data-act-broker-backend,fedspendingtransparency/data-act-broker-backend | dataactcore/migrations/versions/7597deb348fb_fabs_created_at_and_fpds_updated_at_.py | dataactcore/migrations/versions/7597deb348fb_fabs_created_at_and_fpds_updated_at_.py | """FABS created_at and FPDS updated_at indexes
Revision ID: 7597deb348fb
Revises: b168f0cdc5a8
Create Date: 2018-02-06 16:08:20.985202
"""
# revision identifiers, used by Alembic.
revision = '7597deb348fb'       # this migration's unique identifier
down_revision = 'b168f0cdc5a8'  # migration this one applies on top of
branch_labels = None            # no named branch labels
depends_on = None               # no cross-branch dependencies
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_data_broker():
    """Add non-unique indexes on detached_award_procurement.updated_at and
    published_award_financial_assistance.created_at."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_index(op.f('ix_detached_award_procurement_updated_at'), 'detached_award_procurement', ['updated_at'], unique=False)
    op.create_index(op.f('ix_published_award_financial_assistance_created_at'), 'published_award_financial_assistance', ['created_at'], unique=False)
    ### end Alembic commands ###
def downgrade_data_broker():
    """Drop the two timestamp indexes created in upgrade_data_broker."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_detached_award_procurement_updated_at'), table_name='detached_award_procurement')
    op.drop_index(op.f('ix_published_award_financial_assistance_created_at'), table_name='published_award_financial_assistance')
    ### end Alembic commands ###
| cc0-1.0 | Python | |
847aef794f77ed036c77d7334263434a7d4c0084 | Add pad.py: pad file to given size using given byte. | S010/misc,S010/misc,S010/misc,S010/misc,S010/misc,S010/misc,S010/misc,S010/misc,S010/misc | tools/pad.py | tools/pad.py | #!/usr/bin/python3
#
# Read a file from standard input and if it's size is less the specified size
# pad it writing the result to standard output.
#
# Example:
# echo -n 'abc' | ./pad.py 16 0x50 | hexdump -C
# 00000000 61 62 63 50 50 50 50 50 50 50 50 50 50 50 50 50 |abcPPPPPPPPPPPPP|
# 00000010
#
import sys
import os
import getopt
READ_SIZE = 64 * 1024
def main(args):
    """Parse command-line options and run the padding filter.

    args: the argument list without the program name (e.g. sys.argv[1:]).
    Expects "<size> [<fill_byte>]" plus an optional -h flag.
    """
    try:
        opts, args = getopt.getopt(args, 'h')
        for opt in opts:
            if opt[0] == '-h':
                # Print help and stop: the original fell through here and
                # then crashed on the missing positional arguments.
                usage()
                return
        if len(args) not in (1, 2):
            usage()
            return
        size = int(args[0])
        byte = 0
        if len(args) == 2:
            # Optional fill byte is given in hex (e.g. 0x50).
            byte = int(args.pop(), 16)
        pad(size, byte)
    except Exception as e:
        fatal('%s' % str(e))
def pad(size, byte):
    """Stream binary stdin to stdout, then pad with `byte` up to `size` bytes.

    If the input is already `size` bytes or longer, nothing is appended.
    """
    copied = 0
    while True:
        chunk = sys.stdin.buffer.read(READ_SIZE)
        sys.stdout.buffer.write(chunk)
        copied += len(chunk)
        if len(chunk) < READ_SIZE:
            # A short read means stdin is exhausted.
            break
    if copied < size:
        sys.stdout.buffer.write(bytes([byte]) * (size - copied))
    sys.stdout.buffer.flush()
def usage():
    """Print a short help message describing the expected arguments."""
    lines = (
        'usage: %s <size> [<fill_byte>] < input > output' % sys.argv[0],
        'Pad an input file to <size> using <fill_byte> (0x00 by default).',
    )
    for line in lines:
        print(line)
def fatal(msg):
    """Write an error line to stderr and terminate with a nonzero status."""
    message = '%s: %s\n' % (sys.argv[0], msg)
    sys.stderr.write(message)
    sys.exit(-1)
# Run as a filter: pad.py <size> [<fill_byte>] < input > output
if __name__ == '__main__':
    main(sys.argv[1:])
| isc | Python | |
80889986cd4742d7c63be46447b0097eac5bd745 | Update letter-case-permutation.py | kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode | Python/letter-case-permutation.py | Python/letter-case-permutation.py | # Time: O(n * 2^n)
# Space: O(n * 2^n)
# Given a string S, we can transform every letter individually to be lowercase
# or uppercase to create another string. Return a list of all possible strings we could create.
#
# Examples:
# Input: S = "a1b2"
# Output: ["a1b2", "a1B2", "A1b2", "A1B2"]
#
# Input: S = "3z4"
# Output: ["3z4", "3Z4"]
#
# Input: S = "12345"
# Output: ["12345"]
#
# Note:
# - S will be a string with length at most 12.
# - S will consist only of letters or digits.
class Solution(object):
    def letterCasePermutation(self, S):
        """
        :type S: str
        :rtype: List[str]
        """
        # Partial permutations, grown one character at a time.
        sequences = [[]]
        for ch in S:
            if ch.isalpha():
                # Lowercase variants first (in order), then the uppercase
                # copies -- matching the original append ordering.
                lowered = [seq + [ch.lower()] for seq in sequences]
                uppered = [seq + [ch.upper()] for seq in sequences]
                sequences = lowered + uppered
            else:
                # Digits go onto every partial sequence unchanged.
                for seq in sequences:
                    seq.append(ch)
        return map("".join, sequences)
| # Time: O(n * 2^n)
# Space: O(n * 2^n)
# Given a string S, we can transform every letter individually to be lowercase
# or uppercase to create another string. Return a list of all possible strings we could create.
#
# Examples:
# Input: S = "a1b2"
# Output: ["a1b2", "a1B2", "A1b2", "A1B2"]
#
# Input: S = "3z4"
# Output: ["3z4", "3Z4"]
#
# Input: S = "12345"
# Output: ["12345"]
#
# Note:
# - S will be a string with length at most 12.
# - S will consist only of letters or digits.
class Solution(object):
    def letterCasePermutation(self, S):
        """
        :type S: str
        :rtype: List[str]
        """
        # Partial permutations, built letter by letter; starts with one
        # empty sequence.
        result = [[]]
        for c in S:
            if c.isalpha():
                # Duplicate every partial sequence: the copy (appended at
                # the end) receives the uppercase letter while the original
                # keeps the lowercase one.
                for i in xrange(len(result)):
                    result.append(result[i][:])
                    result[-1].append(c.upper())
                    result[i].append(c.lower())
            else:
                # Non-letters are appended to every partial sequence as-is.
                for s in result:
                    s.append(c)
        return map("".join, result)
| mit | Python |
af530c45d11a22d53a25150583299b6137e70fe4 | add reddcoin overrides | neocogent/sqlchain,neocogent/sqlchain,neocogent/sqlchain,neocogent/sqlchain | sqlchain/reddcoin.py | sqlchain/reddcoin.py | #
# Override Block and Tx decoding for Reddcoin (Proof of Stake)
#
# Changes as per reddcoin source core.h
#
# CTransaction - if version > POW_TX_VERSION then unsigned int nTime follows nLockTime
# CBlock - if version > POW_BLOCK_VERSION then BlockSig string follows tx array
# Transactions can be CoinStake, then Block gets marked as PoSV
#
import hashlib
from struct import unpack, unpack_from
from sqlchain.util import decodeVarInt, decodeScriptPK
POW_BLOCK_VERSION = 2
POW_TX_VERSION = 1
# raw data decoding stuff
def decodeBlock(data):
    """Decode a raw Reddcoin block (Python 2 byte string) into a dict.

    Returns the header fields, the decoded tx list and PoSV metadata.
    """
    hdr = ['version','previousblockhash','merkleroot', 'time', 'bits', 'nonce']
    hv = unpack_from('<I32s32s3I', data)  # fixed 80-byte header layout
    block = dict(zip(hdr,hv))
    block['hdr'] = data[:80]
    # The block hash is the double SHA-256 of the 80-byte header.
    block['hash'] = hashlib.sha256(hashlib.sha256(block['hdr']).digest()).digest()
    block['bits'] = '%08x' % block['bits']
    txcnt,off = decodeVarInt(data[80:89])  # varint tx count follows the header
    off += 80
    ntx = txcnt  # remember the original count; txcnt is consumed below
    block['tx'] = []
    while txcnt > 0:
        tx = decodeTx(data[off:])
        block['tx'].append(tx)
        off += tx['size']
        txcnt -= 1
    if block['version'] > POW_BLOCK_VERSION:
        # PoSV blocks carry a block signature after the tx array (not parsed).
        block['blocksig'] = ''
    block['height'] = 0
    block['coinbase'] = block['tx'][0]['vin'][0]['coinbase']
    # Mark as PoSV when the block has more than one tx and the first tx was
    # flagged as CoinStake. The old test used txcnt, which the decode loop
    # above had already counted down to zero, so the flag was always False.
    block['coinstake'] = ntx > 1 and 'coinstake' in block['tx'][0]
    # NOTE(review): block['height'] is always 0 at this point, so this branch
    # can never run as written -- it looks like the check should use a height
    # obtained elsewhere; confirm against the caller before changing it.
    if block['version'] > 1 and block['height'] >= 227836 and block['coinbase'][0] == '\x03':
        block['height'] = unpack('<I', block['coinbase'][1:4]+'\0')[0]
    return block
def decodeTx(data): # pylint:disable=too-many-locals
    """Decode one raw Reddcoin transaction starting at data[0].

    Returns a dict with version, vin/vout lists, locktime, (PoSV-era) time,
    serialized size and txid. `data` is a Python 2 byte string.
    """
    vers, = unpack_from('<I', data)
    tx = { 'version': vers, 'vin':[], 'vout':[] }
    off = 4
    vicnt,ioff = decodeVarInt(data[off:off+9])
    off += ioff
    nvin = vicnt  # keep the original input count; vicnt is consumed below
    while vicnt > 0:
        txid,vout = unpack_from('<32sI', data, off)
        sigsz,soff = decodeVarInt(data[off+36:off+36+9])
        off += soff+36
        seq, = unpack_from('<I', data, off+sigsz)
        if txid == '\0'*32 and vout == 0xffffffff:
            # A null prevout marks a coinbase input.
            tx['vin'].append({'coinbase':data[off:off+sigsz], 'sequence':seq })
        else:
            tx['vin'].append({'txid':txid, 'vout':vout, 'scriptSig':data[off:off+sigsz], 'sequence':seq })
        off += sigsz+4
        vicnt -= 1
    vocnt,voff = decodeVarInt(data[off:off+9])
    off += voff
    n = 0
    while n < vocnt:
        value, = unpack_from('<Q', data, off)
        pksz,soff = decodeVarInt(data[off+8:off+8+9])
        off += 8+soff
        tx['vout'].append({'value':value, 'n':n, 'scriptPubKey':decodeScriptPK( data[off:off+pksz] ) })
        off += pksz
        n += 1
    # A CoinStake tx has at least one input, two or more outputs, and an
    # empty first output. The old test used vicnt, which the input loop had
    # already counted down to zero, so the flag was never set.
    if vocnt > 1 and nvin > 0 and emptyTXO(tx['vout'][0]) and not 'coinbase' in tx['vin'][0]:
        tx['coinstake'] = True
    tx['locktime'], = unpack_from('<I', data, off)
    if vers > POW_TX_VERSION:
        # Per the module header, nTime follows nLockTime; the old code
        # re-read the nLockTime bytes at `off` instead of the next field.
        tx['time'], = unpack_from('<I', data, off+4)
        off += 4
    tx['size'] = off+4
    tx['txid'] = hashlib.sha256(hashlib.sha256(data[:tx['size']]).digest()).digest()
    return tx
def emptyTXO(txo):
    """Return True for a zero-value output whose script is unrecognized and empty."""
    spk = txo['scriptPubKey']
    return (txo['value'] == 0
            and spk['type'] == 'other'
            and spk['data'] == '')
| mit | Python | |
9c17b5eada0c9830c6b6a9a8fe20bfe2ec6e8728 | Create conflict_test.py | MSU-CS-Software-Engineering/habitgame,MSU-CS-Software-Engineering/habitgame | conflict_test.py | conflict_test.py | """The point of this exercise is to create a merging conflict.
Accomplish this by deleting the line containing the if statement
above your name. Only modify the one if statement with your name"""
## BEFORE
## if("name"):
## print "name"
##
## AFTER
## print "name"
def print_name():
    """Print each participant's name, one per line.

    The always-true `if("<name>"):` wrappers exist only so the merge-conflict
    exercise above has a line to delete per person -- they are kept on
    purpose. `print` is called as a function so the file also runs under
    Python 3 (the old statement form was Python-2-only).
    """
    if("Timothy"):
        print("Timothy")
    if("Chris W."):
        print("Chris W.")
    if("Sean"):
        print("Sean")
    if("Chris R."):
        print("Chris R.")
    if("Bryan"):
        print("Bryan")
    if("Matthew"):
        print("Matthew")
    if("Dustin"):
        print("Dustin")
    if("Darron"):
        print("Darron")
| mit | Python | |
8e98b8d884b53eaeb43c03548a32520e34fb340e | Add pID | panoptes/POCS,AstroHuntsman/POCS,panoptes/POCS,AstroHuntsman/POCS,panoptes/POCS,panoptes/environmental-analysis-system,AstroHuntsman/POCS,AstroHuntsman/POCS,panoptes/POCS,panoptes/PEAS | peas/PID.py | peas/PID.py | from datetime import datetime
class PID:
    '''A discrete PID controller with a time-windowed integral term.

    Pseudocode from Wikipedia:
    previous_error = 0
    integral = 0
    start:
      error = setpoint - measured_value
      integral = integral + error*dt
      derivative = (error - previous_error)/dt
      output = Kp*error + Ki*integral + Kd*derivative
      previous_error = error
      wait(dt)
      goto start
    '''

    def __init__(self, Kp=2., Ki=0., Kd=1.,
                 set_point=None, output_limits=None,
                 max_age=None):
        """Configure the controller.

        Kp/Ki/Kd: proportional, integral and derivative gains.
        set_point: target value the controller drives toward.
        output_limits: optional iterable; output is clamped to its min/max.
        max_age: optional maximum age (seconds) of integral history samples.
        """
        self.Kp = Kp
        self.Ki = Ki
        self.Kd = Kd
        self.Pval = None   # last error (proportional term input)
        self.Ival = 0.0    # current integral value
        self.Dval = 0.0    # current derivative value
        self.previous_error = None
        # Direct assignment (the old `if set_point:` silently discarded a
        # valid set point of 0).
        self.set_point = set_point
        self.output_limits = output_limits
        self.history = []  # [error, interval, age] samples for the integral
        self.max_age = max_age
        self.last_recalc_time = None
        self.last_interval = 0.

    def recalculate(self, value, interval=None,
                    reset_integral=False,
                    new_set_point=None):
        """Compute the controller output for the measured `value`.

        interval: seconds since the previous sample; if None it is derived
        from the wall clock (0.0 on the very first call).
        reset_integral: drop all accumulated integral history first.
        new_set_point: optionally change the target before computing.
        """
        if new_set_point is not None:
            self.set_point = float(new_set_point)
        if reset_integral:
            self.history = []
        if interval is None:
            if self.last_recalc_time:
                now = datetime.utcnow()
                interval = (now - self.last_recalc_time).total_seconds()
            else:
                interval = 0.0
        # Pval (proportional term)
        error = self.set_point - value
        self.Pval = error
        # Ival (integral term): age every sample, drop expired ones, then
        # append the current sample.
        for entry in self.history:
            entry[2] += interval
        if self.max_age:
            # Rebuild with a filter -- the old list.remove() inside a `for`
            # over the same list skipped the entry after each removal.
            self.history = [entry for entry in self.history
                            if entry[2] <= self.max_age]
        self.history.append([error, interval, 0])
        self.Ival = sum(entry[0] * entry[1] for entry in self.history)
        # Dval (derivative term). Test for None explicitly: a previous error
        # of exactly 0 is valid. Also guard against a zero interval, which
        # previously raised ZeroDivisionError.
        if self.previous_error is not None and interval:
            self.Dval = (error - self.previous_error) / interval
        # Output, clamped to the configured limits if any.
        output = self.Kp * error + self.Ki * self.Ival + self.Kd * self.Dval
        if self.output_limits:
            output = min(output, max(self.output_limits))
            output = max(output, min(self.output_limits))
        self.previous_error = error
        self.last_recalc_time = datetime.utcnow()
        self.last_interval = interval
        return output

    def tune(self, Kp=None, Ki=None, Kd=None):
        """Update any subset of the gains; None leaves a gain unchanged.

        Explicit None checks allow setting a gain to 0 (the old truthiness
        tests made 0 impossible to set).
        """
        if Kp is not None:
            self.Kp = Kp
        if Ki is not None:
            self.Ki = Ki
        if Kd is not None:
            self.Kd = Kd
| mit | Python | |
29d0797540461f8c021ecad6e5d1e724dcc3e378 | Make a simple infinite while loop to run tests. | orbitfold/tardis,kaushik94/tardis,kaushik94/tardis,orbitfold/tardis,orbitfold/tardis,orbitfold/tardis,kaushik94/tardis,kaushik94/tardis | tardis/tests/tests_slow/runner.py | tardis/tests/tests_slow/runner.py | import time
import subprocess
if __name__ == "__main__":
while True:
subprocess.call([
"python", "setup.py", "test", "--test-path=tardis/tests/test_util.py",
])
time.sleep(20)
| bsd-3-clause | Python | |
04cebfba4c7ee5bf4b05bab811a944724224e3c3 | add 0000 file | Show-Me-the-Code/python,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Show-Me-the-Code/python,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2 | Drake-Z/0000/0000.py | Drake-Z/0000/0000.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'第 0000 题:将你的 QQ 头像(或者微博头像)右上角加上红色的数字,类似于微信未读信息数量那种提示效果。 类似于图中效果'
__author__ = 'Drake-Z'
from PIL import Image, ImageDraw, ImageFont
def add_num(filname, text = '4', fillcolor = (255, 0, 0)):
    """Draw `text` as a badge in the top-right corner of the image `filname`.

    The result is saved to '1.jpg'. `fillcolor` is the RGB text color; the
    old body re-assigned it to red and silently ignored the caller's value.
    Returns 0 on success.
    """
    img = Image.open(filname)
    width, height = img.size
    # Badge text scales with the image: 1/8 of its width.
    # NOTE(review): hard-coded Windows font path; this fails elsewhere.
    myfont = ImageFont.truetype('C:/windows/fonts/Arial.ttf', size=width//8)
    draw = ImageDraw.Draw(img)
    draw.text((width-width//8, 0), text, font=myfont, fill=fillcolor)
    img.save('1.jpg','jpeg')
    return 0
if __name__ == '__main__':
    # Badge 0.jpg with a red '4'; add_num writes the result to 1.jpg.
    filname = '0.jpg'
    text = '4'
    fillcolor = (255, 0, 0)
    add_num(filname, text, fillcolor)
a11ac7235e448aeed145cc7b98e43180e1428155 | Create __openerp__.py | OdooCommunityWidgets/website_navigation_megamenu | __openerp__.py | __openerp__.py | {
'name': 'Website MegaMenu [Bootstrap]',
'description': 'This module will implement MegaMenu functionality to improve on the default built-in Odoo menu for use with both the E-commerce and CMS modules.',
'category': 'Website',
'version': '1.0',
'author': 'Luke Branch',
'website': 'https://github.com/OdooCommunityWidgets',
'depends': ['website'],
'data': [
'views/website_category_extended.xml',
'views/website_menuitem_extended.xml',
'views/website_settings_extended.xml',
'views/website_usermenu_extended.xml'
],
'application': True,
}
| mit | Python | |
ec82be36659443c66d72f6cbd09055e572628cec | Create storj_login.py | lakewik/storj-gui-client | UI/qt_interfaces/design/storj_login.py | UI/qt_interfaces/design/storj_login.py | <?xml version="1.0" encoding="UTF-8"?>
<ui version="4.0">
<class>Login</class>
<widget class="QDialog" name="Login">
<property name="geometry">
<rect>
<x>0</x>
<y>0</y>
<width>400</width>
<height>428</height>
</rect>
</property>
<property name="windowTitle">
<string>Login to your Storj Account</string>
</property>
<widget class="QLabel" name="label_2">
<property name="geometry">
<rect>
<x>30</x>
<y>80</y>
<width>101</width>
<height>21</height>
</rect>
</property>
<property name="text">
<string><html><head/><body><p><span style=" font-size:12pt; font-weight:600;">Bridge URL:</span></p></body></html></string>
</property>
</widget>
<widget class="QLabel" name="label_3">
<property name="geometry">
<rect>
<x>30</x>
<y>220</y>
<width>101</width>
<height>21</height>
</rect>
</property>
<property name="text">
<string><html><head/><body><p><span style=" font-size:12pt; font-weight:600;">Password:</span></p></body></html></string>
</property>
</widget>
<widget class="QPushButton" name="login_bt">
<property name="geometry">
<rect>
<x>20</x>
<y>310</y>
<width>361</width>
<height>61</height>
</rect>
</property>
<property name="text">
<string>Login!</string>
</property>
</widget>
<widget class="Line" name="line">
<property name="geometry">
<rect>
<x>40</x>
<y>50</y>
<width>311</width>
<height>20</height>
</rect>
</property>
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
</widget>
<widget class="QLineEdit" name="bridge_url">
<property name="geometry">
<rect>
<x>130</x>
<y>70</y>
<width>231</width>
<height>41</height>
</rect>
</property>
<property name="text">
<string>http://api.storj.io</string>
</property>
</widget>
<widget class="QLabel" name="label_5">
<property name="geometry">
<rect>
<x>30</x>
<y>140</y>
<width>101</width>
<height>21</height>
</rect>
</property>
<property name="text">
<string><html><head/><body><p><span style=" font-size:12pt; font-weight:600;">E-mail:</span></p></body></html></string>
</property>
</widget>
<widget class="QLineEdit" name="password">
<property name="geometry">
<rect>
<x>40</x>
<y>250</y>
<width>311</width>
<height>41</height>
</rect>
</property>
<property name="text">
<string/>
</property>
</widget>
<widget class="QLineEdit" name="email">
<property name="geometry">
<rect>
<x>40</x>
<y>170</y>
<width>311</width>
<height>41</height>
</rect>
</property>
<property name="text">
<string/>
</property>
</widget>
<widget class="QLabel" name="label">
<property name="geometry">
<rect>
<x>20</x>
<y>10</y>
<width>361</width>
<height>41</height>
</rect>
</property>
<property name="text">
<string><html><head/><body><p align="center"><span style=" font-size:16pt; font-weight:600;">Login to your Storj Account</span></p></body></html></string>
</property>
</widget>
<widget class="Line" name="line_2">
<property name="geometry">
<rect>
<x>40</x>
<y>110</y>
<width>311</width>
<height>20</height>
</rect>
</property>
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
</widget>
<widget class="QPushButton" name="cancel_bt">
<property name="geometry">
<rect>
<x>20</x>
<y>380</y>
<width>361</width>
<height>31</height>
</rect>
</property>
<property name="text">
<string>Cancel</string>
</property>
</widget>
</widget>
<resources/>
<connections/>
</ui>
| mit | Python | |
9013f072a8b82ab65ad2c599fe331f7835ebee47 | Test users app URL patterns | hackebrot/cookiecutter-django,topwebmaster/cookiecutter-django,trungdong/cookiecutter-django,thisjustin/cookiecutter-django,luzfcb/cookiecutter-django,webyneter/cookiecutter-django,aleprovencio/cookiecutter-django,hackebrot/cookiecutter-django,hairychris/cookiecutter-django,gappsexperts/cookiecutter-django,asyncee/cookiecutter-django,hairychris/cookiecutter-django,ad-m/cookiecutter-django,topwebmaster/cookiecutter-django,gappsexperts/cookiecutter-django,thisjustin/cookiecutter-django,gappsexperts/cookiecutter-django,asyncee/cookiecutter-django,hairychris/cookiecutter-django,webyneter/cookiecutter-django,ryankanno/cookiecutter-django,ddiazpinto/cookiecutter-django,schacki/cookiecutter-django,pydanny/cookiecutter-django,schacki/cookiecutter-django,asyncee/cookiecutter-django,thisjustin/cookiecutter-django,webspired/cookiecutter-django,kappataumu/cookiecutter-django,Parbhat/cookiecutter-django-foundation,mistalaba/cookiecutter-django,Parbhat/cookiecutter-django-foundation,trungdong/cookiecutter-django,webyneter/cookiecutter-django,bopo/cookiecutter-django,schacki/cookiecutter-django,webspired/cookiecutter-django,Parbhat/cookiecutter-django-foundation,schacki/cookiecutter-django,ad-m/cookiecutter-django,ddiazpinto/cookiecutter-django,gappsexperts/cookiecutter-django,kappataumu/cookiecutter-django,topwebmaster/cookiecutter-django,luzfcb/cookiecutter-django,ddiazpinto/cookiecutter-django,bopo/cookiecutter-django,asyncee/cookiecutter-django,ad-m/cookiecutter-django,luzfcb/cookiecutter-django,trungdong/cookiecutter-django,aleprovencio/cookiecutter-django,pydanny/cookiecutter-django,ryankanno/cookiecutter-django,hackebrot/cookiecutter-django,mistalaba/cookiecutter-django,pydanny/cookiecutter-django,ryankanno/cookiecutter-django,trungdong/cookiecutter-django,mistalaba/cookiecutter-django,hackebrot/cookiecutter-django,Parbhat/cookiecutter-django-foundation,mistalaba/cookiecutter-django,pydanny/cookiecutte
r-django,bopo/cookiecutter-django,webyneter/cookiecutter-django,topwebmaster/cookiecutter-django,webspired/cookiecutter-django,luzfcb/cookiecutter-django,aleprovencio/cookiecutter-django,hairychris/cookiecutter-django,ad-m/cookiecutter-django,ryankanno/cookiecutter-django,kappataumu/cookiecutter-django,aleprovencio/cookiecutter-django,bopo/cookiecutter-django,ddiazpinto/cookiecutter-django,kappataumu/cookiecutter-django,thisjustin/cookiecutter-django,webspired/cookiecutter-django | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/users/tests/test_urls.py | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/users/tests/test_urls.py | from django.core.urlresolvers import reverse, resolve
from test_plus.test import TestCase
class TestUserURLs(TestCase):
    """Check that users app URL names and paths map to one another."""

    def setUp(self):
        self.user = self.make_user()

    def _assert_reverses(self, name, expected_path, **kwargs):
        # Reversing `name` (with optional URL kwargs) must yield `expected_path`.
        self.assertEqual(reverse(name, kwargs=kwargs or None), expected_path)

    def _assert_resolves(self, path, expected_name):
        # Resolving `path` must map back to the named view `expected_name`.
        self.assertEqual(resolve(path).view_name, expected_name)

    def test_list_reverse(self):
        """users:list should reverse to /users/."""
        self._assert_reverses('users:list', '/users/')

    def test_list_resolve(self):
        """/users/ should resolve to users:list."""
        self._assert_resolves('/users/', 'users:list')

    def test_redirect_reverse(self):
        """users:redirect should reverse to /users/~redirect/."""
        self._assert_reverses('users:redirect', '/users/~redirect/')

    def test_redirect_resolve(self):
        """/users/~redirect/ should resolve to users:redirect."""
        self._assert_resolves('/users/~redirect/', 'users:redirect')

    def test_detail_reverse(self):
        """users:detail should reverse to /users/testuser/."""
        self._assert_reverses('users:detail', '/users/testuser/',
                              username='testuser')

    def test_detail_resolve(self):
        """/users/testuser/ should resolve to users:detail."""
        self._assert_resolves('/users/testuser/', 'users:detail')

    def test_update_reverse(self):
        """users:update should reverse to /users/~update/."""
        self._assert_reverses('users:update', '/users/~update/')

    def test_update_resolve(self):
        """/users/~update/ should resolve to users:update."""
        self._assert_resolves('/users/~update/', 'users:update')
| bsd-3-clause | Python | |
9a0806560ee3cc5d6baa2c17c7b8c9bc0dfbffca | add script to identify and count all the unique words in our titles | paregorios/awol-utils | ParseAndCountTitleWords.py | ParseAndCountTitleWords.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
from collections import Counter
import glob
import logging as l
import os
import re
import sys
import traceback
import xml.etree.ElementTree as xmlParser
import codecs
SCRIPT_DESC = 'parse and count all unique words in AWOL blog titles'
DEFAULTINPATH='D:\\GitHub\\awol-backup\\'
DEFAULTOUTPATH='.\\parse.log'
def main():
    """
    Parse the <title> of each *-atom.xml dump file under args.inpath,
    split the titles into words, and append "word,count" lines (one per
    unique lower-cased word) to the output file.
    """
    global args
    # Use a local name for the output path: assigning to DEFAULTOUTPATH here
    # made it function-local everywhere, so an empty --output option crashed
    # with UnboundLocalError before this fix.
    outpath = args.outpath if args.outpath else DEFAULTOUTPATH
    if args.verbose:
        l.basicConfig(level=l.DEBUG)
    else:
        l.basicConfig(level=l.INFO)
    rxpunct = re.compile(u'[:,\'\"\(\)\{\}\[\]\.\\,\-!’–&/$+;=\?]+')
    # The original used u'\p{Separator}+', but the re module has no \p
    # escapes (it matched a literal 'p{Separator}' on Python 2 and is an
    # error on Python 3.7+); \s with re.UNICODE covers Unicode separators.
    rxspace = re.compile(u'\\s+', re.UNICODE)
    titleWords = []
    wordListFile = codecs.open(outpath, 'a', 'utf-8')
    # Gets the list of all the *-atom.xmls from the awol-backup directory
    xmlList = glob.glob(args.inpath + '*-atom.xml')
    for item in xmlList:
        l.debug("trying to parse %s" % item)
        doc = xmlParser.parse(item)
        root = doc.getroot()
        try:
            l.debug("trying to get text content of the 'title' element")
            titleText = unicode(root.find("{http://www.w3.org/2005/Atom}title").text)
        except UnicodeEncodeError as e:
            l.debug("******ERROR******")
            l.debug(e)
            l.debug("*****************")
        else:
            # strip punctuation, regularize whitespace, split into words
            l.debug('raw: %s' % titleText)
            titleText = rxpunct.sub(u'', titleText.strip())
            l.debug('depunct: %s' % titleText)
            titleWords.extend(rxspace.split(titleText))
    titleWords = [w.lower() for w in titleWords]
    c = Counter(titleWords)
    for uniqueWord in sorted(c.keys()):
        wordListFile.write(u"%s,%s\n" % (unicode(uniqueWord), c[uniqueWord]))
    # Close explicitly: the file was previously left open for the life of
    # the process.
    wordListFile.close()
if __name__ == "__main__":
try:
parser = argparse.ArgumentParser(description=SCRIPT_DESC, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument ("-v", "--verbose", action="store_true", help="verbose output (i.e., debug logging")
parser.add_argument ("-i", "--input",dest="inpath",type=str,default=DEFAULTINPATH,help='input path (directory)')
parser.add_argument ("-o", "--output",dest="outpath",type=str,default=DEFAULTOUTPATH,help='output path (directory)')
args = parser.parse_args()
main()
sys.exit(0)
except KeyboardInterrupt, e: # Ctrl-C
raise e
except SystemExit, e: # sys.exit()
raise e
except Exception, e:
l.debug("ERROR, UNEXPECTED EXCEPTION")
l.debug(e)
traceback.print_exc()
os._exit(1)
| bsd-3-clause | Python | |
c960f88cc1f8c15bf20a426d8e1e5f2b34799a13 | Create calc.py | WebShark025/TheZigZagProject,WebShark025/TheZigZagProject | plugins/calc.py | plugins/calc.py | @bot.message_handler(commands=['calc'])
def clac(m):
userid = m.from_user.id
banlist = redisserver.sismember('zigzag_banlist', '{}'.format(userid))
if banlist:
return
if len(m.text.split()) < 2:
bot.reply_to(m, "How can i calculate null?")
return
text = m.text.replace("/calc ","")
res = urllib.urlopen("http://api.mathjs.org/v1/?expr={}".format(text).replace("+","%2B")).read()
bot.send_message(m.chat.id, "_{}_ = `{}`".format(text,res), parse_mode="Markdown", disable_web_page_preview=True)
| mit | Python | |
96e1b85370f363c9ddda01c6052a94d6a78e2528 | Add VoltageTrace class in traces module | bryanwweber/UConnRCMPy | uconnrcmpy/traces.py | uconnrcmpy/traces.py | """All of the kinds of traces in UConnRCMPy"""
# System imports
# Third-party imports
import numpy as np
from scipy import signal as sig
# Local imports
class VoltageTrace(object):
"""Class for the voltage trace of an experiment"""
def __init__(self, file_path):
self.signal = np.genfromtxt(str(self.file_path))
self.time = self.signal[:, 0]
"""The time loaded from the signal trace."""
self.frequency = np.rint(1/self.time[1])
"""The sampling frequency of the pressure trace."""
self.filtered_voltage = self.filtering(self.signal[:, 1])
self.smoothed_voltage = self.smoothing(self.filtered_voltage)
def smoothing(self, data, span=21):
"""
Smooth the input `data` using a moving average of width `span`.
"""
window = np.ones(span)/span
output = sig.fftconvolve(data, window, mode='same')
midpoint = (span - 1)/2
output[:midpoint] = output[midpoint]
return output
def filtering(self, data, cutoff_hz=10000):
"""
Filter the input `data` using a low-pass filter with cutoff at 10 kHz
"""
nyquist_freq = self.frequency/2.0
n_taps = 2**14
low_pass_filter = sig.firwin(
n_taps,
cutoff_hz/nyquist_freq,
window='blackman',
)
return sig.fftconvolve(data, low_pass_filter, mode='same')
| bsd-3-clause | Python | |
0b5dfb1c421998884afd59a37da3c8eaef389471 | Create makedict.py | oguzdag/oguzdag.github.io,oguzdag/oguzdag.github.io | projects/ansible/makedict.py | projects/ansible/makedict.py | from collections import defaultdict
class FilterModule(object):
def filters(self):
return {
'createmylist': self.createlistfunction
}
def createlistfunction(self, instancelist, clustervars, hostvars_tmp,playhost_tmp, startindex, endindex):
retval = []
clusterinfo = defaultdict(list)
myserverlist = []
for i in range(int(startindex),int(endindex)+1):
clusterinfo = {
# "index": instance["index"],
"name": clustervars["startstr"]+clustervars["envname"]+clustervars["middlestr"]+str(i)+clustervars["endstr"],
"multicastEnabled": "false",
"servers":[]
}
for item in playhost_tmp:
serverid=0
for item2 in hostvars_tmp[item]['muleserverids']:
if item2['index'] == i :
serverid= item2['serverId']
serverIp=hostvars_tmp[item]['serverIp']
serverlist={ "serverId": serverid, "serverIp": serverIp }
clusterinfo["servers"].append(serverlist)
#clusterinfo["servers"].append(myserverlist)
myserverlist = []
retval.append(clusterinfo)
return retval
| mit | Python | |
d9f1b68ab9e3090289e2720937c051eb020dcc7c | Add new simulation. | ssh0/growing-string,ssh0/growing-string | triangular_lattice/correlation.py | triangular_lattice/correlation.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# written by Shotaro Fujimoto
# 2016-10-07
"""頂点間距離とベクトルの向きの相関
"""
from growing_string import Main
import matplotlib.pyplot as plt
import numpy as np
import random
from tqdm import tqdm
import time
def choose_indexes(_list, num, L):
"""Choose the index pairs whose width is fixed. """
N = len(_list)
if N - (2 * L) < num:
raise StopIteration('list index is smaller than expected (%d), '
% (num + 2 * L)
+ 'given (%d).' % N
)
return sorted(random.sample(_list[L:N - L], num))
def _to_radian(i, j):
k = (i + 6 - j) % 6
if k == 0: return 0.
if k == 1: return np.pi / 3
if k == 2: return 2 * np.pi / 3
if k == 3: return np.pi
if k == 4: return 4 * np.pi / 3
if k == 5: return 5 * np.pi / 3
to_radian = np.vectorize(_to_radian)
def calc_order_param(theta):
itheta = np.array([1j*t for t in theta])
R = abs(np.sum(np.exp(itheta))/float(len(theta)))
return R
def get_correlation(beta, num_of_strings, L, frames, num_of_pairs=300):
len_vec = frames + 2
Lp = range(2, (len_vec - num_of_pairs) / 2)
_Cs = []
for s in tqdm(range(num_of_strings)):
_Cs.append(get_correlation_for_each_string(Lp, L, frames, num_of_pairs))
Cs = np.average(np.array(_Cs), axis=0)
return Lp, Cs
def get_correlation_for_each_string(Lp, L, frames, num_of_pairs):
main = Main(Lx=L, Ly=L, plot=False, frames=frames, beta=beta,
strings=[{'id': 1, 'x': L/4, 'y': L/2, 'vec': [0, 4]}])
len_vec = len(main.strings[0].vec)
# # 1. 同string内の2点を選ぶ
# # (1.A) ランダム
# random_i = np.random.randint(len_vec, size=num_of_pairs)
# random_j = np.random.randint(len_vec, size=num_of_pairs)
# (1.B) 等パス長となる2点を同数ずつ抽出
random_i, random_j = [], []
for lp in Lp:
random_i.append(np.array(choose_indexes(range(len_vec),
num_of_pairs, lp)))
random_j.append(random_i[-1] + lp)
random_i = np.array(random_i).flatten()
random_j = np.array(random_j).flatten()
# 2. 各点でのベクトルの相関を計算
vec0 = np.array(main.strings[0].vec)[random_i].reshape((len(Lp), num_of_pairs))
vec1 = np.array(main.strings[0].vec)[random_j].reshape((len(Lp), num_of_pairs))
# ペア間の角度
rad = to_radian(vec0, vec1)
# 角度の揃い具合を計算
Cs = [calc_order_param(rad[i]) for i in range(len(Lp))]
return Cs
if __name__ == '__main__':
start_time = time.strftime("%y%m%d_%H%M%S")
num_of_strings = 30
# betas = [0., 5., 10., 15., 20.]
betas = [float(i) for i in range(21)]
# betas = [20.]
frames = 1000
L = 1000
num_of_pairs = 300
fig, ax = plt.subplots()
for beta in betas:
print "beta = %2.2f" % beta
Lp, Cs = get_correlation(beta, num_of_strings, L, frames, num_of_pairs)
ax.plot(Lp, Cs, '.', label=r'$\beta = %2.2f$' % beta)
# save the data
result_data_path = "results/data/correlation/beta=%2.2f" % beta
result_data_path += "_" + start_time
result_data_path += ".npz"
np.savez(result_data_path,
num_of_strings=num_of_strings,
beta=beta,
L=L,
frames=frames,
Lp=Lp,
Cs=Cs)
ax.set_xlabel('Path length')
ax.set_ylabel('Correlation of the vectors')
ax.set_title('Correlation of the vectors')
ax.legend(loc='best')
result_image_path = "results/img/correlation/strings=%d" % num_of_strings
result_image_path += "_" + start_time
result_image_path += ".png"
plt.savefig(result_image_path)
plt.close()
print "[saved] " + result_image_path
# plt.show()
| mit | Python | |
9d48ac6d4d9f48d7b51e67bfe351c20ba0d259c7 | add class to show all item_req, inherits from ItemsListWidget | develersrl/rooms,develersrl/rooms,develersrl/rooms,develersrl/rooms,develersrl/rooms,develersrl/rooms,develersrl/rooms | trunk/editor/itemreqlistwidget.py | trunk/editor/itemreqlistwidget.py | #!/usr/bin/env python
from itemslistwidget import ItemsListWidget
class ItemReqListWidget(ItemsListWidget):
    """
    Widget used to display the list of item_req entries.
    (Translated from the original Italian docstring.)
    """
    def changeSelection(self, row, column):
        """Normalize the table selection after a click on (row, column)."""
        selection = self.table.selectedRanges()
        for sel in selection:
            if (sel.bottomRow() == row and sel.columnCount() == 2):
                # A two-column range ends on this row: drop it from the list.
                index = selection.index(sel)
                selection.pop(index)
                # NOTE(review): QTableWidget.setRangeSelected normally takes
                # (QTableWidgetSelectionRange, bool); passing a Python list
                # looks wrong here -- confirm against the Qt binding in use.
                self.table.setRangeSelected(selection)
                break
            elif sel.bottomRow() == row and sel.columnCount() == 1 and\
                 sel.leftColumn() == column:
                # Single-cell range on the clicked cell: promote it to a
                # whole-row selection.
                self.table.selectRow(row)
                break
| mit | Python | |
4c2125a440d06963aa5ea52d805bd021efcafb17 | Add : dash class to be the interface between the Flask views and the localization/parsing APIs. | nocternology/fail2dash,nocternology/fail2dash | core/dash.py | core/dash.py | from parser import Parser
from geoloc import Geoloc
class Dasher(object):
    """
    Dasher class definition.
    Simply the middleware between the raw datalog and the view functions.
    Takes the raw lists of usefull info and does all the counting and preparing
    for view by Flask.
    """
    def __init__(self, config):
        """
        Inits the object by registering the configuration object
        """
        self.config = config
        # Parser extracts structured entries from the raw log.
        self.parser = Parser(self.config)
        # Geoloc resolves IP addresses to localization data.
        self.geoloc = Geoloc(self.config)
        # Parsed entries; each is expected to carry an "ip" key (see
        # initialCheck below) -- TODO confirm against Parser.getData().
        self.data = self.parser.getData()
    def initialCheck(self):
        """
        Once the class is instanciated, this method will do the basic checks
        from the logs and call different other functions.
        """
        # NOTE(review): work in progress -- `bans`/`ban` are created but never
        # populated or returned, and the `geoloc` result is unused below.
        bans = []
        for entry in self.data:
            ban = {}
            # Step 1 : get the geoloc data corresponding to the IP address
            geoloc = self.geoloc.get(entry["ip"])
| mit | Python | |
8b0b42dcf0402fff4dee1b7a452977e25776a514 | Create Search_for_a_range.py | UmassJin/Leetcode | Array/Search_for_a_range.py | Array/Search_for_a_range.py | Given a sorted array of integers, find the starting and ending position of a given target value.
Your algorithm's runtime complexity must be in the order of O(log n).
If the target is not found in the array, return [-1, -1].
For example,
Given [5, 7, 7, 8, 8, 10] and target value 8,
return [3, 4].
class Solution:
    # @param A, a list of integers
    # @param target, an integer to be searched
    # @return a list of length 2, [index1, index2]
    def searchRange(self, A, target):
        """Return [first, last] indices of target in sorted A, or [-1, -1].

        Runs in O(log n) using two binary searches.  The original widened
        linearly around a single binary-search hit, which degraded to O(n)
        when the target filled most of the array (and used py2-only `/`).
        """
        first = self._lower_bound(A, target)
        if first == len(A) or A[first] != target:
            return [-1, -1]
        # Last occurrence sits just before the first element > target.
        last = self._upper_bound(A, target) - 1
        return [first, last]

    def _lower_bound(self, A, target):
        """Index of the first element >= target (len(A) if none)."""
        lo, hi = 0, len(A)
        while lo < hi:
            mid = (lo + hi) // 2
            if A[mid] < target:
                lo = mid + 1
            else:
                hi = mid
        return lo

    def _upper_bound(self, A, target):
        """Index of the first element > target (len(A) if none)."""
        lo, hi = 0, len(A)
        while lo < hi:
            mid = (lo + hi) // 2
            if A[mid] <= target:
                lo = mid + 1
            else:
                hi = mid
        return lo
| mit | Python | |
ff7755dbf7e5eee1dfaee60eb061479cca823cff | Create portscan.py | 7base/portscan | portscan.py | portscan.py | #!/usr/bin/python
import os
import sys
import socket
import string
import multiprocessing
from multiprocessing import Lock
def PORTscanner(IP, PORT, proc, lock):
    """Probe one TCP port on IP and record the result.

    Runs inside its own multiprocessing.Process.  Writes the (possibly
    empty) list of open ports to /tmp/port<IP>:<proc> for later inspection.
    `lock` is accepted but currently unused.
    """
    openPorts = []
    socket.setdefaulttimeout(2)
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # connect_ex returns 0 on success (port open) instead of raising.
    result = sock.connect_ex((IP, PORT))
    if result == 0:
        print "\033[1;32m[+] Port "+str(PORT)+": \t\t Open"
        openPorts.append(PORT)
    sock.close()
    filename = "/tmp/port"+str(IP)+":"+str(proc)
    fileHandle = open ( filename, 'w' )
    fileHandle.write (str(openPorts))
    fileHandle.close()
def main():
remoteServer = sys.argv[1]
IP = socket.gethostbyname(remoteServer)
l = Lock()
proclist = []
minimals = {}
maximals = {}
PORTs = [20, 21, 22, 23, 25, 53, 80, 110, 433, 587, 3389, 5222, 5223, 25565, 51413]
print "\033[1;34m[*] Starting portscan on "+str(IP)+"... please wait"
for n in range(14):
PORT = PORTs[n]
process = multiprocessing.Process(target=PORTscanner, args=[IP, PORT, n, l])
process.start()
proclist.append(process)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print "\033[1;31m\n[-] You pressed Ctrl+C\n\x1b[0m\r"
for process in proclist: # then kill them all off
process.terminate()
sys.exit()
print "\n\x1b[0m\r"
| apache-2.0 | Python | |
18facb30efeffb88e3b96d1d899f249bfd00f776 | Create problem4.py | ryanseys/project-euler,ryanseys/project-euler | problem4.py | problem4.py | # Pretty brute force-ish way of attacking this problem.
# Just wanted to keep it simple stupid, plus python can handle it ;)
# Answer: 906609
def problem4():
    """Largest palindrome that is a product of two 3-digit numbers (PE #4).

    Iterates factors downward with pruning, so no list of every palindrome
    is kept.  The original scanned range(999) (0..998), which both excluded
    999 and included 1- and 2-digit factors, contrary to the problem spec.
    """
    best = 0
    for i in range(999, 99, -1):
        if i * 999 <= best:
            break  # no remaining i can possibly beat the current best
        # Start j at i to avoid checking each (i, j) pair twice.
        for j in range(i, 99, -1):
            product = i * j
            if product <= best:
                break  # products only shrink as j decreases
            s = str(product)
            if s == s[::-1]:
                best = product
    return best
print problem4() # prints 906609
| mit | Python | |
e7685951e1d271b07df0e4a0681a2404806f4028 | Add (simple) test cases for Stock API | SchrodingersGat/InvenTree,SchrodingersGat/InvenTree,SchrodingersGat/InvenTree,inventree/InvenTree,SchrodingersGat/InvenTree,inventree/InvenTree,inventree/InvenTree,inventree/InvenTree | InvenTree/stock/test_api.py | InvenTree/stock/test_api.py | from rest_framework.test import APITestCase
from rest_framework import status
from django.urls import reverse
from django.contrib.auth import get_user_model
from .models import StockLocation, StockItem
class StockLocationTest(APITestCase):
    """
    Series of API tests for the StockLocation API
    """
    # URL for the StockLocation list/create endpoint.
    list_url = reverse('api-location-list')
    def setUp(self):
        # Create a user for auth
        User = get_user_model()
        User.objects.create_user('testuser', 'test@testing.com', 'password')
        self.client.login(username='testuser', password='password')
        # Add some stock locations
        StockLocation.objects.create(name='top', description='top category')
    def test_list(self):
        # Check that we can request the StockLocation list
        response = self.client.get(self.list_url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # At least the 'top' location created in setUp must be returned.
        self.assertGreaterEqual(len(response.data), 1)
    def test_add(self):
        # Check that we can add a new StockLocation
        data = {
            'parent': 1,
            'name': 'Location',
            'description': 'Another location for stock'
        }
        response = self.client.post(self.list_url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
class StockItemTest(APITestCase):
    """
    Series of API tests for the StockItem API
    """
    # URL for the StockItem list endpoint.
    list_url = reverse('api-stock-list')
    def detail_url(self, pk):
        """Return the detail URL for the StockItem with primary key `pk`."""
        # Fixed typo: 'revere' -> 'reverse'; the old name raised NameError
        # as soon as this helper was called.
        return reverse('api-stock-detail', kwargs={'pk': pk})
    def setUp(self):
        # Create a user for auth
        User = get_user_model()
        User.objects.create_user('testuser', 'test@testing.com', 'password')
        self.client.login(username='testuser', password='password')
        # Create some stock locations
        top = StockLocation.objects.create(name='A', description='top')
        StockLocation.objects.create(name='B', description='location b', parent=top)
        StockLocation.objects.create(name='C', description='location c', parent=top)
    def test_get_stock_list(self):
        # The stock list endpoint must be reachable by an authenticated user.
        response = self.client.get(self.list_url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
| mit | Python | |
a676dfce887129c196d94bb3289c7bab8081368b | Remove get_internal_type and let the parent class handle it. | apokinsocha/django-push-notifications,rmoorman/django-push-notifications,azevakin/django-push-notifications,leonmu/django-push-notifications,dilvane/django-push-notifications,Tictrac/django-push-notifications,Ubiwhere/django-push-notifications,nnseva/django-push-notifications,CustomerSupport/django-push-notifications,giserh/django-push-notifications,lukeburden/django-push-notifications,Adys/django-push-notifications,AndreasBackx/django-push-notifications,matthewh/django-push-notifications,1vank1n/django-push-notifications,vuchau/django-push-notifications,vikcena01/django-push-notification,shigmas/django-push-notifications,jamaalscarlett/django-push-notifications,gkirkpatrick/django-push-notifications,avichalp/django-push-notifications,ajatamayo/django-push-notifications,lneoe/django-push-notifications,hylje/django-push-notifications,Dubrzr/django-push-notifications,fsto/django-push-notifications,leonsas/django-push-notifications,cristiano2lopes/django-push-notifications,GaleDragon/django-push-notifications,freakboy3742/django-push-notifications,Ian-Foote/django-push-notifications,gio82/django-push-notifications,IvoPintodaSilva/django-push-notifications,jleclanche/django-push-notifications,rsalmaso/django-push-notifications | push_notifications/fields.py | push_notifications/fields.py | import re
import struct
from django import forms
from django.core.validators import RegexValidator
from django.db import models, connection
from django.utils.six import with_metaclass
from django.utils.translation import ugettext_lazy as _
__all__ = ["HexadecimalField", "HexIntegerField"]
hex_re = re.compile(r"^0x[0-9a-fA-F]+$")
class HexadecimalField(forms.CharField):
    """
    A form field that accepts only hexadecimal numbers
    """
    def __init__(self, *args, **kwargs):
        # Reject anything that is not "0x" followed by hex digits (hex_re).
        self.default_validators = [RegexValidator(hex_re, _("Enter a valid hexadecimal number"), "invalid")]
        super(HexadecimalField, self).__init__(*args, **kwargs)
class HexIntegerField(with_metaclass(models.SubfieldBase, models.BigIntegerField)):
    """
    This field stores a hexadecimal *string* of up to 64 bits as an unsigned integer
    on *all* backends including postgres.
    Reasoning: Postgres only supports signed bigints. Since we don't care about
    signedness, we store it as signed, and cast it to unsigned when we deal with
    the actual value (with struct)
    On sqlite and mysql, native unsigned bigint types are used. In all cases, the
    value we deal with in python is always in hex.
    """
    def db_type(self, connection):
        """Choose a native unsigned 64-bit column where the backend has one."""
        engine = connection.settings_dict["ENGINE"]
        if engine == "django.db.backends.mysql":
            return "bigint unsigned"
        elif engine == "django.db.backends.sqlite3":
            # Fixed backend name: Django's bundled sqlite backend is
            # "django.db.backends.sqlite3"; the old "...sqlite" string never
            # matched, so sqlite silently fell back to a signed bigint.
            return "UNSIGNED BIG INT"
        else:
            return super(HexIntegerField, self).db_type(connection)
    def get_prep_value(self, value):
        """Convert the hex string used python-side to an int for the DB."""
        # `value == ""` instead of `value is ""`: identity comparison with a
        # string literal only works via CPython interning and is unreliable.
        if value is None or value == "":
            return None
        value = int(value, 16)
        # on postgres only, interpret as signed
        if connection.settings_dict["ENGINE"] == "django.db.backends.postgresql_psycopg2":
            value = struct.unpack("q", struct.pack("Q", value))[0]
        return value
    def to_python(self, value):
        """Convert a DB integer back to the hex string used python-side."""
        if isinstance(value, str):
            return value
        if value is None:
            return ""
        # on postgres only, re-interpret from signed to unsigned
        if connection.settings_dict["ENGINE"] == "django.db.backends.postgresql_psycopg2":
            value = hex(struct.unpack("Q", struct.pack("q", value))[0])
        return value
    def formfield(self, **kwargs):
        """Use the hex-validating form field by default."""
        defaults = {"form_class": HexadecimalField}
        defaults.update(kwargs)
        # yes, that super call is right
        return super(models.IntegerField, self).formfield(**defaults)
try:
    from south.modelsinspector import add_introspection_rules
    # Raw string: "\." in a plain literal is an invalid escape sequence
    # (DeprecationWarning / SyntaxWarning on modern Pythons).
    add_introspection_rules([], [r"^push_notifications\.fields\.HexIntegerField"])
except ImportError:
    # South is optional; skip its migration introspection rules without it.
    pass
| import re
import struct
from django import forms
from django.core.validators import RegexValidator
from django.db import models, connection
from django.utils.six import with_metaclass
from django.utils.translation import ugettext_lazy as _
__all__ = ["HexadecimalField", "HexIntegerField"]
hex_re = re.compile(r"^0x[0-9a-fA-F]+$")
class HexadecimalField(forms.CharField):
    """
    A form field that accepts only hexadecimal numbers
    """
    def __init__(self, *args, **kwargs):
        # Reject anything that is not "0x" followed by hex digits (hex_re).
        self.default_validators = [RegexValidator(hex_re, _("Enter a valid hexadecimal number"), "invalid")]
        super(HexadecimalField, self).__init__(*args, **kwargs)
class HexIntegerField(with_metaclass(models.SubfieldBase, models.BigIntegerField)):
    """
    This field stores a hexadecimal *string* of up to 64 bits as an unsigned integer
    on *all* backends including postgres.
    Reasoning: Postgres only supports signed bigints. Since we don't care about
    signedness, we store it as signed, and cast it to unsigned when we deal with
    the actual value (with struct)
    On sqlite and mysql, native unsigned bigint types are used. In all cases, the
    value we deal with in python is always in hex.
    """
    def db_type(self, connection):
        # Choose a native unsigned 64-bit column where the backend has one.
        engine = connection.settings_dict["ENGINE"]
        if engine == "django.db.backends.mysql":
            return "bigint unsigned"
        elif engine == "django.db.backends.sqlite":
            # NOTE(review): Django's bundled sqlite backend is named
            # "django.db.backends.sqlite3", so this branch never matches and
            # sqlite falls through to the default signed bigint -- confirm.
            return "UNSIGNED BIG INT"
        else:
            return super(HexIntegerField, self).db_type(connection)
    def get_internal_type(self):
        # Report the subclass name so Django treats this as its own type.
        return self.__class__.__name__
    def get_prep_value(self, value):
        # NOTE(review): `value is ""` is an identity comparison against a
        # string literal; it only works by accident of interning. Should be ==.
        if value is None or value is "":
            return None
        # Stored python-side as a hex string; persist as an integer.
        value = int(value, 16)
        # on postgres only, interpret as signed
        if connection.settings_dict["ENGINE"] == "django.db.backends.postgresql_psycopg2":
            value = struct.unpack("q", struct.pack("Q", value))[0]
        return value
    def to_python(self, value):
        # Already a hex string (e.g. from a form): pass through unchanged.
        if isinstance(value, str):
            return value
        if value is None:
            return ""
        # on postgres only, re-interpret from signed to unsigned
        if connection.settings_dict["ENGINE"] == "django.db.backends.postgresql_psycopg2":
            value = hex(struct.unpack("Q", struct.pack("q", value))[0])
        return value
    def formfield(self, **kwargs):
        defaults = {"form_class": HexadecimalField}
        defaults.update(kwargs)
        # yes, that super call is right
        return super(models.IntegerField, self).formfield(**defaults)
try:
    from south.modelsinspector import add_introspection_rules
    # Raw string: "\." in a plain literal is an invalid escape sequence
    # (DeprecationWarning / SyntaxWarning on modern Pythons).
    add_introspection_rules([], [r"^push_notifications\.fields\.HexIntegerField"])
except ImportError:
    # South is optional; skip its migration introspection rules without it.
    pass
| mit | Python |
4c5f51e49ed5bfa12e0c784457c46f8d0e1cb041 | Update regression test | recognai/spaCy,explosion/spaCy,oroszgy/spaCy.hu,honnibal/spaCy,recognai/spaCy,explosion/spaCy,oroszgy/spaCy.hu,honnibal/spaCy,raphael0202/spaCy,recognai/spaCy,aikramer2/spaCy,spacy-io/spaCy,spacy-io/spaCy,Gregory-Howard/spaCy,spacy-io/spaCy,explosion/spaCy,spacy-io/spaCy,Gregory-Howard/spaCy,aikramer2/spaCy,recognai/spaCy,raphael0202/spaCy,oroszgy/spaCy.hu,oroszgy/spaCy.hu,raphael0202/spaCy,honnibal/spaCy,aikramer2/spaCy,spacy-io/spaCy,aikramer2/spaCy,Gregory-Howard/spaCy,oroszgy/spaCy.hu,explosion/spaCy,Gregory-Howard/spaCy,raphael0202/spaCy,explosion/spaCy,spacy-io/spaCy,recognai/spaCy,Gregory-Howard/spaCy,oroszgy/spaCy.hu,honnibal/spaCy,aikramer2/spaCy,raphael0202/spaCy,recognai/spaCy,raphael0202/spaCy,Gregory-Howard/spaCy,aikramer2/spaCy,explosion/spaCy | spacy/tests/regression/test_issue636.py | spacy/tests/regression/test_issue636.py | # coding: utf8
from __future__ import unicode_literals
from ...tokens.doc import Doc
import pytest
@pytest.mark.xfail
@pytest.mark.models
@pytest.mark.parametrize('text', ["I cant do this."])
def test_issue636(EN, text):
    """Test that to_bytes and from_bytes don't change the token lemma."""
    # Parse, then round-trip the doc through its byte serialization.
    doc1 = EN(text)
    doc2 = Doc(EN.vocab)
    doc2.from_bytes(doc1.to_bytes())
    # The lemma of the third token must survive (de)serialization unchanged.
    assert doc1[2].lemma_ == doc2[2].lemma_
| mit | Python | |
06877818a354face257852bfc03eef8d70cd0b0a | add VIN query example | commaai/panda,commaai/panda,commaai/panda,commaai/panda | examples/query_vin.py | examples/query_vin.py | #!/usr/bin/env python
def msg(x):
  """Wrap payload `x` (at most 7 bytes) as an 8-byte ISO-TP single frame.

  Byte 0 carries the payload length; the remainder is NUL-padded.
  """
  print "S:",x.encode("hex")
  if len(x) <= 7:
    ret = chr(len(x)) + x
  else:
    # Longer payloads must go through the multi-frame path in isotp_send.
    assert False
  return ret.ljust(8, "\x00")
def isotp_send(panda, x, addr, bus=0):
  """Send payload `x` to CAN arbitration id `addr` using ISO-TP framing.

  Payloads of up to 7 bytes go out as a single frame; longer payloads are
  split into a first frame plus consecutive frames.
  """
  if len(x) <= 7:
    panda.can_send(addr, msg(x), bus)
  else:
    # First frame: 0x1 nibble + 12-bit total length + first 6 payload bytes.
    ss = chr(0x10 + (len(x)>>8)) + chr(len(x)&0xFF) + x[0:6]
    x = x[6:]
    idx = 1
    sends = []
    while len(x) > 0:
      # Consecutive frames: 0x2 nibble + 4-bit rolling index + 7 bytes.
      sends.append(((chr(0x20 + (idx&0xF)) + x[0:7]).ljust(8, "\x00")))
      x = x[7:]
      idx += 1
    # actually send
    panda.can_send(addr, ss, bus)
    # NOTE(review): recv() below is recv(panda, cnt, addr, nbus); this
    # one-argument call would raise TypeError if a multi-frame send were
    # attempted (the 2-byte VIN request never reaches this path).  Likely
    # intended: recv(panda, 1, <response addr>, bus) to wait for flow control.
    rr = recv(1)[0]
    panda.can_send_many([(addr, None, s, 0) for s in sends])
kmsgs = []  # buffered CAN frames not yet consumed by recv()
def recv(panda, cnt, addr, nbus):
  """Block until `cnt` CAN frames from id `addr` on bus `nbus` arrive.

  Returns their payloads as str.  Frames are pulled from the panda into the
  module-level `kmsgs` buffer between polls.
  """
  global kmsgs
  ret = []
  while len(ret) < cnt:
    kmsgs += panda.can_recv()
    nmsgs = []
    for ids, ts, dat, bus in kmsgs:
      if ids == addr and bus == nbus and len(ret) < cnt:
        ret.append(dat)
      else:
        # NOTE(review): non-matching frames are dropped -- nothing is ever
        # appended to nmsgs, so `kmsgs = nmsgs` clears the buffer each pass.
        # Presumably the intent was nmsgs.append(...) to retain unconsumed
        # frames; confirm before relying on multi-id traffic.
        pass
    kmsgs = nmsgs
  return map(str, ret)
def isotp_recv(panda, addr, bus=0):
  """Receive one ISO-TP payload from CAN id `addr` and return it as str.

  Handles single frames (0x0 nibble) and multi-frame transfers (first
  frame 0x1 nibble followed by consecutive frames), replying with a
  flow-control CONTINUE frame on addr-8 in the multi-frame case.
  """
  msg = recv(panda, 1, addr, bus)[0]
  if ord(msg[0])&0xf0 == 0x10:
    # first frame: 12-bit total length, then the first 6 payload bytes
    tlen = ((ord(msg[0]) & 0xf) << 8) | ord(msg[1])
    dat = msg[2:]
    # 0 block size? -- flow control: continue, no block limit
    CONTINUE = "\x30" + "\x00"*7
    panda.can_send(addr-8, CONTINUE, bus)
    idx = 1
    # NOTE(review): each consecutive frame carries 7 payload bytes, so the
    # count arguably should be ceil(remaining/7); /8 happens to work for the
    # 20-byte VIN reply but may under-receive other lengths -- confirm.
    for mm in recv(panda, (tlen-len(dat) + 7)/8, addr, bus):
      # 0x2 nibble + 4-bit rolling index (not masked here, so transfers of
      # more than 15 consecutive frames would trip this assert)
      assert ord(mm[0]) == (0x20 | idx)
      dat += mm[1:]
      idx += 1
  elif ord(msg[0])&0xf0 == 0x00:
    # single frame: low nibble is the payload length
    tlen = ord(msg[0]) & 0xf
    dat = msg[1:]
  else:
    assert False
  dat = dat[0:tlen]
  print "R:",dat.encode("hex")
  return dat
if __name__ == "__main__":
  from panda import Panda
  print "getting VIN"
  panda = Panda()
  panda.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
  panda.can_clear(0)
  # OBD-II mode 0x09 PID 0x02 (request VIN) on the broadcast id 0x7df;
  # the ECU answers on 0x7e8.
  ret = isotp_send(panda, "\x09\x02", 0x7df)
  print "VIN: %s" % isotp_recv(panda, 0x7e8)
| mit | Python | |
5c142d7e7a311013dd940a6d6900b5d9984dc0fe | Create dynamic image & draw some text on it | symisc/pixlab,symisc/pixlab,symisc/pixlab | python/dynamic_image_meme.py | python/dynamic_image_meme.py | import requests
import json
# Dynamically create a 300x300 PNG image with a yellow background and draw some text on the center of it later.
# Refer to https://pixlab.io/#/cmd?id=newimage && https://pixlab.io/#/cmd?id=drawtext for additional information.
# Step 1: ask PixLab to allocate a blank 300x300 yellow image.
req = requests.get('https://api.pixlab.io/newimage',params={
    'key':'My_Pix_Key',
    "width":300,
    "height":300,
    "color":"yellow"
})
reply = req.json()
if reply['status'] != 200:
    print (reply['error'])
    # NOTE(review): exit() comes from the site module; sys.exit() is the
    # reliable way to abort a script.
    exit();
# Link to the new image
img = reply['link'];
# Step 2: draw uppercase black text centered on the newly created image.
req = requests.get('https://api.pixlab.io/drawtext',params={
    'img':img, #The newly created image
    'key':'My_Pix_Key',
    "cap":True, #Uppercase
    "color":"black", #Text color
    "font":"wolf",
    "center":"bonjour"
})
reply = req.json()
if reply['status'] != 200:
    print (reply['error'])
else:
    print ("Pic location: "+ reply['link'])
| bsd-2-clause | Python | |
88877163201ce32d28633b833e1ec17cd3429650 | Add script for cleaning up old SMS/MMS text messages | bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile | python/misc/clean-sms-mms.py | python/misc/clean-sms-mms.py | #!/usr/bin/env python3
''' Deletes old messages from a backup file created by Titanium Backup Pro
'''
import datetime
import lxml.etree
import shutil
import sys
MAXIMUM_MESSAGE_AGE_IN_DAYS = 365

if len(sys.argv) < 2:
    sys.exit('USAGE: %s /path/to/com.keramidas.virtual.XML_MESSAGES-XXXXXXXX-XXXXXX.xml' % (sys.argv[0]))

infile_name = sys.argv[1]

# Create a backup copy since we'll modify the original
outfile_name = infile_name + '.bak'
shutil.copy2(infile_name, outfile_name)

# Remove any SMS/MMS messages older than MAXIMUM_MESSAGE_AGE_IN_DAYS
root = lxml.etree.parse(infile_name)

NS = '{http://www.titaniumtrack.com/ns/titanium-backup/messages}'
cutoff = datetime.timedelta(days=MAXIMUM_MESSAGE_AGE_IN_DAYS)
now = datetime.datetime.now()

# Materialize the iterator first: removing nodes from the tree while
# iterating it live can silently skip the siblings of a removed element.
for element in list(root.iter()):
    if element.tag == NS + 'sms' or element.tag == NS + 'mms':
        message_date = datetime.datetime.strptime(element.get('date'), '%Y-%m-%dT%H:%M:%S.%fZ')
        if now - message_date > cutoff:
            element.getparent().remove(element)

with open(infile_name, 'wb') as infile:
    infile.write(lxml.etree.tostring(root, pretty_print=True, xml_declaration=True))
| mit | Python | |
7977b18aa4028e83515076bacb1260b61f0a5192 | add collector | yfsuse/Necromancer | yeahmobi/collector.py | yeahmobi/collector.py | #! /usr/bin/env python
import time
import thread
import os
import urllib2
import urllib
import time
import sys
from random import choice
from string import letters,digits
count = 0  # global request counter shared by the mock worker threads

def mutiChoice(maxCount):
    """Return a random string of `maxCount` alphanumeric characters."""
    selectLetters = letters + digits
    # join() instead of repeated string concatenation: the old `+=` loop
    # was quadratic in maxCount.
    return ''.join(choice(selectLetters) for _ in range(maxCount))
def genTransactionId():
    """Build a UUID-shaped random id: 8-4-4-4-12 alphanumeric groups."""
    return '-'.join(mutiChoice(width) for width in (8, 4, 4, 4, 12))
def mockYeahmobi(sleeptime):
    """Endlessly POST a randomized Yeahmobi-style event to the collector.

    Runs in its own thread; sleeps `sleeptime` seconds between requests and
    bumps the shared global `count` after each POST (no lock -- fine for a
    load generator, but the counter may under-count across threads).
    """
    ISOTIMEFORMAT='%Y-%m-%d %X'
    global count
    while True:
        print '>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>', count
        # Randomized payload mimicking a Yeahmobi click/conversion record.
        data = {'collector_param':{"aff_id":"19900305"+str(choice(range(1, 100000))),"aff_manager":str(choice(range(1, 100000))),"aff_sub1":"affsub","aff_sub2":"affsub2","aff_sub3":"affsub3","aff_sub4":"affsub4","aff_sub5":"affsub5","aff_sub6":"affsub6","aff_sub7":"affsub7","aff_sub8":"affsub8","adv_id":str(choice(range(1, 1000000))),"adv_manager":"0","adv_sub1":"affsub","adv_sub2":"advsub2","adv_sub3":"advsub3","adv_sub4":"advsub4","adv_sub5":"advsub5","adv_sub6":"advsub6","adv_sub7":"advsub7","adv_sub8":"advsub8","offer_id":str(choice(range(1, 100000))),"currency":"GBP","rpa":"20.0","cpa":"15.0","click_ip":"54.86.55.142","conv_ip":"54.86.55.142","transaction_id":genTransactionId(),"click_time":time.strftime(ISOTIMEFORMAT, time.localtime()),"conv_time":time.strftime(ISOTIMEFORMAT, time.localtime()),"user_agent":"Mozilla/5.0+(iPhone;+U;+CPU+iPhone+OS+4_3_2+like+Mac+OS+X;+zh-cn)+AppleWebKit/533.17.9+(KHTML+like+Gecko)+Version/5.0.2+Mobile/8H7+Safari/6533.18.5","browser":"1","device_model":"1","os_ver":"0","country":"ZZ","log_type":choice(("1", "0")),"visitor_id":"3274c4c8-0f0c-4517-8956-77eef6a21d83","forward_ips":"127.0.0.1","state":"-1","city":"-1","isp":"-1","mobile_brand":"-1","platform_id":"","screen_width":"320","screen_height":"480","conversions":"","track_type":"0","session_id":"e97f1f87-7a08-4b0a-8465-02bf53c5685f","visitor_node_id":"ubuntu-template","expiration_date":"2014-12-05","is_unique_click":"1","gcid":"","gcname":"","browser_name":"Mobile+Safari","device_brand_name":"Apple","device_brand":"1","device_model_name":"iPhone","device_type_name":"Mobile","device_type":"1","platform_name":"iOS","os_ver_name":"4.3.2","ref_conv_track":"http://www.65536_conv_07081624.com","referer":"http://www.65536_click_1913.com"},'platformName':'yfnormalpf'}
        postdata = urllib.urlencode(data)
        rsp = urllib2.build_opener().open(urllib2.Request('http://172.20.0.69:8080/collector/collector?', postdata)).read()
        time.sleep(sleeptime)
        count += 1
    # NOTE(review): unreachable -- the `while True` loop above never exits.
    thread.exit_thread()
def mockTradingdesk(sleeptime):
    """Endlessly POST a fixed tradingdesk-style event to the collector.

    Runs in its own thread; sleeps `sleeptime` seconds between requests and
    bumps the shared global `count` after each POST.
    """
    ISOTIMEFORMAT='%Y-%m-%d %X'
    global count
    while True:
        print '>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>', count
        # Static numbered payload mimicking a tradingdesk conversion record.
        data = {'collector_param':{"time_stamp":time.strftime(ISOTIMEFORMAT, time.localtime()),"click_id":"td_test","campaign_id":"3","offer_id":"4","ref_site":"5","site":"6","click_time":"7","cost_per_click":"8","payout":"9","real_ip":"10","proxy_ip":"11","device_id":"12","os_id":"13","carrier_id":"14","mobile_brand_id":"15","screen_h":"16","screen_w":"17","screen_id":"18","city_id":"19","brand_id":"20","model_id":"21","country_id":"22","state_id":"23","conversion_time":"24","event":"25","sub1":"26","sub2":"27","sub3":"28","sub4":"29","sub5":"30","sub6":"31","sub7":"32","sub8":"33","click":"34","lp_click":"35","conversion":"36","sub_campaign_id":"37"},'platformName':'hbtradingdesk01'}
        postdata = urllib.urlencode(data)
        rsp = urllib2.build_opener().open(urllib2.Request('http://172.20.0.69:8080/collector/collector?', postdata)).read()
        time.sleep(sleeptime)
        count += 1
    # NOTE(review): unreachable -- the `while True` loop above never exits.
    thread.exit_thread()
def testYM(threadnum):
    # Fire off `threadnum` detached threads, each running the Yeahmobi mock
    # loop with a 0.1s inter-request delay.
    for _ in range(threadnum):
        thread.start_new_thread(mockYeahmobi, (0.1,))
def testTD(threadnum):
    # Fire off `threadnum` detached threads, each running the tradingdesk
    # mock loop with a 0.1s inter-request delay.
    for _ in range(threadnum):
        thread.start_new_thread(mockTradingdesk, (0.1,))
def main():
    """Entry point: spawn mock workers per argv, then keep the process alive.

    argv[1]: 'td' (tradingdesk) or 'ym' (yeahmobi); anything else is a no-op
    argv[2]: number of worker threads to spawn
    argv[3]: run time in hours (float)
    """
    print './collector.py [td, ym] threadNum runHour'
    if sys.argv[1] == 'td':
        testTD(int(sys.argv[2]))
    elif sys.argv[1] == 'ym':
        testYM(int(sys.argv[2]))
    else:
        pass
    # Sleep so the detached worker threads keep running; when this returns
    # the process exits and takes the workers with it.
    time.sleep(float(sys.argv[3])*3600)
if __name__=='__main__':
main() | apache-2.0 | Python | |
c8ec6825e8e5cbf465b06f426324d481249c61d6 | Create octree.py | gameplex/game | pysrc/octree.py | pysrc/octree.py | class Address(object):
    def __init__(self, x, y, z, depth):
        # Integer coordinates of the addressed node; bit `d` of each
        # coordinate selects the child octant at depth d.
        self.x = x
        self.y = y
        self.z = z
        # Tree depth this address refers to.
        self.depth = depth
def get_address_at_depth(depth):
mask = 1 << depth
x_bit = (mask & self.x) >> depth
y_bit = (mask & self.y) >> depth
z_bit = (mask & self.z) >> depth
return (x, y, z)
class Octree(object):
    """A node in an octree: payload plus eight child slots, addressed as
    children[x][y][z] with one bit per axis."""

    def __init__(self, address, data):
        self.parent = None
        # 2x2x2 nested lists of empty child slots; built with comprehensions
        # so no sublist object is shared between octants.
        self.children = [[[None for _ in range(2)] for _ in range(2)]
                         for _ in range(2)]
        self.address = address
        self.data = data

    def insert(self, node):
        """Not implemented yet."""
        pass

    def pop(self, address):
        """Not implemented yet."""
        pass
| agpl-3.0 | Python | |
ba907a0c12c5bf90fa796d36fe18218df12281ae | Add GDB pretty-printer for short_vector<T,N> | devinamatthews/marray,devinamatthews/marray,devinamatthews/marray | printers.py | printers.py | import gdb
import re
class ShortVectorPrinter:
    """GDB pretty-printer for MArray::short_vector<T, N> values."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        # N is the second template argument: the inline (small-buffer) capacity.
        length = int(self.val['_size'])
        inline_cap = int(self.val.type.template_argument(1))
        if length <= inline_cap:
            capacity = inline_cap
        else:
            capacity = int(self.val['_capacity'])
        return 'MArray::short_vector<%d> of length %d, capacity %d' % (inline_cap, length, capacity)

    def children(self):
        # Walk _size elements starting at the allocator's data pointer.
        element = self.val['_alloc']['_data']
        for index in range(int(self.val['_size'])):
            yield ('[%d]' % index, element.dereference())
            element = element + 1

    def display_hint(self):
        return 'array'
def str_lookup_function(val):
    """Return a ShortVectorPrinter for MArray::short_vector values, else None.

    Registered with gdb.pretty_printers, so GDB calls this for every value
    it displays.
    """
    lookup_tag = val.type.strip_typedefs().tag
    if lookup_tag is None:  # `is None`, not `== None` (identity test, PEP 8)
        return None
    # Match the templated tag, e.g. "MArray::short_vector<int, 4>".
    # (re caches compiled patterns, so no explicit compile step is needed.)
    if re.match(r"^MArray::short_vector<.*>$", lookup_tag):
        return ShortVectorPrinter(val)
    return None
gdb.pretty_printers.append(str_lookup_function)
| bsd-3-clause | Python | |
abee38d119cf49388081d01dc2484b58775333a8 | fix image URLs | danvk/oldnyc,luster/oldnyc,luster/oldnyc,danvk/oldnyc,danvk/oldnyc,nypl-spacetime/oldnyc,nypl-spacetime/oldnyc,luster/oldnyc,nypl-spacetime/oldnyc,danvk/oldnyc,luster/oldnyc,nypl-spacetime/oldnyc,luster/oldnyc,nypl-spacetime/oldnyc | record_fixer.py | record_fixer.py | #!/usr/bin/python
# This fixes some of the image URLs in records.pickle.
import record
import re
import cPickle
rs = record.AllRecords()

# Normalize photo URLs and drop the two unusable "aaf" collection-link
# records.  We rebuild the list instead of deleting in place: the original
# did `del rs[idx]` inside `enumerate(rs)`, which makes the iterator skip
# the element immediately following every deletion.
fixed_rs = []
for r in rs:
  url = r.photo_url
  # Some images have thumbnails but are missing the full photo URL.
  # Convert
  # http://webbie1.sfpl.org/multimedia/thumbnails/aaa9774_x.jpg
  # -> http://webbie1.sfpl.org/multimedia/sfphotos/aaa-9774.jpg
  if not url:
    url = r.thumbnail_url
    if 'aaf' in url:
      # There are only two of these:
      # http://sflib1.sfpl.org:82/record=b1026391~S0
      # http://sflib1.sfpl.org:82/record=b1036043~S0
      # They're both links to larger collections of images and don't really fit
      # the mold of the other pages. So we remove them.
      continue
    else:
      # Two of these... I think they're just omissions.
      url = re.sub(r'(.*)/thumbnails/(...)(\d+)_x\.jpg', r'\1/sfphotos/\2-\3.jpg', url)
  # Remove trailing spaces from image URLs.
  # Maybe ~4 of these.
  if url[-1] == ' ':
    url = url[0:-1]
  # Change 'foojpg' -> 'foo.jpg'
  # Just a typo. There are ~4 of these, too.
  if url[-3:] == 'jpg' and url[-4:] != '.jpg':
    url = url[:-3] + '.jpg'
  # Store the cleaned URL back on the record.  The original computed `url`
  # and then discarded it, so none of the fixes ever reached the pickle.
  r.photo_url = url
  fixed_rs.append(r)
rs = fixed_rs
print "Re-pickling"
output_file = "records.pickle"
f = file(output_file, "w")
p = cPickle.Pickler(f, 2)
for r in rs:
count += 1
p.dump(r)
if count % 100 == 0:
print "Pickled %d records" % count
| apache-2.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.