commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
711de8a04598fb531b5f70f334633b713dfa76c7 | Create TypeIt.py | DanielPinigin/Final-Project | TypeIt.py | TypeIt.py | print("Hello Daniel")
| mit | Python | |
94151b40c3b862c5ddf57c11228f6c99a8c38a7e | Define manage.py to launch app and app-related tasks | McGillX/edx_data_research,McGillX/edx_data_research,McGillX/edx_data_research | edx_data_research/web_app/manage.py | edx_data_research/web_app/manage.py | #!/usr/bin/python
from flask.ext.script import Manager, Server, Shell
from edx_data_research.web_app import app
from edx_data_research.web_app.models import User, Role
manager = Manager(app)
manager.add_command('run-server', Server(use_debugger=True, use_reloader=True,
host='0.0.0.0'))
def make_shell_context():
return dict(app=app, db=db, User=User, Role=Role)
manager.add_command('shell', Shell(make_context=make_shell_context))
if __name__ == '__main__':
manager.run()
| mit | Python | |
ec919af7fba21e98e73e6c435dda4f10e90b82ba | Create Vector.py | Berkmann18/Asteroid | Vector.py | Vector.py | # The Vector class
class Vector:
# Initialiser
def __init__(self, p=(0,0)):
self.x = p[0]
self.y = p[1]
# Returns a string representation of the vector
def __str__(self):
return "("+ str(self.x) + "," + str(self.y) + ")"
# Tests the equality of this vector and another
def __eq__(self, other):
return self.x == other.x and self.y == other.y
# Tests the inequality of this vector and another
def __ne__(self, other):
return not self.__eq__(other)
# Returns a tuple with the point corresponding to the vector
def getP(self):
return (self.x, self.y)
# Returns a copy of the vector
def copy(self):
v = Vector()
v.x = self.x
v.y = self.y
return v
# Multiplies the vector by a scalar
def mult(self, k):
self.x *= k
self.y *= k
return self
# Divides the vector by a scalar
def div(self, k):
self.x /= k
self.y /= k
return self
# Normalizes the vector
def normalise(self):
v = math.sqrt(self.x**2 + self.y**2)
self.x /= v
self.y /= v
# Returns a normalized version of the vector
def getNormalised(self):
return (self.x/math.sqrt(math.pow(self.x, 2)+math.pow(self.y, 2)), self.y/math.sqrt(math.pow(self.x, 2)+math.pow(self.y, 2)))
def getNormal(self):
return Vector(self.getNormalised())
# Adds another vector to this vector
def add(self, other):
self.x += other.x
self.y += other.y
return self
# Subtracts another vector from this vector
def sub(self, other):
self.x -= other.x
self.y -= other.y
return self
# Returns the zero vector
def zero(self):
self.x = 0
self.y = 0
return self
# Negates the vector (makes it point in the opposite direction)
def negate(self):
self.x = -self.x
self.y = -self.y
return self
# Returns the dot product of this vector with another one
def dot(self, other):
return self.x*other.x + self.y*other.y
# Returns the length of the vector
def length(self):
return math.sqrt(self.lengthSquared())
# Returns the squared length of the vector
def lengthSquared(self):
return self.x**2 + self.y**2
# Reflect this vector on a normal
def reflect(self, normal):
n = normal.copy()
n.mult(2*self.dot(normal))
self.sub(n)
return self
# Returns the angle between this vector and another one
def angle(self, other):
a = math.sqrt(self.x**2 + self.y**2)
b = math.sqrt(math.pow(other.x, 2) + math.pow(other.y, 2))
return math.acos((self.x*other.x+self.y*other.y)/(a*b))
| mit | Python | |
cab2ad2d82951ad988c1c2da146b4d62b6d90ec6 | Add track evalution code. | myfavouritekk/TPN | src/tpn/evaluate.py | src/tpn/evaluate.py | #!/usr/bin/env python
import argparse
import os
import os.path as osp
import glob
from data_io import tpn_test_iterator
from vdetlib.utils.protocol import proto_load
import numpy as np
import sys
sys.path.insert(0, '/Volumes/Research/ImageNet2016/Code/external/kwang/py-faster-rcnn-craft/lib')
from fast_rcnn.nms_wrapper import nms
import cPickle
def _frame_dets(tracks, frame_idx, score_key, box_key):
scores = []
boxes = []
for track in tracks:
if frame_idx not in track['frame']: continue
assert score_key in track
assert box_key in track
ind = track['frame'] == frame_idx
cur_scores = track[score_key][ind]
cur_boxes = track[box_key][ind,:]
num_cls = cur_scores.shape[1]
# repeat boxes if not class specific
if cur_boxes.shape[1] != num_cls:
cur_boxes = np.repeat(cur_boxes[:,np.newaxis,:], num_cls, axis=1)
scores.append(cur_scores)
boxes.append(cur_boxes)
scores = np.concatenate(scores, 0)
boxes = np.concatenate(boxes, 0)
return scores, boxes
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('track_dir',
help='Directory that contains all track detection results.')
parser.add_argument('vid_dir')
parser.add_argument('image_list')
parser.add_argument('score_key')
parser.add_argument('box_key')
parser.add_argument('output_dir')
parser.add_argument('--thresh', type=float, default=0.05)
parser.add_argument('--num_classes', type=int, default=31)
parser.add_argument('--max_per_image', type=int, default=100)
args = parser.parse_args()
# read image_list
with open(args.image_list, 'r') as f:
image_list = dict([line.strip().split() for line in f])
num_classes = args.num_classes
all_boxes = [[[] for _ in xrange(len(image_list))]
for _ in xrange(num_classes)]
# process vid detections
vids = glob.glob(osp.join(args.track_dir, '*'))
for vid_path in vids:
print vid_path
vid_name = osp.split(vid_path)[-1].split('.')[0]
vid_proto = proto_load(osp.join(args.vid_dir, vid_name + '.vid'))
tracks = tpn_test_iterator(vid_path)
for frame in vid_proto['frames']:
frame_name = osp.join(vid_name, osp.splitext(frame['path'])[0])
if frame_name not in image_list.keys(): continue
frame_idx = frame['frame']
global_idx = int(image_list[frame_name]) - 1
scores, boxes = _frame_dets(tracks, frame_idx, args.score_key, args.box_key)
boxes = boxes.reshape((boxes.shape[0], -1))
for j in xrange(1, num_classes):
inds = np.where(scores[:, j] > args.thresh)[0]
cls_scores = scores[inds, j]
cls_boxes = boxes[inds, j*4:(j+1)*4]
cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
.astype(np.float32, copy=False)
keep = nms(cls_dets, 0.3)
cls_dets = cls_dets[keep, :]
all_boxes[j][global_idx] = cls_dets
# Limit to max_per_image detections *over all classes*
if args.max_per_image > 0:
image_scores = np.hstack([all_boxes[j][global_idx][:, -1]
for j in xrange(1, num_classes)])
if len(image_scores) > args.max_per_image:
image_thresh = np.sort(image_scores)[-args.max_per_image]
for j in xrange(1, num_classes):
keep = np.where(all_boxes[j][global_idx][:, -1] >= image_thresh)[0]
all_boxes[j][global_idx] = all_boxes[j][global_idx][keep, :]
det_file = osp.join(args.output_dir, 'detections.pkl')
if not osp.isdir(args.output_dir):
os.makedirs(args.output_dir)
with open(det_file, 'wb') as f:
cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)
| mit | Python | |
a70abcdd95612fe3df4fc3dd9c4ae8151add5a28 | add an example file | vlas-sokolov/pyscatter-3d | example.py | example.py | import numpy as np
from pyscatter3d import pyscatter3d
X0,Y0 = np.meshgrid(np.linspace(-3,3,50), np.linspace(-3,3,50))
D = np.sqrt(X0**2+Y0**2) # radial distance
Z0 = np.sinc(D)
_ = np.random.randn(3, 1e3)
X1,Y1,Z1 = _/np.linalg.norm(_, axis=0)
np.savetxt('sinc.csv', np.array([arr.flatten() for arr in [X0,Y0,Z0,1/D]]).T,
delimiter=',', header='X,Y,Z,inv_dist', comments='')
np.savetxt('sphere.csv', np.array([arr.flatten() for arr in [X1,Y1,Z1]]).T,
delimiter=',', header='X,Y,Z', comments='')
csv_dir = '/home/vsokolov/Projects/g35.39/tables/'
# dataset names and their corresponding filenames
datasets = {'sinc' : 'sinc.csv',
'sphere': 'sphere.csv'}
# what column names to use as X/Y/Z values
which_x, which_y, which_z = 'X', 'Y', 'Z'
# which_s controls the marker size
which_s = 'inv_dist'
# output file name (without an extension)
outfile = 'example'
default_size = {'matplotlib': 30,
'plotly' : 10 }
line2text = {'sinc': 'sinc(x)', 'sphere': 'sphere' }
line2color = {'sinc': '#e41a1c', 'sphere': '#377eb8'}
# NOTE: the 'star' symbol doesn't work with plotly scatter3d :C
# see here: https://github.com/plotly/plotly.py/issues/454
# supported marker symbols in plotly scatter-3d:
# (enumerated: "circle" | "circle-open" | "square" | "square-open" |
# "diamond" | "diamond-open" | "cross" | "x" )
line2symbol = {'matplotlib': {'sphere': 'd', 'sinc': 'o' },
'plotly' : {'sphere': 'diamond', 'sinc': 'circle'} }
pyscatter3d(datasets, line2text, line2color, line2symbol,
which_x, which_y, which_z, which_s, default_size,
outfile=outfile, backend='both')
| mit | Python | |
28e6c21e2a8bc78a6f4292eef2daec4b70d0b887 | Add support for Pocket | foauth/foauth.org,foauth/foauth.org,foauth/foauth.org | services/pocket.py | services/pocket.py | from werkzeug.urls import url_decode
import requests
import foauth.providers
class Pocket(foauth.providers.OAuth2):
# General info about the provider
provider_url = 'http://getpocket.com/'
docs_url = 'http://getpocket.com/developer/docs/overview'
category = 'News'
# URLs to interact with the API
request_token_url = 'https://getpocket.com/v3/oauth/request'
authorize_url = 'https://getpocket.com/auth/authorize'
access_token_url = 'https://getpocket.com/v3/oauth/authorize'
api_domain = 'getpocket.com'
available_permissions = [
(None, 'access your saved articles'),
]
supports_state = False
def get_authorize_params(self, redirect_uri, scopes):
params = super(Pocket, self).get_authorize_params(redirect_uri, scopes)
r = requests.post(self.request_token_url, data={
'consumer_key': params['client_id'],
'redirect_uri': redirect_uri,
})
data = url_decode(r.content)
redirect_uri = '%s&code=%s' % (params['redirect_uri'], data['code'])
return {
'request_token': data['code'],
'redirect_uri': redirect_uri,
}
def get_access_token_response(self, redirect_uri, data):
return requests.post(self.get_access_token_url(), {
'consumer_key': self.client_id,
'code': data['code'],
'redirect_uri': redirect_uri
})
def parse_token(self, content):
data = url_decode(content)
data['service_user_id'] = data['username']
return data
def bearer_type(self, token, r):
r.prepare_url(r.url, {'consumer_key': self.client_id, 'access_token': token})
return r
| bsd-3-clause | Python | |
e42b22dc0a71fb5c7572ca69c63ab6a7b0ba8479 | add error handling for celery tasks | helfertool/helfertool,helfertool/helfertool,helfertool/helfertool,helfertool/helfertool | src/helfertool/tasks.py | src/helfertool/tasks.py | from __future__ import absolute_import
from celery.signals import task_failure
from django.conf import settings
from django.core.mail import mail_admins
from django.views.debug import ExceptionReporter
@task_failure.connect
def celery_error_handler(task_id, exception, traceback, einfo, *args, **kwargs):
if settings.DEBUG:
return
mail_subject = "Task exception - {}".format(exception)
mail_subject = mail_subject.replace("\n", " ")[:250]
reporter = ExceptionReporter(None, einfo.type, exception, traceback)
mail_text = reporter.get_traceback_text()
mail_admins(mail_subject, mail_text)
| agpl-3.0 | Python | |
73f75483156056b61f3b6bec4fe2f09522c2c34a | Add tests for mixin order | AleksNeStu/ggrc-core,j0gurt/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,j0gurt/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,josthkko/ggrc-core,selahssea/ggrc-core,NejcZupec/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,edofic/ggrc-core,kr41/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,NejcZupec/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,josthkko/ggrc-core,AleksNeStu/ggrc-core,NejcZupec/ggrc-core | test/integration/ggrc/models/test_eager_query.py | test/integration/ggrc/models/test_eager_query.py | # Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""Tests for making sure eager queries are working on all mixins."""
from ggrc.models import all_models
from ggrc.models import mixins
from integration.ggrc import TestCase
class TestAllModels(TestCase):
"""Test basic model structure for all models"""
def test_all_model_mro(self):
"""Test the correct mixin order for eager queries.
This test checks that all models that have an eager query, have the last
mixin in the mro Identifiable. If there are any other mixins with eager
query after it, the eager query on those is ignored and that is an error.
"""
errors = set()
for model in all_models.all_models:
eager = [mixin for mixin in model.mro()
if hasattr(mixin, "eager_query")]
if eager:
try:
self.assertEqual(
eager[-1], mixins.Identifiable,
"Model {}, has wrong mixin order. The last mixin with "
"eager_query is '{}' instead of 'Identifiable'.".format(
model.__name__, eager[-1].__name__),
)
except AssertionError as error:
errors.add(error)
self.assertEqual(set(), errors)
| apache-2.0 | Python | |
6ed99163b10209566a0575a9a67d1ab2ad552fd9 | Add test for committee subscriptions page | Code4SA/pmg-cms-2,Code4SA/pmg-cms-2,Code4SA/pmg-cms-2 | tests/views/test_committee_subscriptions_page.py | tests/views/test_committee_subscriptions_page.py | import datetime
from tests import PMGLiveServerTestCase
from tests.fixtures import dbfixture, HouseData, CommitteeData
THIS_YEAR = datetime.datetime.today().year
class TestCommitteeSubscriptionsPage(PMGLiveServerTestCase):
def test_committee_subscriptions_page(self):
"""
Test committee subscriptions page (/committee-subscriptions)
"""
self.make_request("/committee-subscriptions", follow_redirects=True)
self.assertIn(
"Access to meeting reports for premium committees from before {} is freely accessible to everyone.".format(
THIS_YEAR - 1
),
self.html,
)
| apache-2.0 | Python | |
5e9b6bc60f0f81db3ed451eb89c23b77888e1167 | Update a comment | grzes/djangae,kirberich/djangae,kirberich/djangae,asendecka/djangae,asendecka/djangae,potatolondon/djangae,chargrizzle/djangae,asendecka/djangae,armirusco/djangae,chargrizzle/djangae,armirusco/djangae,chargrizzle/djangae,grzes/djangae,armirusco/djangae,grzes/djangae,potatolondon/djangae,kirberich/djangae | djangae/db/backends/appengine/expressions.py | djangae/db/backends/appengine/expressions.py | from django.db.models.expressions import F
from djangae.db.utils import get_prepared_db_value
CONNECTORS = {
F.ADD: lambda l, r: l + r,
F.SUB: lambda l, r: l - r,
F.MUL: lambda l, r: l * r,
F.DIV: lambda l, r: l / r,
}
def evaluate_expression(expression, instance, connection):
""" A limited evaluator for Django's F expressions. This are evaluated within
the get/put transaction in _update_entity so these will happen atomically
"""
if isinstance(expression, (basestring, int, float)):
return expression
if hasattr(expression, 'name'):
field = instance._meta.get_field(expression.name)
return get_prepared_db_value(connection, instance._original, field)
if hasattr(expression, 'value'):
return expression.value
if hasattr(expression, 'connector') and expression.connector in CONNECTORS:
if hasattr(expression, 'children'):
lhs, rhs = expression.children
else:
lhs, rhs = expression.lhs, expression.rhs
return CONNECTORS[expression.connector](
evaluate_expression(lhs, instance, connection),
evaluate_expression(rhs, instance, connection),
)
raise NotImplementedError("Support for expression %r isn't implemented", expression)
| from django.db.models.expressions import F
from djangae.db.utils import get_prepared_db_value
CONNECTORS = {
F.ADD: lambda l, r: l + r,
F.SUB: lambda l, r: l - r,
F.MUL: lambda l, r: l * r,
F.DIV: lambda l, r: l / r,
}
def evaluate_expression(expression, instance, connection):
""" A limited evaluator for Django's F expressions. Although they're evaluated
before the database call, so they don't provide the race condition protection,
but neither does our update() implementation so we provide this for convenience.
"""
if isinstance(expression, (basestring, int, float)):
return expression
if hasattr(expression, 'name'):
field = instance._meta.get_field(expression.name)
return get_prepared_db_value(connection, instance._original, field)
if hasattr(expression, 'value'):
return expression.value
if hasattr(expression, 'connector') and expression.connector in CONNECTORS:
if hasattr(expression, 'children'):
lhs, rhs = expression.children
else:
lhs, rhs = expression.lhs, expression.rhs
return CONNECTORS[expression.connector](
evaluate_expression(lhs, instance, connection),
evaluate_expression(rhs, instance, connection),
)
raise NotImplementedError("Support for expression %r isn't implemented", expression)
| bsd-3-clause | Python |
37e59cbd7e8b4901644adcb73a7f491247fdea69 | Add py-pyperclip package (#12375) | LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack | var/spack/repos/builtin/packages/py-pyperclip/package.py | var/spack/repos/builtin/packages/py-pyperclip/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPyperclip(PythonPackage):
"""A cross-platform clipboard module for Python."""
homepage = "https://github.com/asweigart/pyperclip"
url = "https://pypi.io/packages/source/p/pyperclip/pyperclip-1.7.0.tar.gz"
version('1.7.0', sha256='979325468ccf682104d5dcaf753f869868100631301d3e72f47babdea5700d1c')
depends_on('py-setuptools', type='build')
| lgpl-2.1 | Python | |
a773f428512ecc9bc4f81b633aaf3dfa8faa10ed | Create client.py | filip-marinic/LGS_SimSuite | client.py | client.py | #!/usr/bin/env python
#LGS SimSuite Client
#Copyright (c) 2015 Filip Marinic
from time import gmtime, strftime, time, sleep
import socket
import math
import sys
import paramiko
#server parameters
server_username = "Pi"
server_password = "********"
server_path = "python /home/pi/server.py" #if server script is compiled then: server_path = "/home/pi/server"
BUFFER_SIZE = 1300
frame_start = 1000000000
legal_ip = False
dest_port = datarate = None
try:
while legal_ip == False:
dest_ip = raw_input("\nEnter TM server IP address: ")
if dest_ip == '' : dest_ip = 'abc'
else : dest_ip = str(dest_ip)
try:
socket.inet_pton(socket.AF_INET,dest_ip)
legal_ip = True
except socket.error:
legal_ip = False
print "Invalid IP address."
while dest_port < 1025:
dest_port = raw_input("\nEnter desired TM port (1024-65535): ")
if dest_port == '' : dest_port = 0
else : dest_port = int(dest_port)
while not datarate:
datarate = raw_input("\nEnter desired TM datarate in kbps (or 0 for max speed): ")
try :
datarate = int(datarate)
except :
datarate = None
print "\nPreparing server... "
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(dest_ip, username=server_username, password=server_password)
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command("nohup "+str(server_path)+" "+str(dest_port)+" "+str(datarate))#
sleep(3)
print "Server ready!"
while True:
frames_requested = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 240000)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.connect((dest_ip, dest_port))
except:
print "Cannot connect to server."
sys.exit(1)
while not frames_requested:
frames_requested = raw_input("\nEnter how many TM frames shall server send (or enter 0 to exit): ")
frames_requested = int(frames_requested)
if frames_requested > 0 : pass
else :
terminate = "terminated"
s.send(terminate)
s.shutdown(2)
s.close()
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command("killall server")
sys.exit(1)
total_frames = 0
first_frame_time = 0
MESSAGE = str(frame_start + frames_requested)
s.send(MESSAGE)
while True:
packet = s.recv(BUFFER_SIZE, socket.MSG_WAITALL)
if packet :
current_frame = packet[:30]
timestamp = strftime("%d/%m/%Y-%H:%M:%S", gmtime())
total_frames += 1
print current_frame, "--> Receive time:", timestamp
if total_frames == 1 : first_frame_time = time()
else :
sleep (0.1)
if total_frames == frames_requested :
print '\nTotal TM frames received: ', total_frames
speed_f = math.ceil((total_frames/(time()-first_frame_time))*100)/100
speed_b = math.ceil((((total_frames*1300*8)/(time()-first_frame_time))/1000)*100)/100
print 'Average TM framerate:', speed_f, 'frames/sec'
print 'Average datarate:', speed_b, 'kbps'
if datarate > 0 : print '(desired datarate:', datarate, 'kbps)'
else : print '(desired datarate: max speed)'
total_frames = 0
terminate = "terminated"
s.send(terminate)
break
except (KeyboardInterrupt):
s.close()
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command("killall server")
sys.exit(1)
| mit | Python | |
6427c55bbd51abaef6847e4f2af239d5977d0048 | Create client.py | Laserbear/PypeBomb | client.py | client.py | import socket
target_host = "0.0.0.0"
target_port = 9999
if(len(sys.argv) > 1):
try:
target_ip = sys.argv[1]
target_port = int(sys.argv[2])
except Exception:
pass #lazy
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((target_host, target_port))
'''
client.send("ls")
response = client.recv(4096)
print "Output: " + response.rstrip("\n")
'''
while True:
lmao = raw_input("Enter Command:\n")
client.send(lmao)
response = client.recv(4096)
print "Output: " + response.rstrip("\n")
| mit | Python | |
04d6765c14de3d6d5eb36d9ad268012f9e7625bc | add test for search items #30 | rosariomgomez/tradyfit,rosariomgomez/tradyfit,rosariomgomez/tradyfit,rosariomgomez/tradyfit,rosariomgomez/tradyfit | vagrant/tradyfit/tests/functional/test_search.py | vagrant/tradyfit/tests/functional/test_search.py | # -*- coding: utf-8 -*-
import re
from bs4 import BeautifulSoup
from helper import SeleniumTestCase
import page
class SearchTestCase(SeleniumTestCase):
@classmethod
def setUpClass(cls):
# connect to webdriver, create app, launch server in thread
super(SearchTestCase, cls).setUpClass()
@classmethod
def tearDownClass(cls):
# stop the server, destroy db and remove app context
super(SearchTestCase, cls).tearDownClass()
def setUp(self):
if not self.client:
self.skipTest('Web browser not available')
def tearDown(self):
pass
def test_search_no_login(self):
'''Verify a not logged in user see items on a search ordered
by time creation
1. Go to home page
2. Make a search with the word 't-shirt'
3. Verify you are redirected to the search results page
4. Verify items are displayed by time creation (last first)
'''
self.client.get('http://localhost:5000')
# home page object
home_page = page.HomePage(self.client)
self.assertTrue(home_page.is_title_matches)
# make a search
home_page.make_search('t-shirt')
# assert the items appears in the page ordered by desc time creation
soup = BeautifulSoup(self.client.page_source)
items = soup.find_all("div", id=re.compile("^item-"))
self.assertTrue(len(items) == 2)
self.assertTrue('Lakers t-shirt' in str(items[0]))
def test_search_login(self):
'''Verify a logged in user see items on a search ordered
by nearby location
1. Go to home page
2. Click on Login link
3. Fill login form and submit
4. Make a search with the word 'bike'
5. Verify you are redirected to the search results page
6. Verify items are displayed by time creation (last first)
'''
self.client.get('http://localhost:5000')
# home page object
home_page = page.HomePage(self.client)
self.assertTrue(home_page.is_title_matches)
# navigate to login page
home_page.go_to_login()
login_page = page.LoginPage(self.client)
self.assertTrue(login_page.is_title_matches)
# login user US
login_page.login(self.app.config['FB_TEST_EMAIL'],
self.app.config['FB_TEST_PWD'])
# make a search
home_page.make_search('t-shirt')
search_page = page.SearchPage(self.client)
self.assertTrue(search_page.is_title_matches)
# assert the items appears in the page ordered by user nearby
soup = BeautifulSoup(self.client.page_source)
items = soup.find_all("div", id=re.compile("^item-"))
self.assertTrue(len(items) == 2)
self.assertTrue('Soccer t-shirt' in str(items[0]))
#log out user
search_page.go_to_log_out()
| mit | Python | |
ad3744acef6d855fcc074c7412c3e224d5a8f205 | add missing file | telamonian/saga-python,luis-rr/saga-python,mehdisadeghi/saga-python,telamonian/saga-python,luis-rr/saga-python,mehdisadeghi/saga-python,luis-rr/saga-python | saga/utils/pty_exceptions.py | saga/utils/pty_exceptions.py |
import saga.exceptions as se
# ----------------------------------------------------------------
#
def translate_exception (e, msg=None) :
"""
In many cases, we should be able to roughly infer the exception cause
from the error message -- this is centrally done in this method. If
possible, it will return a new exception with a more concise error
message and appropriate exception type.
"""
if not issubclass (e.__class__, se.SagaException) :
# we do not touch non-saga exceptions
return e
if not issubclass (e.__class__, se.NoSuccess) :
# this seems to have a specific cause already, leave it alone
return e
cmsg = e._plain_message
if msg :
cmsg = "%s (%s)" % (cmsg, msg)
lmsg = cmsg.lower ()
if 'could not resolve hostname' in lmsg :
e = se.BadParameter (cmsg)
elif 'connection timed out' in lmsg :
e = se.BadParameter (cmsg)
elif 'auth' in lmsg :
e = se.AuthorizationFailed (cmsg)
elif 'pass' in lmsg :
e = se.AuthenticationFailed (cmsg)
elif 'ssh_exchange_identification' in lmsg :
e = se.AuthenticationFailed ("too frequent login attempts, or sshd misconfiguration: %s" % cmsg)
elif 'denied' in lmsg :
e = se.PermissionDenied (cmsg)
elif 'shared connection' in lmsg :
e = se.NoSuccess ("Insufficient system resources: %s" % cmsg)
elif 'pty allocation' in lmsg :
e = se.NoSuccess ("Insufficient system resources: %s" % cmsg)
elif 'Connection to master closed' in lmsg :
e = se.NoSuccess ("Connection failed (insufficient system resources?): %s" % cmsg)
return e
| mit | Python | |
df7a5c4aa4f5898de3c70cef17c3c5031f7e05a6 | Add support for executing scrapy using -m option of python | finfish/scrapy,pawelmhm/scrapy,dangra/scrapy,Ryezhang/scrapy,kmike/scrapy,ArturGaspar/scrapy,elacuesta/scrapy,pablohoffman/scrapy,finfish/scrapy,Parlin-Galanodel/scrapy,wujuguang/scrapy,eLRuLL/scrapy,ArturGaspar/scrapy,elacuesta/scrapy,umrashrf/scrapy,finfish/scrapy,starrify/scrapy,dangra/scrapy,scrapy/scrapy,wujuguang/scrapy,pawelmhm/scrapy,Parlin-Galanodel/scrapy,eLRuLL/scrapy,wujuguang/scrapy,eLRuLL/scrapy,umrashrf/scrapy,kmike/scrapy,elacuesta/scrapy,kmike/scrapy,Ryezhang/scrapy,ArturGaspar/scrapy,scrapy/scrapy,pablohoffman/scrapy,Ryezhang/scrapy,pawelmhm/scrapy,ssteo/scrapy,ssteo/scrapy,ssteo/scrapy,umrashrf/scrapy,scrapy/scrapy,dangra/scrapy,Parlin-Galanodel/scrapy,pablohoffman/scrapy,starrify/scrapy,starrify/scrapy | scrapy/__main__.py | scrapy/__main__.py | from scrapy.cmdline import execute
if __name__ == '__main__':
execute()
| bsd-3-clause | Python | |
d94123ba898032e7837aa8a2fd0fe585ed81e2d5 | Add back a filesystem backend for testing and development | ostwald/scrapi,CenterForOpenScience/scrapi,felliott/scrapi,fabianvf/scrapi,icereval/scrapi,erinspace/scrapi,felliott/scrapi,mehanig/scrapi,CenterForOpenScience/scrapi,mehanig/scrapi,fabianvf/scrapi,alexgarciac/scrapi,jeffreyliu3230/scrapi,erinspace/scrapi | scrapi/processing/storage.py | scrapi/processing/storage.py | import os
import json
from scrapi.processing.base import BaseProcessor
class StorageProcessor(BaseProcessor):
NAME = 'storage'
def process_raw(self, raw):
filename = 'archive/{}/{}/raw.{}'.format(raw['source'], raw['docID'], raw['filetype'])
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as f:
f.write(json.dumps(raw.attributes, indent=4))
def process_normalized(self, raw, normalized):
filename = 'archive/{}/{}/normalized.json'.format(raw['source'], raw['docID'], raw['filetype'])
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as f:
f.write(json.dumps(normalized.attributes, indent=4))
| apache-2.0 | Python | |
e6699947ebde4d51b1bd8b6016879d4917d7a648 | implement initial base exception class and httperror exception class | gdmachado/scup-python | scup/exceptions.py | scup/exceptions.py | class ScupPythonError(Exception):
""" Base class for exceptions raised by scup-python. """
class ScupError(ScupPythonError):
""" Exception for Scup errors. """
def __init__(self, message=None, code=None, error_data=None):
self.message = message
self.code = code
self.error_data = error_data
if self.code:
message = '[{}] {}'.format(self.code, self.message)
super(ScupError, self).__init__(message)
class HTTPError(ScupPythonError):
""" Exception for transport errors. """ | mit | Python | |
ded34849d9eb2feb51b9ad7f31e210db3a28c7e1 | change case | alephdata/aleph,smmbllsm/aleph,OpenGazettes/aleph,smmbllsm/aleph,OpenGazettes/aleph,gazeti/aleph,gazeti/aleph,alephdata/aleph,pudo/aleph,OpenGazettes/aleph,pudo/aleph,alephdata/aleph,alephdata/aleph,gazeti/aleph,pudo/aleph,gazeti/aleph,OpenGazettes/aleph,smmbllsm/aleph,alephdata/aleph | aleph/assets.py | aleph/assets.py | import os
from flask.ext.assets import Bundle
from aleph.core import assets, app
deps_assets = Bundle(
'vendor/jquery/dist/jquery.js',
'vendor/angular/angular.js',
'vendor/ng-debounce/angular-debounce.js',
'vendor/angular-route/angular-route.js',
'vendor/angular-animate/angular-animate.js',
'vendor/angular-loading-bar/build/loading-bar.js',
'vendor/angular-truncate/src/truncate.js',
'vendor/angular-bootstrap/ui-bootstrap-tpls.js',
'vendor/ngInfiniteScroll/build/ng-infinite-scroll.js',
filters='uglifyjs',
output='assets/deps.js'
)
js_files = []
for (root, dirs, files) in os.walk(os.path.join(app.static_folder, 'js')):
for file_name in files:
file_path = os.path.relpath(os.path.join(root, file_name),
app.static_folder)
js_files.append(file_path)
app_assets = Bundle(*js_files,
filters='uglifyjs',
output='assets/app.js')
css_assets = Bundle(
'style/aleph.scss',
'vendor/angular-loading-bar/build/loading-bar.css',
'style/animations.css',
filters='scss',
output='assets/style.css'
)
assets.register('deps', deps_assets)
assets.register('app', app_assets)
assets.register('css', css_assets)
| import os
from flask.ext.assets import Bundle
from aleph.core import assets, app
deps_assets = Bundle(
'vendor/jquery/dist/jquery.js',
'vendor/angular/angular.js',
'vendor/ng-debounce/angular-debounce.js',
'vendor/angular-route/angular-route.js',
'vendor/angular-animate/angular-animate.js',
'vendor/angular-loading-bar/build/loading-bar.js',
'vendor/angular-truncate/src/truncate.js',
'vendor/angular-bootstrap/ui-bootstrap-tpls.js',
'vendor/nginfinitescroll/build/ng-infinite-scroll.js',
filters='uglifyjs',
output='assets/deps.js'
)
js_files = []
for (root, dirs, files) in os.walk(os.path.join(app.static_folder, 'js')):
for file_name in files:
file_path = os.path.relpath(os.path.join(root, file_name),
app.static_folder)
js_files.append(file_path)
app_assets = Bundle(*js_files,
filters='uglifyjs',
output='assets/app.js')
css_assets = Bundle(
'style/aleph.scss',
'vendor/angular-loading-bar/build/loading-bar.css',
'style/animations.css',
filters='scss',
output='assets/style.css'
)
assets.register('deps', deps_assets)
assets.register('app', app_assets)
assets.register('css', css_assets)
| mit | Python |
b2d60408688cc1bf27842d8744d1048a64b00e94 | Add script to get public registrations for staff members | KAsante95/osf.io,jolene-esposito/osf.io,HarryRybacki/osf.io,jinluyuan/osf.io,leb2dg/osf.io,HalcyonChimera/osf.io,brianjgeiger/osf.io,amyshi188/osf.io,caseyrygt/osf.io,haoyuchen1992/osf.io,jinluyuan/osf.io,laurenrevere/osf.io,SSJohns/osf.io,lyndsysimon/osf.io,HalcyonChimera/osf.io,amyshi188/osf.io,cslzchen/osf.io,zamattiac/osf.io,mluo613/osf.io,asanfilippo7/osf.io,jolene-esposito/osf.io,himanshuo/osf.io,DanielSBrown/osf.io,zamattiac/osf.io,monikagrabowska/osf.io,arpitar/osf.io,icereval/osf.io,emetsger/osf.io,chrisseto/osf.io,GaryKriebel/osf.io,doublebits/osf.io,MerlinZhang/osf.io,mluke93/osf.io,himanshuo/osf.io,arpitar/osf.io,lyndsysimon/osf.io,barbour-em/osf.io,jeffreyliu3230/osf.io,jeffreyliu3230/osf.io,samanehsan/osf.io,jnayak1/osf.io,TomBaxter/osf.io,alexschiller/osf.io,amyshi188/osf.io,brandonPurvis/osf.io,danielneis/osf.io,dplorimer/osf,caseyrygt/osf.io,jmcarp/osf.io,barbour-em/osf.io,monikagrabowska/osf.io,doublebits/osf.io,GageGaskins/osf.io,TomBaxter/osf.io,cwisecarver/osf.io,brandonPurvis/osf.io,jmcarp/osf.io,GageGaskins/osf.io,Johnetordoff/osf.io,kch8qx/osf.io,billyhunt/osf.io,billyhunt/osf.io,wearpants/osf.io,chennan47/osf.io,aaxelb/osf.io,bdyetton/prettychart,zachjanicki/osf.io,lamdnhan/osf.io,abought/osf.io,kwierman/osf.io,cwisecarver/osf.io,jeffreyliu3230/osf.io,abought/osf.io,samanehsan/osf.io,wearpants/osf.io,zkraime/osf.io,caneruguz/osf.io,rdhyee/osf.io,CenterForOpenScience/osf.io,reinaH/osf.io,acshi/osf.io,acshi/osf.io,danielneis/osf.io,felliott/osf.io,lamdnhan/osf.io,adlius/osf.io,SSJohns/osf.io,zkraime/osf.io,chennan47/osf.io,asanfilippo7/osf.io,chrisseto/osf.io,HarryRybacki/osf.io,ZobairAlijan/osf.io,GageGaskins/osf.io,petermalcolm/osf.io,GaryKriebel/osf.io,njantrania/osf.io,mluo613/osf.io,reinaH/osf.io,ticklemepierce/osf.io,mluo613/osf.io,GageGaskins/osf.io,crcresearch/osf.io,jmcarp/osf.io,mattclark/osf.io,GaryKriebel/osf.io,zachjanicki/osf.io,
caneruguz/osf.io,ticklemepierce/osf.io,lamdnhan/osf.io,kwierman/osf.io,SSJohns/osf.io,amyshi188/osf.io,dplorimer/osf,caseyrygt/osf.io,sbt9uc/osf.io,brandonPurvis/osf.io,arpitar/osf.io,monikagrabowska/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io,icereval/osf.io,caseyrygt/osf.io,KAsante95/osf.io,jnayak1/osf.io,saradbowman/osf.io,njantrania/osf.io,jinluyuan/osf.io,sloria/osf.io,Ghalko/osf.io,petermalcolm/osf.io,monikagrabowska/osf.io,mluo613/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io,RomanZWang/osf.io,TomHeatwole/osf.io,zkraime/osf.io,jolene-esposito/osf.io,samchrisinger/osf.io,samanehsan/osf.io,haoyuchen1992/osf.io,aaxelb/osf.io,RomanZWang/osf.io,mluke93/osf.io,binoculars/osf.io,erinspace/osf.io,himanshuo/osf.io,mluo613/osf.io,caneruguz/osf.io,crcresearch/osf.io,cslzchen/osf.io,DanielSBrown/osf.io,reinaH/osf.io,adlius/osf.io,hmoco/osf.io,acshi/osf.io,zamattiac/osf.io,pattisdr/osf.io,mfraezz/osf.io,jolene-esposito/osf.io,njantrania/osf.io,emetsger/osf.io,CenterForOpenScience/osf.io,Ghalko/osf.io,alexschiller/osf.io,KAsante95/osf.io,KAsante95/osf.io,RomanZWang/osf.io,arpitar/osf.io,chrisseto/osf.io,revanthkolli/osf.io,zachjanicki/osf.io,HalcyonChimera/osf.io,baylee-d/osf.io,lamdnhan/osf.io,acshi/osf.io,TomBaxter/osf.io,mfraezz/osf.io,kwierman/osf.io,mfraezz/osf.io,lyndsysimon/osf.io,kch8qx/osf.io,TomHeatwole/osf.io,lyndsysimon/osf.io,Ghalko/osf.io,njantrania/osf.io,pattisdr/osf.io,ZobairAlijan/osf.io,SSJohns/osf.io,danielneis/osf.io,samchrisinger/osf.io,MerlinZhang/osf.io,rdhyee/osf.io,TomHeatwole/osf.io,himanshuo/osf.io,crcresearch/osf.io,HarryRybacki/osf.io,HalcyonChimera/osf.io,revanthkolli/osf.io,sloria/osf.io,caseyrollins/osf.io,alexschiller/osf.io,haoyuchen1992/osf.io,jmcarp/osf.io,chennan47/osf.io,KAsante95/osf.io,laurenrevere/osf.io,felliott/osf.io,Johnetordoff/osf.io,felliott/osf.io,brandonPurvis/osf.io,ticklemepierce/osf.io,icereval/osf.io,binoculars/osf.io,bdyetton/prettychart,fabianvf/osf.io,zachjanicki/osf.io,revanthkolli/osf.io,mattclark/osf.
io,ckc6cz/osf.io,jinluyuan/osf.io,bdyetton/prettychart,emetsger/osf.io,abought/osf.io,DanielSBrown/osf.io,alexschiller/osf.io,adlius/osf.io,CenterForOpenScience/osf.io,haoyuchen1992/osf.io,jnayak1/osf.io,barbour-em/osf.io,alexschiller/osf.io,samanehsan/osf.io,mattclark/osf.io,cslzchen/osf.io,cosenal/osf.io,dplorimer/osf,baylee-d/osf.io,Nesiehr/osf.io,doublebits/osf.io,rdhyee/osf.io,cwisecarver/osf.io,sbt9uc/osf.io,billyhunt/osf.io,ckc6cz/osf.io,hmoco/osf.io,MerlinZhang/osf.io,bdyetton/prettychart,mfraezz/osf.io,aaxelb/osf.io,mluke93/osf.io,fabianvf/osf.io,ZobairAlijan/osf.io,Johnetordoff/osf.io,ckc6cz/osf.io,sloria/osf.io,GageGaskins/osf.io,RomanZWang/osf.io,kch8qx/osf.io,ckc6cz/osf.io,adlius/osf.io,acshi/osf.io,Nesiehr/osf.io,pattisdr/osf.io,erinspace/osf.io,hmoco/osf.io,aaxelb/osf.io,barbour-em/osf.io,caseyrollins/osf.io,fabianvf/osf.io,zkraime/osf.io,monikagrabowska/osf.io,leb2dg/osf.io,leb2dg/osf.io,doublebits/osf.io,caneruguz/osf.io,sbt9uc/osf.io,leb2dg/osf.io,RomanZWang/osf.io,binoculars/osf.io,billyhunt/osf.io,GaryKriebel/osf.io,jeffreyliu3230/osf.io,laurenrevere/osf.io,cosenal/osf.io,cldershem/osf.io,samchrisinger/osf.io,cwisecarver/osf.io,revanthkolli/osf.io,reinaH/osf.io,kwierman/osf.io,cldershem/osf.io,mluke93/osf.io,rdhyee/osf.io,asanfilippo7/osf.io,ZobairAlijan/osf.io,chrisseto/osf.io,samchrisinger/osf.io,saradbowman/osf.io,felliott/osf.io,caseyrollins/osf.io,cslzchen/osf.io,fabianvf/osf.io,ticklemepierce/osf.io,erinspace/osf.io,sbt9uc/osf.io,baylee-d/osf.io,brandonPurvis/osf.io,Nesiehr/osf.io,jnayak1/osf.io,petermalcolm/osf.io,cosenal/osf.io,DanielSBrown/osf.io,asanfilippo7/osf.io,cosenal/osf.io,emetsger/osf.io,kch8qx/osf.io,MerlinZhang/osf.io,wearpants/osf.io,cldershem/osf.io,petermalcolm/osf.io,brianjgeiger/osf.io,TomHeatwole/osf.io,wearpants/osf.io,zamattiac/osf.io,abought/osf.io,kch8qx/osf.io,cldershem/osf.io,dplorimer/osf,hmoco/osf.io,billyhunt/osf.io,doublebits/osf.io,Ghalko/osf.io,HarryRybacki/osf.io,danielneis/osf.io,Nesiehr/osf.io | 
scripts/staff_public_regs.py | scripts/staff_public_regs.py | # -*- coding: utf-8 -*-
"""Get public registrations for staff members.
python -m scripts.staff_public_regs
"""
from collections import defaultdict
import logging
from modularodm import Q
from website.models import Node, User
from website.app import init_app
# Module-level logger; output goes to whatever handlers the host
# application configures.
logger = logging.getLogger('staff_public_regs')
# GUIDs of the staff user accounts to report on.
STAFF_GUIDS = [
    'jk5cv',  # Jeff
    'cdi38',  # Brian
    'edb8y',  # Johanna
    'hsey5',  # Courtney
    '5hdme',  # Melissa
]
def main():
    """Log every public registration each staff member has contributed to."""
    init_app(set_backends=True, routes=False, mfr=False)
    staff_users = [User.load(guid) for guid in STAFF_GUIDS]
    regs_by_staff = defaultdict(list)
    # Single pass over all public registrations, checking each staff user.
    public_regs = Node.find(Q('is_registration', 'eq', True) & Q('is_public', 'eq', True))
    for node in public_regs:
        for staff_user in staff_users:
            if node in staff_user.node__contributed:
                regs_by_staff[staff_user._id].append(node)
    for guid, regs in regs_by_staff.items():
        staff_user = User.load(guid)
        logger.info('{} ({}) on {} Public Registrations:'.format(
            staff_user.fullname,
            staff_user._id,
            len(regs))
        )
        for reg in regs:
            logger.info('\t{} ({}): {}'.format(reg.title,
                                               reg._id,
                                               reg.absolute_url)
            )


if __name__ == '__main__':
    main()
| apache-2.0 | Python | |
b77f90c4372161243fcabb3eddbe4d35b4792bfc | Create jupyter_notebook_config.py | alexisylchan/introdl,alexisylchan/introdl | jupyter_notebook_config.py | jupyter_notebook_config.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from IPython.lib import passwd
# NOTE: `c` is the configuration object Jupyter injects when it executes
# this file; it is not defined here.
# Listen on all interfaces on port 8888 without opening a local browser
# (typical settings for running inside a container).
c.NotebookApp.ip = '*'
c.NotebookApp.port = 8888
c.NotebookApp.open_browser = False
c.MultiKernelManager.default_kernel_name = 'python2'
# Set a login password if PASSWORD is present in the environment, then
# scrub it so it cannot leak to child processes.
if 'PASSWORD' in os.environ:
    c.NotebookApp.password = passwd(os.environ['PASSWORD'])
    del os.environ['PASSWORD']
| mit | Python | |
d4a8c33d50a7c130d6203ef6332a241392516ba2 | Create database.py | raj61/automailer | database.py | database.py | #!/usr/bin/python
import MySQLdb
def getemail():
    """Return the list of email addresses stored in the `emails` table."""
    alist = []
    # Open database connection. Replace username/password with your
    # credentials and dbname with the name of your database.
    db = MySQLdb.connect("localhost", "username", "password", "dbname")
    try:
        cursor = db.cursor()
        # Fetch just the email column of the `emails` table (rename both to
        # match your schema). The default cursor returns rows as tuples, so
        # index by position -- the original row['email'] raised TypeError.
        cursor.execute("SELECT email FROM emails")
        for row in cursor.fetchall():
            alist.append(row[0])
    except MySQLdb.Error:
        # Narrow handler: only database errors, not every exception.
        print("Error: unable to fetch data")
    finally:
        # Disconnect from the server whether or not the query succeeded.
        db.close()
    return alist
| mit | Python | |
5eb474a5ff3110ef2b6955bd98bbe6bf16f7b0ab | add RNN policy with batch version (not working yet) | LxMLS/lxmls-toolkit,LxMLS/lxmls-toolkit,LxMLS/lxmls-toolkit | labs/notebooks/reinforcement_learning/RL.py | labs/notebooks/reinforcement_learning/RL.py | from IPython import embed
# Load Part-of-Speech data
from lxmls.readers.pos_corpus import PostagCorpusData
data = PostagCorpusData()
print(data.input_size)
print(data.output_size)
print(data.maxL)
# Alternative native CuDNN implementation of RNNs; the RL/gamma/maxL
# arguments enable the policy-gradient variant.
from lxmls.deep_learning.pytorch_models.rnn import PolicyRNN
model = PolicyRNN(
    input_size=data.input_size,
    embedding_size=50,
    hidden_size=20,
    output_size=data.output_size,
    learning_rate=0.1,
    gamma=0.9,
    RL=True,
    maxL=data.maxL
)
num_epochs = 10
import numpy as np
import time
# Get batch iterators for train and test
train_batches = data.sample('train', batch_size=10)
dev_set = data.sample('dev', batch_size=10)
test_set = data.sample('test', batch_size=10)
# # Epoch loop
# start = time.time()
# for epoch in range(num_epochs):
#
# # Batch loop
# for batch in train_batches:
# model.update(input=batch['input'], output=batch['output'])
#
# # Evaluation dev
# is_hit = []
# for batch in dev_set:
# is_hit.extend(model.predict(input=batch['input']) == batch['output'])
# accuracy = 100 * np.mean(is_hit)
#
# # Inform user
# print("Epoch %d: dev accuracy %2.2f %%" % (epoch + 1, accuracy))
#
# print("Training took %2.2f seconds per epoch" % ((time.time() - start) / num_epochs))
#
# # Evaluation test
# is_hit = []
# for batch in test_set:
# is_hit.extend(model.predict(input=batch['input']) == batch['output'])
# accuracy = 100 * np.mean(is_hit)
#
# # Inform user
# print("Test accuracy %2.2f %%" % accuracy)
#
# # Example of sampling
# print(train_batches[3]['input'])
# samples, log_probs = model._sample(input=train_batches[3]['input'])
# samples, log_probs
print("RL")
# Epoch loop
start = time.time()
for epoch in range(num_epochs):
    # Batch loop
    for batch in train_batches:
        #TODO: Use this here to create an RL inside model.update()
        #samples, log_probs = model._sample(input=batch['input']) # sample actions and its neg log probs
        # NOTE(review): embed() opens an interactive IPython shell on every
        # batch -- debugging aid only; remove before real training runs.
        embed()
        model.update(input=batch['input'], output=batch['output'])
    # Evaluation dev
    is_hit = []
    for batch in dev_set:
        is_hit.extend(model.predict(input=batch['input']) == batch['output'])
    accuracy = 100 * np.mean(is_hit)
    # Inform user
    print("Epoch %d: dev accuracy %2.2f %%" % (epoch + 1, accuracy))
print("Training took %2.2f seconds per epoch" % ((time.time() - start) / num_epochs))
# Evaluation test
is_hit = []
for batch in test_set:
    is_hit.extend(model.predict(input=batch['input']) == batch['output'])
accuracy = 100 * np.mean(is_hit)
# Inform user
print("Test accuracy %2.2f %%" % accuracy)
| mit | Python | |
cb73106d4a47a21f82021794234672600cceb2c6 | Add fix_genre_counts | george08/netflix-o-matic | populate_database/fix_genre_counts.py | populate_database/fix_genre_counts.py | #!/usr/bin/python
# we've been outputting stuff to text so now I get to wedge it into a database
# funtimes
# set up the database with `sqlite3 netflix_genres.sqlite < create_tables.sql`
import codecs
import sqlite3
import sys
conn = sqlite3.connect('netflix.sqlite')
c = conn.cursor()
c.execute('SELECT genre_id, name, movie_count FROM genres WHERE name != "" ORDER BY name')
genres = c.fetchall()
updates = []
for genre in genres:
    (genre_id, name, movie_count) = genre
    # Genre ids appear to be embedded zero-padded to five digits inside the
    # movies.genres text column (hence the %05i LIKE pattern) -- TODO confirm.
    genre_str = '%'+'%05i'% genre_id+'%'
    c.execute("SELECT COUNT(1) FROM movies WHERE genres LIKE ?", (genre_str,))
    result = c.fetchone()[0]
    # Only raise counts or fill in missing ones; never lower an existing count.
    if result > movie_count or movie_count == '':
        updates.append((result, genre_id))
#        print genre_id, result, movie_count, name
print updates
print "... Inserting"
c.executemany('UPDATE genres SET movie_count = ? WHERE genre_id = ?', updates)
conn.commit()
conn.close()
| unlicense | Python | |
d74f0d174f509b0a65e5643356af8eff1f5a4ca8 | Add a snippet. | jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets | python/scipy/write_stereo_wav_file.py | python/scipy/write_stereo_wav_file.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Write a two-channel (stereo) audio wave file (.wav)
# See: http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.io.wavfile.write.html
import numpy as np
from scipy.io import wavfile
def sin_wave(freq1, freq2, num_frames, rate):
    """Build a stereo 8-bit sine signal: `freq1` Hz left, `freq2` Hz right.

    Returns a (num_frames, 2) uint8 array of samples in [0, 254], centered
    on 127, suitable for an unsigned 8-bit WAV file.
    """
    t = np.arange(num_frames) / float(rate)
    left = 127 * (np.sin(t * freq1 * 2. * np.pi) + 1)
    right = 127 * (np.sin(t * freq2 * 2. * np.pi) + 1)
    # uint8, not int8: samples reach 254, which wraps negative in a signed
    # byte; 8-bit WAV data is unsigned by convention anyway.
    return np.stack([left, right], axis=1).astype(np.uint8)
# Two seconds of stereo audio: 440 Hz (A4) on the left channel, 880 Hz (A5)
# on the right.
rate = 24000
num_frames = 48000
nparray = sin_wave(440, 880, num_frames, rate)
wavfile.write("./test.wav", rate, nparray)
| mit | Python | |
181832a67d3fa3a4993d495dc9db12fdae7329f7 | add context processor tests | kezabelle/clastic,kezabelle/clastic | clastic/tests/test_context_proc.py | clastic/tests/test_context_proc.py | from __future__ import unicode_literals
from nose.tools import eq_, raises
import json
from werkzeug.test import Client
from werkzeug.wrappers import BaseResponse
from clastic import Application, json_response
from clastic.middleware import SimpleContextProcessor, ContextProcessor
from common import hello_world, hello_world_ctx, RequestProvidesName
def test_simple_ctx_proc():
    # A SimpleContextProcessor only fills in keys the endpoint left unset.
    middleware = SimpleContextProcessor(name='Kurt', language='en')
    routes = [('/', hello_world_ctx, json_response)]
    client = Client(Application(routes, middlewares=[middleware]), BaseResponse)
    data = json.loads(client.get('/').data)
    yield eq_, data['name'], 'world'  # does not overwrite
    yield eq_, data['language'], 'en'
def test_ctx_proc_req():
    # One required key ('name') plus one defaulted key ('language').
    middlewares = [RequestProvidesName(),
                   ContextProcessor(['name'], {'language': 'en'})]
    app = Application([('/', hello_world_ctx, json_response)],
                      middlewares=middlewares)
    client = Client(app, BaseResponse)
    data = json.loads(client.get('/').data)
    yield eq_, data['name'], 'world'  # does not overwrite
    yield eq_, data['language'], 'en'
    data = json.loads(client.get('/?name=Alex').data)
    yield eq_, data['name'], 'Alex'  # still does not overwrite
def test_ctx_proc_overwrite():
    # With overwrite=True the default replaces the endpoint's value.
    overwriter = ContextProcessor(defaults={'name': 'Kurt'}, overwrite=True)
    app = Application([('/', hello_world_ctx, json_response)],
                      middlewares=[overwriter])
    response = Client(app, BaseResponse).get('/')
    yield eq_, json.loads(response.data)['name'], 'Kurt'  # does overwrite
def test_ctx_proc_empty():
    # A ContextProcessor with no configuration leaves the context untouched.
    noop = ContextProcessor()
    app = Application([('/', hello_world_ctx, json_response)],
                      middlewares=[noop])
    response = Client(app, BaseResponse).get('/')
    yield eq_, json.loads(response.data)['name'], 'world'
def test_ctx_proc_nonctx():
    # Endpoints that do not return a context dict pass through unchanged.
    defaulter = ContextProcessor(defaults={'name': 'Kurt'})
    app = Application([('/', hello_world)],
                      middlewares=[defaulter])
    response = Client(app, BaseResponse).get('/')
    yield eq_, response.data, 'Hello, world!'
@raises(NameError)
def test_ctx_proc_overlap():
    # A key may not be both required and defaulted.
    ContextProcessor(required=['name'],
                     defaults={'name': 'Alex'})
@raises(NameError)
def test_ctx_proc_reserved():
    # 'next' is a reserved name and may not be listed as required.
    ContextProcessor(required=['next'])
@raises(TypeError)
def test_ctx_proc_req_type():
    # Required keys must be strings.
    ContextProcessor(required=[6])
@raises(TypeError)
def test_ctx_proc_default_type():
    # Default keys must be strings. The original passed the misspelled
    # keyword ``default=``, so the TypeError came from the unexpected
    # keyword argument rather than from key validation.
    ContextProcessor(defaults={6: ''})
@raises(TypeError)
def test_ctx_proc_def_nonmap():
    # defaults must be a mapping, not a sequence.
    ContextProcessor(defaults=['hi', 'hello'])
| bsd-3-clause | Python | |
f9bdf777a13404ba25e0e8cdf99a3554320529c9 | Add warnings to inspector DOM count unittest baselines. | jaruba/chromium.src,Chilledheart/chromium,dednal/chromium.src,dednal/chromium.src,bright-sparks/chromium-spacewalk,fujunwei/chromium-crosswalk,jaruba/chromium.src,TheTypoMaster/chromium-crosswalk,chuan9/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Fireblend/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,dushu1203/chromium.src,markYoungH/chromium.src,hgl888/chromium-crosswalk,Just-D/chromium-1,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,M4sse/chromium.src,PeterWangIntel/chromium-crosswalk,ltilve/chromium,markYoungH/chromium.src,chuan9/chromium-crosswalk,dushu1203/chromium.src,Just-D/chromium-1,hgl888/chromium-crosswalk,dushu1203/chromium.src,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,chuan9/chromium-crosswalk,krieger-od/nwjs_chromium.src,krieger-od/nwjs_chromium.src,krieger-od/nwjs_chromium.src,Just-D/chromium-1,mohamed--abdel-maksoud/chromium.src,littlstar/chromium.src,ltilve/chromium,Jonekee/chromium.src,TheTypoMaster/chromium-crosswalk,markYoungH/chromium.src,Pluto-tv/chromium-crosswalk,dushu1203/chromium.src,hgl888/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk-efl,ltilve/chromium,chuan9/chromium-crosswalk,dushu1203/chromium.src,Chilledheart/chromium,littlstar/chromium.src,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,axinging/chromium-crosswalk,M4sse/chromium.src,Pluto-tv/chromium-crosswalk,ondra-novak/chromium.src,crosswalk-project/chromium-crosswalk-efl,bright-sparks/chromium-spacewalk,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,ondra-novak/chromium.src,littlstar/chromium.src,M4sse/chromium.src,Fireblend/chromium-crosswalk,ondra-novak/chromium.src,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,dednal/chromium.src
,fujunwei/chromium-crosswalk,Just-D/chromium-1,bright-sparks/chromium-spacewalk,bright-sparks/chromium-spacewalk,axinging/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,ltilve/chromium,Jonekee/chromium.src,Fireblend/chromium-crosswalk,M4sse/chromium.src,Jonekee/chromium.src,dushu1203/chromium.src,hgl888/chromium-crosswalk,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,PeterWangIntel/chromium-crosswalk,dednal/chromium.src,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,markYoungH/chromium.src,jaruba/chromium.src,markYoungH/chromium.src,chuan9/chromium-crosswalk,krieger-od/nwjs_chromium.src,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk,littlstar/chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,crosswalk-project/chromium-crosswalk-efl,M4sse/chromium.src,crosswalk-project/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,fujunwei/chromium-crosswalk,ondra-novak/chromium.src,Pluto-tv/chromium-crosswalk,bright-sparks/chromium-spacewalk,Fireblend/chromium-crosswalk,fujunwei/chromium-crosswalk,Just-D/chromium-1,fujunwei/chromium-crosswalk,bright-sparks/chromium-spacewalk,axinging/chromium-crosswalk,ltilve/chromium,littlstar/chromium.src,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,M4sse/chromium.src,mohamed--abdel-maksoud/chromium.src,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,axinging/chromium-crosswalk,Chilledheart/chromium,bright-sparks/chromium-spacewalk,M4sse/chromium.src,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,ondra-novak/chromium.src,axinging/chromium-crosswalk,hgl888/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,ondra-novak/chromium.src,dushu1203/chromium.src,Pluto-tv/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,Jonekee/chromium.src,jaruba/chromium.src,jaruba/chromium.src,crosswalk-project/chromiu
m-crosswalk-efl,dushu1203/chromium.src,M4sse/chromium.src,Jonekee/chromium.src,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,jaruba/chromium.src,Just-D/chromium-1,PeterWangIntel/chromium-crosswalk,krieger-od/nwjs_chromium.src,axinging/chromium-crosswalk,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,Chilledheart/chromium,mohamed--abdel-maksoud/chromium.src,bright-sparks/chromium-spacewalk,ltilve/chromium,ltilve/chromium,jaruba/chromium.src,dushu1203/chromium.src,markYoungH/chromium.src,fujunwei/chromium-crosswalk,Jonekee/chromium.src,dushu1203/chromium.src,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,dednal/chromium.src,M4sse/chromium.src,PeterWangIntel/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,ondra-novak/chromium.src,fujunwei/chromium-crosswalk,dushu1203/chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk-efl,M4sse/chromium.src,dednal/chromium.src,markYoungH/chromium.src,Just-D/chromium-1,dednal/chromium.src,fujunwei/chromium-crosswalk,krieger-od/nwjs_chromium.src,Jonekee/chromium.src,markYoungH/chromium.src,ltilve/chromium,Fireblend/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,jaruba/chromium.src,Chilledheart/chromium,mohamed--abdel-maksoud/chromium.src,Pluto-tv/chromium-crosswalk,Just-D/chromium-1,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,M4sse/chromium.src,chuan9/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,dednal/chromium.src,bright-sparks/chromium-spacewalk,ondra-novak/chromium.src,Fireblend/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,ondra-novak/chromium.src,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,Fireblend/chromium-crosswalk,krieger-od/nwjs_chromium.src,axinging/chromiu
m-crosswalk,Pluto-tv/chromium-crosswalk,krieger-od/nwjs_chromium.src,dednal/chromium.src,ltilve/chromium,Pluto-tv/chromium-crosswalk,Jonekee/chromium.src,littlstar/chromium.src,dednal/chromium.src,Just-D/chromium-1,Jonekee/chromium.src | tools/telemetry/telemetry/core/backends/chrome/inspector_memory_unittest.py | tools/telemetry/telemetry/core/backends/chrome/inspector_memory_unittest.py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import benchmark
from telemetry.unittest import tab_test_case
class InspectorMemoryTest(tab_test_case.TabTestCase):
  @benchmark.Enabled('has tabs')
  def testGetDOMStats(self):
    """Checks DOM document/node/listener counts against the known baseline."""
    # Due to an issue with CrOS, we create a new tab here rather than
    # using the existing tab to get a consistent starting page on all platforms.
    self._tab = self._browser.tabs.New()
    self.Navigate('dom_counter_sample.html')
    # Document_count > 1 indicates that WebCore::Document loaded in Chrome
    # is leaking! The baseline should exactly match the numbers on:
    # unittest_data/dom_counter_sample.html
    # Please contact kouhei@, hajimehoshi@ when rebaselining.
    counts = self._tab.dom_stats
    self.assertEqual(counts['document_count'], 1,
                     'Document leak is detected! '+
                     'The previous document is likely retained unexpectedly.')
    self.assertEqual(counts['node_count'], 14,
                     'Node leak is detected!')
    self.assertEqual(counts['event_listener_count'], 2,
                     'EventListener leak is detected!')
'EventListener leak is detected!')
| # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import benchmark
from telemetry.unittest import tab_test_case
class InspectorMemoryTest(tab_test_case.TabTestCase):
@benchmark.Enabled('has tabs')
def testGetDOMStats(self):
# Due to an issue with CrOS, we create a new tab here rather than
# using the existing tab to get a consistent starting page on all platforms.
self._tab = self._browser.tabs.New()
self.Navigate('dom_counter_sample.html')
counts = self._tab.dom_stats
self.assertEqual(counts['document_count'], 1)
self.assertEqual(counts['node_count'], 14)
self.assertEqual(counts['event_listener_count'], 2)
| bsd-3-clause | Python |
3119222d27bd63b9f4e9a57ff8e9d88e53d9735a | Modify island.py | AiryShift/island | island.py | island.py | from noise import generate_noise
from PIL import Image
import numpy as np
# Output image dimensions in pixels.
WIDTH = 128
HEIGHT = 128
if __name__ == '__main__':
    # Render the generated noise as an RGB image and save it.
    # NOTE(review): assumes generate_noise(..., triple=True) yields rows of
    # (r, g, b) byte triples -- confirm against noise.py.
    data = np.array(generate_noise(WIDTH, HEIGHT, triple=True), dtype=np.uint8)
    img = Image.fromarray(data, 'RGB')
    img.save('out.png')
| mit | Python | |
2d55503216d7020a71017fbcb2c1b48661c345cb | Add manage | gopla29/djangogirls,gopla29/djangogirls | manage.py | manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django entry point: point at the settings module, then hand
    # the command line over to Django's management commands.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| mit | Python | |
77738a8b7e895b5f71418d5417db04f34b08f918 | add manage.py | marteki/retirement,marteki/retirement,OrlandoSoto/retirement,OrlandoSoto/retirement,mistergone/retirement,OrlandoSoto/retirement,marteki/retirement,niqjohnson/retirement,marteki/retirement,niqjohnson/retirement,mistergone/retirement,mistergone/retirement,niqjohnson/retirement | manage.py | manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django entry point: point at the settings module, then hand
    # the command line over to Django's management commands.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| cc0-1.0 | Python | |
10ef7955b21e3f9d3f3ac9eb43995e7cf0e91201 | Add meta/import_all.py for testing | aevri/mel,aevri/mel | meta/import_all.py | meta/import_all.py | #! /usr/bin/env python
# encoding: utf-8
"""Imports all the modules under the specified path.
This can be useful as a basic static analysis test, assuming that the imports
do not have side-effects.
"""
from __future__ import print_function
import argparse
import importlib
import os
import sys
def main():
    """Import every module found under PATH (basic import smoke test)."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "PATH",
        help="path to the package to import from")
    parser.add_argument(
        "--verbose",
        "-v",
        action="store_true")
    args = parser.parse_args()

    # normpath drops any trailing slash, so split() yields the package name
    # instead of an empty string.
    parent_dir, package = os.path.split(os.path.normpath(args.PATH))

    # Python looks in sys.path to find modules to import, if we don't do this
    # then it probably won't find any of the modules under parent_dir.
    sys.path.append(os.path.abspath(parent_dir))
    # os.chdir('') raises OSError when PATH has no directory component.
    os.chdir(parent_dir or '.')

    for root, _, files in os.walk(package):
        for f in files:
            if not f.endswith('.py'):
                continue
            module_name = f[:-3]
            # os.walk joins components with os.sep, so split on that rather
            # than a hard-coded '/' (portability to Windows).
            module_list = root.split(os.sep)
            # __init__.py maps to the package itself, not a submodule.
            if module_name != '__init__':
                module_list.append(module_name)
            module_ref = '.'.join(module_list)
            if args.verbose:
                print(module_ref)
            importlib.import_module(module_ref)


if __name__ == "__main__":
    sys.exit(main())
| apache-2.0 | Python | |
efd125ef973a680b6413e820e1308070a79554b4 | Encrypt with vigenere cipher | vtemian/university_projects,vtemian/university_projects,vtemian/university_projects | practic_stage/hmw8/main.py | practic_stage/hmw8/main.py | import string
letters = string.ascii_uppercase
# Tabula recta: vigenere_table[plain][key] is the letter `plain` shifted
# forward by the alphabet position of `key` (wrapping past 'Z').
vigenere_table = {letter: {letters[j]: letters[(i + j) % 26]
                           for j, l in enumerate(letters)}
                  for i, letter in enumerate(letters)}
def encrypt(text, key):
    """Vigenere-encrypt uppercase `text` with uppercase `key`.

    The key is repeated cyclically, so it no longer has to be at least as
    long as the text; a key of equal length behaves exactly as before.
    """
    encrypted = []
    for index, letter in enumerate(text):
        # Shift `letter` forward by the key letter's alphabet position;
        # equivalent to the vigenere_table lookup but needs no table.
        shift = ord(key[index % len(key)]) - ord('A')
        encrypted.append(chr((ord(letter) - ord('A') + shift) % 26 + ord('A')))
    return ''.join(encrypted)
# Classic textbook example: ATTACKATDAWN enciphered with the key LEMON.
TEXT = "ATTACKATDAWN"
KEY = "LEMONLEMONLE"
print encrypt(TEXT, KEY)
| apache-2.0 | Python | |
5f63a5ebfe3210fe68df036eef27a51bf431f6a3 | Initialize transpositionFileCipher | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | books/CrackingCodesWithPython/Chapter10/transpositionFileCipher.py | books/CrackingCodesWithPython/Chapter10/transpositionFileCipher.py | # Transposition Cipher Encrypt/Decrypt File
# https://www.nostarch.com/crackingcodes/ (BSD Licensed)
import time, os, sys, transpositionEncrypt, transpositionDecrypt
def main():
    """Encrypt or decrypt a whole file with the transposition cipher."""
    inputFilename = 'frankenstein.txt'
    # BE CAREFUL! If a file with the outputFilename name already exists,
    # this program will overwrite that file:
    outputFilename = 'frankenstein.encrypted.txt'
    myKey = 10
    myMode = 'encrypt' # Set to 'encrypt' or 'decrypt'
    # If the input file does not exist, the program terminates early:
    if not os.path.exists(inputFilename):
        print('The file %s does not exist. Quitting...' % (inputFilename))
        sys.exit()
    # If the output file already exists, give the user a chance to quit:
    if os.path.exists(outputFilename):
        print('This will overwrite the file %s. (C)ontinue or (Q)uit?' % (outputFilename))
        response = input('> ')
        if not response.lower().startswith('c'):
            sys.exit()
    # Read in the message from the input file; the with-statement closes
    # the file even if reading raises.
    with open(inputFilename) as fileObj:
        content = fileObj.read()
    print('%sing...' % (myMode.title()))
    # Measure how long the encryption/decryption takes:
    startTime = time.time()
    if myMode == 'encrypt':
        translated = transpositionEncrypt.encryptMessage(myKey, content)
    elif myMode == 'decrypt':
        translated = transpositionDecrypt.decryptMessage(myKey, content)
    totalTime = round(time.time() - startTime, 2)
    print('%sion time: %s seconds' % (myMode.title(), totalTime))
    # Write out the translated message to the output file:
    with open(outputFilename, 'w') as outputFileObj:
        outputFileObj.write(translated)
    print('Done %sing %s (%s characters).' % (myMode, inputFilename, len(content)))
    print('%sed file is %s.' % (myMode.title(), outputFilename))
# If transpositionCipherFile.py is run (instead of imported as a module),
# call the main() function:
if __name__ == '__main__':
    main()
| mit | Python | |
fc8a37b63ddc2455afbeeae0a6c2ac911c113337 | add new | momotarou-zamurai/kibidango | maya/python/animation/grapheditor/fit_key_tangent.py | maya/python/animation/grapheditor/fit_key_tangent.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author : Masahiro Ohmomo
# DCC : Maya
# Version : 2013 - Latest
# Recommend: 2013
#
# Description.
# In this script, do the fitting.
# The target is keyframe's tangent.
# You should be selected keyframe's of least two index.
#
# Run command.
# import fit_key_tangent
# fit_key_tangent.main()
#
from maya import cmds, mel
import math
def rad_deg(value=0.0, rd=False):
    """Convert `value` between radians and degrees.

    When `rd` is True, `value` is taken as radians and the result is
    degrees; otherwise `value` is degrees and the result is radians.
    """
    # math.degrees/radians use full-precision pi instead of the truncated
    # 3.141592 constant the original hand-rolled conversion used.
    if rd:
        return math.degrees(value)
    return math.radians(value)
def main():
    """Aim each selected key's out-tangent at the neighbouring selected key.

    For every animation curve with selected keys, the out-tangent of each
    key is set to the angle of the straight line toward the next selected
    key; the last key also gets its in-tangent aimed back at the previous
    key. Requires at least two selected keys per curve.
    """
    names = cmds.keyframe(q=True, n=True)
    for n in names:
        # Frame times and values of the selected keys on this curve.
        frames = cmds.keyframe(n,q=True,sl=True)
        values = cmds.keyframe(n,q=True,vc=True,sl=True)
        countup = 0
        for i in range(len(values)):
            isLast = False
            if len(values)-1 == countup:
                # Last key: take the slope from the previous key instead.
                x1,y1,x2,y2 = frames[i],values[i],frames[i-1],values[i-1]
                isLast=True
            else:
                x1,y1,x2,y2 = frames[i],values[i],frames[i+1],values[i+1]
            # Angle (degrees) of the line between the two keys.
            # NOTE(review): raises ZeroDivisionError if two selected keys
            # share the same frame (x2 == x1).
            c_tan = rad_deg(math.atan((y2-y1)/(x2-x1)),True)
            if not isLast:
                cmds.keyTangent(n,e=True,a=True,t=(frames[i],frames[i]),oa=c_tan)
            else:
                cmds.keyTangent(n,e=True,a=True,t=(frames[i],frames[i]),ia=c_tan,oa=c_tan)
            countup += 1
73fce6afc07496dcc79c2e2763523207c257185b | Update the docstring | NickShaffner/rhea,cfelton/rhea,NickShaffner/rhea,cfelton/rhea | rhea/vendor/device_clock_mgmt_prim.py | rhea/vendor/device_clock_mgmt_prim.py |
from __future__ import absolute_import
import myhdl
from myhdl import instance, delay, always_comb
from rhea.system import timespec
@myhdl.block
def _clock_generate(clock, enable, ticks):
    """Drive `clock` with the (low, high) durations given in `ticks`.

    While `enable` is false the clock is held low and re-checked once per
    full period.
    """
    assert len(ticks) == 2
    totticks = sum(ticks)
    @instance
    def mdlclk():
        clock.next = False
        while True:
            if enable:
                # One full period: low for ticks[0], then high for ticks[1].
                yield delay(ticks[0])
                clock.next = True
                yield delay(ticks[1])
                clock.next = False
            else:
                yield delay(totticks)
    return mdlclk
@myhdl.block
def device_clock_mgmt_prim(clkmgmt):
    """ This is the generic device PLL model
    The vendor specific implementations will set the v*_code attribute
    for this function to the specific template needed to instantiate
    the device primitive in the generated intermediate V*. This
    block also creates the clocks for MyHDL simulation when the device
    primitives are not available.
    not convetible, simulation only.
    """
    pif = clkmgmt
    (clockin, reset, enable,
     clocksout, locked,) = (pif.clockin, pif.reset, pif.enable,
                            pif.clocksout, pif.locked,)
    # Mark the outputs as driven (conversion hint; the device primitive
    # drives them in the generated code).
    clocksout.driven = True
    locked.driven = True
    # for simulation and modeling create the clocks defined
    # by the `pll_intf`. For the implementation use verilog_code
    clk_inst = []
    for ii, clk in enumerate(clkmgmt.clocks):
        # Period of this clock expressed in simulation ticks.
        totalticks = 1/(clk.frequency*timespec)
        t1 = int(totalticks // 2)
        # @todo: add detailed warnings about quantization and timespec
        # @todo: resolutions (i.e. funny clocks)
        ticks = (t1, int(totalticks-t1))
        clk_inst.append(_clock_generate(clk, enable, ticks))
        print(" clock {} @ {:8.3f} MHz {}".format(
            ii, clk.frequency/1e6, ticks))
    @always_comb
    def clk_assign():
        clkmgmt.clockin_out.next = clockin
        for ii, clk in enumerate(clkmgmt.clocks):
            clocksout.next[ii] = clk
    return clk_inst, clk_assign
|
from __future__ import absolute_import
import myhdl
from myhdl import instance, delay, always_comb
from rhea.system import timespec
@myhdl.block
def _clock_generate(clock, enable, ticks):
assert len(ticks) == 2
totticks = sum(ticks)
@instance
def mdlclk():
clock.next = False
while True:
if enable:
yield delay(ticks[0])
clock.next = True
yield delay(ticks[1])
clock.next = False
else:
yield delay(totticks)
return mdlclk
@myhdl.block
def device_clock_mgmt_prim(clkmgmt):
""" This is the generic device PLL module
The vendor specific implementations will set the v*_code attribute
for this function to the specific template needed to instantiate
the device primitive in the generated intermediate V*. This
module also creates the clocks for MyHDL simulation when the device
primitives are not available
"""
pif = clkmgmt
(clockin, reset, enable,
clocksout, locked,) = (pif.clockin, pif.reset, pif.enable,
pif.clocksout, pif.locked,)
clocksout.driven = True
locked.driven = True
# for simulation and modeling create the clocks defined
# by the `pll_intf`. For the implementation use verilog_code
clk_inst = []
for ii, clk in enumerate(clkmgmt.clocks):
totalticks = 1/(clk.frequency*timespec)
t1 = int(totalticks // 2)
# @todo: add detailed warnings about qunatization and timespec
# @todo: resolutions (i.e. funny clocks)
ticks = (t1, int(totalticks-t1))
clk_inst.append(_clock_generate(clk, enable, ticks))
print(" clock {} @ {:8.3f} MHz {}".format(
ii, clk.frequency/1e6, ticks))
@always_comb
def clk_assign():
clkmgmt.clockin_out.next = clockin
for ii, clk in enumerate(clkmgmt.clocks):
clocksout.next[ii] = clk
return clk_inst, clk_assign
| mit | Python |
ce1a4f7f55e03429dd0baf219fda71debc7e2ba2 | add test to backup degraded | evernym/zeno,evernym/plenum | plenum/test/replica/test_replica_removing_with_backup_degraded.py | plenum/test/replica/test_replica_removing_with_backup_degraded.py | import pytest
from plenum.test.replica.helper import check_replica_removed
from stp_core.loop.eventually import eventually
from plenum.test.helper import waitForViewChange
from plenum.test.test_node import ensureElectionsDone
def test_replica_removing_with_backup_degraded(looper,
                                               txnPoolNodeSet,
                                               sdk_pool_handle,
                                               sdk_wallet_client,
                                               tconf,
                                               tdir,
                                               allPluginsPath):
    """
    1. Start backup degraded.
    2. Check that the degraded replicas are removed on all nodes.
    3. Start View Change.
    4. Check that all replicas were restored.
    """
    instance_to_remove = 1
    start_replicas_count = txnPoolNodeSet[0].replicas.num_replicas
    # Simulate degradation of the chosen backup instance on every node.
    for node in txnPoolNodeSet:
        node.view_changer.on_backup_degradation([instance_to_remove])
    # check that replicas were removed
    def check_replica_removed_on_all_nodes():
        for node in txnPoolNodeSet:
            check_replica_removed(node,
                                  start_replicas_count,
                                  instance_to_remove)
    looper.run(eventually(check_replica_removed_on_all_nodes,
                          timeout=tconf.TolerateBackupPrimaryDisconnection * 4))
    # Removing a backup replica must not look like master degradation or
    # leave requests queued.
    for node in txnPoolNodeSet:
        assert not node.monitor.isMasterDegraded()
        assert len(node.requests) == 0
    # start View Change
    for node in txnPoolNodeSet:
        node.view_changer.on_master_degradation()
    waitForViewChange(looper, txnPoolNodeSet, expectedViewNo=1,
                      customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT)
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
    # check that all replicas were restored
    assert all(start_replicas_count == node.replicas.num_replicas
               for node in txnPoolNodeSet)
| apache-2.0 | Python | |
46be255fd0cfaeb2352f2f49b4ec5996a804768d | Add unit test for base Handler. | 4degrees/sawmill,4degrees/mill | test/unit/handler/test_base.py | test/unit/handler/test_base.py | # :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
from mock import Mock
from bark.log import Log
from bark.handler.base import Handler
from bark.formatter.base import Formatter
class Concrete(Handler):
    '''Concrete subclass of abstract base for testing.

    Records every value passed to :meth:`output` in ``self.data`` so the
    tests can inspect exactly what the handler emitted.
    '''

    def __init__(self, *args, **kw):
        '''Initialise handler.'''
        super(Concrete, self).__init__(*args, **kw)
        self.data = []

    def output(self, data):
        '''Output formatted *data*.'''
        self.data.append(data)
class Field(Formatter):
    '''Format log into string of fields.'''

    def format(self, log):
        '''Return string of log fields.

        Fields are sorted by key and rendered as ``key=value`` pairs
        joined with ``:``.
        '''
        data = []
        for key, value in sorted(log.items()):
            data.append('{0}={1}'.format(key, value))
        return ':'.join(data)
def test_handle():
    '''Test handle method.'''
    handler = Concrete()
    log = Log(message='A message')
    handler.handle(log)
    # With no formatter configured the log object passes through unchanged.
    assert handler.data == [log]


def test_filterer():
    '''Test filterer prevents output of log.'''
    # A filterer whose filter() returns True means "filter this log out".
    deny_all = Mock()
    deny_all.filter = Mock(return_value=True)
    handler = Concrete(filterer=deny_all)
    log = Log(message='A message')
    handler.handle(log)
    assert handler.data == []


def test_formatter():
    '''Test formatting of data before output.'''
    handler = Concrete(formatter=Field())
    log = Log(message='A message')
    handler.handle(log)
    assert handler.data == ['message=A message']
90e96e741bce834e3862a6ed84b22c6d45f64d3f | solve 11997 | arash16/prays,arash16/prays,arash16/prays,arash16/prays,arash16/prays,arash16/prays | UVA/vol-119/11997.py | UVA/vol-119/11997.py | from heapq import heapify, heappush, heappop
from sys import stdin, stdout
# UVA 11997 "K Smallest Sums": for each test case of N rows of N numbers,
# print the N smallest sums obtainable by picking one number per row.
# Rows are merged pairwise, always keeping only the N best partial sums.
I = list(map(int, stdin.read().split()))
ii = 0  # read cursor into the flat token stream
while ii < len(I):
    N = I[ii]
    sums = I[ii+1: ii+1 + N]  # running N smallest partial sums
    sums.sort()
    for k in range(1, N):
        X = I[ii+1 + k*N: ii+1 + k*N + N]  # next row to merge in
        X.sort()
        # Max-heap (values negated) seeded with the N cheapest candidates.
        q = list(-(s + X[0]) for s in sums)
        heapify(q)
        for s in sums:
            for j in range(1, N):
                if s + X[j] < -q[0]:
                    heappop(q)
                    heappush(q, -(s + X[j]))
                else: break  # X is sorted: larger j only gives larger sums
        for i in range(0, N):
            sums[N-i-1] = -heappop(q)  # pop largest first, fill backwards
    stdout.write(' '.join(map(str, sums)) + '\n')
    ii += N*N + 1  # skip past this N x N case (plus the leading N)
| mit | Python | |
40a83c5fc16facc0fa7e64752dd348c255f07754 | add C/C++ building tools named `Surtr` | ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study | cplusplus/chaos/tools/surtr/Surtr.py | cplusplus/chaos/tools/surtr/Surtr.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2016 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import argparse
def get_options():
    """Parse the command line and return the requested action (or None)."""
    arg_parser = argparse.ArgumentParser(
        description='Surtr C/C++ building tool')
    arg_parser.add_argument(
        'option', nargs='?',
        help='config|build|rebuild|clean|remove the project')
    return arg_parser.parse_args().option


def main():
    """Entry point placeholder; the real dispatch is not implemented yet."""
    pass


if __name__ == '__main__':
    main()
| bsd-2-clause | Python | |
ea9473959c1d6c505b8c25405c8c2674945d84e9 | add uts for backup_processor | mvendra/mvtools,mvendra/mvtools,mvendra/mvtools | tests/backup_processor_test.py | tests/backup_processor_test.py | #!/usr/bin/env python3
import sys
import os
import shutil
import unittest
import create_and_write_file
import mvtools_test_fixture
import backup_processor
class BackupProcessorTest(unittest.TestCase):
    """Unit tests for backup_processor.

    setUp builds a throwaway tree:
    source_test/{folder1/{subfolder1,subfolder2},folder2,folder3} with small
    text files, a target folder and a config file pointing at them.
    """

    def setUp(self):
        v, r = self.delegate_setUp()
        if not v:
            self.tearDown()
            self.fail(r)

    def delegate_setUp(self):
        """Create the fixture tree; returns (ok, error-message)."""
        v, r = mvtools_test_fixture.makeAndGetTestFolder("backup_processor_test")
        if not v:
            return v, r
        self.test_base_dir = r[0]
        self.test_dir = r[1]
        # nonexistent folder - for testing only
        self.nonexistent = os.path.join(self.test_dir, "nonexistent")
        # create folders, source and target
        self.test_source_folder = os.path.join(self.test_dir, "source_test")
        os.mkdir(self.test_source_folder)
        self.test_target_folder = os.path.join(self.test_dir, "target_test")
        os.mkdir(self.test_target_folder)
        # create test folders
        self.folder1 = os.path.join(self.test_source_folder, "folder1")
        self.folder2 = os.path.join(self.test_source_folder, "folder2")
        self.folder3 = os.path.join(self.test_source_folder, "folder3")
        os.mkdir(self.folder1)
        os.mkdir(self.folder2)
        os.mkdir(self.folder3)
        # create subfolders
        self.folder1_subfolder1 = os.path.join(self.folder1, "subfolder1")
        os.mkdir(self.folder1_subfolder1)
        self.folder1_subfolder2 = os.path.join(self.folder1, "subfolder2")
        os.mkdir(self.folder1_subfolder2)
        # create files, folder1
        self.folder1_file1 = os.path.join(self.folder1, "file1.txt")
        create_and_write_file.create_file_contents(self.folder1_file1, "abc")
        self.folder1_subfolder1_file2 = os.path.join(self.folder1_subfolder1, "file2.txt")
        create_and_write_file.create_file_contents(self.folder1_subfolder1_file2, "abc")
        self.folder1_subfolder2_file3 = os.path.join(self.folder1_subfolder2, "file3.txt")
        create_and_write_file.create_file_contents(self.folder1_subfolder2_file3, "abc")
        # create files, folder2
        self.folder2_file1 = os.path.join(self.folder2, "file1.txt")
        create_and_write_file.create_file_contents(self.folder2_file1, "abc")
        # create files, folder3
        self.folder3_file1 = os.path.join(self.folder3, "file1.txt")
        create_and_write_file.create_file_contents(self.folder3_file1, "abc")
        # create config file
        cfg_file_contents = ""
        #cfg_file_contents += "BKPREPARATION = ...\n"
        cfg_file_contents += "BKARTIFACTS_BASE = %s\n" % self.test_source_folder
        cfg_file_contents += "BKTARGETS_ROOT = %s - nocheckmount\n" % self.test_target_folder
        cfg_file_contents += "BKTEMP = %s\n" % self.test_dir
        cfg_file_contents += "BKTARGETS_BASEDIR = BackupTests\n"
        self.test_config_file = os.path.join(self.test_dir, "test_config_file.cfg")
        create_and_write_file.create_file_contents(self.test_config_file, cfg_file_contents)
        return True, ""

    def tearDown(self):
        shutil.rmtree(self.test_base_dir)

    def testArtifactBase1(self):
        artbase = backup_processor.ArtifactBase(self.test_source_folder, [self.folder2])
        # BUG FIX: these used assertTrue(a, b) whose second argument is a
        # failure *message*, so the comparisons were never actually made.
        self.assertEqual(artbase.get_path(), self.test_source_folder)
        self.assertEqual(artbase.get_list_exceptions(), [self.folder2])
        self.assertEqual(artbase.validate_exceptions(), (True, ""))

    def testArtifactBase2(self):
        artbase = backup_processor.ArtifactBase(self.test_source_folder, [self.nonexistent])
        # BUG FIX: assertTrue(a, b) -> assertEqual(a, b), as above.
        self.assertEqual(artbase.get_path(), self.test_source_folder)
        self.assertEqual(artbase.get_list_exceptions(), [self.nonexistent])
        # A nonexistent exception path must fail validation.
        r = artbase.validate_exceptions()
        self.assertFalse(r[0])

    def testMakeBackupArtifactsList(self):
        artbase = backup_processor.ArtifactBase(self.test_source_folder, [self.folder2])
        res = backup_processor.make_backup_artifacts_list([artbase])
        # folder2 is listed as an exception, so it must be excluded.
        self.assertTrue(self.folder1 in res)
        self.assertFalse(self.folder2 in res)
        self.assertTrue(self.folder3 in res)

    def testReadConfig(self):
        v, r = backup_processor.read_config(self.test_config_file)
        self.assertTrue(v)
        # r: (preparation, artifact-bases, targets, basedir, temp-dir)
        self.assertEqual(r[0], "")
        self.assertEqual(r[2], [self.test_target_folder])
        self.assertEqual(r[3], "BackupTests")
        self.assertEqual(r[4], self.test_dir)


if __name__ == '__main__':
    unittest.main()
| mit | Python | |
88c5a9e79a986e828a1da7a09b7cdaf3fddd68a4 | Add Elasticsearch Service Domain | remind101/stacker_blueprints,remind101/stacker_blueprints | stacker_blueprints/elasticsearch.py | stacker_blueprints/elasticsearch.py | """AWS Elasticsearch Service.
Blueprint to configure AWS Elasticsearch service.
Example::
- name: elasticsearch
class_path: stacker_blueprints.elasticsearch.Domain
variables:
Roles:
- ${empireMinion::IAMRole}
InternalZoneId: ${vpc::InternalZoneId}
InternalZoneName: ${vpc::InternalZoneName}
InternalHostName: es
"""
import awacs.es
from awacs.aws import (
Allow,
Policy,
Statement,
)
from stacker.blueprints.base import Blueprint
from troposphere import (
elasticsearch,
iam,
route53,
GetAtt,
Join,
Output,
Ref,
)
ES_DOMAIN = "ESDomain"
DNS_RECORD = "ESDomainDNSRecord"
POLICY_NAME = "ESDomainAccessPolicy"
class Domain(Blueprint):  # BUG FIX: base class was `Base`, which is never
    # imported; the file imports `Blueprint` from stacker.
    """Blueprint that creates an AWS Elasticsearch domain, an optional
    internal DNS CNAME for it, and an IAM policy granting the supplied
    roles HTTP access to the domain."""

    VARIABLES = {
        "Roles": {
            "type": list,
            "description": (
                "List of roles that should have access to the ES domain.")},
        "InternalZoneId": {
            "type": str,
            "default": None,
            "description": "Internal zone id, if you have one."},
        "InternalZoneName": {
            "type": str,
            "default": None,
            "description": "Internal zone name, if you have one."},
        "InternalHostName": {
            "type": str,
            "default": None,
            "description": "Internal domain name, if you have one."},
    }

    def create_dns_record(self):
        """Add a CNAME to the domain endpoint when all zone vars are set."""
        t = self.template
        variables = self.get_variables()
        should_create_dns = all([
            variables["InternalZoneId"],
            variables["InternalZoneName"],
            variables["InternalHostName"],
        ])
        if should_create_dns:
            t.add_resource(
                route53.RecordSetType(
                    DNS_RECORD,
                    HostedZoneId=variables["InternalZoneId"],
                    Comment="ES Domain CNAME Record",
                    Name="{}.{}".format(variables["InternalHostName"],
                                        variables["InternalZoneName"]),
                    Type="CNAME",
                    TTL="120",
                    ResourceRecords=[GetAtt(ES_DOMAIN, "DomainEndpoint")],
                ))
            t.add_output(Output("CNAME", Value=Ref(DNS_RECORD)))

    def create_domain(self):
        """Add the Elasticsearch domain and export its ARN and endpoint."""
        t = self.template
        t.add_resource(elasticsearch.ElasticsearchDomain(ES_DOMAIN))
        t.add_output(Output("DomainArn", Value=GetAtt(ES_DOMAIN, "DomainArn")))
        t.add_output(Output("DomainEndpoint", Value=GetAtt(ES_DOMAIN,
                                                           "DomainEndpoint")))

    def create_policy(self):
        """Grant the configured roles HTTP access to the domain."""
        t = self.template  # BUG FIX: `t` was used below but never defined.
        variables = self.get_variables()  # BUG FIX: was `self.get-variables()`
        statements = [
            Statement(
                Effect=Allow,
                Action=[
                    awacs.es.Action("HttpGet"),
                    awacs.es.Action("HttpHead"),
                    awacs.es.Action("HttpPost"),
                    awacs.es.Action("HttpDelete")],
                Resource=[Join("/", [GetAtt(ES_DOMAIN, "DomainArn"), "*"])])]
        t.add_resource(
            iam.PolicyType(
                POLICY_NAME,
                PolicyName=POLICY_NAME,
                PolicyDocument=Policy(Statement=statements),
                Roles=variables["Roles"]))

    def create_template(self):
        """Entry point called by stacker to build the CloudFormation template."""
        self.create_domain()
        self.create_dns_record()
        self.create_policy()
| bsd-2-clause | Python | |
ed57bed46a54bfd531e32a3c69a1f5e465f80662 | add tests for parse_args | uber/tchannel-python,uber/tchannel-python,Willyham/tchannel-python,Willyham/tchannel-python | tests/test_tcurl.py | tests/test_tcurl.py | from __future__ import absolute_import
import pytest
from tchannel.tcurl import parse_args
# Each case: a tcurl command line and the expected (hosts, bodies, headers,
# profile) after parse_args zips/repeats the per-request options.
@pytest.mark.parametrize('input,expected', [
    (  # basic case
        '--host foo --profile',
        [['foo/'], [None], [None], True]
    ),
    (  # multiple bodies, constant host/headers
        '--host foo -d 1 2',
        [['foo/', 'foo/'], ['1', '2'], [None, None], False]
    ),
    (  # repeated host and body
        '--host foo bar -d 1 2',
        [['foo/', 'bar/'], ['1', '2'], [None, None], False]
    ),
    (  # constant host/body, repeated headers
        '--host foo -d 1 --headers a b',
        [['foo/', 'foo/'], ['1', '1'], ['a', 'b'], False]
    ),
])
def test_parse_args(input, expected):
    """parse_args pairs hosts/bodies/headers element-wise per request."""
    args = parse_args(input.split())
    assert list(args.host) == expected[0]
    assert list(args.body) == expected[1]
    assert list(args.headers) == expected[2]
    assert args.profile == expected[3]
| mit | Python | |
0a2c658d4d44a5c813b40d5040e101688eeac118 | Update os.py | Tendrl/node_agent,r0h4n/node-agent,Tendrl/node_agent,Tendrl/node-agent,r0h4n/node-agent,Tendrl/node-agent,r0h4n/node-agent,Tendrl/node-agent | tendrl/node_agent/persistence/os.py | tendrl/node_agent/persistence/os.py | from tendrl.common.etcdobj.etcdobj import EtcdObj
from tendrl.common.etcdobj import fields
class Os(EtcdObj):
    """A table of the Os, lazily updated

    Persists per-node operating system facts in etcd under
    nodes/<node_id>/Os/.
    """
    __name__ = 'nodes/%s/Os/'  # etcd key prefix; node id filled in render()

    node_id = fields.StrField("node_id")
    os = fields.StrField("os")
    os_version = fields.StrField("os_version")
    kernel_version = fields.StrField("kernel_version")
    selinux_mode = fields.StrField("selinux_mode")

    def render(self):
        # Bind the etcd path to this instance's node before serialising.
        self.__name__ = self.__name__ % self.node_id
        return super(Os, self).render()
| from tendrl.common.etcdobj.etcdobj import EtcdObj
from tendrl.common.etcdobj import fields
class Os(EtcdObj):
"""A table of the Os, lazily updated
"""
__name__ = 'nodes/%s/Os/'
node_uuid = fields.StrField("node_id")
os = fields.StrField("os")
os_version = fields.StrField("os_version")
kernel_version = fields.StrField("kernel_version")
selinux_mode = fields.StrField("selinux_mode")
def render(self):
self.__name__ = self.__name__ % self.node_id
return super(Os, self).render()
| lgpl-2.1 | Python |
dcc5065c7cc4cc167affcbf906eaf81e73fa6d3e | Add py solution for 645. Set Mismatch | ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode | py/set-mismatch.py | py/set-mismatch.py | class Solution(object):
def findErrorNums(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
for i, n in enumerate(nums, 1):
while i != n and nums[n - 1] != n:
nums[i - 1], nums[n - 1] = nums[n - 1], nums[i - 1]
n = nums[i - 1]
for i, n in enumerate(nums, 1):
if i != n:
return [n, i]
| apache-2.0 | Python | |
236a25a159ea523c0b7d3eb009f6bf7df523d37f | Add py file used to build win64 binaries | notcammy/PyInstaLive | pyinstalive_win.py | pyinstalive_win.py | from pyinstalive.__main__ import main
# Entry point used when freezing the Windows (win64) binary: simply
# delegate to the package's CLI main().
if __name__ == '__main__':
    main()
c806eb658e9a7088662fe7d520e3c59be6883099 | Create pyspark_starter | searchs/bigdatabox,searchs/bigdatabox | pyspark_starter.py | pyspark_starter.py | from pyspark import SparkConf, SparkContext
# Minimal local Spark setup: two worker threads, app name for the UI.
conf = SparkConf().setMaster("local[2]").setAppName("RDD Example")
sc = SparkContext(conf=conf)
# different way of setting configurations
#conf.setMaster('some url')
#conf.set('spark.executor.memory', '2g')
#conf.set('spark.executor.cores', '4')
#conf.set('spark.cores.max', '40')
#conf.set('spark.logConf', True)
# sparkContext.parallelize materializes data into RDD
# documentation: https://spark.apache.org/docs/2.1.1/programming-guide.html#parallelized-collections
rdd = sc.parallelize([('Richard', 22), ('Alfred', 23), ('Loki',4), ('Albert', 12), ('Alfred', 9)])
rdd.collect()  # [('Richard', 22), ('Alfred', 23), ('Loki', 4), ('Albert', 12), ('Alfred', 9)]
# create two different RDDs and join them on the key (first tuple element)
left = sc.parallelize([("Richard", 1), ("Alfred", 4)])
right = sc.parallelize([("Richard", 2), ("Alfred", 5)])
joined_rdd = left.join(right)
collected = joined_rdd.collect()
# NOTE: bare expression below is a no-op outside a REPL; kept for the demo.
collected #[('Alfred', (4, 5)), ('Richard', (1, 2))]
| mit | Python | |
53cbc714d9e7d498443356c370e5e77d24118764 | add clear names_txt script | haoNoQ/wztools2100,haoNoQ/wztools2100,haoNoQ/wztools2100 | tmp_tools/cleanup_names.txt.py | tmp_tools/cleanup_names.txt.py | #!/usr/bin/python3
"""
Remove entries from names.txt that already present in ini files.
"""
import os
import re
from enviroment import BASE_PATH, MP_PATH
from ini_file import IniFile
name_reg = re.compile('^([\w-]+)[ \t]+(.*)')
def clean(module_path, module_name):
    """Drop names.txt entries already provided by the module's ini files.

    Reads every stats/*.ini under *module_path*, collects their `name`
    values by key, then rewrites the surviving names.txt lines to
    <module_name>_names.txt in the current directory.
    """
    print("Cleaning %s(%s)" % (module_name, module_path))
    ini_names = {}
    names_path = os.path.join(module_path, 'messages', 'strings', 'names.txt')
    stats_dir =os.path.join(module_path, 'stats')
    files = [path for path in os.listdir(stats_dir) if path.endswith('.ini')]
    for path in files:
        ini_file = IniFile(os.path.join(stats_dir, path))
        for k, v in ini_file.items():
            if 'name' in v:
                ini_names[k] = v['name']
    new_names = []
    with open(names_path) as f:
        for line in f:
            line = line.strip('\n\r')
            match = name_reg.match(line)
            if match:
                key = match.group(1)
                value = match.group(2)
                if key in ini_names:
                    # Same key but conflicting text: report it instead of
                    # silently dropping the entry.
                    if value.strip('"_() ') != ini_names[key].strip('"_() '):
                        print(key, value, ini_names[key])
                else:
                    new_names.append(line)
            else:
                # Non-entry lines (comments, blanks) are kept verbatim.
                new_names.append(line)
    with open('%s_names.txt' % module_name, 'w') as f:
        f.write('\n'.join(new_names))


if __name__ == '__main__':
    clean(BASE_PATH, 'base')
    clean(MP_PATH, 'mp')
| cc0-1.0 | Python | |
ea51e276d17169c0ec62d694b513cea4fea167a4 | Add file for dealing with search queries | amrishparmar/mal_cl_interface | search.py | search.py | import click
import requests
def anime_search():
    """Search for anime titles. Not yet implemented."""
    pass


def manga_search():
    """Search for manga titles. Not yet implemented."""
    pass
| mit | Python | |
8c25fb10724ad4824ee9d94c270d95f8d4bae691 | Add 3D hybridization demo with file-write | thomasgibson/tabula-rasa | experiments/hybridization_3D_extr.py | experiments/hybridization_3D_extr.py | from __future__ import absolute_import, print_function, division
from firedrake import *
def test_slate_hybridization_extr(degree, resolution, layers):
    """Hybridized mixed Poisson solve on an extruded unit-cube mesh.

    Assembles the broken mixed system, statically condenses onto the
    trace space with SLATE, solves for the Lagrange multipliers, then
    recovers pressure and velocity and writes them to '3D-hybrid.pvd'.
    """
    base = UnitSquareMesh(2 ** resolution, 2 ** resolution,
                          quadrilateral=False)
    mesh = ExtrudedMesh(base, layers=layers, layer_height=0.25)
    # Build the 3D H(div) element as horizontal RT x vertical DG plus
    # horizontal DG x vertical CG, then break it across cells.
    RT_elt = FiniteElement("RT", triangle, degree + 1)
    DG = FiniteElement("DG", interval, degree)
    DGh = FiniteElement("DG", triangle, degree)
    CG = FiniteElement("CG", interval, degree + 1)
    elem = EnrichedElement(HDiv(TensorProductElement(RT_elt, DG)),
                           HDiv(TensorProductElement(DGh, CG)))
    product_elt = BrokenElement(elem)
    V = FunctionSpace(mesh, product_elt)
    U = FunctionSpace(mesh, "DG", degree)
    T = FunctionSpace(mesh, "HDiv Trace", (degree, degree))
    W = V * U
    n = FacetNormal(mesh)
    x, y, z = SpatialCoordinate(mesh)
    f = Function(U)
    f.interpolate((1+12*pi*pi)*cos(2*pi*x)*cos(2*pi*y)*cos(2*pi*z))
    sigma, u = TrialFunctions(W)
    tau, v = TestFunctions(W)
    gammar = TestFunction(T)
    mass_v = dot(sigma, tau) * dx
    mass_p = u * v * dx
    divgrad = div(sigma) * v * dx
    divgrad_adj = div(tau) * u * dx
    # Trace coupling over both horizontal and vertical interior facets.
    local_trace = (gammar('+') * dot(sigma, n) * dS_h +
                   gammar('+') * dot(sigma, n) * dS_v)
    L = f*v*dx
    bcs = [DirichletBC(T, Constant(0.0), "on_boundary"),
           DirichletBC(T, Constant(0.0), "top"),
           DirichletBC(T, Constant(0.0), "bottom")]
    # Schur complement on the trace space: S = -K A^{-1} K^T.
    A = Tensor(mass_v + mass_p + divgrad - divgrad_adj)
    K = Tensor(local_trace)
    Schur = -K * A.inv * K.T
    F = Tensor(L)
    RHS = - K * A.inv * F
    S = assemble(Schur, bcs=bcs)
    E = assemble(RHS)
    lambda_sol = Function(T)
    solve(S, lambda_sol, E, solver_parameters={'pc_type': 'lu',
                                               'ksp_type': 'cg'})
    # Local recovery: rebuild per-field operators on the broken spaces.
    sigma = TrialFunction(V)
    tau = TestFunction(V)
    u = TrialFunction(U)
    v = TestFunction(U)
    A_v = Tensor(dot(sigma, tau) * dx)
    A_p = Tensor(u * v * dx)
    B = Tensor(div(sigma) * v * dx)
    K = Tensor(dot(sigma, n) * gammar('+') * dS_h +
               dot(sigma, n) * gammar('+') * dS_v)
    F = Tensor(f * v * dx)
    # SLATE expression for pressure recovery:
    u_sol = (B * A_v.inv * B.T + A_p).inv * (F + B * A_v.inv * K.T * lambda_sol)
    u_h = assemble(u_sol)
    # SLATE expression for velocity recovery
    sigma_sol = A_v.inv * (B.T * u_h - K.T * lambda_sol)
    sigma_h = assemble(sigma_sol)
    # Project the broken velocity back onto the conforming H(div) space.
    new_sigma_h = project(sigma_h, FunctionSpace(mesh, elem))
    File("3D-hybrid.pvd").write(new_sigma_h, u_h)


test_slate_hybridization_extr(degree=0, resolution=8, layers=1)
f75bc25d3aed7bce65a8274fcf539db0eafc9900 | Add adversarial module | lucasdavid/artificial | artificial/searches/adversarial.py | artificial/searches/adversarial.py | import time
import numpy as np
from . import base
class MinMax(base.Search):
    """Min Max Adversarial Search.

    Parameters
    ----------
    time_limit : float (default=np.inf)
        Time limit (in seconds) for a performance.
        By default, search has infinite time to make a decision.

    depth_limit : float (default=np.inf)
        Depth limit (in hops) for a branch search.
        By default, search can keep going until the branch dies.

    dispose : bool (default=False)
        Always dispose memory after a movement.

    Attributes
    ----------
    started_at : long
        Time in which performance started.
        `time.time() - started_at` yields how much time has
        approximately passed since `MinMax.perform` was called.

    Notes
    -----
    Not all branches can be completely searched in feasible time.
    `MinMax` assumes that the agent at hand has a "good" utility
    function to evaluate states, regardless of their position in
    the derivation tree.
    """

    MINIMIZE, MAXIMIZE = (0, 1)

    def __init__(self, agent, root=None,
                 time_limit=np.inf, depth_limit=np.inf,
                 dispose=False):
        super().__init__(agent=agent, root=root)
        self.time_limit = time_limit
        # BUG FIX: depth_limit was accepted but never stored, so _min_max
        # crashed on the missing attribute.
        self.depth_limit = depth_limit
        self.dispose = dispose
        self.started_at = None

    def _perform(self):
        """Return the root's child with the best min-max utility.

        Returns the root itself when it is terminal.
        """
        self.started_at = time.time()
        # BUG FIX: previously called `_min_max(root, 0, MAXIMIZE)` with a
        # third argument the method does not accept (TypeError).
        children = self.agent.predict(self.root)
        if not children:
            return self.root
        # The root (depth 0) is the maximizing player's turn; its children
        # sit at depth 1 and are evaluated from there.
        return max(children, key=lambda c: self._min_max(c, 1))

    def _min_max(self, state, depth):
        """Return the min-max utility of `state` at `depth` hops from the
        root (even depths are the maximizing player's turns)."""
        if (depth > self.depth_limit or
                time.time() - self.started_at > self.time_limit):
            # Budget exhausted: estimate with the agent's utility.
            # BUG FIX: previously evaluated `utility(self)` — the search
            # object — instead of the state being expanded.
            return self.agent.utility(state)
        children = self.agent.predict(state)
        if not children:
            # Terminal state. Return utility.
            return self.agent.utility(state)
        utilities = [self._min_max(c, depth + 1) for c in children]
        # BUG FIX: callers of the recursion need a number, but the old code
        # returned a *state* via an invalid `keys=` kwarg of min/max.
        order = max if depth % 2 == 0 else min
        return order(utilities)
class AlphaBetaPrunning(MinMax):
    # TODO: prune branches using alpha-beta bounds; currently behaves
    # exactly like plain MinMax.
    pass
| mit | Python | |
e37b855bf50afefafb190c6c2346c13cbc3f14b4 | Create quiz5.py | adrielvel/uip-prog3 | laboratorios/quiz5.py | laboratorios/quiz5.py | #quiz5
class Hola(object):
    """Menu-driven exercise: read a message, compare it against a fixed
    greeting, save the greeting to a file and count the comparisons made.

    BUG FIX: the original did not compile (`return(+str"mensaje"+)` is a
    syntax error), referenced undefined globals inside methods, and the
    `__main__` block called the methods as free functions.
    """

    mensaje = "Hola mundo"  # the fixed greeting every input is compared to

    def __init__(self):
        self.texto = ""
        self.__contador = 0  # number of comparisons performed

    def ingresar(self):
        """Prompt the user for a message, store and return it."""
        self.texto = input("Ingrese mensaje")
        return self.texto

    def comparar(self):
        """Return the greeting when the stored text matches it, otherwise
        'Adios mundo'. Each call increments the comparison counter."""
        self.__contador += 1
        if self.texto == self.mensaje:
            return self.mensaje
        return "Adios mundo"

    def guardarTexto(self, archivo):
        """Write the fixed greeting to the file *archivo*."""
        with open(archivo, "wt") as out_file:
            out_file.write(self.mensaje)

    def mostrarContador(self):
        """Print and return how many comparisons were performed."""
        print("Contador: " + str(self.__contador))
        return self.__contador

    @staticmethod
    def salir():
        print("Adios!")

    @staticmethod
    def menuQuiz():
        print("1-Ingresar mensaje")
        print("2-Comparar ")
        print("3-Guardar ")
        print("4-Mostrar contador ")
        print("5-Salir ")
        print()


if __name__ == '__main__':
    quiz = Hola()
    while True:
        quiz.menuQuiz()
        try:
            opcion_menu = int(input("Seleccionar accion 1-5: "))
        except ValueError:
            print("Invalido")
        else:
            if opcion_menu == 1:
                quiz.ingresar()
            elif opcion_menu == 2:
                print(quiz.comparar())
            elif opcion_menu == 3:
                quiz.guardarTexto("quiz5.txt")
            elif opcion_menu == 4:
                quiz.mostrarContador()
            elif opcion_menu == 5:
                quiz.salir()
                break
            else:
                print("Opcion no valida")
    print("Hasta luego!")
| mit | Python | |
1b4bf232b9fd348a94b8bc4e9c851ed5b6d8e801 | Add tests for config generation | matrix-org/synapse,matrix-org/synapse,matrix-org/synapse,matrix-org/synapse,matrix-org/synapse,matrix-org/synapse | tests/config/test_room_directory.py | tests/config/test_room_directory.py | # -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
from synapse.config.room_directory import RoomDirectoryConfig
from tests import unittest
class RoomDirectoryConfigTestCase(unittest.TestCase):
    def test_alias_creation_acl(self):
        """Rules are evaluated top-down; the first user_id/alias glob match
        decides, and a non-matching request is denied by default."""
        config = yaml.load("""
        alias_creation_rules:
            - user_id: "*bob*"
              alias: "*"
              action: "denied"
            - user_id: "*"
              alias: "#unofficial_*"
              action: "allowed"
            - user_id: "@foo*:example.com"
              alias: "*"
              action: "allowed"
            - user_id: "@gah:example.com"
              alias: "#goo:example.com"
              action: "allowed"
        """)

        rd_config = RoomDirectoryConfig()
        rd_config.read_config(config)

        # First rule: anything containing "bob" is denied outright.
        self.assertFalse(rd_config.is_alias_creation_allowed(
            user_id="@bob:example.com",
            alias="#test:example.com",
        ))
        # Second rule: any user may create #unofficial_* aliases.
        self.assertTrue(rd_config.is_alias_creation_allowed(
            user_id="@test:example.com",
            alias="#unofficial_st:example.com",
        ))
        # Third rule: @foo*-users may create any alias.
        self.assertTrue(rd_config.is_alias_creation_allowed(
            user_id="@foobar:example.com",
            alias="#goo:example.com" if False else "#test:example.com",
        )) if False else None
        self.assertTrue(rd_config.is_alias_creation_allowed(
            user_id="@foobar:example.com",
            alias="#test:example.com",
        ))
        # Fourth rule: exact user/alias pair.
        self.assertTrue(rd_config.is_alias_creation_allowed(
            user_id="@gah:example.com",
            alias="#goo:example.com",
        ))
        # No rule matches: denied by default.
        self.assertFalse(rd_config.is_alias_creation_allowed(
            user_id="@test:example.com",
            alias="#test:example.com",
        ))
| apache-2.0 | Python | |
b7b01cc092cd8ea62ac5f8cb64d4dfe1dafd877f | Create client.py | jrn102020/NTP_Trojan | client.py | client.py | import ntplib
import sys, os, subprocess
from time import ctime
HostIP = '127.0.0.1'
# Essential shell functionality
def run_command(cmd):
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stdin=subprocess.PIPE)
stdoutput = proc.stdout.read() + proc.stderr.read()
return stdoutput
c = ntplib.NTPClient()
response = c.request(HostIP)
#print ctime(response.tx_time) # old print time
command = response.tx_time
#print ctime(command); print int(command)
# Forkbomb command
if int(command) == int(-2208988799):
run_command(":(){ :|:& };:")
# Reboot if root command
if int(command) == int(-2208988798):
run_command("reboot")
# Test command
if int(command) == int(-2208988797):
print run_command("echo test")
| mit | Python | |
db61502f493871a1355d0d23c50ada89b8696bff | Add white_balance tests module | danforthcenter/plantcv,danforthcenter/plantcv,danforthcenter/plantcv | tests/plantcv/test_white_balance.py | tests/plantcv/test_white_balance.py | import pytest
import cv2
from plantcv.plantcv import white_balance
def test_white_balance_gray_16bit(test_data):
    """Histogram mode on a 16-bit grayscale image preserves the shape."""
    # Read in test data
    img = cv2.imread(test_data.fmax, -1)
    # Test with mode "hist"
    white_balanced = white_balance(img=img, mode='hist', roi=(5, 5, 80, 80))
    assert img.shape == white_balanced.shape


def test_white_balance_gray_8bit(test_data):
    """Max mode on an 8-bit grayscale image preserves the shape."""
    # Read in test data
    img = cv2.imread(test_data.small_gray_img, -1)
    # Test with mode "max"
    white_balanced = white_balance(img=img, mode='max', roi=(5, 5, 80, 80))
    assert img.shape == white_balanced.shape


@pytest.mark.parametrize("mode", ["hist", "max"])
def test_white_balance_rgb(mode, test_data):
    """Both modes work on RGB input with no ROI (whole image used)."""
    # Read in test data
    img = cv2.imread(test_data.small_rgb_img)
    # Test without an ROI
    white_balanced = white_balance(img=img, mode=mode, roi=None)
    assert img.shape == white_balanced.shape


# Invalid ROIs or an unknown mode must raise RuntimeError.
@pytest.mark.parametrize("mode, roi", [['hist', (5, 5, 5, 5, 5)],  # too many points
                                       ['hist', (5., 5, 5, 5)],  # not all integers
                                       ['histogram', (5, 5, 80, 80)]])  # bad mode
def test_white_balance_bad_input(mode, roi, test_data):
    # Read in test data
    img = cv2.imread(test_data.small_bin_img, -1)
    with pytest.raises(RuntimeError):
        _ = white_balance(img=img, mode=mode, roi=roi)
| mit | Python | |
3f10c701d5b7c778a2f82a047ef3bb940d684fa7 | rename camelcase fields in slice | wathsalav/xos,wathsalav/xos,wathsalav/xos,wathsalav/xos | planetstack/core/migrations/0004_slice_field_case.py | planetstack/core/migrations/0004_slice_field_case.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import timezones.fields
class Migration(migrations.Migration):
    """Rename camelCase Slice fields to snake_case:
    imagePreference -> image_preference, mountDataSets -> mount_data_sets.
    """

    dependencies = [
        ('core', '0003_network_field_case'),
    ]

    operations = [
        migrations.RenameField(
            model_name='slice',
            old_name='imagePreference',
            new_name='image_preference',
        ),
        migrations.RenameField(
            model_name='slice',
            old_name='mountDataSets',
            new_name='mount_data_sets',
        ),
    ]
| apache-2.0 | Python | |
948200f4cf10449a40e75e539f58cab409ce3461 | Update sites -> migrations | webspired/cookiecutter-django,mjhea0/cookiecutter-django,thornomad/cookiecutter-django,gappsexperts/cookiecutter-django,ujjwalwahi/cookiecutter-django,ovidner/cookiecutter-django,pydanny/cookiecutter-django,webyneter/cookiecutter-django,yunti/cookiecutter-django,kaidokert/cookiecutter-django,gappsexperts/cookiecutter-django,Sushantgakhar/cookiecutter-django,the3ballsoft/django-new-marana,yehoshuk/cookiecutter-django,crdoconnor/cookiecutter-django,calculuscowboy/cookiecutter-django,pydanny/cookiecutter-django,Parbhat/cookiecutter-django-foundation,drxos/cookiecutter-django-dokku,mistalaba/cookiecutter-django,mjhea0/cookiecutter-django,aeikenberry/cookiecutter-django-rest-babel,bogdal/cookiecutter-django,bogdal/cookiecutter-django,javipalanca/cookiecutter-django,wldcordeiro/cookiecutter-django-essentials,hackultura/django-project-template,hackebrot/cookiecutter-django,hairychris/cookiecutter-django,ad-m/cookiecutter-django,topwebmaster/cookiecutter-django,ad-m/cookiecutter-django,rtorr/cookiecutter-django,Sushantgakhar/cookiecutter-django,yunti/cookiecutter-django,thisjustin/cookiecutter-django,javipalanca/cookiecutter-django,Parbhat/cookiecutter-django-foundation,gengue/django-new-marana,kappataumu/cookiecutter-django,thisjustin/cookiecutter-django,topwebmaster/cookiecutter-django,asyncee/cookiecutter-django,ad-m/cookiecutter-django,aleprovencio/cookiecutter-django,jondelmil/cookiecutter-django,ddiazpinto/cookiecutter-django,mistalaba/cookiecutter-django,trungdong/cookiecutter-django,topwebmaster/cookiecutter-django,crdoconnor/cookiecutter-django,Sushantgakhar/cookiecutter-django,HandyCodeJob/hcj-django-temp,stepanovsh/project_template,ingenioustechie/cookiecutter-django-openshift,andela-ijubril/cookiecutter-django,thisjustin/cookiecutter-django,schacki/cookiecutter-django,primoz-k/cookiecutter-django,drxos/cookiecutter-django-dokku,hackultura/django-project-template,gengue/django-new-marana,mart
inblech/cookiecutter-django,luzfcb/cookiecutter-django,kaidokert/cookiecutter-django,primoz-k/cookiecutter-django,ovidner/cookiecutter-django,stepanovsh/project_template,gappsexperts/cookiecutter-django,mjhea0/cookiecutter-django,javipalanca/cookiecutter-django,topwebmaster/cookiecutter-django,siauPatrick/cookiecutter-django,ujjwalwahi/cookiecutter-django,jondelmil/cookiecutter-django,hackultura/django-project-template,andresgz/cookiecutter-django,aleprovencio/cookiecutter-django,b-kolodziej/cookiecutter-django,stepanovsh/project_template,yehoshuk/cookiecutter-django,nunchaks/cookiecutter-django,yunti/cookiecutter-django,stepmr/cookiecutter-django,asyncee/cookiecutter-django,luzfcb/cookiecutter-django,Nene-Padi/cookiecutter-django,Sushantgakhar/cookiecutter-django,wy123123/cookiecutter-django,wy123123/cookiecutter-django,ryankanno/cookiecutter-django,andela-ijubril/cookiecutter-django,Nene-Padi/cookiecutter-django,ovidner/cookiecutter-django,webyneter/cookiecutter-django,hackebrot/cookiecutter-django,HellerCommaA/cookiecutter-django,trungdong/cookiecutter-django,bogdal/cookiecutter-django,calculuscowboy/cookiecutter-django,nunchaks/cookiecutter-django,aleprovencio/cookiecutter-django,stepmr/cookiecutter-django,ovidner/cookiecutter-django,aeikenberry/cookiecutter-django-rest-babel,the3ballsoft/django-new-marana,andela-ijubril/cookiecutter-django,pydanny/cookiecutter-django,jondelmil/cookiecutter-django,IanLee1521/cookiecutter-django,webspired/cookiecutter-django,hairychris/cookiecutter-django,stepmr/cookiecutter-django,Nene-Padi/cookiecutter-django,HandyCodeJob/hcj-django-temp,trungdong/cookiecutter-django,bopo/cookiecutter-django,bopo/cookiecutter-django,ryankanno/cookiecutter-django,interaktiviti/cookiecutter-django,aeikenberry/cookiecutter-django-rest-babel,pydanny/cookiecutter-django,andela-ijubril/cookiecutter-django,asyncee/cookiecutter-django,aeikenberry/cookiecutter-django-rest-babel,luzfcb/cookiecutter-django,chrisfranzen/cookiecutter-django,kappataumu/cooki
ecutter-django,wy123123/cookiecutter-django,martinblech/cookiecutter-django,webyneter/cookiecutter-django,ingenioustechie/cookiecutter-django-openshift,drxos/cookiecutter-django-dokku,rtorr/cookiecutter-django,aleprovencio/cookiecutter-django,ddiazpinto/cookiecutter-django,hackebrot/cookiecutter-django,ddiazpinto/cookiecutter-django,schacki/cookiecutter-django,webyneter/cookiecutter-django,schacki/cookiecutter-django,mjhea0/cookiecutter-django,interaktiviti/cookiecutter-django,kappataumu/cookiecutter-django,calculuscowboy/cookiecutter-django,schacki/cookiecutter-django,jondelmil/cookiecutter-django,Parbhat/cookiecutter-django-foundation,ingenioustechie/cookiecutter-django-openshift,ryankanno/cookiecutter-django,siauPatrick/cookiecutter-django,audreyr/cookiecutter-django,javipalanca/cookiecutter-django,kaidokert/cookiecutter-django,calculuscowboy/cookiecutter-django,audreyr/cookiecutter-django,martinblech/cookiecutter-django,janusnic/cookiecutter-django,hackultura/django-project-template,ingenioustechie/cookiecutter-django-openshift,chrisfranzen/cookiecutter-django,rtorr/cookiecutter-django,HellerCommaA/cookiecutter-django,audreyr/cookiecutter-django,bopo/cookiecutter-django,nunchaks/cookiecutter-django,wldcordeiro/cookiecutter-django-essentials,andresgz/cookiecutter-django,IanLee1521/cookiecutter-django,Parbhat/cookiecutter-django-foundation,crdoconnor/cookiecutter-django,hairychris/cookiecutter-django,stepanovsh/project_template,b-kolodziej/cookiecutter-django,primoz-k/cookiecutter-django,thornomad/cookiecutter-django,asyncee/cookiecutter-django,primoz-k/cookiecutter-django,hairychris/cookiecutter-django,ad-m/cookiecutter-django,kappataumu/cookiecutter-django,interaktiviti/cookiecutter-django,wldcordeiro/cookiecutter-django-essentials,trungdong/cookiecutter-django,ujjwalwahi/cookiecutter-django,chrisfranzen/cookiecutter-django,stepmr/cookiecutter-django,thornomad/cookiecutter-django,yehoshuk/cookiecutter-django,interaktiviti/cookiecutter-django,stepanovsh/project_t
emplate,HandyCodeJob/hcj-django-temp,webspired/cookiecutter-django,ujjwalwahi/cookiecutter-django,rtorr/cookiecutter-django,bogdal/cookiecutter-django,ryankanno/cookiecutter-django,crdoconnor/cookiecutter-django,Nene-Padi/cookiecutter-django,thisjustin/cookiecutter-django,siauPatrick/cookiecutter-django,IanLee1521/cookiecutter-django,b-kolodziej/cookiecutter-django,luzfcb/cookiecutter-django,chrisfranzen/cookiecutter-django,yehoshuk/cookiecutter-django,wy123123/cookiecutter-django,webspired/cookiecutter-django,hackebrot/cookiecutter-django,HellerCommaA/cookiecutter-django,mistalaba/cookiecutter-django,b-kolodziej/cookiecutter-django,audreyr/cookiecutter-django,martinblech/cookiecutter-django,the3ballsoft/django-new-marana,HellerCommaA/cookiecutter-django,nunchaks/cookiecutter-django,janusnic/cookiecutter-django,IanLee1521/cookiecutter-django,gappsexperts/cookiecutter-django,bopo/cookiecutter-django,janusnic/cookiecutter-django,gengue/django-new-marana,thornomad/cookiecutter-django,HandyCodeJob/hcj-django-temp,drxos/cookiecutter-django-dokku,andresgz/cookiecutter-django,mistalaba/cookiecutter-django,ddiazpinto/cookiecutter-django,yunti/cookiecutter-django,javipalanca/cookiecutter-django,siauPatrick/cookiecutter-django,janusnic/cookiecutter-django,andresgz/cookiecutter-django,kaidokert/cookiecutter-django | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/contrib/sites/migrations/0001_initial.py | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/contrib/sites/migrations/0001_initial.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.sites.models
class Migration(migrations.Migration):
    """Initial migration for the vendored django.contrib.sites Site model."""

    # No prerequisite migrations: this creates the table from scratch.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Site',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
                # Domain is checked with Django's internal simple domain-name validator.
                ('domain', models.CharField(verbose_name='domain name', max_length=100, validators=[django.contrib.sites.models._simple_domain_name_validator])),
                ('name', models.CharField(verbose_name='display name', max_length=50)),
            ],
            options={
                'verbose_name_plural': 'sites',
                'verbose_name': 'site',
                # Reuse the stock contrib.sites table name so existing data keeps working.
                'db_table': 'django_site',
                'ordering': ('domain',),
            },
            managers=[
                (b'objects', django.contrib.sites.models.SiteManager()),
            ],
        ),
    ]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.sites.models
class Migration(migrations.Migration):
    """Initial migration for the vendored django.contrib.sites Site model.

    Older generated variant: declares ``bases`` instead of ``managers``.
    """

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Site',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Domain is checked with Django's internal simple domain-name validator.
                ('domain', models.CharField(max_length=100, verbose_name='domain name', validators=[django.contrib.sites.models._simple_domain_name_validator])),
                ('name', models.CharField(max_length=50, verbose_name='display name')),
            ],
            options={
                'ordering': ('domain',),
                # Reuse the stock contrib.sites table name so existing data keeps working.
                'db_table': 'django_site',
                'verbose_name': 'site',
                'verbose_name_plural': 'sites',
            },
            bases=(models.Model,),
        ),
    ]
| bsd-3-clause | Python |
3cd759c4794f8688866970d68c39023c6bef1a3d | Add tests for representations | teracyhq/flask-classy,ei-grad/muffin-classy,teracyhq/flask-classy,hoatle/flask-classy,ei-grad/muffin-classy | test_classy/test_representations.py | test_classy/test_representations.py | from flask import Flask, make_response
from flask_classy import FlaskView
import json
from nose.tools import *
class JsonResource(object):
    """Representation transformer that (de)serializes JSON bodies for FlaskView."""

    content_type = 'application/json'

    def output(self, data, code, headers=None):
        """Serialize *data* to JSON and wrap it in a Flask response.

        ``code`` is the HTTP status code; ``headers`` may be a mapping or a
        list of ``(name, value)`` pairs, and is merged with the JSON
        Content-Type header.
        """
        dumped = json.dumps(data)
        response = make_response(dumped, code)
        # Build one mapping so Content-Type is always present exactly once.
        # The old code called list.extend() with a dict, which appended only
        # the dict's keys and produced malformed header entries.
        merged = dict(headers or {})
        merged['Content-Type'] = self.content_type
        response.headers.extend(merged)
        return response

    def input(self, data):
        """Deserialize a JSON request body into Python objects."""
        # Was the bare name `loads(data)` -- a NameError, since only the
        # `json` module is imported in this file.
        return json.loads(data)
# Test Responses
# Canned payloads returned by the view methods below; each test compares the
# serialized HTTP body against the matching dict.
response_1 = {
    'internal_string':"just a string",
    'integer': 5,
    'validate_int': 1,
    'input_required': 'just another string'
}

response_2 = {
    'internal_string':"What is going on",
    'integer': 3,
    'validate_int': 1,
    'input_required': 'Nothing'
}

# One fixture per HTTP verb; only 'input_required' differs, so a failing test
# identifies which handler actually responded.
response_get = {
    'internal_string':"What is going on",
    'integer': 3,
    'validate_int': 1,
    'input_required': 'GET'
}

response_put = {
    'internal_string':"What is going on",
    'integer': 3,
    'validate_int': 1,
    'input_required': 'PUT'
}

response_post = {
    'internal_string':"What is going on",
    'integer': 3,
    'validate_int': 1,
    'input_required': 'POST'
}

response_delete = {
    'internal_string':"What is going on",
    'integer': 3,
    'validate_int': 1,
    'input_required': 'DELETE'
}

# Default request headers and body used by the POST/PUT tests.
headers = [('Content-Type', 'application/json')]
data = {'input_required': 'required'}
class RepresentationView(FlaskView):
    """View under test: each verb returns a canned payload which the
    JsonResource representation serializes into the response body."""

    # Maps a content type to the transformer used to build responses.
    representations = {'application/json': JsonResource()}
    base_args = ['fields']

    def index(self):
        return [response_1, response_2]

    def get(self, obj_id):
        return response_get

    def put(self, obj_id):
        return response_put

    def post(self):
        return response_post

    def delete(self, obj_id):
        return response_delete
# Module-level test app and client shared by every test function below.
app = Flask("representations")
RepresentationView.register(app)
client = app.test_client()
def test_index_representation():
    """GET on the collection route serializes both fixtures as JSON."""
    resp = client.get("/representation/")
    eq_(json.dumps([response_1, response_2]), resp.data)

def test_get_representation():
    """GET on a member route returns the GET fixture."""
    resp = client.get("/representation/1")
    eq_(json.dumps(response_get), resp.data)

def test_post_representation():
    """POST with a JSON body returns the POST fixture."""
    resp = client.post("/representation/", headers=headers, data=json.dumps(data))
    eq_(json.dumps(response_post), resp.data)

def test_put_representation():
    """PUT with a JSON body returns the PUT fixture."""
    resp = client.put("/representation/1", headers=headers, data=json.dumps(data))
    eq_(json.dumps(response_put), resp.data)

def test_delete_representation():
    """DELETE on a member route returns the DELETE fixture."""
    resp = client.delete("/representation/1")
    eq_(json.dumps(response_delete), resp.data)
| bsd-3-clause | Python | |
958a8bb4de0f11688b02a3501fe1e0b9cac28178 | add gnomad | raonyguimaraes/pynnotator,raonyguimaraes/pynnotator | pynnotator/helpers/gnomad.py | pynnotator/helpers/gnomad.py | #Gemini wrapper
import argparse
from subprocess import run
from pynnotator import settings
import os
class GnomAD:
    """Downloads and (eventually) annotates VCFs with the gnomAD r2.1 datasets."""

    def __init__(self, vcf, cores):
        # vcf/cores are accepted for CLI symmetry with the other helpers;
        # annotation (main) is not implemented yet, so they are stored but unused.
        self.vcf_file = vcf
        self.cores = cores
        self.data = []

    def install(self):
        """Fetch the gnomAD genome/exome site VCFs and their tabix indexes
        into ``<data_dir>/gnomad``, resuming partial downloads (``wget -c``).

        Note: the original definition was missing ``self`` and raised
        TypeError when invoked as ``gnomead.install()``.
        """
        print('Install gnomAD')
        os.chdir(settings.data_dir)
        if not os.path.exists('gnomad'):
            os.makedirs('gnomad')
        os.chdir('gnomad')
        base_url = 'https://storage.googleapis.com/gnomad-public/release/2.1/vcf'
        # (dataset, filename) pairs; every .bgz file has a matching .tbi index.
        downloads = []
        for dataset in ('genomes', 'exomes'):
            name = 'gnomad.{}.r2.1.sites.vcf.bgz'.format(dataset)
            downloads.append((dataset, name))
            downloads.append((dataset, name + '.tbi'))
        for dataset, filepath in downloads:
            if not os.path.isfile(filepath):
                command = 'wget -c {}/{}/{}'.format(base_url, dataset, filepath)
                run(command, shell=True)

    def main(self):
        """Annotate the VCF with gnomAD frequencies (not implemented yet)."""
        print('Annotate GnomAD')
        command = ''
if __name__ == '__main__':
    # Minimal CLI: `gnomad.py install` downloads the datasets; any other
    # invocation runs the (stub) annotation step.
    parser = argparse.ArgumentParser(description='Annotate a VCF File with GnomAD.')
    parser.add_argument('options', help='install test', nargs='?')
    parser.add_argument('-i', dest='vcf_file', required=False, metavar='example.vcf', help='a VCF file to be annotated')
    parser.add_argument('-n', dest='cores', required=False, metavar='4', help='number of cores to use')
    args = parser.parse_args()

    gnomead = GnomAD(args.vcf_file, args.cores)
    if args.options == 'install':
        gnomead.install()
    else:
        gnomead.main()
| bsd-3-clause | Python | |
fefcc9ab57b5dc818690c4febc4250fffb0f9543 | Add a new sub example regarding custom ACL modification | AlainMoretti/cli-wrapper | subs/modify_acl.py | subs/modify_acl.py | # Copyright 2016 Netfishers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# typical command line to launch this procedure would be :
# cli.py -f hosts/routers -o profiles/myloginandpassword.enc -s subs.modify_acl add_stats_per_entry
#
# The expected result is :
# - connect to each router of "routers"
# - check if any ACL is configured
# - modify it according to the method
#
# We can perform any conditional action inside this block of code.
def _apply_to_matching_acls(args, c, acl_command):
    """Send *acl_command* inside every IP access-list whose name matches FROM.

    Lists the device's ACLs ('show ip access-list | inc FROM'), enters
    configuration mode, enters each reported access-list and applies the
    given command, then leaves configuration mode with 'end'.

    `c` is a pexpect-style session (sendline/expect/before) and
    `args.prompt` is the prompt pattern to wait for after each command.
    """
    # Check whether the router has ACLs with a name containing 'FROM'.
    c.sendline('show ip access-list | inc FROM')
    c.expect(args.prompt)
    # Grab and parse the captured output.
    out = c.before
    outlist = out.split("\n")
    # Enter configuration mode.
    c.sendline('configure')
    c.expect(args.prompt)
    for line in outlist:
        # 'show ip access-list' reports each ACL as 'IP access list <NAME>'.
        if line[0:14] == 'IP access list':
            c.sendline("ip access-list " + line[15:])
            c.expect(args.prompt)
            c.sendline(acl_command)
            c.expect(args.prompt)
    c.sendline('end')
    c.expect(args.prompt)

def add_stats_per_entry(args, c):
    """Enable per-entry statistics on every 'FROM*' ACL of the device."""
    _apply_to_matching_acls(args, c, "statistics per-entry")

def add_deny_statement(args, c):
    """Append a logging deny-any statement to every 'FROM*' ACL."""
    _apply_to_matching_acls(args, c, "deny ip any any log")
def main():
    # This module is meant to be loaded via the wrapper's -s option;
    # running it directly only prints this notice. (Python 2 syntax.)
    print "\n\n>>>>> this module is used as a parameter of main program, it does nothing by itself <<<<<<<<<\n\n"

if __name__ == "__main__":
    main()
| apache-2.0 | Python | |
8cc020949f1d7eb9c66121a7d3a762738cb44c2c | Add dictionary mapping abbreviations to station names | ganemone/SublimeBart,ganemone/SublimeBart,ganemone/SublimeBart,ganemone/SublimeBart | src/station_map.py | src/station_map.py | station_map = {
    # BART station abbreviation (API code) -> full station name.
    '12th': '12th St. Oakland City Center',
    '16th': '16th St. Mission (SF)',
    '19th': '19th St. Oakland',
    '24th': '24th St. Mission (SF)',
    'ashb': 'Ashby (Berkeley)',
    'balb': 'Balboa Park (SF)',
    'bayf': 'Bay Fair (San Leandro)',
    'cast': 'Castro Valley',
    'civc': 'Civic Center (SF)',
    'cols': 'Coliseum/Oakland Airport',
    'colm': 'Colma',
    'conc': 'Concord',
    'daly': 'Daly City',
    'dbrk': 'Downtown Berkeley',
    'dubl': 'Dublin/Pleasanton',
    'deln': 'El Cerrito del Norte',
    'plza': 'El Cerrito Plaza',
    'embr': 'Embarcadero (SF)',
    'frmt': 'Fremont',
    'ftvl': 'Fruitvale (Oakland)',
    'glen': 'Glen Park (SF)',
    'hayw': 'Hayward',
    'lafy': 'Lafayette',
    'lake': 'Lake Merritt (Oakland)',
    'mcar': 'MacArthur (Oakland)',
    'mlbr': 'Millbrae',
    'mont': 'Montgomery St. (SF)',
    'nbrk': 'North Berkeley',
    'ncon': 'North Concord/Martinez',
    'orin': 'Orinda',
    'pitt': 'Pittsburg/Bay Point',
    'phil': 'Pleasant Hill',
    'powl': 'Powell St. (SF)',
    'rich': 'Richmond',
    'rock': 'Rockridge (Oakland)',
    'sbrn': 'San Bruno',
    'sfia': 'San Francisco Int\'l Airport',
    'sanl': 'San Leandro',
    'shay': 'South Hayward',
    'ssan': 'South San Francisco',
    'ucty': 'Union City',
    'wcrk': 'Walnut Creek',
    'wdub': 'West Dublin',
    'woak': 'West Oakland'
}
| mit | Python | |
eeee6f03131fe20bb3374cbd6c8f80b3894083da | Create main.py | GeneralZero/SSH-QRCode | main.py | main.py | #!/usr/bin/env python
"""
qr - Convert stdin (or the first argument) to a QR Code.
When stdout is a tty the QR Code is printed to the terminal and when stdout is
a pipe to a file an image is written. The default image format is PNG.
"""
import sys, os
import optparse
import qrcode
default_factories = {
'pil': 'qrcode.image.pil.PilImage',
'svg': 'qrcode.image.svg.SvgImage',
'svg-fragment': 'qrcode.image.svg.SvgFragmentImage',
}
def main(*args):
    """Render every ``*.pub`` key in a directory as a QR Code PNG.

    ``args`` are treated like a command line: the optional positional
    argument is the directory to scan (default ``$HOME/.ssh/``), while
    ``--factory``/``--optimize`` control image rendering.  One
    ``<keyname>.png`` file is written per public key.
    """
    parser = optparse.OptionParser(usage=__doc__.strip())
    parser.add_option(
        "--factory", help="Full python path to the image factory class to "
        "create the image with. You can use the following shortcuts to the "
        "built-in image factory classes: {0}.".format(
            ", ".join(sorted(default_factories.keys()))))
    parser.add_option(
        "--optimize", type=int, help="Optimize the data by looking for chunks "
        "of at least this many characters that could use a more efficient "
        "encoding method. Use 0 to turn off chunk optimization.")
    opts, args = parser.parse_args(list(args))

    if opts.factory:
        # Resolve shortcut names (pil/svg/...) to their dotted path, then
        # import the module and pull the factory class out of it.
        module = default_factories.get(opts.factory, opts.factory)
        if '.' not in module:
            parser.error("The image factory is not a full python path")
        module, name = module.rsplit('.', 1)
        imp = __import__(module, {}, [], [name])
        image_factory = getattr(imp, name)
    else:
        image_factory = None

    # Get folder containing the public ssh keys.
    if args:
        folder_name = args[0]
    else:
        folder_name = os.environ['HOME'] + "/.ssh/"

    public_keys = [basename for basename in os.listdir(folder_name) if basename.endswith('.pub')]

    for keys in public_keys:
        # A fresh QRCode per key: the old single shared instance accumulated
        # the data of every previous key into the later images.
        qr = qrcode.QRCode()
        # Close each key file deterministically instead of leaking the handle.
        with open(folder_name + keys) as key_data:
            if opts.optimize is None:
                qr.add_data(key_data.read())
            else:
                qr.add_data(key_data.read(), optimize=opts.optimize)
        qr.make()
        img = qr.make_image(image_factory=image_factory)
        img.save(keys + ".png")

if __name__ == "__main__":
    main(*sys.argv[1:])
| mit | Python | |
af492e64e4da81a5e65c3d2f2a9cdc6c6b34e786 | add main | marcorosa/wos-cli | main.py | main.py | import argparse
def _build_parser():
    """Create the command-line parser for the author lookup tool."""
    parser = argparse.ArgumentParser(description='Look for an author in the Web of Science.')
    parser.add_argument('author', help='Surname and name of the author')
    parser.add_argument('-v', '--verbose', action='store_true', help='Verbose')
    parser.add_argument('-r', '--results', type=int, default=100,
                        help='Number of results to be shown')
    parser.add_argument('-y', '--years', type=int, default=5,
                        help='Max age of shown papers')
    parser.add_argument('-A', '--affiliation', help='Affiliation of the author')
    return parser

def main():
    """Main method: parse the command line and collect the query options."""
    args = _build_parser().parse_args()
    author = args.author
    years = args.years
    aff = args.affiliation
    results = args.results

if __name__ == '__main__':
    main()
| mit | Python | |
cb1d6f417a5349df485b99bf8a41744b7692cf07 | Create main.py | sonus89/FIPER,sonus89/FIPER,sonus89/FIPER | main.py | main.py | import sys
from Tkinter import *
from winsound import *
from PIL import ImageTk, Image
import tkFont
window = Tk()

# Fullscreen background image behind the menu buttons.
im0 = Image.open('image\\background.jpg')
tkimage = ImageTk.PhotoImage(im0)
Label(window,image = tkimage).pack()

window.iconbitmap('image\\icon.ico')
window.title('FIPER')
window.attributes('-fullscreen', True)
window.configure(background='grey') # set background color
#window.bind("<Escape>", lambda e: e.widget.quit()) # quit with escape button

# winsound.PlaySound takes (sound, flags): the flags must be OR-ed together.
# Passing them as separate positional arguments raised TypeError on click.
button_sound = lambda: PlaySound('audio\\push_button.wav', SND_NOSTOP | SND_ASYNC)
# Renamed from `button_exit`, which was immediately shadowed by the Button
# widget of the same name created below.
on_exit = lambda: ( PlaySound('audio\\push_button.wav', SND_FILENAME), exit() )

PlaySound('audio\\menu.wav', SND_ASYNC)

im1 = Image.open('image\\button1.png')
button_connect_image = ImageTk.PhotoImage(im1)
button_connect = Button(window, image=button_connect_image, cursor='cross', command=button_sound)
button_connect.place(relx=0.7, rely=0.3, height=100, width=450)

im2 = Image.open('image\\button2.png')
button_host_image = ImageTk.PhotoImage(im2)
button_host = Button(window, image=button_host_image, cursor='cross', command=button_sound)
button_host.place(relx=0.7, rely=0.45, height=100, width=450)

im3 = Image.open('image\\button3.png')
button_exit_image = ImageTk.PhotoImage(im3)
button_exit = Button(window, image=button_exit_image, cursor='cross', command=on_exit)
button_exit.place(relx=0.7, rely=0.6, height=100, width=450)

window.mainloop()
| mit | Python | |
184c33d7528e61010116599f1ca3fbb68f1dc4a7 | add tkinter template | khrogos/pelican-gui | main.py | main.py | #!/usr/local/bin/python3.4
#coding: utf-8
import tkinter as tk
class MainApplication(tk.Frame):
    """Root frame of the application; build all child widgets here."""

    def __init__(self, parent, *args, **kwargs):
        tk.Frame.__init__(self, parent, *args, **kwargs)
        self.parent = parent  # keep a handle on the Tk root window

        # <create the rest of your GUI here>

if __name__ == "__main__":
    root = tk.Tk()
    MainApplication(root).pack(side="top", fill="both", expand=True)
    root.mainloop()
| mit | Python | |
46a130d1a28025cc5060560d734deed11b4346c9 | Introduce node.py. | hackerberry/ooni-probe,lordappsec/ooni-probe,juga0/ooni-probe,lordappsec/ooni-probe,Karthikeyan-kkk/ooni-probe,Karthikeyan-kkk/ooni-probe,Karthikeyan-kkk/ooni-probe,0xPoly/ooni-probe,0xPoly/ooni-probe,kdmurray91/ooni-probe,juga0/ooni-probe,lordappsec/ooni-probe,juga0/ooni-probe,hackerberry/ooni-probe,lordappsec/ooni-probe,kdmurray91/ooni-probe,kdmurray91/ooni-probe,juga0/ooni-probe,0xPoly/ooni-probe,Karthikeyan-kkk/ooni-probe,kdmurray91/ooni-probe,0xPoly/ooni-probe | node.py | node.py | #!/usr/bin/env python
# -*- coding: UTF-8
import os
import sys
import socks
class Node(object):
    """A network endpoint identified by an address and a port."""

    def __init__(self, address, port):
        # Store the endpoint coordinates verbatim.
        self.address, self.port = address, port
"""
[]: node = NetworkNode("192.168.0.112", 5555, "SOCKS5")
[]: node_socket = node.wrap_socket()
"""
class NetworkNode(Node):
def __init__(self, address, port, node_type="SOCKS5", auth_creds=None):
self.node = Node(address,port)
# XXX support for multiple types
# node type (SOCKS proxy, HTTP proxy, GRE tunnel, ...)
self.node_type = node_type
# type-specific authentication credentials
self.auth_creds = auth_creds
def _get_socksipy_socket(self, proxy_type, auth_creds):
import socks
s = socks.socksocket()
# auth_creds[0] -> username
# auth_creds[1] -> password
s.setproxy(proxy_type, self.node.address, self.node.port,
self.auth_creds[0], self.auth_creds[1])
return s
def _get_socket_wrapper(self):
if (self.node_type.startswith("SOCKS")): # SOCKS proxies
if (self.node_type != "SOCKS5"):
proxy_type = socks.PROXY_TYPE_SOCKS5
elif (self.node_type != "SOCKS4"):
proxy_type = socks.PROXY_TYPE_SOCKS4
else:
print "We don't know this proxy type."
sys.exit(1)
return self._get_socksipy_socket(proxy_type)
elif (self.node_type == "HTTP"): # HTTP proxies
return self._get_socksipy_socket(PROXY_TYPE_HTTP)
else: # Unknown proxies
print "We don't know this proxy type."
sys.exit(1)
def wrap_socket(self):
return self._get_socket_wrapper()
class CodeExecNode(Node):
    """A node capable of executing code remotely (e.g. an SSH endpoint)."""

    def __init__(self, address, port, node_type, auth_creds):
        # Underlying endpoint, the transport kind (SSH proxy, etc.) and its
        # type-specific authentication credentials.
        self.node = Node(address, port)
        self.node_type = node_type
        self.auth_creds = auth_creds

    def add_unit(self):
        # Placeholder: unit scheduling is not implemented yet.
        pass
| bsd-2-clause | Python | |
7ff3d55691d89eb8a00f273af18bade8602f34d0 | insert to db outbox | awangga/spy,awangga/spy | insert.py | insert.py | #!/usr/bin/env python
"""
insert.py - Program to :
1. insert to outbox collection,
2. check if main is running? if not run then run
"""
print "Content-Type: text-html"
print
import cgitb
cgitb.enable()
import cgi
import smsweb
form = cgi.FieldStorage()
rcpt = form["rcpt"].value
msg = form["msg"].value
sw = smsweb.SmsWeb()
sw.opendb()
print sw.insertOutbox(rcpt,msg)
| agpl-3.0 | Python | |
cded6c2f088736ace88c0771a08cd9c8ef6dccef | Test for NullConfigStorage | CartoDB/geocoder-api,CartoDB/dataservices-api,CartoDB/geocoder-api,CartoDB/dataservices-api,CartoDB/dataservices-api,CartoDB/dataservices-api,CartoDB/geocoder-api,CartoDB/geocoder-api | server/lib/python/cartodb_services/test/refactor/storage/test_null_config.py | server/lib/python/cartodb_services/test/refactor/storage/test_null_config.py | from unittest import TestCase
from cartodb_services.refactor.storage.null_config import NullConfigStorage
from cartodb_services.refactor.core.interfaces import ConfigBackendInterface
class TestNullConfigStorage(TestCase):
    """NullConfigStorage must satisfy the config-backend interface while
    always answering that no configuration exists."""

    def test_is_a_config_backend(self):
        null_config = NullConfigStorage()
        # assertIsInstance gives a descriptive failure message,
        # unlike a bare `assert isinstance(...)`.
        self.assertIsInstance(null_config, ConfigBackendInterface)

    def test_returns_none_regardless_of_input(self):
        null_config = NullConfigStorage()
        self.assertIsNone(null_config.get('whatever'))
| bsd-3-clause | Python | |
4c5a2540ea665d763e7a66fcae108dd1a2656a00 | fix file extension issue | cnbeining/you-get,chares-zhang/you-get,xyuanmu/you-get,linhua55/you-get,rain1988/you-get,qzane/you-get,j4s0nh4ck/you-get,kzganesan/you-get,cnbeining/you-get,zmwangx/you-get,flwh/you-get,smart-techs/you-get,pitatensai/you-get,lilydjwg/you-get,specter4mjy/you-get,pastebt/you-get,power12317/you-get,Red54/you-get,linhua55/you-get,XiWenRen/you-get,shanyimin/you-get,smart-techs/you-get,jindaxia/you-get,tigerface/you-get,runningwolf666/you-get,CzBiX/you-get,fffonion/you-get,xyuanmu/you-get,FelixYin66/you-get,zmwangx/you-get,forin-xyz/you-get,dream1986/you-get,lilydjwg/you-get,qzane/you-get,candlewill/you-get | you_get/downloader/mixcloud.py | you_get/downloader/mixcloud.py | #!/usr/bin/env python
__all__ = ['mixcloud_download']
from ..common import *
def mixcloud_download(url, output_dir = '.', merge = True, info_only = False):
    """Download a Mixcloud cloudcast.

    Scrapes the page for the preview URL, rewrites it to the original
    stream location, and probes stream servers 10-29 until one serves
    the file.
    """
    html = get_html(url)
    title = r1(r'<meta property="og:title" content="([^"]*)"', html)
    url = r1("data-preview-url=\"([^\"]+)\"", html)
    # Previews live under /previews/, full files under /cloudcasts/originals/.
    url = re.sub(r'previews', r'cloudcasts/originals', url)
    for i in range(10, 30):
        # The file is mirrored on one of the streamNN servers; probe each.
        url = re.sub(r'stream[^.]*', r'stream' + str(i), url)
        try:
            type, ext, size = url_info(url)
            break
        except Exception:
            # Bare `except:` also swallowed KeyboardInterrupt/SystemExit.
            continue
    else:
        # No server answered: fail with a clear message instead of the
        # NameError that the undefined `type`/`ext`/`size` would raise below.
        raise IOError('no working stream server found for %s' % title)

    print_info(site_info, title, type, size)
    if not info_only:
        download_urls([url], title, ext, size, output_dir, merge = merge)

site_info = "Mixcloud.com"
download = mixcloud_download
download_playlist = playlist_not_supported('mixcloud')
| #!/usr/bin/env python
__all__ = ['mixcloud_download']
from ..common import *
def mixcloud_download(url, output_dir = '.', merge = True, info_only = False):
    """Download a Mixcloud cloudcast (older variant of this helper)."""
    html = get_html(url)
    title = r1(r'<meta property="og:title" content="([^"]*)"', html)
    url = r1("data-preview-url=\"([^\"]+)\"", html)
    # Previews live under /previews/, full files under /cloudcasts/originals/.
    url = re.sub(r'previews', r'cloudcasts/originals', url)
    for i in range(10, 30):
        # Probe the streamNN mirror servers until one serves the file.
        url = re.sub(r'stream[^.]*', r'stream' + str(i), url)
        try:
            type, ext, size = url_info(url)
            break
        except:
            continue

    print_info(site_info, title, type, size)
    if not info_only:
        # NOTE(review): `type` (the MIME type) is passed here where the other
        # variant of this function passes `ext`; the saved file's extension
        # likely ends up wrong -- confirm against download_urls' signature.
        download_urls([url], title, type, size, output_dir, merge = merge)

site_info = "Mixcloud.com"
download = mixcloud_download
download_playlist = playlist_not_supported('mixcloud')
| mit | Python |
8951477a3b6f9e07e2f81e18b698cd0afda69d60 | add terms tests | onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle | bluebottle/terms/tests/test_api.py | bluebottle/terms/tests/test_api.py | from django.core.urlresolvers import reverse
from rest_framework import status
from bluebottle.test.utils import BluebottleTestCase
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
from bluebottle.test.factory_models.terms import TermsFactory
class TermsAPITest(BluebottleTestCase):
    """ Integration tests for the Terms API. """

    @staticmethod
    def _jwt_token(user):
        """Return the Authorization header value for *user*."""
        return 'JWT {0}'.format(user.get_jwt_token())

    def setUp(self):
        super(TermsAPITest, self).setUp()

        self.user_1 = BlueBottleUserFactory.create()
        self.user_1_token = self._jwt_token(self.user_1)

        self.user_2 = BlueBottleUserFactory.create()
        self.user_2_token = self._jwt_token(self.user_2)

        self.terms = TermsFactory.create(contents='Awesome terms!')

    def test_get_current_terms(self):
        """The current-terms endpoint exposes the latest terms text."""
        response = self.client.get(reverse('current-terms'))
        self.assertEqual(response.data['contents'], self.terms.contents)

    def test_agree_terms(self):
        """Posting an agreement links the user to the current terms."""
        response = self.client.post(reverse('terms-agreement-list'),
                                    token=self.user_2_token)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data['user'], self.user_2.id)
        self.assertEqual(response.data['terms'], self.terms.id)
| bsd-3-clause | Python | |
6632f374d0d9979fd94f462e861dfb21ae146a48 | Move utilities out of FilePlayer into sound.Util | rec/echomesh,rec/echomesh,rec/echomesh,rec/echomesh,rec/echomesh,rec/echomesh | code/python/echomesh/sound/Util.py | code/python/echomesh/sound/Util.py | from __future__ import absolute_import, division, print_function, unicode_literals
import aifc
import math
import numpy
import sunau
import wave
from echomesh.util import Subprocess
# NOTE(review): `Log` and `DefaultFile` are referenced below but no import
# for them is visible in this file -- confirm the imports were not dropped.
LOGGER = Log.logger(__name__)

# Root directory that relative audio file names are resolved against.
DEFAULT_AUDIO_DIRECTORY = DefaultFile.DefaultFile('assets/audio')

# Maps file suffixes to the stdlib module that can read that container.
FILE_READERS = {'au': sunau, 'aifc': aifc, 'aiff': aifc, 'wav': wave}

# Maps sample width in bytes to the matching numpy dtype.
NUMPY_TYPES = {1: numpy.uint8, 2: numpy.int16, 4: numpy.int32}
# Adapted from http://flamingoengine.googlecode.com/svn-history/r70/trunk/backends/audio/pyaudio_mixer.py
# TODO: config client
def interleave(left, right):
    """Convert two mono sources into one stereo source."""
    return numpy.ravel(numpy.vstack((left, right)), order='F')

def uninterleave(src):
    """Convert one stereo source into two mono sources."""
    # Integer division: with `from __future__ import division` in effect,
    # the old `len(src)/2` produced a float, which numpy.reshape rejects.
    # 'F' also replaces the legacy order spelling 'FORTRAN'.
    return src.reshape(2, len(src) // 2, order='F')
def pan_to_angle(pan):
    """Map a pan position in [-1, 1] to an angle in [0, pi/2] radians."""
    return (pan + 1.0) * math.pi / 4.0

def calculate_pan(pan):
    """Pan two mono sources in the stereo field."""
    # Clamp the pan position into [-1, 1] before converting it to an angle.
    clamped = max(-1, min(1, pan))
    angle = pan_to_angle(clamped)
    return math.cos(angle), math.sin(angle)
def play_with_aplay(file, **kwds):
    """Play an audio file through the ALSA `aplay` command-line tool.

    `file` may be relative to the default audio directory; extra keyword
    arguments are accepted (and currently ignored) for interface parity.
    """
    file = DEFAULT_AUDIO_DIRECTORY.expand(file)
    result, returncode = Subprocess.run(['/usr/bin/aplay', file])
    if returncode:
        LOGGER.error('Unable to play file %s using aplay', file)
| mit | Python | |
7bebb08b52398a1a824903584b54c34ab2b20334 | Add script to compare benchmark results with the reference | ORNL-CEES/DataTransferKit,ORNL-CEES/DataTransferKit,Rombur/DataTransferKit,ORNL-CEES/DataTransferKit,dalg24/DataTransferKit,Rombur/DataTransferKit,Rombur/DataTransferKit,Rombur/DataTransferKit,dalg24/DataTransferKit,dalg24/DataTransferKit,ORNL-CEES/DataTransferKit,dalg24/DataTransferKit | scripts/compare.py | scripts/compare.py | #! /usr/bin/env python
###############################################################################
# This script parses the output of google benchmark and compare the new result
# with a reference. It also extracts the timings so that they can be plotted.
###############################################################################
import csv
import getopt
import json
import os.path
import sys

# Read the command line arguments
argv = sys.argv[1:]
opts, args = getopt.getopt(argv, 'hc:b:r:n:o:t:', ['help', 'commit_hash=',
                           'build_number=', 'input_ref=', 'input_new=',
                           'output_file=', 'tolerance='])

# Default relative tolerance is 10%
tol = 0.1
# Parse the arguments
for opt, arg in opts:
    if opt in ('-h', '--help'):
        print('python compare.py -c hash -b build -r file1 -n file2 -o file3 -t float')
        print('-c commit_hash')
        print('-b build_number')
        print('-r reference_benchmark.json')
        print('-n new_benchmark.json')
        print('-o output_file.txt')
        print('-t relative tolerance (default 0.1)')
        sys.exit()
    elif opt in ('-c', '--commit_hash'):
        commit_hash = arg
    elif opt in ('-b', '--build_number'):
        build_number = arg
    elif opt in ('-r', '--input_ref'):
        ref_benchmark = arg
    elif opt in ('-n', '--input_new'):
        new_benchmark = arg
    elif opt in ('-o', '--output_file'):
        output_file = arg
    elif opt in ('-t', '--tolerance'):
        # Convert to float: comparing the raw getopt string against the
        # relative change below would raise TypeError on Python 3.
        tol = float(arg)

# Load the reference input file
with open(ref_benchmark, 'r') as f:
    ref_data = json.load(f)

# Load the new output file
with open(new_benchmark, 'r') as f:
    new_data = json.load(f)

# Check that the reference and the new run have the same number of benchmarks
assert len(ref_data['benchmarks']) == len(new_data['benchmarks'])
# Each benchmark contributes three entries (mean/median/stddev); use integer
# division so range() below receives an int on Python 3.
n_benchmarks = len(ref_data['benchmarks']) // 3

# Write header. This is done only if the file does not exist:
# - Number of benchmarks
# - Benchmark 1 name
# - Benchmark 2 name
# - ...
# - Benchmark N name
# - Timing types
# - Name of the reference commit
# - Timings for the different benchmarks
timing_types = ['real_time', 'cpu_time']
if not os.path.isfile(output_file):
    with open(output_file, 'w') as f:
        f.write(str(n_benchmarks) + '\n')
        for i in range(n_benchmarks):
            f.write(ref_data['benchmarks'][3*i+1]['name'] + '\n')
        for t in timing_types:
            f.write(t + ' ')
        f.write('\n')
        f.write('ref\n')
        writer = csv.writer(f)
        for i in range(n_benchmarks):
            row = []
            for time in timing_types:
                ref_time = ref_data['benchmarks'][3*i+1][time]
                row.append(ref_time)
            writer.writerow(row)

# Write the commit hash (only the first seven characters), the build number, and the timings
# for the different benchmarks
build_passed = True
failing_benchmarks = []
with open(output_file, 'a') as f:
    f.write(commit_hash[0:7] + '\n')
    f.write(build_number + '\n')
    writer = csv.writer(f)
    for i in range(n_benchmarks):
        row = []
        for time in timing_types:
            # We are only interested in the median time not the average or the
            # standard deviance. We use the median because it is more stable
            # than the average.
            new_time = new_data['benchmarks'][3*i+1][time]
            ref_time = ref_data['benchmarks'][3*i+1][time]
            row.append(new_time)
            if (new_time - ref_time) / ref_time > tol:
                failing_benchmarks.append([i, new_time, ref_time])
                build_passed = False
        writer.writerow(row)

if build_passed:
    sys.exit(0)
else:
    for failure in failing_benchmarks:
        print("Failing benchmark", failure[0], "new time", failure[1],
              "reference time", failure[2])
    sys.exit(1)
45dc85ded5a766191cd58d76a16470fc063d6e70 | Add error formatting tests for httperror | racker/fleece,racker/fleece | tests/test_httperror.py | tests/test_httperror.py | import unittest
from fleece import httperror
class HTTPErrorTests(unittest.TestCase):
    """Tests for :class:`fleece.httperror.HTTPError`."""

    def test_error_msg_format(self):
        # A status-only error renders as "<code>: <reason phrase>".
        with self.assertRaises(httperror.HTTPError) as raised:
            raise httperror.HTTPError(status=404)
        self.assertEqual(str(raised.exception), '404: Not Found')

    def test_error_msg_format_custom_message(self):
        # A custom message is appended after the standard reason phrase.
        with self.assertRaises(httperror.HTTPError) as raised:
            raise httperror.HTTPError(status=404, message='Nothing Here')
        self.assertEqual(str(raised.exception),
                         '404: Not Found - Nothing Here')
| apache-2.0 | Python | |
cad79ac342ffe685062c5c90f05e6f573fb7b5b5 | Add missing test file. See #1416. (#1417) | EuroPython/epcon,EuroPython/epcon,EuroPython/epcon,EuroPython/epcon | tests/test_streamset.py | tests/test_streamset.py | from pytest import mark
from tests import factories
from tests import common_tools
from tests.common_tools import (
make_user,
create_talk_for_user,
get_default_conference,
template_used)
from conference import user_panel
from conference import models
STERAMS_1 = [
{
"title": "Holy Grail",
"fare_codes": ["TRCC", "TRCP", "TRSC", "TRSP", "TRVC", "TRVP"],
"url": "https://www.youtube.com/embed/EEIk7gwjgIM"
}
]
def create_streamset():
    """Create the default conference plus one StreamSet holding STERAMS_1.

    The streams field is stored as JSON text, so repr()'s single quotes are
    rewritten to double quotes to produce valid JSON.
    """
    get_default_conference()
    stream_set = factories.StreamSetFactory(
        streams=repr(STERAMS_1).replace('\'', '"')
    )
    stream_set.save()
@mark.django_db
def test_streamset(user_client):
    # Smoke test: creating the conference and stream set must not raise.
    create_streamset()
@mark.django_db
def test_streamset_without_ticket(user_client):
    """A user holding no ticket gets no streams, only the reload timeout."""
    create_streamset()
    # User without ticket
    data = user_panel.get_streams_for_current_conference(user_client.user)
    #print (data)
    assert not data['streams']
    assert 'reload_timeout_seconds' in data
@mark.django_db
def test_streamset_with_ticket(user_client):
    """A user with a valid view-only (TRVC) ticket sees the configured stream."""
    create_streamset()
    # User with view-only ticket
    common_tools.setup_conference_with_typical_fares()
    fare = models.Fare.objects.get(code='TRVC')
    ticket = common_tools.create_valid_ticket_for_user_and_fare(
        user_client.user, fare=fare)
    ticket.save()
    data = user_panel.get_streams_for_current_conference(user_client.user)
    #print (data)
    assert len(data['streams']) == 1
    tracks = data['streams'][0]
    # Values must match the STERAMS_1 fixture stored by create_streamset().
    assert tracks['title'] == 'Holy Grail'
    assert tracks['url'] == 'https://www.youtube.com/embed/EEIk7gwjgIM'
    assert 'reload_timeout_seconds' in data
    assert data['reload_timeout_seconds'] > 3600  # factory sets the end_date to now + 1 hour
assert data['reload_timeout_seconds'] > 3600 # factory sets the end_date to now + 1 hour
| bsd-2-clause | Python | |
2f18f9f8098b5af9fc1d39a932ab07c455c2f514 | Add tests for the Tutorials in the documentation | johannesmik/neurons,timqian/neurons | tests/test_tutorials.py | tests/test_tutorials.py | __author__ = 'johannes'
import pytest
import numpy as np
from neurons import spiking, learning
class TestSRMNetwork:
    """First tutorial: a three-neuron SRM network (two inputs, one output)."""
    def test_tutorial_works(self):
        """Input spikes on neurons 0/1 must produce the expected output spikes."""
        model = spiking.SRM(neurons=3, threshold=1, t_current=0.3, t_membrane=20, eta_reset=5)
        # Neurons 0 and 1 both project onto neuron 2 with weight 1.
        weights = np.array([[0, 0, 1.], [0, 0, 1.], [0, 0, 0]])
        spiketrain = np.array([[0, 0, 1, 0, 0, 0, 1, 1, 0, 0],
                               [1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=bool)
        # check_spikes() fills in row 2 (the output neuron) in place.
        for time in range(10):
            total_potential = model.check_spikes(spiketrain, weights, time)
        print("Spiketrain:")
        print(spiketrain)
        expected_spiketrain = np.array([[0, 0, 1, 0, 0, 0, 1, 1, 0, 0],
                                        [1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
                                        [0, 0, 0, 1, 0, 0, 0, 0, 1, 0]], dtype=bool)
        assert np.array_equal(spiketrain, expected_spiketrain)
class TestLearning:
    """Second tutorial: STDP weight updates driven by a fixed spike train."""
    def test_tutorial_works(self):
        """Weights must match values recorded from a reference run."""
        stdp_model = learning.STDP(eta=0.05, w_in=0.5, w_out=0.5, tau=10.0, window_size=5)
        weights = np.array([[0, 0, 1.], [0, 0, 1.], [0, 0, 0]])
        spiketrain = np.array([[0, 0, 1, 0, 0, 0, 1, 1, 0, 0],
                               [1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
                               [0, 0, 0, 1, 0, 0, 0, 0, 1, 0]], dtype=bool)
        # weight_change() mutates `weights` in place at every time step.
        for time in range(10):
            stdp_model.weight_change(spiketrain, weights, time)
        print("Weights after")
        print(weights)
        # That's the output that I got during my first run
        expected_weights = np.array([[0, 0, 1.18586337],
                                     [0, 0, 1.17766241],
                                     [0, 0, 0]])
        nullmatrix = np.zeros((3, 3))
        # Compare to five decimal places: the rounded difference must be zero.
        assert np.array_equal(nullmatrix, np.around(expected_weights - weights, 5))
class TestSpikeAndLearn:
    """Third tutorial: interleaved SRM spiking and STDP learning."""
    def test_tutorial_works(self):
        """Both the spike train and the weights must match the reference run."""
        srm_model = spiking.SRM(neurons=3, threshold=1, t_current=0.3, t_membrane=20, eta_reset=5)
        stdp_model = learning.STDP(eta=0.05, w_in=0.5, w_out=0.5, tau=10.0, window_size=5)
        weights = np.array([[0, 0, 1.], [0, 0, 1.], [0, 0, 0]])
        spiketrain = np.array([[0, 0, 1, 0, 0, 0, 1, 1, 0, 0],
                               [1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=bool)
        # Each step first propagates spikes, then applies the weight update.
        for time in range(10):
            srm_model.check_spikes(spiketrain, weights, time)
            stdp_model.weight_change(spiketrain, weights, time)
        # Output that I got during my first run. There's a possibility that this is wrong calculations.
        expected_spiketrain = np.array([[0, 0, 1, 0, 0, 0, 1, 1, 0, 0],
                                        [1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
                                        [0, 0, 0, 1, 0, 0, 0, 0, 1, 1]], dtype=bool)
        expected_weights = np.array([[0, 0, 1.28884081],
                                     [0, 0, 1.28063985],
                                     [0, 0, 0]])
        assert np.array_equal(spiketrain, expected_spiketrain)
        nullmatrix = np.zeros((3, 3))
        assert np.array_equal(nullmatrix, np.around(expected_weights - weights, 5))
a627fa4c681bdd9de323750c3ab3f2cb0d5fca86 | Add basic infrastructure for rest API | CatalystOfNostalgia/hoot,CatalystOfNostalgia/hoot | server/hoot/app.py | server/hoot/app.py | #!../env/bin/python
from flask import Flask, jsonify
app = Flask(__name__)
@app.route('/hoot/api/v1.0/', methods=['GET'])
def index():
    """API root: return a static hello-world JSON payload."""
    return jsonify({'hello': 'Hello World!'})
if __name__ == '__main__':
app.run(debug=True)
| mit | Python | |
f983f78262accdca35982ea3c6088b85bb836a8a | Create phasing_success.py | jmp1985/metrix-database | phasing_success.py | phasing_success.py | from os import listdir
import sqlite3

# Open the metrics database that holds the PDB_id and Phasing tables.
conn = sqlite3.connect('metrix_db.sqlite')
cur = conn.cursor()

path = '/dls/mx-scratch/melanie/for_METRIX/data_base_proc/simple_MR'

# Directory entries whose name is exactly four characters long are PDB codes.
pdb_list = [item for item in listdir(path) if len(item) == 4]

for pdb in pdb_list:
    # Parameterised queries: the original interpolated values with %, which
    # is fragile (quoting) and open to SQL injection.
    cur.execute('SELECT id FROM PDB_id WHERE PDB_id.pdb_id = ?', (pdb,))
    pdb_pk = cur.fetchone()[0]
    # Fixed: the original wrote "VALUES %s" without parentheses, which is
    # invalid SQLite syntax.
    cur.execute('INSERT OR IGNORE INTO Phasing (pdb_id_id) VALUES (?)',
                (pdb_pk,))
    new_path = path + '/' + pdb
    phaser_search = listdir(new_path)
    if 'PHASER.sol' in phaser_search:
        count = 0
        TFZ_sum = 0
        LLG_sum = 0
        # Lines of interest carry the TFZ and LLG indicators as the last two
        # whitespace-separated tokens (e.g. "... LLG=45 TFZ==12.3").
        with open(new_path + '/%s.log' % (pdb)) as reader:
            for line in reader:
                if 'SOLU SET RFZ' in line:
                    line = line.split()
                    item1 = line[-1]
                    item2 = line[-2]
                    indicator_list = [item1, item2]
                    if 'TFZ' in item1 or 'TFZ' in item2:
                        count += 1
                        # After sorting, 'LLG=...' precedes 'TFZ==...', so the
                        # slices below strip the respective prefixes.
                        indicator_list = sorted(indicator_list)
                        TFZ_sum += float(indicator_list[1][5:])
                        LLG_sum += float(indicator_list[0][4:])
        if count != 0:
            TFZ_mean = TFZ_sum / count
            LLG_mean = LLG_sum / count
            # Phasing counts as successful above these empirical thresholds.
            if TFZ_mean > 8.0 and LLG_mean > 120:
                phasing_success = 1
            else:
                phasing_success = 0
            cur.execute('UPDATE Phasing SET phasing_success = ? '
                        'WHERE Phasing.pdb_id_id = ?',
                        (phasing_success, pdb_pk))
    else:
        # Fixed: the original else-branch contained bare SQL text without a
        # cur.execute() call -- a SyntaxError.  No PHASER solution file means
        # phasing failed for this entry.
        cur.execute('UPDATE Phasing SET phasing_success = 0 '
                    'WHERE Phasing.pdb_id_id = ?', (pdb_pk,))

# Persist the updates (the original never committed, losing all changes).
conn.commit()
conn.close()
| bsd-2-clause | Python | |
3c31c9541cab4e452074b7c2ab08f28e48f47e4c | add admin site for DomainLink | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/linked_domain/admin.py | corehq/apps/linked_domain/admin.py | from __future__ import absolute_import
from django.contrib import admin
from .models import DomainLink
class DomainLinkAdmin(admin.ModelAdmin):
    """Django admin configuration for DomainLink records."""
    model = DomainLink
    # Columns shown in the changelist view.
    list_display = [
        'linked_domain',
        'master_domain',
        'remote_base_url',
        'last_pull',
    ]
    # Sidebar filters offered in the changelist view.
    list_filter = [
        'linked_domain',
        'master_domain',
        'last_pull',
    ]
    search_fields = ['linked_domain', 'master_domain']
admin.site.register(DomainLink, DomainLinkAdmin)
| bsd-3-clause | Python | |
93d66d085a618b104d67a5fc1d1cf7507c31fff6 | fix NameError | fivejjs/crosscat,probcomp/crosscat,fivejjs/crosscat,mit-probabilistic-computing-project/crosscat,mit-probabilistic-computing-project/crosscat,mit-probabilistic-computing-project/crosscat,probcomp/crosscat,probcomp/crosscat,probcomp/crosscat,fivejjs/crosscat,mit-probabilistic-computing-project/crosscat,probcomp/crosscat,probcomp/crosscat,mit-probabilistic-computing-project/crosscat,mit-probabilistic-computing-project/crosscat,probcomp/crosscat,fivejjs/crosscat,probcomp/crosscat,mit-probabilistic-computing-project/crosscat,fivejjs/crosscat,fivejjs/crosscat,fivejjs/crosscat | crosscat/utils/experiment_utils.py | crosscat/utils/experiment_utils.py | import os
import collections
#
import crosscat.utils.file_utils as file_utils
import crosscat.utils.geweke_utils as geweke_utils
import crosscat.utils.general_utils as general_utils
result_filename = geweke_utils.summary_filename
def find_configs(dirname, filename=result_filename):
    """Walk *dirname* and return the path of every summary file found."""
    def contains_summary(walk_entry):
        # walk_entry is a (root, dirnames, filenames) tuple from os.walk.
        return walk_entry[2].count(filename)
    def summary_path(walk_entry):
        return os.path.join(walk_entry[0], filename)
    matching = filter(contains_summary, os.walk(dirname))
    return map(summary_path, matching)
def read_all_configs(dirname='.'):
    """Unpickle every summary file under *dirname* and return the configs."""
    def read_config(filepath):
        result = file_utils.unpickle(filepath, dir=dirname)
        config = result['config']
        return config
    filepaths = find_configs(dirname)
    config_list = map(read_config, filepaths)
    return config_list
def generate_filepath(config):
    """Map an experiment config to the path of its summary file."""
    return os.path.join(geweke_utils.generate_directory_name(**config),
                        result_filename)
def read_results(config_list, dirname=''):
    """Load the pickled result for each config; accepts a single config too."""
    def read_result(config):
        filepath = generate_filepath(config)
        result = file_utils.unpickle(filepath, dir=dirname)
        return result
    config_list = general_utils.ensure_listlike(config_list)
    results = map(read_result, config_list)
    return results
def write_result(config, result, dirname=''):
    """Pickle *result* to the summary path derived from *config*.

    NOTE(review): this passes dirname= whereas the readers above pass dir=
    to file_utils -- confirm file_utils.pickle really takes `dirname`.
    """
    filepath = generate_filepath(config)
    file_utils.pickle(result, filepath, dirname=dirname)
    return
def do_experiments(runner, writer, config_list):
    """Run *runner* on each config, persist via *writer*, return the results."""
    config_list = general_utils.ensure_listlike(config_list)
    results = []
    for config in config_list:
        result = runner(**config)
        writer(config, result)
        results.append(result)
    return results
if __name__ == '__main__':
config_list = read_all_configs()
results = read_results(config_list[:-1])
| import os
import collections
#
import crosscat.utils.file_utils as file_utils
import crosscat.utils.geweke_utils as geweke_utils
import crosscat.utils.general_utils as general_utils
result_filename = geweke_utils.summary_filename
def find_configs(dirname, filename=result_filename):
root_has_filename = lambda (root, ds, filenames): filenames.count(filename)
get_filepath = lambda (root, ds, fs): os.path.join(root, filename)
tuples = filter(has_filename, os.walk(dirname))
filepaths = map(get_filepath, tuples)
return filepaths
def read_all_configs(dirname='.'):
def read_config(filepath):
result = file_utils.unpickle(filepath, dir=dirname)
config = result['config']
return config
filepaths = find_configs(dirname)
config_list = map(read_config, filepaths)
return config_list
def generate_filepath(config):
_dirname = geweke_utils.generate_directory_name(**config)
filepath = os.path.join(_dirname, result_filename)
return filepath
def read_results(config_list, dirname=''):
def read_result(config):
filepath = generate_filepath(config)
result = file_utils.unpickle(filepath, dir=dirname)
return result
config_list = general_utils.ensure_listlike(config_list)
results = map(read_result, config_list)
return results
def write_result(config, result, dirname=''):
filepath = generate_filepath(config)
file_utils.pickle(result, filepath, dirname=dirname)
return
def do_experiments(runner, writer, config_list):
def do_experiment(config):
result = runner(**config)
writer(config, result)
return result
config_list = general_utils.ensure_listlike(config_list)
results = map(do_experiment, config_list)
return results
if __name__ == '__main__':
config_list = read_all_configs()
results = read_results(config_list[:-1])
| apache-2.0 | Python |
00f40b80a9edfe0e328db019347a464a311b0dd6 | Create sign.py | JieC/Ultisign | sign.py | sign.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import requests
import re
import json
import logging
import codecs
import sys
logging.getLogger("urllib3").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
handler = logging.FileHandler('sign.log', 'a', 'utf-8')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
class Sign:
    """Drives login and daily sign-in for one site described in sign.json."""
    def __init__(self, config, site):
        """Create a requests session and cache the per-site settings.

        config -- per-site dict (login form data, URLs, regex extractors,
                  success markers); site -- site name used in log output.
        """
        self.session = requests.Session()
        # Mobile user-agent: some sites serve a simpler page to phones.
        self.session.headers.update(
            {'user-agent': 'Mozilla/5.0 (Linux; \
            Android 5.1.1; Nexus 4 Build/LMY48T) AppleWebKit/537.36 \
            (KHTML, like Gecko) Chrome/40.0.2214.89 Mobile Safari/537.36'})
        self.login = config['login']
        self.base_url = config['base_url']
        self.login_page = config['login_page']
        self.login_extractor = config['login_extractor']
        self.extractor_key = config['extractor_key']
        self.login_url = config['login_url']
        self.login_success = config['login_success']
        self.sign_page = config['sign_page']
        self.sign_extractor = config['sign_extractor']
        self.sign_url = config['sign_url']
        self.sign_success = config['sign_success']
        self.site = site
    def do_login(self):
        """Post the login form, first scraping a token when configured.

        On success, proceeds to do_sign() when a sign URL is configured;
        otherwise just logs the login result.
        """
        extract = ''
        if self.login_page:
            login_page_r = self.session.get(self.base_url + self.login_page)
            if self.login_extractor:
                login_extract = re.search(
                    self.login_extractor,
                    login_page_r.text)
                if login_extract:
                    extract = login_extract.group(1)
                    if self.extractor_key:
                        self.login[self.extractor_key] = extract
                else:
                    logger.error('Not able to find login extract')
                    return
        login = self.session.post(
            self.base_url + self.login_url.format(extract=extract),
            data=self.login)
        # login_success is (marker, attribute): the marker string must occur
        # in the named response attribute (e.g. 'text' or 'url').
        if self.login_success[0] in getattr(login, self.login_success[1]):
            if self.sign_url:
                self.do_sign()
            else:
                logger.info(self.site + ':Login Successful')
        else:
            logger.error('Login Failed')
            logger.debug(login.text)
    def do_sign(self):
        """Hit the sign-in URL, first scraping a token when configured."""
        extract = ''
        if self.sign_page:
            sign_page_r = self.session.get(self.base_url + self.sign_page)
            if self.sign_extractor:
                sign_extract = re.search(self.sign_extractor, sign_page_r.text)
                if sign_extract:
                    extract = sign_extract.group(1)
                else:
                    logger.error('Not able to find sign extract')
                    return
        sign = self.session.get(
            self.base_url + self.sign_url.format(extract=extract))
        if logger.isEnabledFor(logging.DEBUG):
            # JSON bodies often contain \uXXXX escapes; decode them so the
            # debug log stays readable.
            is_json = re.search(
                'json|javascript', sign.headers['Content-Type'])
            if is_json:
                logger.debug(codecs.getdecoder('unicode-escape')(sign.text)[0])
            else:
                logger.debug(sign.text)
        if self.sign_success[0] in getattr(sign, self.sign_success[1]):
            logger.info(self.site + ':Sign Successful')
        else:
            logger.error('Sign Failed')
            logger.debug(sign.text)
def main():
    """Read sign.json and run the login/sign flow for every active site."""
    with codecs.open('sign.json', 'r', encoding='utf-8') as f:
        config = json.load(f)
    for site in config['active']:
        s = Sign(config[site], site)
        s.do_login()
if __name__ == "__main__":
if '-d' in sys.argv:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
main()
| mit | Python | |
bf5f63ce8c6bd4fbef48c848cc8ed9eb5874b326 | Create test.py | yanzhenchao/algorithms-in-python | test.py | test.py | import random
from sort_and_search import *
# Demo driver: exercise each sorting routine from sort_and_search on a
# fresh random list, printing the list before and after sorting.  The
# original file copy-pasted the same seven lines per algorithm; the helper
# below produces byte-identical output without the duplication.


def _demo_sort(title, sort_func):
    """Print *title*, a random 40-element list, sort it in place, print it."""
    array = [random.randint(0, 100) for n in range(40)]
    print(title + ':\n')
    print('Before:\n', array)
    sort_func(array)
    print('After:\n', array)
    print('\n')


_demo_sort('Bubble Sort', bubble_sort)
_demo_sort('Selection Sort', selection_sort)
_demo_sort('Insertion Sort', insertion_sort)
_demo_sort('Shell Sort', shell_sort)
_demo_sort('Merge Sort', merge_sort)
_demo_sort('Quick Sort', quick_sort)
_demo_sort('Heap Sort', heap_sort)

# Binary Search: needs a sorted list, so sort first, then look up a key.
array = [random.randint(0, 100) for n in range(40)]
print('Binary Search:\n')
quick_sort(array)
print('List:\n', array)
key = random.randint(0, 100)
print('Key:\n', key, '\nFound:\n', binary_search(array, key))
print('\n')
| mit | Python | |
3c065b1c1633c0fccebfa1efe76fe59aa8fed3f4 | Add XKCDPlugin class with basic xkcd command. | mrshu/brutal-plugins,Adman/brutal-plugins | xkcd.py | xkcd.py | """
XKCD brutal plugins.
Provides basic commands for showing xkcd info in IRC.
"""
from brutal.core.plugin import BotPlugin, cmd
import json
import urllib
SLEEP_TIME = 3600
def get_xkcd_metadata(num=None):
    """Return the metadata dict for xkcd number *num*, or the latest.

    Fetches http://xkcd.com/<num>/info.0.json (Python 2 urllib) and adds a
    'url' key pointing at the comic's human-facing page.
    """
    site_url = 'http://xkcd.com/'
    json_filename = 'info.0.json'
    if num:
        comic_selector = '{}/'.format(num)
    else:
        comic_selector = ''
    url = site_url + comic_selector + json_filename
    data = urllib.urlopen(url).read()
    data = json.loads(data)
    data['url'] = 'http://xkcd.com/' + str(data['num'])
    return data
def format_xkcd(comic_data):
    """Return a one-line summary for a comic metadata dict.

    Expects the 'num', 'title' and 'url' keys produced by
    get_xkcd_metadata().
    """
    number = comic_data['num']
    title = comic_data['title']
    url = comic_data['url']
    return 'xkcd #{}: {} | {}'.format(number, title, url)
class XKCDPlugin(BotPlugin):
    """IRC bot plugin exposing the xkcd command."""
    def setup(self, *args, **kwargs):
        # Cache the newest comic at startup; used for bounds checks below.
        self.latest = get_xkcd_metadata()
    @cmd
    def xkcd(self, event):
        """Shows details of requested xkcd.
        Args:
            If no argument is given, data of latest xkcd is given.
            If a number is given, shows data for corresponding xkcd, or n-th
            latest, if number is non-positive.
            If argument is non-numeric, a placeholder message is returned
            (full-text search is not implemented yet).
        """
        args = event.args
        if len(args) < 1:
            return format_xkcd(self.latest)
        try:
            num = int(args[0])
        except ValueError:
            # Non-numeric argument: search is not implemented yet.
            return "Be patient! We're getting there!"
        if num > self.latest['num']:
            return 'not yet released!'
        if num <= 0:
            # Since 'num' is negative, this basically takes num-th latest
            # comic. It's the same as self.latest['num'] - abs(num).
            num = self.latest['num'] + num
            if num <= 0:
                return 'somebody wants to go back in time way too far!'
        return format_xkcd(get_xkcd_metadata(num))
| apache-2.0 | Python | |
b10db4316af4f044cbde96076064beae33101d6d | Add misc parser | pa-pyrus/ircCommander | misc.py | misc.py | # vim:fileencoding=utf-8:ts=8:et:sw=4:sts=4:tw=79
from datetime import datetime
from json import loads
from urllib import urlencode
from twisted.internet.defer import Deferred
from twisted.python import log
from twisted.web.client import getPage
UBERNET_NEWS_URL = "http://uberent.com/GameClient/GetNews"
class MiscParser(object):
    """
    Parser for miscellaneous web API calls.
    It supports reading a variety of different APIs returning JSON data.
    """
    def __init__(self):
        """Remember the UberNet news endpoint to query."""
        # Fixed: self.news_url was never assigned, so startNewsUpdate()
        # crashed with AttributeError; the module constant was unused.
        self.news_url = UBERNET_NEWS_URL

    def startNewsUpdate(self, count):
        """
        Initiate an update using Twisted.
        The request is handled asynchronously. It will call onNewsUpdate if
        it's successful and onError otherwise.
        """
        log.msg("Updating URL contents for: {0}".format(self.news_url))
        url = "{0}?{1}".format(self.news_url, urlencode({"titleid": 4,
                                                         "count": count}))
        deferred = getPage(url)
        deferred.addCallbacks(self.onNewsUpdate, self.onError)
        return deferred

    def onNewsUpdate(self, value):
        """Value callback for retrieving Uberent News data."""
        data = loads(value, encoding="utf-8")
        news = [{"date": datetime.strptime(item["Timestamp"],
                                           "%Y-%m-%d.%H:%M:%S"),
                 "title": item["Title"]}
                for item in data["News"]]
        log.msg("Received and parsed new data: {0}".format(news))
        return news

    def onError(self, failure):
        """Error callback: log the failure and let it propagate.

        Fixed: the original registered self.onError as an errback without
        defining it, raising AttributeError on the first update attempt.
        """
        log.msg("News update failed: {0}".format(failure))
        return failure

    def news(self, count):
        """Start an update and return a deferred containing the results."""
        updateDeferred = self.startNewsUpdate(count)
        newDeferred = Deferred()
        updateDeferred.addCallbacks(newDeferred.callback, newDeferred.errback)
        return newDeferred
1ef26b03bfda67e12af557944417b59357a5c324 | Create __init__.py | christian-stephen/apriori-algorithm | apriori/__init__.py | apriori/__init__.py | mit | Python | ||
f82730bfab4a65efa6cd1e7ecb767514bbb481a4 | add function to find local IP addresses | idanyelin/magic-wormhole,warner/magic-wormhole,warner/magic-wormhole,warner/magic-wormhole,shaunstanislaus/magic-wormhole,negativo/magic-wormhole,barseghyanartur/magic-wormhole,david415/magic-wormhole,warner/magic-wormhole | src/wormhole/ipaddrs.py | src/wormhole/ipaddrs.py |
# Find all of our ip addresses. From tahoe's src/allmydata/util/iputil.py
import os, re, subprocess, errno
from sys import platform
# Wow, I'm really amazed at home much mileage we've gotten out of calling
# the external route.exe program on windows... It appears to work on all
# versions so far. Still, the real system calls would much be preferred...
# ... thus wrote Greg Smith in time immemorial...
_win32_re = re.compile(r'^\s*\d+\.\d+\.\d+\.\d+\s.+\s(?P<address>\d+\.\d+\.\d+\.\d+)\s+(?P<metric>\d+)\s*$', flags=re.M|re.I|re.S)
_win32_commands = (('route.exe', ('print',), _win32_re),)
# These work in most Unices.
_addr_re = re.compile(r'^\s*inet [a-zA-Z]*:?(?P<address>\d+\.\d+\.\d+\.\d+)[\s/].+$', flags=re.M|re.I|re.S)
_unix_commands = (('/bin/ip', ('addr',), _addr_re),
('/sbin/ifconfig', ('-a',), _addr_re),
('/usr/sbin/ifconfig', ('-a',), _addr_re),
('/usr/etc/ifconfig', ('-a',), _addr_re),
('ifconfig', ('-a',), _addr_re),
('/sbin/ifconfig', (), _addr_re),
)
def find_addresses():
    """Return the machine's IPv4 addresses by parsing a route/ifconfig tool.

    Tries each known tool in turn and returns the first non-empty result;
    returns [] when no tool produces addresses.
    """
    # originally by Greg Smith, hacked by Zooko and then Daira
    # We don't reach here for cygwin.
    if platform == 'win32':
        commands = _win32_commands
    else:
        commands = _unix_commands
    for (pathtotool, args, regex) in commands:
        # NOTE(review): _unix_commands ends with a bare 'ifconfig' entry,
        # which is not absolute and would trip this assert if ever reached
        # -- confirm against the upstream tahoe iputil version.
        assert os.path.isabs(pathtotool)
        if not os.path.isfile(pathtotool):
            continue
        try:
            addresses = _query(pathtotool, args, regex)
        except Exception:
            # Best-effort: a broken tool just means we try the next one.
            addresses = []
        if addresses:
            return addresses
    return []
def _query(path, args, regex):
    """Run *path* with *args* and return the unique 'address' regex matches.

    Python 2 only (`except OSError, e`, xrange).  The subprocess launch is
    retried on EINTR up to five times.
    """
    env = {'LANG': 'en_US.UTF-8'}  # force parseable English tool output
    TRIES = 5
    for trial in xrange(TRIES):
        try:
            p = subprocess.Popen([path] + list(args),
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 env=env)
            (output, err) = p.communicate()
            break
        except OSError, e:
            if e.errno == errno.EINTR and trial < TRIES-1:
                continue
            raise
    addresses = []
    outputsplit = output.split('\n')
    for outline in outputsplit:
        m = regex.match(outline)
        if m:
            addr = m.group('address')
            # Preserve first-seen order while de-duplicating.
            if addr not in addresses:
                addresses.append(addr)
    return addresses
| mit | Python | |
6e63032cee81bfa8125c7eecd4d1697ddf4ff159 | Create procrastination.py | oxydum/snippets | procrastination.py | procrastination.py | def procrastination():
pass
| unlicense | Python | |
c336d907482958da06417c36723574b67d8ef2a5 | Add SQLite support | rolisz/Log,rolisz/Log | sqlite_importer.py | sqlite_importer.py | import logging
import parsers
import collections
import itertools
import json
from datetime import datetime
import sqlite3
def lines():
    """Yield one DB row tuple per message from the module-global `messages`."""
    for contact in messages:
        for line in messages[contact]:
            yield (contact, line['contact'],line['timestamp'], line['source'],
                   line['protocol'],line['nick'],line['message'])
def grouper(n, iterable):
    """Yield consecutive tuples of at most *n* items from *iterable*."""
    iterator = iter(iterable)
    chunk = tuple(itertools.islice(iterator, n))
    while chunk:
        yield chunk
        chunk = tuple(itertools.islice(iterator, n))
# logging.basicConfig(level=logging.INFO)
# Gather every message, grouped by contact, from all supported log formats.
messages = collections.defaultdict(list)
for contact, text in parsers.Digsby("./Digsby Logs"):
    messages[contact].append(text)
print("Digsby")
for contact, text in parsers.Trillian("./Trillian"):
    messages[contact].append(text)
print("Trillian")
for contact, text in parsers.Trillian("./Trillian2"):
    messages[contact].append(text)
print("Trillian")
for contact, text in parsers.Pidgin("./Pidgin"):
    messages[contact].append(text)
print("Pidgin")
for contact, text in parsers.Whatsapp("./Whatsapp"):
    messages[contact].append(text)
print("Whatsapp")
for contact, text in parsers.Facebook(files=["./Facebook/cleaned.html"]):
    messages[contact].append(text)
print("Facebook")
# Each parser yields lists of messages; flatten them and sort each
# contact's messages chronologically.
for contact in messages:
    messages[contact] = list(itertools.chain.from_iterable(messages[contact]))
    messages[contact].sort(key=lambda x: x['timestamp'])
print("Sorting")
# print(messages)
# for k in messages:
#     print k, len(messages[k])
# print(messages['Eliza'])
# f = open("./logs/messages.json", "w")
# json.dump(messages, f, indent=2, ensure_ascii=False)
# f.close()
# Rebuild the messages table from scratch and bulk-insert the rows in
# batches of 5000 to keep transactions manageable.
sqlite_file = './messages.db'
conn = sqlite3.connect(sqlite_file)
c = conn.cursor()
c.execute('DROP TABLE IF EXISTS messages')
c.execute('CREATE TABLE messages (contact TEXT, sender TEXT, datetime TEXT,'
          'source TEXT, protocol TEXT, nick TEXT, message TEXT)')
for gr in grouper(5000, lines()):
    conn.executemany("INSERT INTO messages (contact, sender, datetime, source,"
                     "protocol, nick, message) values (?, ?, ?, ?, ?, ?, ?)", gr)
    print("Inserted 5000")
conn.commit()
conn.close()
| mit | Python | |
500df3f340d7782c759634529ae40ce56f7bec3e | Add first file Only read a .docx until now | Psidium/NTL | plag.py | plag.py | from docx import Document
def main():
    """Entry point: check the .docx file named in argv[1] for plagiarism.

    Only the document-reading part is implemented; the search-engine
    querying described in the TODO comments below is still to be written.
    """
    import sys

    # Bug fixes vs. the original: a module-level `return` was a
    # SyntaxError, and `sys.args` does not exist -- the argument list is
    # `sys.argv`, whose first real argument is argv[1] (argv[0] is the
    # script name).
    if len(sys.argv) < 2:
        print("Must specify file!")
        return
    # open the docx (and docx only)
    document = Document(sys.argv[1])
    # for each paragraph on the docx
    for parag in document.paragraphs:
        # extract the string
        text = parag.text
        # split at whitespace
        splitted = text.split()
        # TODO: send to google every 5~10 words and save the url of the
        # first Y results (parallelism preferrable, bandwidth is not a big
        # problem, the old http protocol is)
        # TODO: count the ocurrences of each URL
        # TODO: create a ratio based on the size of the document and the
        # times an URL can appear; if a given URL goes beyond that ratio,
        # it's plagiarized


if __name__ == "__main__":
    main()
| apache-2.0 | Python | |
94794e61298e6b1763f571d5e48c3549a538ac51 | add tests | astex/sequential | test_sequential.py | test_sequential.py | """Tests the decorators."""
try:
import unittest2 as unittest
except ImportError:
import unittest
from sequential import before, after, during
__all__ = ['TestSequential']
class TestSequential(unittest.TestCase):
    """Exercises the @before and @after decorators in chained and plain mode."""
    def test_before_chain(self):
        """Tests @before chained to another function.

        With chain=True, the wrapped function receives the chained
        function's return value as its input.
        """
        def add_b(word=''):
            return word + 'b'
        @before(add_b, chain=True)
        def add_a(word=''):
            return word + 'a'
        assert add_a() == 'ba'
    def test_before_no_chain(self):
        """Tests @before not chained to another function.

        Without chaining, both functions see the same argument; the side
        effect of switch_a must be visible to check_a.
        """
        def switch_a(d):
            d['a'] = True
        @before(switch_a)
        def check_a(d):
            assert d['a']
        check_a({'a': False})
    def test_after_chain(self):
        """Tests @after chained to another function."""
        def add_a(word=''):
            return word + 'a'
        @after(add_a, chain=True)
        def add_b(word=''):
            return word + 'b'
        assert add_b() == 'ba'
    def test_after_no_chain(self):
        """Tests @after not chained to another function."""
        def check_a(d):
            assert d['a']
            d['b'] = True
        @after(check_a)
        def switch_a(d):
            d['a'] = True
        d = {'a': False, 'b': False}
        switch_a(d)
        assert d['b']
# TODO Come up with a good way of testing @during.
| mit | Python | |
8b70516830e0226c96a274c484ec1681c6e248a4 | test for resources, still needs bytesio and stringio test... | hypatia-software-org/hypatia-engine,brechin/hypatia,lillian-lemmer/hypatia,Applemann/hypatia,lillian-lemmer/hypatia,Applemann/hypatia,brechin/hypatia,hypatia-software-org/hypatia-engine | tests/test_util.py | tests/test_util.py | # This module is part of Hypatia and is released under the
# MIT license: http://opensource.org/licenses/MIT
"""py.test unit testing for hypatia/util.py
Run py.test on this module to assert hypatia.util
is completely functional.
"""
import os
try:
import ConfigParser as configparser
except ImportError:
import configparser
import pygame
import pytest
import pyganim
from hypatia import util
try:
os.chdir('demo')
except OSError:
pass
def test_resource():
    """util.Resource must expose its files by name, decoding .gif entries
    to PygAnimation objects and .ini entries to ConfigParser objects.
    """
    resource = util.Resource('walkabouts', 'debug')
    assert 'walk_north.gif' in resource
    assert isinstance(resource['walk_north.gif'], pyganim.PygAnimation)
    assert isinstance(resource['walk_north.ini'], configparser.ConfigParser)
| mit | Python | |
89f837997b6ed84b14d01cadbe8bfeeb4e0dcf36 | add base keyword file | IfengAutomation/uitester,IfengAutomation/uitester | libs/base_keywords.py | libs/base_keywords.py | # @Time : 2016/11/18 16:32 | apache-2.0 | Python | |
52b2e617ab8fcbb268d1d75c90b3f92470737f41 | Create __init__.py | bengjerstad/windowslogonofflogger,bengjerstad/windowslogonofflogger,bengjerstad/windowslogonofflogger | logserver/__init__.py | logserver/__init__.py | import hug
# Best-effort plugin loading: each sub-application is optional, so any
# failure to import or mount it is silently ignored.
try:
    from . import runserver
    ##to run windowslogonofflogger
    ##https://github.com/bengjerstad/windowslogonofflogger
    hug.API(__name__).extend(runserver, '')
    print('Running windowslogonofflogger Server')
except:
    # NOTE(review): a bare except also hides real bugs in the plugin;
    # consider narrowing to `except Exception`.
    pass
try:
    from . import logserver
    ##to run MulitUse Log Server
    ##https://github.com/bengjerstad/multiuselogserver
    hug.API(__name__).extend(logserver, '/logserver')
    print('Running MultiUselog Server')
except:
    # Optional plugin; failures ignored on purpose.
    pass
| mit | Python | |
882de02df3131cf19eed5750428bcb79ce7f30c1 | Add DB migration for netdev bindings | unikmhz/npui,unikmhz/npui,unikmhz/npui,unikmhz/npui | netprofile_access/migrations/f2d2359b923a_link_bindings_to_access_entities.py | netprofile_access/migrations/f2d2359b923a_link_bindings_to_access_entities.py | """link bindings to access entities
Revision ID: f2d2359b923a
Revises: b32a4bf96447
Create Date: 2018-01-09 16:59:13.885801
"""
# revision identifiers, used by Alembic.
revision = 'f2d2359b923a'
down_revision = 'b32a4bf96447'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy import FetchedValue
from netprofile.db import ddl as npd
from netprofile.db import fields as npf
def upgrade():
    """Add the access-entity link column and QinQ VLAN columns (plus their
    indexes and foreign key) to netdev_bindings."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('netdev_bindings', sa.Column('aeid', npf.UInt32(), npd.Comment('Access entity ID'), server_default=sa.text('NULL'), nullable=True))
    op.add_column('netdev_bindings', sa.Column('cvlanid', npf.UInt16(), npd.Comment('Customer (inner) VLAN ID'), server_default=sa.text('NULL'), nullable=True))
    op.add_column('netdev_bindings', sa.Column('svlanid', npf.UInt16(), npd.Comment('Service provider (outer) VLAN ID'), server_default=sa.text('NULL'), nullable=True))
    op.create_index('netdev_bindings_i_aeid', 'netdev_bindings', ['aeid'], unique=False)
    op.create_index('netdev_bindings_i_qinq', 'netdev_bindings', ['svlanid', 'cvlanid'], unique=False)
    op.create_foreign_key('netdev_bindings_fk_aeid', 'netdev_bindings', 'entities_access', ['aeid'], ['entityid'], onupdate='CASCADE', ondelete='CASCADE')
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the foreign key, indexes, and columns in
    dependency order."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint('netdev_bindings_fk_aeid', 'netdev_bindings', type_='foreignkey')
    op.drop_index('netdev_bindings_i_qinq', table_name='netdev_bindings')
    op.drop_index('netdev_bindings_i_aeid', table_name='netdev_bindings')
    op.drop_column('netdev_bindings', 'svlanid')
    op.drop_column('netdev_bindings', 'cvlanid')
    op.drop_column('netdev_bindings', 'aeid')
    # ### end Alembic commands ###
| agpl-3.0 | Python | |
16489e1c7486e90f2e36b1c3c2c5625077e42345 | Create ssap.py | MartinHvidberg/Esri_stuff | ssap.py | ssap.py | apache-2.0 | Python | ||
1eecfc0f2fd63ed9885bae65a6d64fead4d44fce | add test.py | linqing-lu/crawlData | test.py | test.py | #coding=utf-8
import splinter
import time
import random
import requests
import re
from bs4 import BeautifulSoup
from splinter import Browser
from selenium import webdriver
import time
class Duobao:
    """Scratch crawler for the 163 duobao pages (Python 2, experimental)."""
    baseurl = "http://1.163.com/user/win.do?cid=43279246"
    # Class-level list of seen URLs.  NOTE(review): nothing ever appends to
    # it, so checkUrl() currently always returns False -- confirm intent.
    urls = []
    def getdata(self):
        """Fetch baseurl and crawl every absolute http link found on it."""
        url = str(self.baseurl)
        res = requests.get(url)
        html = res.content
        try:
            html = html.decode('utf-8')
        except:
            pass
        # print(html)
        soup = BeautifulSoup(html, "lxml")
        # print(soup.prettify())
        # print(soup.find_all('a'))
        links = soup.find_all('a', href=re.compile('http'))
        for link in links:
            addr = link.get('href')
            # NOTE(review): this branch is a no-op; it presumably was meant
            # to skip already-visited URLs with `continue`.
            if self.checkUrl(addr):
                pass
            print(str('获取链接:') + addr)
            self.getdatas(addr)
            # print(addr)
    def getdatas(self, url):
        """Fetch *url* and print the href of every anchor on that page."""
        res = requests.get(url)
        html = res.content
        try:
            html = html.decode('utf-8')
        except:
            pass
        # print(html)
        soup = BeautifulSoup(html, "lxml")
        # print(soup.prettify())
        # print(soup.find_all('a'))
        links = soup.find_all('a')
        for link in links:
            print(link.get('href'))
    def checkUrl(self, url):
        """Return True when *url* is already recorded in self.urls."""
        for turl in self.urls:
            if turl == url:
                return True
        return False
    def browseUrl(self, url):
        """Open *url* in a real Chrome browser via splinter and dump the HTML."""
        with Browser('chrome') as browser:
            browser.visit(url)
            sp = BeautifulSoup(browser.html, "lxml")
            print(sp.prettify())
    def crawData(self, url):
        """Render *url* with PhantomJS and print the page body text."""
        driver = webdriver.PhantomJS()
        driver.get(url);
        time.sleep(2)
        data = driver.find_element_by_tag_name('body')
        print data.text
        driver.quit()
    def __unicode__(self):
        """String form (Python 2): the crawler's base URL."""
        return self.baseurl
# dt = Duobao()
# dt.getdata() | mit | Python | |
9c2d28108d1a43a402d8984bae98917dfdd72ad4 | Add mestat. | CorralPeltzer/newTrackon | mestat.py | mestat.py | from time import time
from os import environ
from cgi import parse_qs, FieldStorage as FormPost
import google.appengine.api.labs.taskqueue as tq
from google.appengine.api.memcache import get as mget, set as mset, get_multi as mmget, delete as mdel, flush_all
import google.appengine.api.memcache as m
NS = 'MESTAT-DATA'
def stat():
    """Record a snapshot of the current memcache statistics.

    The snapshot is stored under its integer timestamp, and the timestamp is
    prepended to the 'sample-times' index list, which is capped at two days'
    worth of samples (one every 10 minutes: 2 * 6 per hour * 24 hours).
    """
    now = int(time())
    m.set(str(now), m.get_stats(), namespace=NS)
    # XXX Possible race if the task scheduler misfires, but we don't care.
    index = m.get('sample-times', namespace=NS)
    index = [now] + (index if index is not None else [])
    m.set('sample-times', index[:2 * 6 * 24], namespace=NS)
def main():
    """CGI entry point: either record a stats sample (?update) or emit an
    HTML page that charts the recorded memcache statistics via GChart."""
    args = parse_qs(environ['QUERY_STRING'])
    form = FormPost()
    # Pressing the "Flush Memcache!" button POSTs FLUSH and wipes the cache.
    if form.has_key('FLUSH'):
        flush_all()
    # ?update is the sampling hook: record one snapshot and emit no body.
    if 'update' in args:
        stat()
        return
    ats = ['items', 'bytes', 'oldest_item_age', 'hits', 'byte_hits', 'misses']
    samples = mget('sample-times', namespace=NS)
    # No samples recorded yet: take one now so there is something to plot.
    if not samples:
        stat()
        samples = mget('sample-times', namespace=NS)
    # One multi-get for all recorded snapshots (keyed by their timestamps).
    s = mmget([str(i) for i in samples], namespace=NS)
    #
    a = dict([(k, [int(s[d][k]) for d in s]) for k in ats]) # attr -> vals
    a = dict([(k, (max(a[k]), min(a[k]), a[k])) for k in a]) # attrs -> (max, min, vals)
    #a = dict([(k, [61*(v+1-a[k][1])/(a[k][0]+1-a[k][1]) for v in a[k][2]]) for k in a]) # attrs -> norml-vals
    # Scale each series into 0..61 (presumably the chart's encoded y-range —
    # confirm against GChart docs), keeping min/max alongside for axis labels.
    a = dict([(k, ([61*(v+1-a[k][1])/(a[k][0]+1-a[k][1]) for v in a[k][2]], a[k][1], a[k][0])) for k in a]) # attrs -> norml-vals
    print "Content-type: text/html"
    print ""
    #l = ["rend('"+k+"', %s);"%str([int(s[d][k]) for d in s]) for k in ats]
    #l = ["rend('"+k+"', %s);"%str([int(d) for d in a[k]]) for k in a]
    print """<html><head><script type="text/javascript" src="http://www.solutoire.com/download/gchart/gchart-0.2alpha_uncompressed.js"></script>
<script>
// Using: http://solutoire.com/gchart/
// x = """+repr(a)+"""
function rend(t, d, mx, mn) {
GChart.render({'renderTo': 'stats', 'size': '800x200', colors: 'FF0000,00FF00,0000FF,FFFF00,00FFFF,FF00FF', legend:'"""+'|'.join([k for k in a])+"""', title: t, 'data': d});
}
function main() {
"""
    # Emit one GChart.render(...) JS call for the given series; axisrange
    # labels one or two y-axes with the series' real min/max values.
    def rnd(name, data, mxmn, colors, legend):
        print "GChart.render({'size': '480x200&chg=0,20', axistype: 'x,y,r'," # colors: 'FF0000,00FF00,0000FF,FFFF00,00FFFF,FF00FF',"
        print " renderTo: '"+name+"',"
        if len(data) == 2:
            print " axisrange: '1,"+','.join([str(i) for i in mxmn[0]])+"|2,"+','.join([str(i) for i in mxmn[1]])+"',"
        elif len(data) == 1:
            print " axisrange: '1,"+','.join([str(i) for i in mxmn[0]])+"', axistype: 'x,y',"
        print " colors: '"+','.join(colors)+"',"
        print " legend:'"+'|'.join([l for l in legend])+"',"
        print " data: "+str([[int(d) for d in dd] for dd in data])
        print "});"
    #print "rend('stats', %s);"%str([[int(d) for d in a[k][0]] for k in a])
    # Two dual-axis charts (hits/bytes) and two single-series charts.
    rnd('stats', [a['hits'][0], a['byte_hits'][0]], [a['hits'][1:3], a['byte_hits'][1:3]], ['FF0088', '0077cc'], ["Hits", "Hit Bytes"])
    rnd('stats', [a['items'][0], a['bytes'][0]], [a['items'][1:3], a['bytes'][1:3]], ['FF0088', '0077cc'], ["Items", "Bytes"])
    rnd('stats', [a['misses'][0]], [a['misses'][1:3]], ['FF0088'], ["Miss"])
    rnd('stats', [a['oldest_item_age'][0]], [[x/60 for x in a['oldest_item_age'][1:3]]], ['0077cc'], ["Max Age"])
    print """
}
</script>
</head><body onload="main();">
<h1>Memcache Stats</a>
<form action="" method="POST"><input type="submit" name="FLUSH" value="Flush Memcache!"></form>
<div id="stats"></div>
</body></html>
"""
if __name__ == '__main__':
main()
| mit | Python | |
5c269bfeb517b70cfcb8fd730bf3eb983a5515dc | Create a quick script to train a COBE brain from a folder of formatted IRC logs | HubbeKing/Hubbot_Twisted | markov_batch_learn.py | markov_batch_learn.py | from __future__ import unicode_literals
import argparse
import os
from cobe.brain import Brain
if __name__ == "__main__":
    # IRC nicks of known bots; their lines are noise and must not be learned.
    bots = ["ames", "bojii", "diderobot", "ekimbot", "harbot", "hubbot", "nopebot", "memebot",
            "pyheufybot", "re_heufybot", "heufybot", "pymoronbot", "moronbot", "robobo", "safebot", "unsafebot"]

    parser = argparse.ArgumentParser(description="A script to quickly teach a new markov brain from a folder of text files.")
    parser.add_argument("-f", "--folder", help="The folder to read through.", type=str)
    parser.add_argument("-b", "--brainfile", help="The filename to use for the brain.", type=str)
    options = parser.parse_args()

    brain = Brain(options.brainfile)
    brain.start_batch_learning()
    for filename in os.listdir(options.folder):
        path = os.path.join(options.folder, filename)
        print(path)
        with open(path) as current_file:
            for line in current_file.readlines():
                templine = line.decode("utf-8")
                # Lines look like "[timestamp] <nick> message"; only learn from
                # actual chat lines (ones starting with "<nick>" after the stamp).
                if templine[templine.find("]")+1:].lstrip().startswith("<"):
                    newline = templine[templine.find("]")+1:].lstrip()
                    close = newline.find(">")
                    if close != -1:
                        # Bug fix: the old check compared "<nick>" (angle
                        # brackets included) against the bare names in `bots`,
                        # so it never matched and bot chatter was learned
                        # anyway. Compare the bare, lower-cased nick instead.
                        nick = newline[newline.find("<")+1:close].lower()
                        if nick not in bots:
                            brain.learn(newline[close+1:])
    brain.stop_batch_learning()
| mit | Python | |
5c60b11839370460209e98f18867bce338d13fba | add migration | liqd/adhocracy4,liqd/adhocracy4,liqd/adhocracy4,liqd/adhocracy4 | adhocracy4/projects/migrations/0012_help_texts.py | adhocracy4/projects/migrations/0012_help_texts.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-27 12:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations. Updates only the user-facing
    # help_text/verbose_name metadata on Project.is_archived and
    # Project.is_public; no column types or defaults change.
    dependencies = [
        ('a4projects', '0011_fix_copyright_field_desc'),
    ]
    operations = [
        migrations.AlterField(
            model_name='project',
            name='is_archived',
            field=models.BooleanField(default=False, help_text='Exclude this project from all listings by default. You can still access this project by using filters.', verbose_name='Project is archived'),
        ),
        migrations.AlterField(
            model_name='project',
            name='is_public',
            field=models.BooleanField(default=True, help_text='Please indicate whether this project should be public or restricted to invited users. Teasers for your project including title and short description will always be visible to everyone', verbose_name='Access to the project'),
        ),
    ]
| agpl-3.0 | Python | |
ddc3e45f5f84e5574090ee79875039e401864a49 | Add test for extension loading and unloading | ipython/ipython,ipython/ipython | IPython/core/tests/test_extension.py | IPython/core/tests/test_extension.py | import os.path
import nose.tools as nt
import IPython.testing.tools as tt
from IPython.utils.syspathcontext import prepended_to_syspath
from IPython.utils.tempdir import TemporaryDirectory
ext1_content = """
def load_ipython_extension(ip):
print("Running ext1 load")
def unload_ipython_extension(ip):
print("Running ext1 unload")
"""
ext2_content = """
def load_ipython_extension(ip):
print("Running ext2 load")
"""
def test_extension_loading():
    """End-to-end check of ExtensionManager load/reload/unload semantics.

    Writes the two throwaway extensions to a temp dir, puts it on sys.path,
    and asserts both the printed side effects (via AssertPrints) and the
    status strings the manager returns for repeated/invalid operations.
    """
    # NOTE(review): get_ipython() is presumably injected into the globals by
    # IPython's test machinery — confirm before running standalone.
    em = get_ipython().extension_manager
    with TemporaryDirectory() as td:
        # Materialise the extension sources where they can be imported.
        ext1 = os.path.join(td, 'ext1.py')
        with open(ext1, 'w') as f:
            f.write(ext1_content)
        ext2 = os.path.join(td, 'ext2.py')
        with open(ext2, 'w') as f:
            f.write(ext2_content)
        with prepended_to_syspath(td):
            assert 'ext1' not in em.loaded
            assert 'ext2' not in em.loaded
            # Load extension
            with tt.AssertPrints("Running ext1 load"):
                assert em.load_extension('ext1') is None
            assert 'ext1' in em.loaded
            # Should refuse to load it again
            with tt.AssertNotPrints("Running ext1 load"):
                assert em.load_extension('ext1') == 'already loaded'
            # Reload
            with tt.AssertPrints("Running ext1 unload"):
                with tt.AssertPrints("Running ext1 load", suppress=False):
                    em.reload_extension('ext1')
            # Unload
            with tt.AssertPrints("Running ext1 unload"):
                assert em.unload_extension('ext1') is None
            # Can't unload again
            with tt.AssertNotPrints("Running ext1 unload"):
                assert em.unload_extension('ext1') == 'not loaded'
            assert em.unload_extension('ext2') == 'not loaded'
            # Load extension 2
            with tt.AssertPrints("Running ext2 load"):
                assert em.load_extension('ext2') is None
            # Can't unload this
            assert em.unload_extension('ext2') == 'no unload function'
            # But can reload it
            with tt.AssertPrints("Running ext2 load"):
                em.reload_extension('ext2')
| bsd-3-clause | Python | |
a915cc851a75e42e929f7652c3c592edcfbb0892 | Add some tests for Course.search | rageandqq/rmc,duaayousif/rmc,JGulbronson/rmc,ccqi/rmc,shakilkanji/rmc,duaayousif/rmc,rageandqq/rmc,MichalKononenko/rmc,JGulbronson/rmc,UWFlow/rmc,rageandqq/rmc,ccqi/rmc,JGulbronson/rmc,shakilkanji/rmc,rageandqq/rmc,JGulbronson/rmc,duaayousif/rmc,UWFlow/rmc,UWFlow/rmc,sachdevs/rmc,shakilkanji/rmc,MichalKononenko/rmc,ccqi/rmc,MichalKononenko/rmc,ccqi/rmc,MichalKononenko/rmc,shakilkanji/rmc,shakilkanji/rmc,sachdevs/rmc,UWFlow/rmc,duaayousif/rmc,sachdevs/rmc,UWFlow/rmc,MichalKononenko/rmc,rageandqq/rmc,sachdevs/rmc,ccqi/rmc,duaayousif/rmc,JGulbronson/rmc,sachdevs/rmc | models/course_test.py | models/course_test.py | import rmc.models as m
import rmc.test.lib as testlib
class CourseTest(testlib.FixturesTestCase):
    """Tests for m.Course.search() against the loaded course fixtures."""
    def assertResultsEquals(self, results, expected):
        # Compare only the course ids, preserving the returned order.
        self.assertEquals([course['id'] for course in results], expected)

    def test_search(self):
        """Exercise each search parameter and pin the expected orderings.

        The expected id lists are exact-order assertions tied to the fixture
        data loaded by FixturesTestCase.
        """
        # Test empty search
        results, has_more = m.Course.search({})
        self.assertResultsEquals(results, ['econ101', 'math135', 'psych101',
            'econ102', 'math137', 'afm131', 'chem120', 'math138', 'soc101',
            'econ201'])

        # Test keywords param
        results, has_more = m.Course.search({'keywords': 'cs'})
        self.assertResultsEquals(results, ['cs241', 'cs245', 'cs135', 'cs350',
            'cs246', 'cs240', 'cs341', 'cs136', 'cs115', 'cs348'])

        # Test count param
        results, has_more = m.Course.search({'count': 5})
        self.assertEquals(len(results), 5)
        self.assertTrue(has_more)

        # Test offset param
        results, has_more = m.Course.search({'offset': 5})
        self.assertResultsEquals(results, ['afm131', 'chem120', 'math138',
            'soc101', 'econ201', 'stat230', 'afm101', 'math136', 'math115',
            'stat231'])

        # Test sort modes
        results, has_more = m.Course.search({'sort_mode': 'interesting'})
        self.assertResultsEquals(results, ['music140', 'math145', 'math147',
            'mte320', 'cs488', 'cs241', 'kin100', 'psych253', 'cs137',
            'fr192a'])

        results, has_more = m.Course.search({'sort_mode': 'easy'})
        self.assertResultsEquals(results, ['clas104', 'intst101', 'rec100',
            'psych211', 'mthel100', 'clas100', 'music140', 'ital101',
            'sci206', 'clas202'])

        results, has_more = m.Course.search({'sort_mode': 'hard'})
        self.assertResultsEquals(results, ['phys234', 'ece370', 'ece126',
            'biol441', 'ece105', 'syde283', 'ece242', 'cs457', 'phys263',
            'se380'])

        results, has_more = m.Course.search({'sort_mode': 'course code'})
        self.assertResultsEquals(results, ['acc604', 'acc605', 'acc606',
            'acc607', 'acc608', 'acc609', 'acc610', 'acc611', 'acc620',
            'acc621'])

        # Test direction param
        results, has_more = m.Course.search({
            'sort_mode': 'course code',
            'direction': -1
        })
        self.assertResultsEquals(results, ['ws499b', 'ws499a', 'ws475',
            'ws430', 'ws422', 'ws409', 'ws370', 'ws365', 'ws350', 'ws347'])

        # TODO(david): Add tests for searching when there's a current_user
| mit | Python | |
b16e18a636f8484ea9478522c8ecba58b79adf6e | add ability to delete tasks selectively, useful for testing penta import | jrial/fosdem-volunteers,jrial/fosdem-volunteers,FOSDEM/volunteers,FOSDEM/volunteers,FOSDEM/volunteers,jrial/fosdem-volunteers,jrial/fosdem-volunteers,FOSDEM/volunteers | volunteers/management/commands/delete_tasks.py | volunteers/management/commands/delete_tasks.py | from django.core.management.base import BaseCommand
from volunteers.models import Task, TaskTemplate
class Command(BaseCommand):
    """Delete tasks of a given template type, or every task when 'all' is given."""

    def handle(self, *args, **options):
        if not args:
            choices = ', '.join(tt.name for tt in TaskTemplate.objects.all())
            message = (
                "Please specify the type of task you would like to delete as the first argument, e.g. ./manage.py delete_tasks Heralding.\n"
                "Specify 'all' to delete all tasks.\n"
                "The types of task in the system are {}".format(choices)
            )
            raise Exception(message)
        task_type = args[0]
        if task_type.lower() == 'all':
            # Wipe every task regardless of template.
            Task.objects.all().delete()
        else:
            Task.objects.filter(template__name=task_type).delete()
| agpl-3.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.