commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13 values | lang stringclasses 23 values |
|---|---|---|---|---|---|---|---|---|
ebe0b558d80ca7b7e5e7be50cc7c053020dca9fe | create list app skeleton | kubodhi/flist,kubodhi/flist | app.py | app.py | #!/usr/bin/env python
from flask import Flask
app = Flask(__name__)
# define a list item class
# NOTE(review): `Model` is never imported or defined in this file and the
# class has no body, so this cannot run as written. Presumably an ORM model
# (e.g. peewee/SQLAlchemy) was intended for this skeleton -- TODO confirm
# and complete.
class ListItem(Model):
@app.route('/add', methods=['POST'])
def add_item():
    ''' add items to the list '''
    # Placeholder response until persistence is implemented.
    return "stub"

@app.route('/view')
def view_items():
    ''' view items in the list '''
    # Placeholder response until persistence is implemented.
    return "stub"

@app.route('/delete/<this_id>')
def delete_item(this_id):
    ''' delete items from the list '''
    # this_id: URL path segment identifying the item to remove.
    return "stub"
@app.route('/strike/<this_id>')
def strike(this_id):
    ''' move items to and from deletion staging area '''
    # Without an explicit return this view returned None, which makes
    # Flask raise "View function did not return a valid response".
    # Return the same placeholder the sibling routes use.
    return "stub"
@app.route('/')
def home():
    ''' application root '''
    # Placeholder response until the list UI is implemented.
    return "stub"
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=3000) | mit | Python | |
a23eb3f9a921676a3b91ff48b073f9cf4d15cfaa | Create bot.py | hanuchu/fuiibot | bot.py | bot.py | from twython import Twython, TwythonError
from PIL import Image
import os, random, statistics, time
APP_KEY = ''
APP_SECRET = ''
OAUTH_TOKEN = ''
OAUTH_TOKEN_SECRET = ''
brightness_threshold = 35
seconds_between_tweets = 600
def tweet():
    """Pick a random image that passes the brightness check and tweet it.

    Repeatedly draws a filename from img/ (skipping dotfiles such as
    .DS_Store) until image_acceptable() approves one, then uploads that
    image and posts a status with fixed hashtags.

    NOTE(review): Python 2 print syntax; credentials come from the empty
    module-level APP_KEY/OAUTH_* constants -- they must be filled in.
    """
    twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
    acceptable = False
    while not acceptable:
        # Seed with '.' so the inner dotfile-skipping loop runs at least once.
        fn = '.'
        while fn.startswith('.'):
            fn = random.choice(os.listdir('img/'))
        acceptable = image_acceptable('img/' + fn)
    # fn is the last (accepted) filename from the loop above.
    photo = open('img/' + fn, 'rb')
    response = twitter.upload_media(media=photo)
    try:
        twitter.update_status(status='#FutureDiaryBot #MiraiNikki #FutureDiary' + ' ', media_ids=[response['media_id']])
        print 'tweeted!'
    except TwythonError as error:
        # Best-effort: log the API error and carry on.
        print error
def main():
    """Tweet forever, sleeping seconds_between_tweets between posts."""
    while True:
        tweet()
        time.sleep(seconds_between_tweets)
def image_acceptable(path):
    """Return True when the image at *path* has enough brightness variation.

    Sums the channel values of every pixel and accepts the image only if
    the standard deviation of those per-pixel sums exceeds the module
    level brightness_threshold.
    """
    picture = Image.open(path)
    pixels = picture.load()
    width, height = picture.size
    # Per-pixel brightness: sum of the channel tuple for each pixel,
    # column-major to match the original traversal order.
    per_pixel_sums = [sum(pixels[col, row])
                      for col in range(width)
                      for row in range(height)]
    return statistics.stdev(per_pixel_sums) > brightness_threshold
main()
| apache-2.0 | Python | |
3bb4f078f2a03b334c2b44378be2b01e54fb7b37 | Add command load beginner categories | MTG/freesound-datasets,MTG/freesound-datasets,MTG/freesound-datasets,MTG/freesound-datasets | datasets/management/commands/load_beginner_categories.py | datasets/management/commands/load_beginner_categories.py | from django.core.management.base import BaseCommand
import json
from datasets.models import Dataset, TaxonomyNode
class Command(BaseCommand):
    """Flag taxonomy nodes of a dataset as beginner-task categories.

    Reads a JSON taxonomy file whose entries carry an 'id' and a
    'beginner_category' flag, and stores that flag on the corresponding
    TaxonomyNode as `beginner_task`.
    """

    # User-facing usage text (fixed 'TAOXNOMY' typo; manage.py commands are
    # invoked without the .py suffix).
    help = ('Load field easy categories from json taxonomy file. '
            'Use it as python manage.py load_beginner_categories '
            'DATASET_ID PATH/TO/TAXONOMY_FILE.json')

    def add_arguments(self, parser):
        # Positional arguments: target dataset and the taxonomy JSON file.
        parser.add_argument('dataset_id', type=int)
        parser.add_argument('taxonomy_file', type=str)

    def handle(self, *args, **options):
        file_location = options['taxonomy_file']
        dataset_id = options['dataset_id']
        ds = Dataset.objects.get(id=dataset_id)
        taxonomy = ds.taxonomy
        # Use a context manager so the file handle is not leaked.
        with open(file_location) as fp:
            data = json.load(fp)
        for d in data:
            node = taxonomy.get_element_at_id(d['id'])
            # Normalise the JSON flag to a real boolean.
            node.beginner_task = bool(d['beginner_category'])
            node.save()
| agpl-3.0 | Python | |
a06995a686c0509f50a481e7d7d41bb35ffe8f19 | add simple improved Sieve Of Eratosthenes Algorithm (#1412) | TheAlgorithms/Python | maths/prime_sieve_eratosthenes.py | maths/prime_sieve_eratosthenes.py | '''
Sieve of Eratosthenes
Input : n =10
Output : 2 3 5 7
Input : n = 20
Output: 2 3 5 7 11 13 17 19
you can read in detail about this at
https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes
'''
def prime_sieve_eratosthenes(num):
    """
    print the prime numbers upto n

    >>> prime_sieve_eratosthenes(10)
    2 3 5 7 
    >>> prime_sieve_eratosthenes(20)
    2 3 5 7 11 13 17 19 
    """
    # is_prime[i] stays True until i is found to be a multiple of a
    # smaller prime.  [True] * n is the idiomatic (and faster) way to
    # build a constant list; no throwaway loop variable needed.
    is_prime = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if is_prime[p]:  # truthiness test instead of '== True'
            # Mark multiples starting at p*p: smaller multiples were
            # already marked by smaller primes.
            for multiple in range(p * p, num + 1, p):
                is_prime[multiple] = False
        p += 1
    for candidate in range(2, num + 1):
        if is_prime[candidate]:
            print(candidate, end=" ")
if __name__ == "__main__":
    # Read the upper bound from stdin and print every prime <= that bound.
    num = int(input())
    prime_sieve_eratosthenes(num)
| mit | Python | |
f50c1b067375ea8835c814b55484c416fbba6bf5 | Add test_utils.py; for running containers & http server for test fixture data | scottx611x/refinery-higlass-docker,scottx611x/refinery-higlass-docker,scottx611x/refinery-higlass-docker | test_utils.py | test_utils.py | import _thread
import docker
import logging
import os
import socket
from http.server import HTTPServer, SimpleHTTPRequestHandler
class TestFixtureServer(object):
    """Serves the current working directory over HTTP so containers under
    test can fetch fixture files (e.g. test-data/input.json) from the host."""

    def __init__(self):
        # Fixed fixture port; the IP is the host's primary outbound address.
        self.port = 9999
        self.ip = self.get_python_server_ip()

    def get_python_server_ip(self):
        """Return the host's primary outbound IP address.

        Connecting a UDP socket towards a public address makes the OS pick
        the correct local interface; no traffic is actually sent.
        https://stackoverflow.com/a/166589
        """
        probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            probe.connect(("8.8.8.8", 80))
            return probe.getsockname()[0]
        finally:
            probe.close()

    def _start_server(self):
        # Blocking: serves until the process exits.
        HTTPServer((self.ip, self.port), SimpleHTTPRequestHandler).serve_forever()

    def start_server_in_background(self):
        """Launch the HTTP server on a daemon-style background thread."""
        print("Starting Test Fixture Server on: http://{}:{}".format(self.ip, self.port))
        _thread.start_new_thread(self._start_server, ())
class TestContainerRunner(object):
    """Builds the project's Docker image and runs it as a container wired
    to a local TestFixtureServer; usable as a context manager in tests.

    Requires CONTAINER_NAME, STAMP and REPO in the environment.
    """

    def __init__(self):
        self.client = docker.from_env()
        # Names/tags derive from environment variables set by the build.
        self.container_name = os.environ["CONTAINER_NAME"]
        self.image_name = "image-" + os.environ["STAMP"]
        self.repository = os.environ["REPO"]
        # Every started container is tracked so __exit__ can clean up.
        self.containers = []
        self.test_fixture_server = TestFixtureServer()
        self.test_fixture_server.start_server_in_background()
        # Host path mounted into the container at /refinery-data.
        self.outer_volume_path = "/tmp/" + self.container_name
        self.inner_volume_path = "/refinery-data"
        self._pull_image()
        self._build_image()

    def __enter__(self):
        self.run()

    def __exit__(self, *args):
        # On CI the containers are intentionally left for later inspection.
        if not os.environ.get("CONTINUOUS_INTEGRATION"):
            self.cleanup_containers()

    def _pull_image(self):
        # Pull the remote image first so the build can use it as cache.
        print("Pulling image: {}".format(self.image_name))
        self.client.images.pull(self.repository)

    def _build_image(self):
        """Build the image from the local Dockerfile, caching from REPO."""
        print("Building image: {}".format(self.image_name))
        self.client.images.build(
            path=".",
            tag=self.image_name,
            rm=True,
            forcerm=True,
            cache_from=[self.repository]
        )

    def run(self):
        """Start the container, pointing it at the fixture server's input.json."""
        print("Creating container: {}".format(self.container_name))
        container = self.client.containers.run(
            self.image_name,
            detach=True,
            name=self.container_name,
            environment={
                "INPUT_JSON_URL":
                    "http://{}:{}/test-data/input.json".format(
                        self.test_fixture_server.ip,
                        self.test_fixture_server.port
                    )
            },
            ports={"80/tcp": None},
            publish_all_ports=True,
            # Let the container resolve the host's name to the fixture IP.
            extra_hosts={socket.gethostname(): self.test_fixture_server.ip},
            volumes={
                self.outer_volume_path: {
                    'bind': self.inner_volume_path, 'mode': 'rw'
                }
            }
        )
        self.containers.append(container)

    def cleanup_containers(self):
        """Force-remove every container started by run() (and its volumes)."""
        print("Cleaning up TestContainerRunner containers...")
        print(self.containers)
        for container in self.containers:
            container.remove(force=True, v=True)
| mit | Python | |
40c0d855ba50f39433d6e06f31f4a993a1c98160 | add python | jpgroup/democode,jpgroup/democode,yufree/democode,yufree/democode,jpgroup/democode,jpgroup/democode,yufree/democode,yufree/democode,jpgroup/democode,yufree/democode,yufree/democode,jpgroup/democode | pythontips.py | pythontips.py | # basic
# This is a comment!
# Storing a string to the variable s
s = "Hello world, world"
# printing the type. It shows 'str' which means it is a string type
print type(s)
# printing the number of characters in the string.
print len(s)
# Python gave me an error below! I guess we can't change individual parts of a string this way.
s[0] = 'h'
# from here continue writing a comment for each line explaining what the following line does.
s2 = s.replace("world", "python")
s3 = s2.replace("Hello","monty")
print s
print s2
print s3
print s[6:11]
print s[6:]
print s[-2:]
s4 = s + ' ' + s3
print s4
print s4.find('world')
print 'A string with value {0} and {1}'.format(10,20.3)
help(str)
# list
values = ['1',2,3.0,False]
print len(values)
print type(values)
print values
print values[1]
print values[:3]
print values[2:]
l = []
l.append(8)
l.append(10)
l.append(10)
l.append(12)
print l
l.remove(10)
print l
l.remove(l[0])
print l
l = range(0,10,2)
print l
l = range(-5,5)
print l
line = "This is a \t list - \t of strings"
print len(line.split('\t'))
print line.split('\t')
print ['add another field' for i in range(10)]
l = range(-5,5)
print l
print l.sort(reverse=True)
print l
# Tuples
t = (10,40.0,"A")
print type(t), len(t)
t[1] = 'B'
# dictinaries
data = {}
print data
print len(data), type(data)
data['k4'] = 100
data = {'number': 10, 1:'string'}
data['c'] = [1,2,3,4]
print data
print data[1]
print data['c'][3]
print data['number']
# class
class Car():
    """Minimal tutorial class: a car whose engine can be started/stopped.

    NOTE: Python 2 print syntax, matching the rest of this tutorial file.
    """

    def __init__(self, model='Ford'):
        # model: human-readable make; running: current engine state.
        self.model = model
        self.running = False

    def start(self):
        """Start the engine unless it is already running."""
        if self.running != True:
            print 'The car started!'
            self.running = True
        else:
            print 'The car is already running!'

    def stop(self):
        """Stop the engine unless it is already stopped."""
        if self.running == True:
            print 'The car stopped!'
            self.running = False
        else:
            print 'The car was not running!'
ford = Car()
nissan = Car(model = 'Nissan')
ford.running
ford.start()
ford.running
nissan.running
nissan.stop()
# import data from web
# mkdir nytimes_data
# cd nytimes_data
# curl -s 'http://api.nytimes.com/svc/search/v2/articlesearch.json?q=malaysia&facet_field=source&begin_date=20120101&end_date=20121231&facet_filter=true&api-key=24120b778a5da6ae998b8171dfde375e:17:70597616' > malaysia_articles.json
#
# cat malaysia_articles.json | python -mjson.tool >> malaysia_pretty_printed.json
#
# cat malaysia_pretty_printed.json | grep 'pub_date'
#
# cat malaysia_pretty_printed.json | grep 'pub_date' | sort | uniq -c
#! /usr/bin/env python
import json
nytimes = json.load(open('malaysia_pretty_printed.json', 'r'))
print ','.join(['contributor', 'pub_date', 'section', 'subsection', 'word_count', 'url'])
for article in nytimes['response']['docs']:
print '"' + '","'.join(map(str,[article['byline'],
article['pub_date'],
article['section_name'],
article['subsection_name'],
article['word_count'],
article['web_url']])) + '"'
# chmod +x ./nytimes_parser.py
# ./nytimes_parser.py > nytimes.csv
# less nytimes.csv
import json
import urllib2
class NYTimesScraper():
    """Thin Python-2 wrapper around the NYTimes Article Search API (v2)."""

    def __init__(self, apikey):
        # Creates a new NYTimesScraper Object using the apikey that was included.
        self.key = apikey
        self.url = 'http://api.nytimes.com/svc/search/v2/articlesearch.json?'

    def _build_params(self, params):
        """Join a dict of query parameters as 'k=v&k=v'.

        NOTE(review): values are not URL-escaped; presumably callers only
        pass URL-safe strings -- confirm before using arbitrary input.
        """
        if not params:
            raise Exception('no search parameters!')
        else:
            return '&'.join([k + '=' + v for k,v in params.iteritems()])

    def search(self, params={}):
        """Perform a search request and return the decoded JSON response.

        NOTE(review): mutable default argument -- harmless here because the
        dict is never mutated, but a None default would be safer.
        """
        url = self.url + self._build_params(params)
        url = url + '&api-key=%s' % self.key
        req = urllib2.Request(url)
        data = urllib2.urlopen(req).read()
        return json.loads(data)
nytimes = NYTimesScraper(apikey='24120b778a5da6ae998b8171dfde375e:17:70597616')
articles = nytimes.search({'q':'malaysia', 'begin_date': '20140101'})
filename = 'nytimesdata.csv'
data = open(filename, 'w')
for article in articles['response']['docs']:
data.write('"' + '","'.join(map(str,[article['byline']['person'][0]['lastname'],
article['pub_date'],
article['section_name'],
article['subsection_name'],
article['word_count'],
article['web_url']])) + '"\n')
data.close()
| mit | Python | |
f44e4fe8fe7f66258241c77680b2bbf58a6f7d0a | Add basic tests for dics_epochs | Eric89GXL/mne-python,drammock/mne-python,lorenzo-desantis/mne-python,kingjr/mne-python,jniediek/mne-python,jaeilepp/mne-python,nicproulx/mne-python,teonlamont/mne-python,wmvanvliet/mne-python,teonlamont/mne-python,dgwakeman/mne-python,Odingod/mne-python,lorenzo-desantis/mne-python,cjayb/mne-python,alexandrebarachant/mne-python,matthew-tucker/mne-python,jmontoyam/mne-python,bloyl/mne-python,Teekuningas/mne-python,leggitta/mne-python,ARudiuk/mne-python,larsoner/mne-python,wronk/mne-python,jmontoyam/mne-python,andyh616/mne-python,cmoutard/mne-python,mne-tools/mne-python,wronk/mne-python,effigies/mne-python,yousrabk/mne-python,ARudiuk/mne-python,adykstra/mne-python,mne-tools/mne-python,olafhauk/mne-python,wmvanvliet/mne-python,dgwakeman/mne-python,pravsripad/mne-python,jaeilepp/mne-python,kingjr/mne-python,kambysese/mne-python,kingjr/mne-python,dimkal/mne-python,rkmaddox/mne-python,rkmaddox/mne-python,aestrivex/mne-python,Teekuningas/mne-python,matthew-tucker/mne-python,leggitta/mne-python,adykstra/mne-python,pravsripad/mne-python,Odingod/mne-python,drammock/mne-python,bloyl/mne-python,antiface/mne-python,olafhauk/mne-python,effigies/mne-python,larsoner/mne-python,dimkal/mne-python,drammock/mne-python,nicproulx/mne-python,Eric89GXL/mne-python,antiface/mne-python,jniediek/mne-python,cmoutard/mne-python,aestrivex/mne-python,olafhauk/mne-python,kambysese/mne-python,mne-tools/mne-python,pravsripad/mne-python,alexandrebarachant/mne-python,trachelr/mne-python,larsoner/mne-python,wmvanvliet/mne-python,andyh616/mne-python,trachelr/mne-python,agramfort/mne-python,agramfort/mne-python,yousrabk/mne-python,Teekuningas/mne-python,cjayb/mne-python | mne/beamformer/tests/test_dics.py | mne/beamformer/tests/test_dics.py | import os.path as op
from nose.tools import assert_true, assert_raises
import numpy as np
import mne
from mne.datasets import sample
from mne.beamformer import dics_epochs
from mne.time_frequency import compute_csd
data_path = sample.data_path()
fname_data = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis-meg-oct-6-fwd.fif')
fname_fwd_vol = op.join(data_path, 'MEG', 'sample',
'sample_audvis-meg-vol-7-fwd.fif')
fname_event = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-eve.fif')
label = 'Aud-lh'
fname_label = op.join(data_path, 'MEG', 'sample', 'labels', '%s.label' % label)
label = mne.read_label(fname_label)
# preloading raw here increases mem requirements by 400 mb for all nosetests
# that include this file's parent directory :(
raw = mne.fiff.Raw(fname_raw, preload=False)
forward = mne.read_forward_solution(fname_fwd)
forward_surf_ori = mne.read_forward_solution(fname_fwd, surf_ori=True)
forward_fixed = mne.read_forward_solution(fname_fwd, force_fixed=True,
surf_ori=True)
forward_vol = mne.read_forward_solution(fname_fwd_vol, surf_ori=True)
events = mne.read_events(fname_event)
def test_dics_epochs():
    """Test DICS with single trials
    """
    event_id, tmin, tmax = 1, -0.11, 0.15

    # Setup for reading the raw data
    raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bads channels

    # Set up pick list: MEG - bad channels
    left_temporal_channels = mne.read_selection('Left-temporal')
    picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False,
                                stim=True, eog=True, exclude='bads',
                                selection=left_temporal_channels)

    # Read epochs
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                        picks=picks, baseline=(None, 0), preload=True,
                        reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
    # Downsample to speed up the cross-spectral density computations below.
    epochs.resample(200, npad=0, n_jobs=2)
    #evoked = epochs.average()

    # Computing the data and noise cross-spectral density matrices
    data_csd = compute_csd(epochs, mode='multitaper', tmin=0.04, tmax=None,
                           fmin=8, fmax=12)
    noise_csd = compute_csd(epochs, mode='multitaper', tmin=None, tmax=0.0,
                            fmin=8, fmax=12)

    # TODO: This should be done on evoked
    stcs = dics_epochs(epochs, forward, noise_csd=None, data_csd=data_csd,
                       return_generator=True)
    stc = stcs.next()
    # Find the vertex with the most total power and its time of peak.
    stc_pow = np.sum(stc.data, axis=1)
    idx = np.argmax(stc_pow)
    max_stc = stc.data[idx]
    tmax = stc.times[np.argmax(max_stc)]

    # TODO: These should be made reasonable once normalization is implemented
    assert_true(-1 < tmax < 1)
    assert_true(0. < np.max(max_stc) < 20.)

    # Test picking normal orientation
    # TODO: This should be done on evoked
    stcs = dics_epochs(epochs, forward_surf_ori, noise_csd, data_csd,
                       pick_ori="normal", return_generator=True)
    stc_normal = stcs.next()

    # The amplitude of normal orientation results should always be smaller than
    # free orientation results
    assert_true((np.abs(stc_normal.data) <= stc.data).all())

    # TODO: dics_epochs would best be tested by comparing to dics done on
    # evoked data

    # Test if fixed forward operator is detected when picking normal
    # orientation
    assert_raises(ValueError, dics_epochs, epochs, forward_fixed, noise_csd,
                  data_csd, pick_ori="normal")

    # Test if non-surface oriented forward operator is detected when picking
    # normal orientation
    assert_raises(ValueError, dics_epochs, epochs, forward, noise_csd,
                  data_csd, pick_ori="normal")

    # Test if volume forward operator is detected when picking normal
    # orientation
    assert_raises(ValueError, dics_epochs, epochs, forward_vol, noise_csd,
                  data_csd, pick_ori="normal")
| bsd-3-clause | Python | |
ca5d276a512fccfe9ed0c7a89a48a13b61d67a55 | Add Display.py | PaulieC/sprint1_Council,PaulieC/rls-c,PaulieC/sprint5-Council,PaulieC/sprint2-Council,PaulieC/sprint1_Council_a,PaulieC/sprint1_Council_b,PaulieC/sprint3-Council,geebzter/game-framework | Display.py | Display.py | __author__ = 'Tara Crittenden'
# Displays the state of the game in a simple text format.
import Observer
import Message
class Display(Observer.Observer):
    """Text-mode observer: renders game events pushed in via notify()."""

    def notify(self, msg):
        """Dispatch an incoming message to the matching display routine,
        keyed on its integer msgtype (1..6); unknown types are reported."""
        handlers = {
            1: display_start_tournament,  # start of a tournament
            2: display_end_tournament,    # end of a tournament
            3: display_start_match,       # start of a match
            4: display_end_match,         # end of a match
            5: display_start_round,       # start of a round
            6: display_end_round,         # end of a round
        }
        handler = handlers.get(msg.msgtype)
        if handler is None:
            print('Unknown message type')
        else:
            handler(msg)
#Provides easy readiablity
def indent_cushion():
    """Print a column of eight '+' characters used as a visual separator."""
    print('+\n' * 8, end='')
#Helper method for deconstructing the info portion of a end round message
#Returns the char representation of the move
def get_move(mademove):
    """
    :param mademove: move that was made in int form
    :return: move that was made in char form (None for unknown codes)
    """
    names = {0: 'Rock', 1: 'Paper', 2: 'Scissors'}
    return names.get(mademove)
#Display the start of a tournament
def display_start_tournament(msg):
    """Print the tournament-start banner, the players and the game.

    :param msg: message to be displayed
    """
    indent_cushion()
    print(' Tournament Start! ')
    indent_cushion()
    print('\nPlayers: ' + Message.get_players(msg))
    # assuming for the time being that info will hold the specified game
    print('\nGame: ' + Message.get_info(msg))
#Display the end of a tournament
def display_end_tournament(msg):
    """Print the tournament-end banner and the winner.

    :param msg: message to be displayed
    """
    indent_cushion()
    print(' Tournament End! ')
    indent_cushion()
    # assuming for the time being that info holds the tournament winner
    print('\nWinner: ' + Message.get_info(msg))
    for _ in range(2):
        indent_cushion()
    print('\n')
    for _ in range(2):
        indent_cushion()
#Display the start of a match
def display_start_match(msg):
    """Print the match-start banner and the participating players.

    :param msg: message to be displayed
    """
    indent_cushion()
    print(' Match Start! ')
    indent_cushion()
    print('\nPlayers: ' + Message.get_players(msg))
#Display the end of a match
def display_end_match(msg):
    """Print the match-end banner and the winner with their win count.

    :param msg: message to be displayed
    """
    indent_cushion()
    print(' Match End! ')
    indent_cushion()
    m = Message.get_info(msg)
    # m[1]/m[2] hold each player's win count; they are compared numerically,
    # so the summary is built with format() -- the original concatenated
    # them into a string with '+', which raises TypeError for ints.
    if m[1] > m[2]:
        # player 1 won
        winner = 'Player 1 '
        winnings = m[1]
    else:
        # player 2 won
        winner = 'Player 2 '
        winnings = m[2]
    print('Winner: {}( {} out of {})'.format(winner, winnings, m[1] + m[2]))
#Display the start of a round
def display_start_round(msg):
    """
    :param msg: message to be displayed

    Intentionally a no-op: rounds currently have no start banner.
    """
    pass
#Display the end of a round
def display_end_round(msg):
    """Print the round result: winner and the moves each player made.

    :param msg: message to be displayed
    """
    print('\nRound Results: ')
    m = Message.get_info(msg)
    # m[2] encodes the winner: 0 = tie, 1 = player 1, 2 = player 2.
    if m[2] == 0:
        r = 'Tied'
    elif m[2] == 1:
        # player 1 won
        r = 'Player 1 '
    elif m[2] == 2:
        # player 2 won (the original compared against 1 twice, so this
        # branch was unreachable and r could stay unbound)
        r = 'Player 2 '
    else:
        # defensive fallback so the print below never hits an unbound name
        r = 'Unknown'
    print('Winner: ' + r)
    # find the moves that were played during this round
    moves = m[1]
    # NOTE(review): indices 1 and 2 skip moves[0]; presumably the payload
    # is 1-based per player number -- confirm against Message's producer.
    a = get_move(moves[1])
    b = get_move(moves[2])
    print(' Moves made: Player 1: ' + a + ' Player 2: ' + b)
| apache-2.0 | Python | |
e6e92fb3afff0403091c221328f9023e0e391b0b | Add eventlisten script to watch events on the master and minion | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | tests/eventlisten.py | tests/eventlisten.py | '''
Use this script to dump the event data out to the terminal. It needs to know
what the sock_dir is.
This script is a generic tool to test event output
'''
# Import Python libs
import optparse
import pprint
import os
import time
import tempfile
# Import Salt libs
import salt.utils.event
def parse():
    '''
    Parse the script command line inputs
    '''
    parser = optparse.OptionParser()
    parser.add_option('-s',
                      '--sock-dir',
                      dest='sock_dir',
                      default=os.path.join(tempfile.gettempdir(), '.salt-unix'),
                      help=('Staticly define the directory holding the salt unix '
                            'sockets for communication'))
    parser.add_option('-n',
                      '--node',
                      dest='node',
                      default='master',
                      help=('State if this listener will attach to a master or a '
                            'minion daemon, pass "master" or "minion"'))
    options, _ = parser.parse_args()
    # Keep only options that were actually given a value.
    return {name: value
            for name, value in vars(options).items()
            if value is not None}
def listen(sock_dir, node):
    '''
    Attach to the pub socket and grab messages
    '''
    event = salt.utils.event.SaltEvent(sock_dir, node)
    # Poll forever, pretty-printing every event that arrives.
    while True:
        ret = event.get_event(full=True)
        if ret is None:
            continue
        print('Event fired at {0}'.format(time.asctime()))
        print('*' * 25)
        print('Tag: {0}'.format(ret['tag']))
        print('Data:')
        pprint.pprint(ret['data'])
if __name__ == '__main__':
    # Parse CLI options, then block forever printing events as they arrive.
    opts = parse()
    listen(opts['sock_dir'], opts['node'])
| apache-2.0 | Python | |
703d97150de1c74b7c1a62b59c1ff7081dec8256 | Add an example of resolving a known service by service name | jstasiak/python-zeroconf | examples/resolver.py | examples/resolver.py | #!/usr/bin/env python3
""" Example of resolving a service with a known name """
import logging
import sys
from zeroconf import Zeroconf
TYPE = '_test._tcp.local.'
NAME = 'My Service Name'
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    # An optional single '--debug' argument turns on zeroconf's own logging.
    if len(sys.argv) > 1:
        assert sys.argv[1:] == ['--debug']
        logging.getLogger('zeroconf').setLevel(logging.DEBUG)

    zeroconf = Zeroconf()
    try:
        # Resolve the known service by its full name and print the info.
        print(zeroconf.get_service_info(TYPE, NAME + '.' + TYPE))
    finally:
        # Always release the zeroconf sockets and worker threads.
        zeroconf.close()
| lgpl-2.1 | Python | |
039a07bde5975cb6ce40edc43bbd3d931ac5cc92 | Test borda ranking and spearman. | charanpald/APGL | exp/influence2/test/RankAggregatorTest.py | exp/influence2/test/RankAggregatorTest.py | import numpy
import unittest
import logging
from exp.influence2.RankAggregator import RankAggregator
import scipy.stats.mstats
import numpy.testing as nptst
class RankAggregatorTest(unittest.TestCase):
    """Unit tests for RankAggregator's Spearman footrule and Borda count."""

    def setUp(self):
        # Fixed seed keeps any randomised behaviour reproducible.
        numpy.random.seed(22)

    def testSpearmanFootrule(self):
        # Identical rankings -> distance 0.
        list1 = [5, 4, 3, 2, 1, 0]
        list2 = [5, 4, 3, 2, 1, 0]

        dist = RankAggregator.spearmanFootrule(list1, list2)
        self.assertEquals(dist, 0)

        # Swapping the last two items gives a small normalised distance.
        list2 = [5, 4, 3, 2, 0, 1]
        dist = RankAggregator.spearmanFootrule(list1, list2)
        self.assertEquals(dist, 1.0/9)

        # A fully reversed ranking is maximally distant.
        list2 = [0, 1, 2, 3, 4, 5]
        dist = RankAggregator.spearmanFootrule(list1, list2)
        self.assertEquals(dist, 1.0)

    def testBorda(self):
        list1 = [5, 4, 3, 2, 1, 0]
        list2 = [5, 4, 3, 2, 1, 0]

        # Agreeing lists aggregate to the same ordering.
        outList = RankAggregator.borda(list1, list2)
        nptst.assert_array_equal(outList, numpy.array([5,4,3,2,1,0]))

        list2 = [4, 3, 2, 5, 1, 0]
        outList = RankAggregator.borda(list1, list2)
        nptst.assert_array_equal(outList, numpy.array([4,5,3,2,1,0]))
unittest.main()
| bsd-3-clause | Python | |
afec3bf59fd454d61d5bc0024516610acfcb5704 | Add 1D data viewer. | stefanv/lulu | examples/viewer1D.py | examples/viewer1D.py | from __future__ import print_function
import numpy as np
from viewer import Viewer
from enthought.traits.api import Array
from enthought.chaco.api import Plot, ArrayPlotData, HPlotContainer, gray
import lulu
class Viewer1D(Viewer):
    """Chaco-based viewer showing a 1-D signal next to its LULU
    reconstruction, refreshing the latter as decomposition proceeds."""

    # Input signal and its reconstruction; only row 0 of each is plotted.
    image = Array
    result = Array

    def _reconstruction_default(self):
        """Build the default side-by-side (original vs reconstruction) plot."""
        rows, cols = self.image.shape[:2]
        self.plot_data = ArrayPlotData(original=self.image[0],
                                       reconstruction=self.result[0])

        # NOTE(review): aspect is computed but never used -- confirm intent.
        aspect = cols/float(rows)
        old = Plot(self.plot_data)
        old.plot('original', )
        old.title = 'Old'

        self.new = Plot(self.plot_data)
        self.new.plot('reconstruction')
        self.new.title = 'New'

        container = HPlotContainer(bgcolor='none')
        container.add(old)
        container.add(self.new)

        return container

    def update_plot(self):
        """Push the latest reconstruction row into the plot and redraw."""
        self.plot_data.set_data('reconstruction', self.result[0])
        self.new.request_redraw()
if __name__ == "__main__":
    import sys

    # Optional '-UL' flag switches the decomposition operator from LU to UL.
    if len(sys.argv) >= 2 and '-UL' in sys.argv:
        operator = 'UL'
        sys.argv.remove('-UL')
    else:
        operator = 'LU'

    # Random 1xN test signal with integer values in [0, 255).
    image = (np.random.random((1, 100)) * 255).astype(int)

    print("Decomposing using the %s operator." % operator)
    if operator == 'LU':
        print("Use the '-UL' flag to switch to UL.")
    print()

    pulses = lulu.decompose(image, operator=operator)

    viewer = Viewer1D(pulses=pulses, image=image)
    viewer.configure_traits()
| bsd-3-clause | Python | |
d9ff51c74c4b41128bc8e2fe61811dba53e7da17 | Create test_client.py | chidaobanjiu/MANA2077,chidaobanjiu/MANA2077,chidaobanjiu/MANA2077,chidaobanjiu/Flask_Web,chidaobanjiu/Loocat.cc,chidaobanjiu/Loocat.cc,chidaobanjiu/Flask_Web,chidaobanjiu/Loocat.cc,chidaobanjiu/MANA2077,chidaobanjiu/MANA2077,chidaobanjiu/MANA2077,chidaobanjiu/Flask_Web,chidaobanjiu/Flask_Web | tests/test_client.py | tests/test_client.py | import unittest
from app import create_app, db
from app.models import User, Role
class FlaskClientTestCase(unittest.TestCase):
    """Integration tests that exercise the app through the Flask test client."""

    def setUp(self):
        # Fresh app + pushed context + clean schema before every test.
        self.app = create_app('testing')
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.create_all()
        Role.insert_roles()
        self.client = self.app.test_client(use_cookies=True)

    def tearDown(self):
        # Must be spelled tearDown (camelCase); the original lowercase
        # 'teardown' is never called by unittest, so database state leaked
        # between tests.
        db.session.remove()
        db.drop_all()
        self.app_context.pop()

    def test_home_page(self):
        # The original called url_for('main.index') without importing
        # url_for (NameError) and outside a request context; hit the root
        # route directly instead. NOTE(review): assumes main.index is
        # mapped to '/' -- confirm against the blueprint.
        response = self.client.get('/')
        self.assertTrue('Stranger' in response.get_data(as_text=True))
| mit | Python | |
d37d831e54fbaebab427c9a5b88cb7eb358b31af | transform data to website via Post & Get | MiracleWong/aming_python,MiracleWong/aming_python | WebScraping/4.py | WebScraping/4.py | #!/usr/bin/python
# encoding:utf-8
import sys
import urllib, urllib2
import re
dic = {'hostname': 'n2', 'ip': '2.2.2.2'}
# url = 'http://127.0.0.1:8000/db/' + '?' + urllib.urlencode(dic)
url = 'http://127.0.0.1:8000/db/'
response = urllib2.urlopen(url, urllib.urlencode(dict))
print response.read()
| mit | Python | |
35a9576dce86c9c3d32c6cc32effb7a8f6c2b706 | Test DjangoAMQPConnection if Django is installed. Closes #10. | ask/carrot,ask/carrot | tests/test_django.py | tests/test_django.py | import os
import sys
import unittest
import pickle
import time
sys.path.insert(0, os.pardir)
sys.path.append(os.getcwd())
from tests.utils import AMQP_HOST, AMQP_PORT, AMQP_VHOST, \
AMQP_USER, AMQP_PASSWORD
from carrot.connection import DjangoAMQPConnection, AMQPConnection
from UserDict import UserDict
class DictWrapper(UserDict):
    """Dict that also exposes its keys as attributes -- a minimal stand-in
    for a Django settings object."""

    def __init__(self, data):
        self.data = data

    def __getattr__(self, name):
        # Fall back to item access so wrapper.FOO == wrapper['FOO'].
        return self.data[name]
def configured_or_configure(settings, **conf):
    """Apply *conf* onto an already-configured settings object, or configure
    a fresh one using a DictWrapper of *conf* as the defaults."""
    if not settings.configured:
        settings.configure(default_settings=DictWrapper(conf))
    else:
        for name, value in conf.items():
            setattr(settings, name, value)
class TestDjangoSpecific(unittest.TestCase):
    """Tests for carrot features that depend on Django being installed."""

    def test_DjangoAMQPConnection(self):
        try:
            from django.conf import settings
        except ImportError:
            # Django is optional: skip quietly when it is not available.
            sys.stderr.write(
                "Django is not installed. \
                Not testing django specific features.\n")
            return

        # Inject the broker credentials into Django's settings object.
        configured_or_configure(settings,
                AMQP_SERVER=AMQP_HOST,
                AMQP_PORT=AMQP_PORT,
                AMQP_VHOST=AMQP_VHOST,
                AMQP_USER=AMQP_USER,
                AMQP_PASSWORD=AMQP_PASSWORD)

        # The connection attributes must mirror the settings values above.
        expected_values = {
            "hostname": AMQP_HOST,
            "port": AMQP_PORT,
            "virtual_host": AMQP_VHOST,
            "userid": AMQP_USER,
            "password": AMQP_PASSWORD}

        conn = DjangoAMQPConnection()
        self.assertTrue(isinstance(conn, AMQPConnection))
        self.assertTrue(getattr(conn, "connection", None))

        for val_name, val_value in expected_values.items():
            self.assertEquals(getattr(conn, val_name, None), val_value)
| bsd-3-clause | Python | |
0390209498c2a604efabe13595e3f69f7dcbd577 | Add script for path init. | myfavouritekk/TPN | tools/init.py | tools/init.py | #!/usr/bin/env python
"""Setup paths for TPN"""
import os.path as osp
import sys
def add_path(path):
    """Prepend *path* to sys.path unless it is already present."""
    if path in sys.path:
        return
    sys.path.insert(0, path)
# Paths are resolved relative to this file so the script works from any cwd.
this_dir = osp.dirname(__file__)
ext_dir = osp.join(this_dir, '..', 'external')

# Add py-faster-rcnn paths to PYTHONPATH
frcn_dir = osp.join(this_dir, '..', 'external', 'py-faster-rcnn')
add_path(osp.join(frcn_dir, 'lib'))
# caffe_path = osp.join('/Volumes/Research/ECCV2016/Code/External/fast-rcnn-VID-test', 'caffe-fast-rcnn', 'python')
add_path(osp.join(frcn_dir, 'caffe-fast-rcnn', 'python'))

# Add vdetlib to PYTHONPATH
lib_path = ext_dir
add_path(lib_path)

# tpn related modules
src_dir = osp.join(this_dir, '..', 'src')
add_path(src_dir) | mit | Python | |
bcfd9808377878f440cc030178b33e76eb4f031c | Add presubmit check to catch use of PRODUCT_NAME in resources. | anirudhSK/chromium,ChromiumWebApps/chromium,Fireblend/chromium-crosswalk,jaruba/chromium.src,Jonekee/chromium.src,dednal/chromium.src,crosswalk-project/chromium-crosswalk-efl,markYoungH/chromium.src,hgl888/chromium-crosswalk-efl,Jonekee/chromium.src,ondra-novak/chromium.src,Jonekee/chromium.src,Pluto-tv/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,M4sse/chromium.src,hgl888/chromium-crosswalk,krieger-od/nwjs_chromium.src,patrickm/chromium.src,ChromiumWebApps/chromium,PeterWangIntel/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,dednal/chromium.src,dednal/chromium.src,ChromiumWebApps/chromium,markYoungH/chromium.src,krieger-od/nwjs_chromium.src,anirudhSK/chromium,krieger-od/nwjs_chromium.src,anirudhSK/chromium,crosswalk-project/chromium-crosswalk-efl,chuan9/chromium-crosswalk,fujunwei/chromium-crosswalk,Jonekee/chromium.src,Jonekee/chromium.src,TheTypoMaster/chromium-crosswalk,jaruba/chromium.src,bright-sparks/chromium-spacewalk,mohamed--abdel-maksoud/chromium.src,Jonekee/chromium.src,Fireblend/chromium-crosswalk,ltilve/chromium,ChromiumWebApps/chromium,PeterWangIntel/chromium-crosswalk,dednal/chromium.src,hgl888/chromium-crosswalk-efl,Jonekee/chromium.src,patrickm/chromium.src,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,bright-sparks/chromium-spacewalk,littlstar/chromium.src,markYoungH/chromium.src,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,Chilledheart/chromium,markYoungH/chromium.src,chuan9/chromium-crosswalk,Chilledheart/chromium,Just-D/chromium-1,ondra-novak/chromium.src,Just-D/chromium-1,jaruba/chromium.src,krieger-od/nwjs_chromium.src,patrickm/chromium.src,crosswalk-project/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,krieger-od/nwjs_chromium.src,dushu1203/chromium.src,Chilledheart/chromium,hgl888/chromium-crosswalk-efl,dednal/chromium.src,M4sse/chromium.src,ltilve/ch
romium,hgl888/chromium-crosswalk,ChromiumWebApps/chromium,dushu1203/chromium.src,TheTypoMaster/chromium-crosswalk,chuan9/chromium-crosswalk,dednal/chromium.src,Jonekee/chromium.src,hgl888/chromium-crosswalk,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,Jonekee/chromium.src,fujunwei/chromium-crosswalk,ltilve/chromium,littlstar/chromium.src,Pluto-tv/chromium-crosswalk,ChromiumWebApps/chromium,PeterWangIntel/chromium-crosswalk,ondra-novak/chromium.src,krieger-od/nwjs_chromium.src,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk,bright-sparks/chromium-spacewalk,PeterWangIntel/chromium-crosswalk,krieger-od/nwjs_chromium.src,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,krieger-od/nwjs_chromium.src,axinging/chromium-crosswalk,bright-sparks/chromium-spacewalk,axinging/chromium-crosswalk,ltilve/chromium,hgl888/chromium-crosswalk,dushu1203/chromium.src,crosswalk-project/chromium-crosswalk-efl,anirudhSK/chromium,dushu1203/chromium.src,crosswalk-project/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,markYoungH/chromium.src,fujunwei/chromium-crosswalk,Just-D/chromium-1,ondra-novak/chromium.src,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,dednal/chromium.src,axinging/chromium-crosswalk,markYoungH/chromium.src,axinging/chromium-crosswalk,Just-D/chromium-1,anirudhSK/chromium,Fireblend/chromium-crosswalk,markYoungH/chromium.src,fujunwei/chromium-crosswalk,anirudhSK/chromium,littlstar/chromium.src,axinging/chromium-crosswalk,jaruba/chromium.src,anirudhSK/chromium,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,M4sse/chromium.src,chuan9/chromium-crosswalk,anirudhSK/chromium,fujunwei/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,crosswalk-project/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,dushu1203/chromium.src,jaruba/chromium.src,li
ttlstar/chromium.src,TheTypoMaster/chromium-crosswalk,dushu1203/chromium.src,Chilledheart/chromium,ChromiumWebApps/chromium,patrickm/chromium.src,Pluto-tv/chromium-crosswalk,ChromiumWebApps/chromium,M4sse/chromium.src,littlstar/chromium.src,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk-efl,dushu1203/chromium.src,hgl888/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,chuan9/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,ChromiumWebApps/chromium,chuan9/chromium-crosswalk,dednal/chromium.src,mohamed--abdel-maksoud/chromium.src,M4sse/chromium.src,ondra-novak/chromium.src,crosswalk-project/chromium-crosswalk-efl,axinging/chromium-crosswalk,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk,M4sse/chromium.src,dednal/chromium.src,axinging/chromium-crosswalk,anirudhSK/chromium,littlstar/chromium.src,Pluto-tv/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Jonekee/chromium.src,Pluto-tv/chromium-crosswalk,ChromiumWebApps/chromium,M4sse/chromium.src,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,PeterWangIntel/chromium-crosswalk,ltilve/chromium,M4sse/chromium.src,jaruba/chromium.src,anirudhSK/chromium,PeterWangIntel/chromium-crosswalk,Fireblend/chromium-crosswalk,M4sse/chromium.src,mohamed--abdel-maksoud/chromium.src,fujunwei/chromium-crosswalk,axinging/chromium-crosswalk,ltilve/chromium,ondra-novak/chromium.src,markYoungH/chromium.src,ondra-novak/chromium.src,ChromiumWebApps/chromium,Just-D/chromium-1,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk,markYoungH/chromium.src,ChromiumWebApps/chromium,ltilve/chromium,Just-D/chromium-1,Chilledheart/chromium,fujunwei/chromium-crosswalk,Just-D/chromium-1,ltilve/chromium,bright-sparks/chromium-spacewalk,Fireblend/chromium-crosswalk,ondra-novak/chromium.src,Chilledheart/chromium,patrickm/chromium.src,dushu1203/chromium.src,pa
trickm/chromium.src,ondra-novak/chromium.src,Chilledheart/chromium,bright-sparks/chromium-spacewalk,Just-D/chromium-1,TheTypoMaster/chromium-crosswalk,patrickm/chromium.src,axinging/chromium-crosswalk,M4sse/chromium.src,markYoungH/chromium.src,jaruba/chromium.src,M4sse/chromium.src,dednal/chromium.src,anirudhSK/chromium,littlstar/chromium.src,axinging/chromium-crosswalk,bright-sparks/chromium-spacewalk,jaruba/chromium.src,dushu1203/chromium.src,jaruba/chromium.src,anirudhSK/chromium,hgl888/chromium-crosswalk,Jonekee/chromium.src,bright-sparks/chromium-spacewalk,Just-D/chromium-1,Fireblend/chromium-crosswalk,dushu1203/chromium.src,fujunwei/chromium-crosswalk,patrickm/chromium.src,ltilve/chromium,PeterWangIntel/chromium-crosswalk,markYoungH/chromium.src,patrickm/chromium.src,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,jaruba/chromium.src,littlstar/chromium.src,Pluto-tv/chromium-crosswalk | chrome/app/PRESUBMIT.py | chrome/app/PRESUBMIT.py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for changes affecting chrome/app/
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import os
def _CheckNoProductNameInGeneratedResources(input_api, output_api):
"""Check that no PRODUCT_NAME placeholders are found in resources files.
These kinds of strings prevent proper localization in some languages. For
more information, see the following chromium-dev thread:
https://groups.google.com/a/chromium.org/forum/#!msg/chromium-dev/PBs5JfR0Aoc/NOcIHII9u14J
"""
problems = []
filename_filter = lambda x: x.LocalPath().endswith('.grd')
for f, line_num, line in input_api.RightHandSideLines(filename_filter):
if 'PRODUCT_NAME' in line:
problems.append('%s:%d' % (f.LocalPath(), line_num))
if problems:
return [output_api.PresubmitPromptWarning(
"Don't use PRODUCT_NAME placeholders in string resources. Instead, add "
"separate strings to google_chrome_strings.grd and "
"chromium_strings.grd. See http://goo.gl/6614MQ for more information."
"Problems with this check? Contact dubroy@chromium.org.",
items=problems)]
return []
def _CommonChecks(input_api, output_api):
  """Run the checks shared by the upload and commit presubmit hooks."""
  # Each shared check returns a (possibly empty) list of result objects;
  # accumulate them all into one flat list.
  shared_results = []
  shared_results.extend(
      _CheckNoProductNameInGeneratedResources(input_api, output_api))
  return shared_results
def CheckChangeOnUpload(input_api, output_api):
  # Presubmit entry point invoked by depot_tools when a change is uploaded.
  return _CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
  # Presubmit entry point invoked by depot_tools when a change is committed.
  return _CommonChecks(input_api, output_api)
| bsd-3-clause | Python | |
f2807e7505598abdc0f11c8d593655c3dc61c323 | add legislative.apps | opencivicdata/python-opencivicdata-django,opencivicdata/python-opencivicdata-django,opencivicdata/python-opencivicdata,opencivicdata/python-opencivicdata,opencivicdata/python-opencivicdata-django | opencivicdata/legislative/apps.py | opencivicdata/legislative/apps.py | from django.apps import AppConfig
import os
class BaseConfig(AppConfig):
    """Django AppConfig for the Open Civic Data legislative app."""
    name = 'opencivicdata.legislative'
    verbose_name = 'Open Civic Data - Legislative'
    # Explicit filesystem path so Django resolves the app correctly even
    # when it is imported through the namespace package.
    path = os.path.dirname(__file__)
| bsd-3-clause | Python | |
cd7364467de45d63e89eab4e745e29dff9906f69 | Add crawler for 'dogsofckennel' | jodal/comics,datagutten/comics,datagutten/comics,datagutten/comics,jodal/comics,datagutten/comics,jodal/comics,jodal/comics | comics/comics/dogsofckennel.py | comics/comics/dogsofckennel.py | from comics.aggregator.crawler import CreatorsCrawlerBase
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
    """Metadata for the "Dogs of C-Kennel" comic strip."""
    name = 'Dogs of C-Kennel'
    language = 'en'
    url = 'https://www.creators.com/read/dogs-of-c-kennel'
    rights = 'Mason Mastroianni, Mick Mastroianni, Johnny Hart'
class Crawler(CreatorsCrawlerBase):
    """Crawler fetching the strip from the Creators syndicate feed."""
    history_capable_date = '2007-02-12'
    schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'
    time_zone = 'US/Pacific'
    def crawl(self, pub_date):
        # '179' is the Creators feature id used by the shared helper to
        # locate this comic's release for the given date.
        return self.crawl_helper('179', pub_date)
| agpl-3.0 | Python | |
0c1ae2fb40e5af5cf732e7ec8e10d2e145be2eb2 | add run.py | bkzhn/simple-migrator | run.py | run.py | """Simple Migrator."""
__author__ = 'bkzhn'
# Script entry point: just print a banner for now.
if __name__ == '__main__':
    print('== Simple Migrator ==')
| mit | Python | |
0d6a31ade487bea9f0b75b1c3e295176fb3a7555 | Add savecpython script | alex/codespeed,nomeata/codespeed,cykl/codespeed,alex/codespeed,cykl/codespeed,nomeata/codespeed | tools/savecpython.py | tools/savecpython.py | # -*- coding: utf-8 -*-
import urllib, urllib2
from datetime import datetime
SPEEDURL = 'http://127.0.0.1:8000/'#'http://speed.pypy.org/'
HOST = "bigdog"
def save(project, revision, results, options, branch, executable, int_options, testing=False):
    """Upload (or, in testing mode, collect) benchmark results.

    Args:
        project: project name reported to the speed server.
        revision: commit id the results belong to.
        results: list of (bench_name, result_type, result_data) tuples;
            result_type is "SimpleComparisonResult" or "ComparisonResult".
        options: unused; kept for call-site compatibility.
        branch: branch name; an empty branch aborts with 1.
        executable: executable name reported to the server.
        int_options: interpreter/compile options string.
        testing: when True, return the payload dicts instead of POSTing.

    Returns:
        The list of payload dicts when testing is True, 0 on success,
        or 1 on error (empty branch or unknown result type).
    """
    testparams = []
    current_date = datetime.today()
    if branch == "": return 1
    # Unpack each benchmark tuple explicitly; the previous version rebound
    # the name `results` inside the loop, shadowing the parameter.
    for bench_name, res_type, res_data in results:
        if res_type == "SimpleComparisonResult":
            value = res_data['base_time']
        elif res_type == "ComparisonResult":
            value = res_data['avg_base']
        else:
            print("ERROR: result type unknown " + res_type)
            return 1
        data = {
            'commitid': revision,
            'project': project,
            'branch': branch,
            'executable_name': executable,
            'executable_coptions': int_options,
            'benchmark': bench_name,
            'environment': HOST,
            'result_value': value,
            'result_date': current_date,
        }
        if res_type == "ComparisonResult":
            # Comparison results additionally carry a standard deviation.
            data['std_dev'] = res_data['std_changed']
        if testing: testparams.append(data)
        else: send(data)
    if testing: return testparams
    else: return 0
def send(data):
    """POST one result payload to the speed server (Python 2 only: uses
    urllib2 and a print statement).

    Prints the server response (or the failure reason) and returns 0 on
    success, 1 when the request failed.
    """
    # Save results
    params = urllib.urlencode(data)
    f = None
    response = "None"
    info = str(datetime.today()) + ": Saving result for " + data['executable_name'] + " revision "
    info += str(data['commitid']) + ", benchmark " + data['benchmark']
    print(info)
    try:
        f = urllib2.urlopen(SPEEDURL + 'result/add/', params)
        response = f.read()
        f.close()
    except urllib2.URLError, e:
        # 'reason' is set for connection-level failures, 'code' for HTTP
        # error statuses returned by the server.
        if hasattr(e, 'reason'):
            response = '\n We failed to reach a server\n'
            response += '   Reason: ' + str(e.reason)
        elif hasattr(e, 'code'):
            response = '\n The server couldn\'t fulfill the request\n'
            response += '  Error code: ' + str(e)
        print("Server (%s) response: %s\n" % (SPEEDURL, response))
        return 1
    print "saved correctly!\n"
    return 0
| lgpl-2.1 | Python | |
91773cb6a09f710002e5be03ab9ec0c19b2d6ea3 | Add script to extract rows from terms. | BitFunnel/BitFunnel,BitFunnel/BitFunnel,danluu/BitFunnel,danluu/BitFunnel,BitFunnel/BitFunnel,danluu/BitFunnel,danluu/BitFunnel,BitFunnel/BitFunnel,danluu/BitFunnel,BitFunnel/BitFunnel,danluu/BitFunnel,BitFunnel/BitFunnel | src/Scripts/show-term-convert.py | src/Scripts/show-term-convert.py | # Convert from show term to list of rows associated with each term.
import re
term_regex = re.compile(r"Term\(\"(\S+)\"\)")
rowid_regex = re.compile(r"\s+RowId\((\S+),\s+(\S+)\)")


def extract_terms(lines):
    """Yield one 'term,<row>-<row>,...' string per Term(...) block.

    Fixes two defects of the original inline loop: the final term was
    never emitted (the loop ended before printing it), and a spurious
    empty line was printed before the first term.
    """
    current = None
    for line in lines:
        rowid_match = rowid_regex.match(line)
        if rowid_match and current is not None:
            current += "," + rowid_match.group(1) + "-" + rowid_match.group(2)
            continue
        term_match = term_regex.match(line)
        if term_match:
            if current is not None:
                yield current
            current = term_match.group(1)
    if current is not None:
        yield current


if __name__ == "__main__":
    with open("/tmp/show.results.txt") as f:
        for term_line in extract_terms(f):
            print(term_line)
| mit | Python | |
710f6ed188b6139f6469d61775da4fb752bac754 | Create __init__.py | mrpurplenz/mopidy-ampache | mopidy_ampache/__init__.py | mopidy_ampache/__init__.py | from __future__ import unicode_literals
import os
from mopidy import ext, config
__version__ = '1.0.0'
class AmpacheExtension(ext.Extension):
    """Mopidy extension that registers the Ampache backend."""
    dist_name = 'Mopidy-Ampache'
    ext_name = 'ampache'
    version = __version__
    def get_default_config(self):
        """Load the default settings from the bundled ext.conf file."""
        conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
        return config.read(conf_file)
    def get_config_schema(self):
        """Describe the options accepted under the [ampache] config section."""
        schema = super(AmpacheExtension, self).get_config_schema()
        schema['hostname'] = config.Hostname()
        schema['port'] = config.Port()
        schema['username'] = config.String()
        schema['password'] = config.Secret()
        schema['ssl'] = config.Boolean()
        # Extra URL path segment, presumably for servers hosted below the
        # web root -- confirm against the backend's URL construction.
        schema['context'] = config.String()
        return schema
    def setup(self, registry):
        """Register the backend class with Mopidy's extension registry."""
        # Imported lazily so merely loading the extension module does not
        # pull in the backend's dependencies.
        from .actor import AmpacheBackend
        registry.add('backend', AmpacheBackend)
| mit | Python | |
2cd081a6a7c13b40b5db8f667d03e93353630830 | Create leetcode-78.py | jeremykid/FunAlgorithm,jeremykid/FunAlgorithm,jeremykid/FunAlgorithm,jeremykid/FunAlgorithm | python_practice/leetCode/leetcode-78.py | python_practice/leetCode/leetcode-78.py | class Solution:
def subsets(self, nums: List[int]) -> List[List[int]]:
if nums == []:
return [[]]
sub = self.subsets(nums[1:])
newSub = []
for i in sub:
newI = i + [nums[0]]
newSub.append(newI)
sub.extend(newSub)
return sub
| mit | Python | |
56f8dd435981a28bcb026da0edb395aabd515c29 | Add a frist test for create_random_data | khchine5/opal,khchine5/opal,khchine5/opal | opal/tests/test_command_create_random_data.py | opal/tests/test_command_create_random_data.py | """
Unittests for opal.management.commands.create_random_data
"""
from mock import patch, MagicMock
from opal.core.test import OpalTestCase
from opal.management.commands import create_random_data as crd
class StringGeneratorTestCase(OpalTestCase):
    """Tests for create_random_data.string_generator."""
    def test_string_generator(self):
        # Build a fake model field exposing max_length, then check that
        # the generated string is an "<adjective> <noun>" pair drawn from
        # the module's word lists. ("frist" mirrors the commit's typo.)
        mock_field = MagicMock(name='Mock Field')
        mock_field.max_length = 30
        frist, last = crd.string_generator(mock_field).split()
        self.assertIn(frist, crd.adjectives)
        self.assertIn(last, crd.nouns)
| agpl-3.0 | Python | |
826c3e3b2787e25d040b3ddf7c4bdabde3da4158 | Add tasks.py the new and improved celery_tasks.py | CenterForOpenScience/scrapi,mehanig/scrapi,alexgarciac/scrapi,CenterForOpenScience/scrapi,erinspace/scrapi,erinspace/scrapi,ostwald/scrapi,mehanig/scrapi,jeffreyliu3230/scrapi,felliott/scrapi,fabianvf/scrapi,felliott/scrapi,icereval/scrapi,fabianvf/scrapi | scrapi/tasks.py | scrapi/tasks.py | import os
import logging
import importlib
from datetime import datetime
from celery import Celery
import settings
app = Celery()
app.config_from_object(settings)
logger = logging.getLogger(__name__)
def import_consumer(consumer_name):
    """Return the consumer module scrapi.consumers.<consumer_name>."""
    module_path = 'scrapi.consumers.{}'.format(consumer_name)
    return importlib.import_module(module_path)
@app.task
def run_consumer(consumer_name):
    """Kick off consumption for one consumer and normalize its output."""
    logger.info('Runing consumer "{}"'.format(consumer_name))
    # Form and start a celery chain: consume produces the raw documents,
    # which are then fed into begin_normalization.
    chain = (consume.si(consumer_name) | begin_normalization.s(consumer_name))
    chain.apply_async()
@app.task
def begin_normalization(raw_docs, consumer_name):
    """Fan out per-document work: archive each raw doc asynchronously and
    chain normalize -> process_normalized for it."""
    logger.info('Normalizing {} documents for consumer "{}"'
                .format(len(raw_docs), consumer_name))
    for raw in raw_docs:
        # Timestamp taken per document, shared by both pipelines.
        timestamp = datetime.now()
        process_raw.si(raw, timestamp).apply_async()
        chain = (normalize.si(raw, timestamp, consumer_name) | process_normalized.s())
        chain.apply_async()
@app.task
def consume(consumer_name):
    """Run the named consumer module's consume() and return its raw docs."""
    logger.info('Consumer "{}" has begun consumption'.format(consumer_name))
    consumer = import_consumer(consumer_name)
    result = consumer.consume()
    logger.info('Consumer "{}" has finished consumption'.format(consumer_name))
    return result
@app.task
def normalize(raw_doc, timestamp, consumer_name):
    """Normalize one raw document via its consumer module."""
    consumer = import_consumer(consumer_name)
    normalized = consumer.normalize(raw_doc, timestamp)
    # Do other things here
    return normalized
@app.task
def process_raw(raw_doc, timestamp):
    # Placeholder: raw-document archiving is not implemented yet.
    pass
@app.task
def process_normalized(normalized_doc):
    # Placeholder: downstream handling of normalized docs is not implemented.
    pass
@app.task
def check_archive():
    # Placeholder: archive consistency checking is not implemented yet.
    pass
@app.task
def tar_archive():
    """Tarball the archive/ directory into the static website folder."""
    # NOTE(review): fixed command string, so no injection risk, but the
    # exit status of os.system is ignored; subprocess.run with check=True
    # would surface tar failures.
    os.system('tar -czvf website/static/archive.tar.gz archive/')
| apache-2.0 | Python | |
3f1663f7cf32b590affb7a306bcc2711b17af296 | Add a monitor example. | dustin/twitty-twister | example/user_stream_monitor.py | example/user_stream_monitor.py | #!/usr/bin/env python
#
# Copyright (c) 2012 Ralph Meijer <ralphm@ik.nu>
# See LICENSE.txt for details
"""
Print Tweets on a user's timeline in real time.
This connects to the Twitter User Stream API endpoint with the given OAuth
credentials and prints out all Tweets of the associated user and of the
accounts the user follows. This is equivalent to the user's time line.
The arguments, in order, are: consumer key, consumer secret, access token key,
access token secret.
This is mostly the same as the C{user_stream.py} example, except that this
uses L{twittytwister.streaming.TwitterMonitor}. It will reconnect in the
face of disconnections or explicit reconnects to change the API request
parameters (e.g. changing the track keywords).
"""
import sys
from twisted.internet import reactor
from oauth import oauth
from twittytwister import twitter
def cb(entry):
    # Print one status as "<screen name>: <text>", UTF-8 encoded for the
    # terminal (Python 2 byte strings).
    print '%s: %s' % (entry.user.screen_name.encode('utf-8'),
                      entry.text.encode('utf-8'))
def change(monitor):
    # Clear the request parameters and force a reconnect, demonstrating
    # how the monitor re-establishes the stream with new arguments.
    monitor.args = {}
    monitor.connect(forceReconnect=True)
# Build OAuth credentials from the four command-line arguments, start the
# user-stream monitor, and schedule a parameter change after 30 seconds.
consumer = oauth.OAuthConsumer(sys.argv[1], sys.argv[2])
token = oauth.OAuthToken(sys.argv[3], sys.argv[4])
feed = twitter.TwitterFeed(consumer=consumer, token=token)
monitor = twitter.TwitterMonitor(feed.user, cb, {'with': 'followings'})
monitor.startService()
reactor.callLater(30, change, monitor)
reactor.run()
| mit | Python | |
ac3cd54b93aa6d5cddaac89016d09b9e6747a301 | allow bazel 0.7.x (#1467) | rkpagadala/mixer,istio/old_mixer_repo,istio/old_mixer_repo,istio/old_mixer_repo,rkpagadala/mixer,rkpagadala/mixer | check_bazel_version.bzl | check_bazel_version.bzl | def _parse_bazel_version(bazel_version):
# Remove commit from version.
version = bazel_version.split(" ", 1)[0]
# Split into (release, date) parts and only return the release
# as a tuple of integers.
parts = version.split("-", 1)
# Turn "release" into a tuple of strings
version_tuple = ()
for number in parts[0].split("."):
version_tuple += (int(number),)
return version_tuple
# acceptable min_version <= version <= max_version
def check_version():
    # Accept Bazel releases from 0.5.4 up to (and including) the 0.7.x line.
    check_bazel_version("0.5.4", "0.7.99")
# acceptable min_version <= version <= max_version
def check_bazel_version(min_version, max_version):
    # Fail the build unless the running Bazel falls inside the supported
    # [min_version, max_version] range. Very old Bazels (< 0.2.1) do not
    # even expose native.bazel_version.
    if "bazel_version" not in dir(native):
        fail("\nCurrent Bazel version is lower than 0.2.1, expected at least %s\n" %
             min_version)
    elif not native.bazel_version:
        # Non-release builds carry no version string, so only warn.
        print("\nCurrent Bazel is not a release version, cannot check for " +
              "compatibility.")
        print("Make sure that you are running at least Bazel %s.\n" % min_version)
    else:
        _version = _parse_bazel_version(native.bazel_version)
        _min_version = _parse_bazel_version(min_version)
        _max_version = _parse_bazel_version(max_version)
        if _version < _min_version:
            fail("\nCurrent Bazel version {} is too old, expected at least {}\n".format(
                native.bazel_version, min_version))
        if _version > _max_version:
            fail("\nCurrent Bazel version {} is too new, expected at most {}\n".format(
                native.bazel_version, max_version))
| def _parse_bazel_version(bazel_version):
# Remove commit from version.
version = bazel_version.split(" ", 1)[0]
# Split into (release, date) parts and only return the release
# as a tuple of integers.
parts = version.split("-", 1)
# Turn "release" into a tuple of strings
version_tuple = ()
for number in parts[0].split("."):
version_tuple += (int(number),)
return version_tuple
# acceptable min_version <= version <= max_version
def check_version():
check_bazel_version("0.5.4", "0.6.1")
# acceptable min_version <= version <= max_version
def check_bazel_version(min_version, max_version):
if "bazel_version" not in dir(native):
fail("\nCurrent Bazel version is lower than 0.2.1, expected at least %s\n" %
min_version)
elif not native.bazel_version:
print("\nCurrent Bazel is not a release version, cannot check for " +
"compatibility.")
print("Make sure that you are running at least Bazel %s.\n" % min_version)
else:
_version = _parse_bazel_version(native.bazel_version)
_min_version = _parse_bazel_version(min_version)
_max_version = _parse_bazel_version(max_version)
if _version < _min_version:
fail("\nCurrent Bazel version {} is too old, expected at least {}\n".format(
native.bazel_version, min_version))
if _version > _max_version:
fail("\nCurrent Bazel version {} is too new, expected at most {}\n".format(
native.bazel_version, max_version))
| apache-2.0 | Python |
b40d064ac5b4e01f11cdb1f6b7ce7f1a0a968be5 | Create set_memory_example.py | ahmadfaizalbh/Chatbot | examples/set_memory_example.py | examples/set_memory_example.py | from chatbot import Chat, register_call
import os
import warnings
warnings.filterwarnings("ignore")
@register_call("increment_count")
def memory_get_set_example(session, query):
    """Increment a per-name counter kept in the chat session's memory.

    The counter name is the query text normalised to lowercase; values
    are stored as strings and start from '0' on first use.
    """
    counter_name = query.strip().lower()
    # Read the previous (stringly-typed) value and bump it.
    previous = int(session.memory.get(counter_name, '0'))
    updated = previous + 1
    session.memory[counter_name] = str(updated)
    return f"count {updated}"
chat = Chat(os.path.join(os.path.dirname(os.path.abspath(__file__)), "example.template"))
chat.converse("""
Memory get and set example
Usage:
increment <name>
show <name>
example:
increment mango
show mango
""")
| mit | Python | |
12b7d2b2296b934675f2cca0f35d059a67f58e7f | Create ComputeDailyWage.py | JupiterLikeThePlanet/PostMates | ComputeDailyWage.py | ComputeDailyWage.py | ################################ Compute daily wage
def computepay(w, m, e, g):
    """Return w - (m / g) - e, with every operand coerced to float.

    w: wages, m: miles driven, e: expenses, g: gas price.
    Fixed to use its parameters: the previous body ignored them and read
    the module-level wage/mileage/expenses/gas globals instead, which only
    worked because every call site passed exactly those globals.
    """
    total = float(w) - (float(m) / float(g)) - float(e)
    return total
# --- Daily wage: prompt for the day's figures (Python 2 I/O) ---
try:
    # NOTE(review): 'input' shadows the builtin; harmless here but fragile.
    input = raw_input('Enter Wages: ')
    wage = float(input)
    input = raw_input('Enter Miles: ')
    mileage = float(input)
    input = raw_input('Enter Expenses: ')
    expenses = float(input)
    input = raw_input('Enter Price of Gas: ')
    gas = float(input)
except:
    # Bare except: any failure (including Ctrl-C) ends the program here.
    print "Not a number!"
    quit()
print 'Total: ', computepay ( wage , mileage , expenses , gas )
hours = raw_input('Enter Hours: ')
hourlyWage = computepay ( wage , mileage , expenses , gas ) / float(hours)
print 'Hourly Wage: ', hourlyWage
################################## Compute weekly wage
def computepay(w, m, e, g):
    """Return w - (m / g) - e as a float (w: wages, m: miles, e: expenses,
    g: gas price).

    This is a redefinition of the function above for the weekly-wage
    section; like it, it is fixed to use its parameters rather than the
    module globals (call sites pass those globals, so results match).
    """
    total = float(w) - (float(m) / float(g)) - float(e)
    return total
#date = raw_input("Enter Beginning and End dates of the week: ")
#input = raw_input("How many days were worked: ")
#days = int(input)
############## Day 1 #######################
try:
    input = raw_input('Enter Wages: ')
    wage = float(input)
    input = raw_input('Enter Miles: ')
    mileage = float(input)
    input = raw_input('Enter Expenses: ')
    expenses = float(input)
    input = raw_input('Enter Price of Gas: ')
    gas = float(input)
except:
    print "Not a number!"
    quit()
print 'Day 1 Total: ', computepay ( wage, mileage , expenses , gas )
day1 = computepay ( wage , mileage , expenses , gas )
############## Day 2 ########################
try:
    input = raw_input('Enter Wages: ')
    wage = float(input)
    input = raw_input('Enter Miles: ')
    mileage = float(input)
    input = raw_input('Enter Expenses: ')
    expenses = float(input)
    input = raw_input('Enter Price of Gas: ')
    gas = float(input)
except:
    print "Not a number!"
    quit()
print 'Day 2 Total: ', computepay ( wage , mileage , expenses , gas )
day2 = computepay ( wage , mileage , expenses , gas )
weekwage = day1 + day2
# NOTE(review): 'date' is never assigned -- the raw_input that would set
# it is commented out above -- so the next line raises NameError at
# runtime. Restore the date prompt or drop 'date' from the message.
print "Your weekly wage for ", date , "is: " , weekwage , "dollars."
| mit | Python | |
2429c0bdf5c2db5c2b40dc43d0a4c277e20d72fa | add 0001 | Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python | Jaccorot/0001/0001.py | Jaccorot/0001/0001.py | #!/usr/local/bin/python
#coding=utf-8
#第 0001 题:做为 Apple Store App 独立开发者,你要搞限时促销,为你的应用生成激活码(或者优惠券),
#使用 Python 如何生成 200 个激活码(或者优惠券)?
import uuid
def create_code(num, length):
    """Generate *num* unique activation codes of *length* characters each.

    Each code is the first *length* characters of a dash-stripped uuid1
    string; duplicates are skipped until enough unique codes exist.
    Returns an empty list for num <= 0 (the previous version looped
    forever in that case). Note: a UUID has 32 hex digits, so *length*
    is effectively capped at 32.
    """
    result = []
    seen = set()  # O(1) duplicate checks instead of scanning the list
    while len(result) < num:
        code = str(uuid.uuid1()).replace('-', '')[:length]
        if code not in seen:
            seen.add(code)
            result.append(code)
    return result
print create_code(200, 20)
| mit | Python | |
6913674358d226953c1090ab7c8f5674dac1816c | add 0007 | Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Show-Me-the-Code/python,Show-Me-the-Code/python | Jaccorot/0007/0007.py | Jaccorot/0007/0007.py | #!/usr/bin/python
#coding:utf-8
"""
第 0007 题:有个目录,里面是你自己写过的程序,统计一下你写过多少行代码。包括空行和注释,但是要分别列出来。
"""
import os
def walk_dir(path):
    """Collect paths of Python files found anywhere under *path*.

    A file matches when its lowercased name ends in 'py' (note: this is
    the literal original rule -- no leading dot is required).
    """
    matches = []
    for dirpath, _subdirs, filenames in os.walk(path):
        matches.extend(
            os.path.join(dirpath, fname)
            for fname in filenames
            if fname.lower().endswith('py')
        )
    return matches
def count_the_code(path):
    """Print a per-file summary (Python 2): total lines, blank lines and
    comment lines, where '#' lines and triple-quoted blocks both count
    as comments."""
    file_name = os.path.basename(path)
    note_flag = False  # True while inside a triple-quoted block
    line_num = 0
    empty_line_num = 0
    note_num = 0
    with open(path) as f:
        for line in f.read().split('\n'):
            line_num += 1
            # An opening \"\"\" starts a comment block (and itself counts).
            if line.strip().startswith('\"\"\"') and not note_flag:
                note_flag =True
                note_num += 1
                continue
            # A closing \"\"\" ends the block (and also counts).
            if line.strip().startswith('\"\"\"'):
                note_flag = False
                note_num += 1
            if line.strip().startswith('#') or note_flag:
                note_num += 1
            if len(line) == 0:
                empty_line_num += 1
    print u"在%s中,共有%s行代码,其中有%s空行,有%s注释"% (file_name, line_num, empty_line_num, note_num)
# Script entry point: report statistics for every Python file found
# under the current directory.
if __name__ == '__main__':
    for f in walk_dir('.'):
        count_the_code(f)
| mit | Python | |
f8093f59b77e481231aeca49ef057a4602d21b2e | add tests for appconfig | artefactual/archivematica,artefactual/archivematica,artefactual/archivematica,artefactual/archivematica | src/archivematicaCommon/tests/test_appconfig.py | src/archivematicaCommon/tests/test_appconfig.py | from __future__ import absolute_import
import os
import StringIO
from django.core.exceptions import ImproperlyConfigured
import pytest
from appconfig import Config
CONFIG_MAPPING = {
'search_enabled': [
{'section': 'Dashboard', 'option': 'disable_search_indexing', 'type': 'iboolean'},
{'section': 'Dashboard', 'option': 'search_enabled', 'type': 'boolean'},
],
}
@pytest.mark.parametrize('option, value, expect', [
    ('search_enabled', 'true', True),
    ('search_enabled', 'false', False),
    ('disable_search_indexing', 'true', False),
    ('disable_search_indexing', 'false', True),
])
def test_mapping_list_config_file(option, value, expect):
    # Either config-file option may drive 'search_enabled'; the
    # 'disable_search_indexing' spelling is inverted (type 'iboolean'
    # in CONFIG_MAPPING).
    config = Config(env_prefix='ARCHIVEMATICA_DASHBOARD', attrs=CONFIG_MAPPING)
    config.read_defaults(StringIO.StringIO(
        '[Dashboard]\n'
        '{option} = {value}'.format(option=option, value=value)))
    assert config.get('search_enabled') is expect
@pytest.mark.parametrize('envvars, expect', [
    ({'ARCHIVEMATICA_DASHBOARD_DASHBOARD_SEARCH_ENABLED': 'true'}, True),
    ({'ARCHIVEMATICA_DASHBOARD_DASHBOARD_SEARCH_ENABLED': 'false'}, False),
    ({'ARCHIVEMATICA_DASHBOARD_SEARCH_ENABLED': 'true'}, True),
    ({'ARCHIVEMATICA_DASHBOARD_SEARCH_ENABLED': 'false'}, False),
    ({'ARCHIVEMATICA_DASHBOARD_DASHBOARD_DISABLE_SEARCH_INDEXING': 'true'},
     False),
    ({'ARCHIVEMATICA_DASHBOARD_DASHBOARD_DISABLE_SEARCH_INDEXING': 'false'},
     True),
    ({'ARCHIVEMATICA_DASHBOARD_DISABLE_SEARCH_INDEXING': 'true'}, False),
    ({'ARCHIVEMATICA_DASHBOARD_DISABLE_SEARCH_INDEXING': 'false'}, True),
    ({}, ImproperlyConfigured),
    # Following two show that the DISABLE env var overrides the ENABLE one
    # because of the ordering in CONFIG_MAPPING.
    ({'ARCHIVEMATICA_DASHBOARD_DASHBOARD_SEARCH_ENABLED': 'true',
      'ARCHIVEMATICA_DASHBOARD_DASHBOARD_DISABLE_SEARCH_INDEXING': 'true'},
     False),
    ({'ARCHIVEMATICA_DASHBOARD_DASHBOARD_SEARCH_ENABLED': 'false',
      'ARCHIVEMATICA_DASHBOARD_DASHBOARD_DISABLE_SEARCH_INDEXING': 'false'},
     True),
])
def test_mapping_list_env_var(envvars, expect):
    # Seed the environment, then check the computed setting, or that the
    # expected exception type is raised when nothing is configured.
    for var, val in envvars.items():
        os.environ[var] = val
    config = Config(env_prefix='ARCHIVEMATICA_DASHBOARD', attrs=CONFIG_MAPPING)
    # bool(expect) is expect  <=>  expect is literally True/False; an
    # exception class falls through to the raises() branch below.
    if bool(expect) is expect:
        search_enabled = config.get('search_enabled')
        assert search_enabled is expect
    else:
        with pytest.raises(expect):
            config.get('search_enabled')
    # NOTE(review): this cleanup is skipped when an assertion above fails;
    # pytest's monkeypatch.setenv would restore the environment reliably.
    for var in envvars:
        del os.environ[var]
| agpl-3.0 | Python | |
06dd6ed476549d832159b1dbfe4d415579b4d067 | add wrapper | fontify/fontify,fontify/fontify,fontify/fontify,fontify/fontify | scripts/fontify.py | scripts/fontify.py | #!/usr/bin/env python2
import argparse
import tempfile
import shutil
import os
import crop_image
def check_input(image):
    """Validate the input image path.

    Raises:
        FileNotFoundError: if *image* does not name an existing file
            (now raised with the offending path for easier debugging;
            the bare ``raise FileNotFoundError`` carried no context).
        ValueError: if the extension is not .jpg or .png.

    NOTE(review): the shebang declares python2, where FileNotFoundError
    does not exist (NameError at runtime); under Python 2 this would
    need IOError instead -- confirm the intended interpreter.
    """
    if not os.path.isfile(image):
        raise FileNotFoundError(image)
    ext = os.path.splitext(image)[1]
    if ext.lower() not in (".jpg", ".png"):
        raise ValueError("Unrecognized image extension")
def setup_work_dir(image):
    """Create a scratch directory containing a copy of *image*.

    The copy is named 'input' plus the original extension; the path of
    the new scratch directory is returned (the caller removes it later).
    """
    scratch = tempfile.mkdtemp(prefix="fontify")
    extension = os.path.splitext(image)[1]
    shutil.copyfile(image, os.path.join(scratch, 'input' + extension))
    return scratch
def process(image, font_name):
    # NOTE(review): both parameters are currently unused and the caller in
    # __main__ passes the work directory, not an image; crop_image.crop()
    # is invoked with no arguments. Presumably a stub awaiting the rest of
    # the fontify pipeline -- confirm the intended crop_image API.
    crop_image.crop()
def tear_down(tmpdir, output):
    """Copy the generated font out of *tmpdir*, then delete the work dir.

    An empty *output* falls back to 'fontify.ttf' in the current
    directory.
    """
    destination = output if output != "" else "fontify.ttf"
    shutil.copyfile(os.path.join(tmpdir, 'fontify.ttf'), destination)
    shutil.rmtree(tmpdir)
if __name__ == "__main__":
    # Parse arguments, validate the image, then run the pipeline:
    # stage the image in a work dir, process it, and collect the output.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "image", help="input image (JPG or PNG)"
    )
    parser.add_argument(
        "-n", "--name", default="Fontify", help="font name (default: Fontify)"
    )
    parser.add_argument(
        # "--output" added so the parsed attribute is args.output; the bare
        # "-o" stored the value as args.o, crashing at the tear_down call.
        "-o", "--output", metavar="OUTPUT", default="",
        help="output font file (default to fontify.ttf in current directory)"
    )
    args = parser.parse_args()
    check_input(args.image)
    # Bug fix: setup_work_dir requires the image path (was called with
    # no arguments, a guaranteed TypeError).
    tmpdir = setup_work_dir(args.image)
    process(tmpdir, args.name)
    tear_down(tmpdir, args.output)
| mit | Python | |
5509839e0af89467eb14ee178807e2898202101b | Add port-security extension API test cases | skyddv/neutron,neoareslinux/neutron,barnsnake351/neutron,dims/neutron,jerryz1982/neutron,mandeepdhami/neutron,SamYaple/neutron,wenhuizhang/neutron,suneeth51/neutron,glove747/liberty-neutron,JianyuWang/neutron,mahak/neutron,dhanunjaya/neutron,sebrandon1/neutron,wolverineav/neutron,openstack/neutron,jumpojoy/neutron,bigswitch/neutron,cloudbase/neutron,cloudbase/neutron,apporc/neutron,openstack/neutron,suneeth51/neutron,cisco-openstack/neutron,adelina-t/neutron,swdream/neutron,javaos74/neutron,watonyweng/neutron,paninetworks/neutron,JioCloud/neutron,mattt416/neutron,NeCTAR-RC/neutron,jumpojoy/neutron,shahbazn/neutron,eayunstack/neutron,dims/neutron,vivekanand1101/neutron,eonpatapon/neutron,huntxu/neutron,silenci/neutron,sasukeh/neutron,neoareslinux/neutron,noironetworks/neutron,igor-toga/local-snat,eonpatapon/neutron,wolverineav/neutron,MaximNevrov/neutron,MaximNevrov/neutron,asgard-lab/neutron,wenhuizhang/neutron,chitr/neutron,noironetworks/neutron,watonyweng/neutron,igor-toga/local-snat,dhanunjaya/neutron,klmitch/neutron,bgxavier/neutron,apporc/neutron,mahak/neutron,javaos74/neutron,yanheven/neutron,eayunstack/neutron,mahak/neutron,sebrandon1/neutron,skyddv/neutron,swdream/neutron,SamYaple/neutron,sasukeh/neutron,chitr/neutron,jacknjzhou/neutron,mmnelemane/neutron,asgard-lab/neutron,silenci/neutron,jacknjzhou/neutron,glove747/liberty-neutron,mandeepdhami/neutron,takeshineshiro/neutron,bigswitch/neutron,mattt416/neutron,paninetworks/neutron,JioCloud/neutron,openstack/neutron,shahbazn/neutron,klmitch/neutron,adelina-t/neutron,barnsnake351/neutron,mmnelemane/neutron,bgxavier/neutron,yanheven/neutron,takeshineshiro/neutron,vivekanand1101/neutron,jerryz1982/neutron,huntxu/neutron,cisco-openstack/neutron,NeCTAR-RC/neutron,JianyuWang/neutron | neutron/tests/api/test_extension_driver_port_security.py | neutron/tests/api/test_extension_driver_port_security.py | # Copyright 2015 OpenStack 
Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from neutron.tests.api import base_security_groups as base
from neutron.tests.tempest import config
from neutron.tests.tempest import test
CONF = config.CONF
FAKE_IP = '10.0.0.1'
FAKE_MAC = '00:25:64:e8:19:dd'
class PortSecTest(base.BaseSecGroupTest):
    """API tests for the neutron 'port-security' extension: networks carry
    a port_security_enabled default that ports inherit and may override."""
    @classmethod
    def resource_setup(cls):
        # No extra shared resources yet; defer to the base class.
        super(PortSecTest, cls).resource_setup()
    def _create_network(self, network_name=None, port_security_enabled=True):
        """Wrapper utility that returns a test network.

        Unlike the base helper, this one lets the test choose the
        network-level port_security_enabled value explicitly.
        """
        network_name = network_name or data_utils.rand_name('test-network')
        body = self.client.create_network(
            name=network_name, port_security_enabled=port_security_enabled)
        network = body['network']
        # Track for cleanup by the base class.
        self.networks.append(network)
        return network
    @test.attr(type='smoke')
    @test.idempotent_id('7c338ddf-e64e-4118-bd33-e49a1f2f1495')
    @test.requires_ext(extension='port-security', service='network')
    def test_port_sec_default_value(self):
        # The default port-security value is True, and a port inherits the
        # network's value when none is specified in the API request.
        network = self.create_network()
        self.create_subnet(network)
        self.assertTrue(network['port_security_enabled'])
        port = self.create_port(network)
        self.assertTrue(port['port_security_enabled'])
    @test.attr(type='smoke')
    @test.idempotent_id('e60eafd2-31de-4c38-8106-55447d033b57')
    @test.requires_ext(extension='port-security', service='network')
    def test_port_sec_specific_value(self):
        # A port-level value overrides the network default in both
        # directions (True network / False port, and vice versa).
        network = self.create_network()
        self.assertTrue(network['port_security_enabled'])
        self.create_subnet(network)
        port = self.create_port(network, port_security_enabled=False)
        self.assertFalse(port['port_security_enabled'])
        # Create a network with port-sec set to False
        network = self._create_network(port_security_enabled=False)
        self.assertFalse(network['port_security_enabled'])
        self.create_subnet(network)
        port = self.create_port(network, port_security_enabled=True)
        self.assertTrue(port['port_security_enabled'])
    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('05642059-1bfc-4581-9bc9-aaa5db08dd60')
    @test.requires_ext(extension='port-security', service='network')
    def test_port_sec_update_port_failed(self):
        network = self.create_network()
        self.create_subnet(network)
        port = self.create_port(network)
        # Conflict when disabling port-security while security groups are
        # still attached to the port.
        self.assertRaises(lib_exc.Conflict,
                          self.update_port, port, port_security_enabled=False)
        # Clearing the security groups first makes the update succeed.
        updated_port = self.update_port(
            port, security_groups=[], port_security_enabled=False)
        self.assertFalse(updated_port['port_security_enabled'])
        allowed_address_pairs = [{'ip_address': FAKE_IP,
                                  'mac_address': FAKE_MAC}]
        # Conflict when setting address pairs while port-security is False.
        self.assertRaises(lib_exc.Conflict,
                          self.update_port, port,
                          allowed_address_pairs=allowed_address_pairs)
| apache-2.0 | Python | |
6ca1d9f4a3b8a518661409166a9918a20eb61655 | fix wansu cdn url | gravyboat/streamlink,streamlink/streamlink,streamlink/streamlink,chhe/streamlink,bastimeyer/streamlink,melmorabity/streamlink,javiercantero/streamlink,gravyboat/streamlink,chhe/streamlink,melmorabity/streamlink,back-to/streamlink,beardypig/streamlink,javiercantero/streamlink,bastimeyer/streamlink,beardypig/streamlink,back-to/streamlink,wlerin/streamlink,wlerin/streamlink | src/streamlink/plugins/app17.py | src/streamlink/plugins/app17.py | import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import http, useragents
from streamlink.stream import HLSStream, RTMPStream, HTTPStream
API_URL = "https://api-dsa.17app.co/api/v1/liveStreams/getLiveStreamInfo"

# Channel name taken from a 17.live channel-page URL.
_url_re = re.compile(r"https://17.live/live/(?P<channel>[^/&?]+)")
# An empty, JSON-escaped "closeBy" field means the stream is still open.
_status_re = re.compile(r'\\"closeBy\\":\\"\\"')
# First escaped "url" value in the API response (the stream endpoint).
_rtmp_re = re.compile(r'\\"url\\"\s*:\s*\\"(.+?)\\"')


class App17(Plugin):
    """Streamlink plugin for 17.live live streams."""

    @classmethod
    def can_handle_url(cls, url):
        return _url_re.match(url)

    def _get_streams(self):
        """Yield HTTP, RTMP and HLS variants of the channel's live stream."""
        match = _url_re.match(self.url)
        channel = match.group("channel")

        http.headers.update({'User-Agent': useragents.CHROME})

        payload = '{"liveStreamID": "%s"}' % (channel)
        res = http.post(API_URL, data=payload)
        status = _status_re.search(res.text)
        if not status:
            # "closeBy" is non-empty, so the stream has been closed.
            self.logger.info("Stream currently unavailable.")
            return

        http_url = _rtmp_re.search(res.text).group(1)
        http_url = http_url.replace("http:", "https:")
        yield "live", HTTPStream(self.session, http_url)

        if 'pull-rtmp' in http_url:
            # The same endpoint is also reachable over plain RTMP.
            url = http_url.replace("https:", "rtmp:").replace(".flv", "")
            stream = RTMPStream(self.session, {
                "rtmp": url,
                "live": True
            })
            yield "live", stream

        if 'wansu-' in http_url:
            # Wansu CDN serves its HLS variant playlist under
            # <stream>/playlist.m3u8 instead of <stream>.m3u8.
            url = http_url.replace(".flv", "/playlist.m3u8")
            for stream in HLSStream.parse_variant_playlist(self.session, url).items():
                yield stream
        else:
            url = http_url.replace(".flv", ".m3u8")
            yield "live", HLSStream(self.session, url)


__plugin__ = App17
| import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import http, useragents
from streamlink.stream import HLSStream, RTMPStream, HTTPStream
API_URL = "https://api-dsa.17app.co/api/v1/liveStreams/getLiveStreamInfo"

_url_re = re.compile(r"https://17.live/live/(?P<channel>[^/&?]+)")
_status_re = re.compile(r'\\"closeBy\\":\\"\\"')
_rtmp_re = re.compile(r'\\"url\\"\s*:\s*\\"(.+?)\\"')


class App17(Plugin):
    """Streamlink plugin for 17.live live streams."""

    @classmethod
    def can_handle_url(cls, url):
        return _url_re.match(url)

    def _get_streams(self):
        """Yield HTTP, RTMP and HLS variants of the channel's live stream."""
        match = _url_re.match(self.url)
        channel = match.group("channel")

        http.headers.update({'User-Agent': useragents.CHROME})

        payload = '{"liveStreamID": "%s"}' % (channel)
        res = http.post(API_URL, data=payload)
        status = _status_re.search(res.text)
        if not status:
            self.logger.info("Stream currently unavailable.")
            return

        http_url = _rtmp_re.search(res.text).group(1)
        # FIX: always upgrade to https and expose the plain HTTP stream.
        # Previously this was gated on 'pull-rtmp' being in the URL, so
        # streams on other CDN hosts yielded no HTTP stream at all.
        http_url = http_url.replace("http:", "https:")
        yield "live", HTTPStream(self.session, http_url)

        if 'pull-rtmp' in http_url:
            url = http_url.replace("https:", "rtmp:").replace(".flv", "")
            stream = RTMPStream(self.session, {
                "rtmp": url,
                "live": True
            })
            yield "live", stream

        # FIX: match any wansu CDN host ('wansu-'), not only
        # 'wansu-global-pull-rtmp'; wansu serves its HLS playlist under
        # <stream>/playlist.m3u8 rather than <stream>.m3u8, so other wansu
        # hosts previously produced a broken HLS URL in the else branch.
        if 'wansu-' in http_url:
            url = http_url.replace(".flv", "/playlist.m3u8")
            for stream in HLSStream.parse_variant_playlist(self.session, url).items():
                yield stream
        else:
            url = http_url.replace(".flv", ".m3u8")
            yield "live", HLSStream(self.session, url)


__plugin__ = App17
| bsd-2-clause | Python |
7f51b7a1b6a319595df5c360bae0264386e590e9 | add support for tucao.cc | cnbeining/you-get,lilydjwg/you-get,j4s0nh4ck/you-get,runningwolf666/you-get,chares-zhang/you-get,cnbeining/you-get,qzane/you-get,flwh/you-get,zmwangx/you-get,specter4mjy/you-get,smart-techs/you-get,pastebt/you-get,Red54/you-get,smart-techs/you-get,rain1988/you-get,dream1986/you-get,pitatensai/you-get,FelixYin66/you-get,qzane/you-get,zmwangx/you-get,shanyimin/you-get,CzBiX/you-get,linhua55/you-get,linhua55/you-get,forin-xyz/you-get,tigerface/you-get,xyuanmu/you-get,lilydjwg/you-get,jindaxia/you-get,candlewill/you-get,xyuanmu/you-get,XiWenRen/you-get,fffonion/you-get,power12317/you-get | src/you_get/extractors/tucao.py | src/you_get/extractors/tucao.py | #!/usr/bin/env python
__all__ = ['tucao_download']
from ..common import *
# import re
import random
import time
from xml.dom import minidom
#1. <li>type=tudou&vid=199687639</li>
#2. <li>type=tudou&vid=199506910|</li>
#3. <li>type=video&file=http://xiaoshen140731.qiniudn.com/lovestage04.flv|</li>
#4 may ? <li>type=video&file=http://xiaoshen140731.qiniudn.com/lovestage04.flv|xx**type=&vid=?</li>
#5. <li>type=tudou&vid=200003098|07**type=tudou&vid=200000350|08</li>
# re_pattern=re.compile(r"(type=(.+?)&(vid|file)=(.*?))[\|<]")
def tucao_single_download(type_link, title, output_dir=".", merge=True, info_only=False):
    """Download one video part from a "type=...&(vid|file)=..." fragment.

    ``merge`` is accepted for interface consistency with the other
    extractors but is not used by this function.
    """
    if "file" in type_link:
        # Direct file link: everything after "file=" is the media URL.
        url=type_link[type_link.find("file=")+5:]
        vtype, ext, size=url_info(url)
        print_info(site_info, title, vtype, size)
        if not info_only:
            download_urls([url], title, ext, size, output_dir)
    else:
        # Hosted video (e.g. tudou): resolve the real media URLs through
        # the site's playurl API; the random key and millisecond timestamp
        # mimic the parameters sent by the site's own player.
        u="http://www.tucao.cc/api/playurl.php?{}&key=tucao{:07x}.cc&r={}".format(type_link,random.getrandbits(28),int(time.time()*1000))
        xml=minidom.parseString(get_content(u))
        urls=[]
        size=0
        # The API returns one <url> element per media segment; total the
        # sizes so the downloader can show overall progress.
        for i in xml.getElementsByTagName("url"):
            urls.append(i.firstChild.nodeValue)
            vtype, ext, _size=url_info(i.firstChild.nodeValue)
            size+=_size
        print_info(site_info, title, vtype, size)
        if not info_only:
            download_urls(urls, title, ext, size, output_dir)
def tucao_download(url, output_dir=".", merge=True, info_only=False):
    """Download every part of a tucao.cc video page.

    Pages embed a "<li>type=...</li>" entry; multiple parts are separated
    by "**" and each part may carry a "|subtitle" suffix (see the format
    examples in the comments above).
    """
    html=get_content(url)
    title=match1(html,r'<h1 class="show_title">(.*?)<\w')
    raw_list=match1(html,r"<li>(type=.+?)</li>")
    raw_l=raw_list.split("**")
    if len(raw_l)==1:
        # Single part; strip a trailing "|" if present (format 2 above).
        format_link=raw_l[0][:-1] if raw_l[0].endswith("|") else raw_l[0]
        tucao_single_download(format_link,title,output_dir,merge,info_only)
    else:
        # Multi-part video: append each part's subtitle to the title.
        for i in raw_l:
            format_link,sub_title=i.split("|")
            tucao_single_download(format_link,title+"-"+sub_title,output_dir,merge,info_only)


site_info = "tucao.cc"
download = tucao_download
download_playlist = playlist_not_supported("tucao")
| mit | Python | |
617e6741a06fd63f22ec9b28090e39c120061a84 | Add the `vulnerability_tickets.py` sample security plugin to deny access to tickets with "security" or "vulnerability" in the `keywords` or `summary` fields. | moreati/trac-gitsvn,moreati/trac-gitsvn,exocad/exotrac,moreati/trac-gitsvn,dafrito/trac-mirror,dafrito/trac-mirror,exocad/exotrac,exocad/exotrac,moreati/trac-gitsvn,dokipen/trac,dafrito/trac-mirror,exocad/exotrac,dokipen/trac,dokipen/trac,dafrito/trac-mirror | sample-plugins/vulnerability_tickets.py | sample-plugins/vulnerability_tickets.py | from trac.core import *
from trac.config import ListOption
from trac.perm import IPermissionPolicy, IPermissionRequestor, PermissionSystem
from trac.ticket.model import Ticket
class SecurityTicketsPolicy(Component):
    """
    Require the VULNERABILITY_VIEW permission to view any ticket with the words
    "security" or "vulnerability" in the summary or keywords fields.
    """
    implements(IPermissionPolicy, IPermissionRequestor)

    # IPermissionPolicy methods

    def check_permission(self, username, action, context):
        # Walk up the context chain looking for a ticket context.
        ctx = context
        while ctx.parent:
            if ctx.realm == 'ticket':
                break
            ctx = ctx.parent

        if ctx.realm == 'ticket' and ctx.id is not None:
            ticket = Ticket(self.env, ctx.id)
            haystack = (ticket['keywords'] + ticket['summary']).lower()
            # Sensitive tickets require VULNERABILITY_VIEW; returning None
            # (no decision) otherwise defers to the other policies.
            if any(word in haystack for word in ('security', 'vulnerability')):
                granted = PermissionSystem(self.env).get_user_permissions(username)
                if 'VULNERABILITY_VIEW' not in granted:
                    return False

    # IPermissionRequestor methods

    def get_permission_actions(self):
        yield 'VULNERABILITY_VIEW'
| bsd-3-clause | Python | |
b8ab0280ffd76419b7418c39a9f0b9d8131a9d39 | Add merge migration | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/users/migrations/0022_merge_20200814_2045.py | corehq/apps/users/migrations/0022_merge_20200814_2045.py | # Generated by Django 2.2.13 on 2020-08-14 20:45
from django.db import migrations
class Migration(migrations.Migration):
    """No-op merge migration joining two parallel 0021 branches."""

    # Both leaf migrations become parents so the app's migration graph has
    # a single leaf again; no schema changes are required.
    dependencies = [
        ('users', '0021_add_view_apps_permission'),
        ('users', '0021_invitation_email_status'),
    ]

    operations = [
    ]
| bsd-3-clause | Python | |
cff300eecbbf6189fe7fc9fe4fafd718b414c80e | add command to create root | avlach/univbris-ocf,avlach/univbris-ocf,avlach/univbris-ocf,avlach/univbris-ocf | src/python/expedient/clearinghouse/commands/management/commands/create_default_root.py | src/python/expedient/clearinghouse/commands/management/commands/create_default_root.py | '''Command to create default administrators.
Created on Aug 26, 2010
@author: jnaous
'''
from django.core.management.base import NoArgsCommand
from django.conf import settings
from django.contrib.auth.models import User
class Command(NoArgsCommand):
help = "Creates the default root user specified by " \
"settings.ROOT_USERNAME (%s), settings.ROOT_PASSWORD (%s), " \
"settings.ROOT_EMAIL (%s). See " \
"the 'defaultsettings.admins' module documentation. If the user " \
"already exists, the user's password will be reset, and the user " \
"will be promoted to superuser." % (
settings.ROOT_USERNAME, settings.ROOT_PASSWORD)
def handle_noargs(self, **options):
try:
u = User.objects.get(username=settings.ROOT_USERNAME)
except User.objects.DoesNotExist:
User.objects.create_superuser(
settings.ROOT_USERNAME,
settings.ROOT_EMAIL,
settings.ROOT_PASSWORD,
)
print "Created superuser %s with password %s." % (
settings.ROOT_USERNAME, settings.ROOT_PASSWORD)
else:
u.set_password(settings.ROOT_PASSWORD)
u.is_superuser = True
u.is_staff = True
u.save()
print "Reset user %s's password to %s and promoted the user " \
"to superuser." % (
settings.ROOT_USERNAME, settings.ROOT_PASSWORD)
| bsd-3-clause | Python | |
760c0fb41ae9e6fd5307563b1cda6eaa8e6336af | add try order | sue-chain/sample | lab/try_order.py | lab/try_order.py | # -*- coding: utf-8 -*-
# pylint: disable=broad-except
"""Execution order of try/except/return/finally.

- ``finally`` always runs, and runs last, whether or not ``except`` fires.
- ``finally`` still runs when ``try``/``except`` contains a ``return``
  (the return value is stashed first, then ``finally`` executes).
- A ``return`` inside ``except`` or ``finally`` overrides the previously
  stashed return value — so never put a ``return`` in ``finally``.
"""
import logging

__authors__ = ['"sue.chain" <sue.chain@gmail.com>']

logging.getLogger("").setLevel("DEBUG")


def exec_try_finally():
    """try and finally both run, in order; except never fires."""
    try:
        logging.info("execute try")
    except Exception as error:
        logging.error("execute except")
    finally:
        logging.info("execute finally")


def exec_try_except_finally():
    """try raises, except handles it, finally still runs last."""
    try:
        raise Exception("")
        logging.info("execute try")  # unreachable: the raise above skips it
    except Exception as error:
        logging.error("execute except")
    finally:
        logging.info("execute finally")


def exec_try_return_finally():
    """try returns; finally runs before the value is delivered."""
    try:
        logging.info("execute try")
        return "return try"
    except Exception as error:
        logging.error("execute except")
    finally:
        logging.info("execute finally")


def exec_except_return_finally():
    """except returns; finally runs before the value is delivered."""
    try:
        logging.info("execute try")
        raise Exception("test")
    except Exception as error:
        logging.error("execute except")
        return "return except"
    finally:
        logging.info("execute finally")


def exec_finally_return_finally():
    """finally's return overrides except's return value."""
    try:
        logging.info("execute try")
        raise Exception("test")
    except Exception as error:
        logging.error("execute except")
        return "return except"
    finally:
        logging.info("execute finally")
        return "return finally"  # overrides "return except" above


if __name__ == '__main__':
    #exec_try_finally()
    #exec_try_except_finally()
    #print exec_try_return_finally()
    #print exec_except_return_finally()
    print exec_finally_return_finally()
| apache-2.0 | Python | |
787f956539eb5e41467e04b8239ae571fad60da7 | Implement code to return how many characters to delete to make 2 strings into an anagram | arvinsim/hackerrank-solutions | all-domains/tutorials/cracking-the-coding-interview/strings-making-anagrams/solution.py | all-domains/tutorials/cracking-the-coding-interview/strings-making-anagrams/solution.py | # https://www.hackerrank.com/challenges/ctci-making-anagrams
# Python 3
def delete_char_at(s, i):
    """Return *s* with the character at index *i* removed."""
    head, tail = s[:i], s[i + 1:]
    return head + tail
def number_needed(a, b):
    """Return how many character deletions make *a* and *b* anagrams.

    Characters common to both strings (with multiplicity) are kept; every
    surplus character on either side must be deleted.  Using Counter's
    multiset difference replaces the original O(n^2) scan, which called
    str.find and rebuilt the reference string once per character.
    """
    from collections import Counter  # local import keeps the script self-contained
    count_a, count_b = Counter(a), Counter(b)
    # Surplus characters on each side are exactly the deletions required.
    return sum(((count_a - count_b) + (count_b - count_a)).values())
# Read the two strings from stdin, one per line.
a = input().strip()
b = input().strip()
# TEST answer should be 3
# betas and beast are anagrams...so the trailing bz and a should be removed
# a = 'betasbz'
# b = 'beasta'
print(number_needed(a, b))
| mit | Python | |
fe088ec159b4b395bcf463cf7ff31db7f7409fcf | Move czml utils computation to core | poliastro/poliastro | src/poliastro/core/czml_utils.py | src/poliastro/core/czml_utils.py | import numpy as np
from numba import njit as jit
@jit
def intersection_ellipsoid_line(x, y, z, u1, u2, u3, a, b, c):
    """Intersection of an ellipsoid defined by its axes a, b, c with the
    line p + λu.

    Parameters
    ----------
    x, y, z: float
        A point of the line
    u1, u2, u3: float
        The line vector
    a, b, c: float
        The ellipsoid semi-axes

    Returns
    -------
    p0, p1: ~np.array
        This returns both of the points intersecting the ellipsoid.
    """
    # Get rid of one parameter by translating the line's direction vector
    k, m = u2 / u1, u3 / u1

    # The two roots of the quadratic in the line parameter share the same
    # denominator, linear term and discriminant.  The original evaluated
    # the full 30-line discriminant (including the sqrt) twice verbatim;
    # hoisting it removes the duplication and the redundant work.
    denom = a ** 2 * b ** 2 * m ** 2 + a ** 2 * c ** 2 * k ** 2 + b ** 2 * c ** 2
    linear = a ** 2 * b ** 2 * m * z + a ** 2 * c ** 2 * k * y + b ** 2 * c ** 2 * x
    sqrt_term = np.sqrt(
        a ** 2
        * b ** 2
        * c ** 2
        * (
            a ** 2 * b ** 2 * m ** 2
            + a ** 2 * c ** 2 * k ** 2
            - a ** 2 * k ** 2 * z ** 2
            + 2 * a ** 2 * k * m * y * z
            - a ** 2 * m ** 2 * y ** 2
            + b ** 2 * c ** 2
            - b ** 2 * m ** 2 * x ** 2
            + 2 * b ** 2 * m * x * z
            - b ** 2 * z ** 2
            - c ** 2 * k ** 2 * x ** 2
            + 2 * c ** 2 * k * x * y
            - c ** 2 * y ** 2
        )
    )

    t0 = (-linear + sqrt_term) / denom
    t1 = (linear + sqrt_term) / denom

    # Walk from (x, y, z) along the reduced direction (1, k, m) by each root.
    p0 = np.array([x + t0, y + k * t0, z + m * t0])
    p1 = np.array([x - t1, y - t1 * k, z - t1 * m])
    return p0, p1
@jit
def project_point_on_ellipsoid(x, y, z, a, b, c):
    """Return the projection of a point on an ellipsoid.

    Parameters
    ----------
    x, y, z: float
        Cartesian coordinates of point
    a, b, c: float
        Semi-axes of the ellipsoid
    """
    # The line through the origin-relative point and the point itself
    # pierces the ellipsoid twice; keep whichever intersection lies
    # closer to the original point.
    p1, p2 = intersection_ellipsoid_line(x, y, z, x, y, z, a, b, c)
    norm_1 = np.linalg.norm(np.array([p1[0] - x, p1[1] - y, p1[2] - z]))
    norm_2 = np.linalg.norm(np.array([p2[0] - x, p2[1] - y, p2[2] - z]))
    return p1 if norm_1 <= norm_2 else p2
| mit | Python | |
b906082034822a825ec2963864b32d6619cf938a | Add testing functions for join and relabel | pratapvardhan/scikit-image,oew1v07/scikit-image,paalge/scikit-image,GaZ3ll3/scikit-image,emon10005/scikit-image,rjeli/scikit-image,youprofit/scikit-image,ajaybhat/scikit-image,pratapvardhan/scikit-image,warmspringwinds/scikit-image,ofgulban/scikit-image,SamHames/scikit-image,chintak/scikit-image,vighneshbirodkar/scikit-image,GaZ3ll3/scikit-image,almarklein/scikit-image,SamHames/scikit-image,newville/scikit-image,Midafi/scikit-image,juliusbierk/scikit-image,keflavich/scikit-image,jwiggins/scikit-image,Hiyorimi/scikit-image,blink1073/scikit-image,chintak/scikit-image,dpshelio/scikit-image,michaelaye/scikit-image,paalge/scikit-image,robintw/scikit-image,robintw/scikit-image,paalge/scikit-image,youprofit/scikit-image,rjeli/scikit-image,dpshelio/scikit-image,michaelpacer/scikit-image,juliusbierk/scikit-image,oew1v07/scikit-image,vighneshbirodkar/scikit-image,vighneshbirodkar/scikit-image,SamHames/scikit-image,SamHames/scikit-image,almarklein/scikit-image,rjeli/scikit-image,ClinicalGraphics/scikit-image,bennlich/scikit-image,michaelaye/scikit-image,ofgulban/scikit-image,chintak/scikit-image,emon10005/scikit-image,Midafi/scikit-image,Hiyorimi/scikit-image,ofgulban/scikit-image,Britefury/scikit-image,chintak/scikit-image,keflavich/scikit-image,bennlich/scikit-image,ajaybhat/scikit-image,blink1073/scikit-image,chriscrosscutler/scikit-image,michaelpacer/scikit-image,almarklein/scikit-image,WarrenWeckesser/scikits-image,bsipocz/scikit-image,newville/scikit-image,jwiggins/scikit-image,Britefury/scikit-image,bsipocz/scikit-image,WarrenWeckesser/scikits-image,ClinicalGraphics/scikit-image,almarklein/scikit-image,warmspringwinds/scikit-image,chriscrosscutler/scikit-image | skimage/segmentation/tests/test_join.py | skimage/segmentation/tests/test_join.py | import numpy as np
from numpy.testing import assert_array_equal, assert_raises
from skimage.segmentation import join_segmentations, relabel_from_one
def test_join_segmentations():
    """join_segmentations labels each distinct (s1, s2) label pair."""
    s1 = np.array([[0, 0, 1, 1],
                   [0, 2, 1, 1],
                   [2, 2, 2, 1]])
    s2 = np.array([[0, 1, 1, 0],
                   [0, 1, 1, 0],
                   [0, 1, 1, 1]])
    # test correct join
    # NOTE: technically, equality to j_ref is not required, only that there
    # is a one-to-one mapping between j and j_ref. I don't know of an easy way
    # to check this (i.e. not as error-prone as the function being tested)
    j = join_segmentations(s1, s2)
    j_ref = np.array([[0, 1, 3, 2],
                      [0, 5, 3, 2],
                      [4, 5, 5, 3]])
    assert_array_equal(j, j_ref)
    # test correct exception when arrays are different shapes
    s3 = np.array([[0, 0, 1, 1], [0, 2, 2, 1]])
    assert_raises(ValueError, join_segmentations, s1, s3)
def test_relabel_from_one():
    """relabel_from_one renumbers labels onto 1..n and returns both maps."""
    ar = np.array([1, 1, 5, 5, 8, 99, 42])
    ar_relab, fw, inv = relabel_from_one(ar)
    # Unique labels {1, 5, 8, 42, 99} map, in sorted order, onto 1..5.
    ar_relab_ref = np.array([1, 1, 2, 2, 3, 5, 4])
    assert_array_equal(ar_relab, ar_relab_ref)
    # Forward map: indexed by old label value, gives the new label.
    fw_ref = np.zeros(100, int)
    fw_ref[1] = 1; fw_ref[5] = 2; fw_ref[8] = 3; fw_ref[42] = 4; fw_ref[99] = 5
    assert_array_equal(fw, fw_ref)
    # Inverse map: new label -> original label (index 0 is background).
    inv_ref = np.array([0, 1, 5, 8, 42, 99])
    assert_array_equal(inv, inv_ref)


if __name__ == "__main__":
    np.testing.run_module_suite()
| bsd-3-clause | Python | |
6b38f963cf555576157f063e9c026a94814f93a2 | Fix all target for managed install. | hgl888/chromium-crosswalk-efl,Just-D/chromium-1,mogoweb/chromium-crosswalk,axinging/chromium-crosswalk,Chilledheart/chromium,crosswalk-project/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,ChromiumWebApps/chromium,patrickm/chromium.src,Chilledheart/chromium,anirudhSK/chromium,mogoweb/chromium-crosswalk,dushu1203/chromium.src,crosswalk-project/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,M4sse/chromium.src,krieger-od/nwjs_chromium.src,krieger-od/nwjs_chromium.src,ltilve/chromium,chuan9/chromium-crosswalk,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,ondra-novak/chromium.src,Chilledheart/chromium,Jonekee/chromium.src,anirudhSK/chromium,ondra-novak/chromium.src,dednal/chromium.src,markYoungH/chromium.src,markYoungH/chromium.src,chuan9/chromium-crosswalk,krieger-od/nwjs_chromium.src,PeterWangIntel/chromium-crosswalk,patrickm/chromium.src,Jonekee/chromium.src,jaruba/chromium.src,dushu1203/chromium.src,littlstar/chromium.src,ltilve/chromium,krieger-od/nwjs_chromium.src,ondra-novak/chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,mogoweb/chromium-crosswalk,bright-sparks/chromium-spacewalk,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,patrickm/chromium.src,fujunwei/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,bright-sparks/chromium-spacewalk,ChromiumWebApps/chromium,bright-sparks/chromium-spacewalk,mogoweb/chromium-crosswalk,ondra-novak/chromium.src,Pluto-tv/chromium-crosswalk,littlstar/chromium.src,markYoungH/chromium.src,hgl888/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,M4sse/chromium.src,anirudhSK/chromium,mogoweb/chromium-crosswalk,patrickm/chromium.src,littlstar/chromium.src,TheTypoMaster/chromium-crosswalk,krieger-od/nwjs_chromium.src,dushu1203/chromium.src,anirudhSK/chromium,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,cro
sswalk-project/chromium-crosswalk-efl,jaruba/chromium.src,dushu1203/chromium.src,Jonekee/chromium.src,Just-D/chromium-1,M4sse/chromium.src,ondra-novak/chromium.src,hgl888/chromium-crosswalk,patrickm/chromium.src,markYoungH/chromium.src,PeterWangIntel/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,Just-D/chromium-1,PeterWangIntel/chromium-crosswalk,Just-D/chromium-1,Fireblend/chromium-crosswalk,fujunwei/chromium-crosswalk,dushu1203/chromium.src,TheTypoMaster/chromium-crosswalk,Fireblend/chromium-crosswalk,dushu1203/chromium.src,Fireblend/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,anirudhSK/chromium,Pluto-tv/chromium-crosswalk,M4sse/chromium.src,M4sse/chromium.src,littlstar/chromium.src,Pluto-tv/chromium-crosswalk,ondra-novak/chromium.src,Just-D/chromium-1,hgl888/chromium-crosswalk,mogoweb/chromium-crosswalk,dednal/chromium.src,Chilledheart/chromium,axinging/chromium-crosswalk,dushu1203/chromium.src,axinging/chromium-crosswalk,jaruba/chromium.src,Jonekee/chromium.src,ondra-novak/chromium.src,M4sse/chromium.src,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,krieger-od/nwjs_chromium.src,dednal/chromium.src,ondra-novak/chromium.src,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,fujunwei/chromium-crosswalk,dednal/chromium.src,ChromiumWebApps/chromium,ChromiumWebApps/chromium,ondra-novak/chromium.src,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Fireblend/chromium-crosswalk,markYoungH/chromium.src,fujunwei/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,anirudhSK/chromium,axinging/chromium-crosswalk,dushu1203/chromium.src,Fireblend/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,ltilve/chromium,chuan9/chromium-crosswalk,jaruba/chromium.src,Fireblend/chromium-crosswalk,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk,PeterWangIntel/chromium-cr
osswalk,mogoweb/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,jaruba/chromium.src,chuan9/chromium-crosswalk,fujunwei/chromium-crosswalk,krieger-od/nwjs_chromium.src,anirudhSK/chromium,TheTypoMaster/chromium-crosswalk,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,littlstar/chromium.src,ChromiumWebApps/chromium,Fireblend/chromium-crosswalk,bright-sparks/chromium-spacewalk,ChromiumWebApps/chromium,jaruba/chromium.src,Just-D/chromium-1,dushu1203/chromium.src,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,Jonekee/chromium.src,Just-D/chromium-1,jaruba/chromium.src,hgl888/chromium-crosswalk,Jonekee/chromium.src,ltilve/chromium,ChromiumWebApps/chromium,Just-D/chromium-1,ChromiumWebApps/chromium,bright-sparks/chromium-spacewalk,mogoweb/chromium-crosswalk,bright-sparks/chromium-spacewalk,anirudhSK/chromium,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,ChromiumWebApps/chromium,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,markYoungH/chromium.src,bright-sparks/chromium-spacewalk,Pluto-tv/chromium-crosswalk,ltilve/chromium,dednal/chromium.src,Pluto-tv/chromium-crosswalk,fujunwei/chromium-crosswalk,mogoweb/chromium-crosswalk,M4sse/chromium.src,krieger-od/nwjs_chromium.src,dednal/chromium.src,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,Fireblend/chromium-crosswalk,mogoweb/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,dednal/chromium.src,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,anirudhSK/chromium,axinging/chromium-crosswalk,markYoungH/chromium.src,hgl888/chromium-crosswalk,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,jaruba/chromium.src,dednal/chromium.src,Chilledheart/chromium,patrickm/chromium.src,chuan9/chromium-crosswalk,ltilve/chromium,dushu1203/chromium.src,ChromiumWebApps/chromium,M4sse/chromium.src,chuan9/chromium-crosswalk,ltilve/chromium,hgl888/chromium-crosswalk,ChromiumWebApps/chromium,ChromiumWebApps/chromium,patrickm/chromium.src,littlstar/chromium.
src,hgl888/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk-efl,dushu1203/chromium.src,bright-sparks/chromium-spacewalk,Chilledheart/chromium,jaruba/chromium.src,ltilve/chromium,anirudhSK/chromium,markYoungH/chromium.src,axinging/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,crosswalk-project/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,markYoungH/chromium.src,anirudhSK/chromium,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,chuan9/chromium-crosswalk,M4sse/chromium.src,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk,jaruba/chromium.src,axinging/chromium-crosswalk,littlstar/chromium.src,axinging/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,M4sse/chromium.src,chuan9/chromium-crosswalk,patrickm/chromium.src,hgl888/chromium-crosswalk-efl,dednal/chromium.src,M4sse/chromium.src,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,Jonekee/chromium.src,Jonekee/chromium.src,anirudhSK/chromium,PeterWangIntel/chromium-crosswalk,axinging/chromium-crosswalk,ltilve/chromium,Just-D/chromium-1,bright-sparks/chromium-spacewalk,jaruba/chromium.src,markYoungH/chromium.src,mohamed--abdel-maksoud/chromium.src,patrickm/chromium.src | build/android/tests/multiple_proguards/multiple_proguards.gyp | build/android/tests/multiple_proguards/multiple_proguards.gyp | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
  'variables': {
    'chromium_code': 1,
    'package_name': 'multiple_proguard',
  },
  'targets': [
    {
      # Build-only regression target: verifies that an APK build can apply
      # several proguard flag files in a single proguard step.
      'target_name': 'multiple_proguards_test_apk',
      'type': 'none',
      'variables': {
        'app_manifest_version_name%': '<(android_app_version_name)',
        'java_in_dir': '.',
        'proguard_enabled': 'true',
        # Two flag files exercise the multiple-proguards code path.
        'proguard_flags_paths': [
          'proguard1.flags',
          'proguard2.flags',
        ],
        'R_package': 'dummy',
        'R_package_relpath': 'dummy',
        'apk_name': 'MultipleProguards',
        # This is a build-only test. There's nothing to install.
        'gyp_managed_install': 0,
      },
      'dependencies': [
        # guava has references to objects using reflection which
        # should be ignored in proguard step.
        '../../../../third_party/guava/guava.gyp:guava_javalib',
      ],
      'includes': [ '../../../../build/java_apk.gypi' ],
    },
  ],
}
| # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
  'variables': {
    'chromium_code': 1,
    'package_name': 'multiple_proguard',
  },
  'targets': [
    {
      # Build-only regression target: verifies that an APK build can apply
      # several proguard flag files in a single proguard step.
      'target_name': 'multiple_proguards_test_apk',
      'type': 'none',
      'variables': {
        'app_manifest_version_name%': '<(android_app_version_name)',
        'java_in_dir': '.',
        'proguard_enabled': 'true',
        'proguard_flags_paths': [
          'proguard1.flags',
          'proguard2.flags',
        ],
        'R_package': 'dummy',
        'R_package_relpath': 'dummy',
        'apk_name': 'MultipleProguards',
        # This is a build-only test. There's nothing to install, so opt the
        # target out of managed install; without this the 'all' target
        # breaks when gyp_managed_install is enabled.
        'gyp_managed_install': 0,
      },
      'dependencies': [
        # guava has references to objects using reflection which
        # should be ignored in proguard step.
        '../../../../third_party/guava/guava.gyp:guava_javalib',
      ],
      'includes': [ '../../../../build/java_apk.gypi' ],
    },
  ],
}
| bsd-3-clause | Python |
c8bcdf4d586277df940b8fd9f977cd72305b5e85 | add StatusReportView | byteweaver/django-skrill | skrill/views.py | skrill/views.py | from django import http
from django.views.generic.base import View
from skrill.models import PaymentRequest, StatusReport
class StatusReportView(View):
    """Receive Skrill's server-to-server status callback and persist it."""

    # POST keys copied verbatim from the callback payload; missing keys in
    # the required group raise KeyError, as before.
    _REQUIRED_FIELDS = (
        'pay_to_email', 'pay_from_email', 'merchant_id', 'transaction_id',
        'mb_transaction_id', 'mb_amount', 'mb_currency', 'status',
        'md5sig', 'amount', 'currency',
    )
    _OPTIONAL_FIELDS = (
        'customer_id', 'failed_reason_code', 'sha2sig', 'payment_type',
        'custom_field_1', 'custom_field_2', 'custom_field_3',
        'custom_field_4', 'custom_field_5',
    )

    def post(self, request, *args, **kwargs):
        data = request.POST
        report = StatusReport()
        # The callback's transaction_id is the primary key of the payment
        # request we originally issued.
        report.payment_request = PaymentRequest.objects.get(pk=data['transaction_id'])
        for field in self._REQUIRED_FIELDS:
            setattr(report, field, data[field])
        for field in self._OPTIONAL_FIELDS:
            setattr(report, field, data.get(field, None))
        report.save()
        # Only mark the report valid after the signature check passes.
        report.validate_md5sig()
        report.valid = True
        report.save()
        return http.HttpResponse()
| bsd-3-clause | Python | |
12c3ded4ed05e34a0a44163abd5ae08ab0289c4c | Create Score-Calculator.py | 3xbun/elab-cpe | Score-Calculator.py | Score-Calculator.py | midterm = float(input())
# Scores are only processed when they fall in the valid 0-60 range;
# out-of-range input silently produces no output, as before.
if 0 <= midterm <= 60:
    final = float(input())
    if 0 <= final <= 60:
        total = midterm + final
        avg = total / 2
        print('Total: ' + str(total))
        print('Average: ' + str(avg))
| mit | Python | |
3b0fdecb60b9c5e8a104564d5703c85c97c10f27 | Introduce an ExtruderStack class | fieldOfView/Cura,ynotstartups/Wanhao,hmflash/Cura,hmflash/Cura,fieldOfView/Cura,ynotstartups/Wanhao,Curahelper/Cura,Curahelper/Cura | cura/Settings/ExtruderStack.py | cura/Settings/ExtruderStack.py | # Copyright (c) 2017 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from UM.MimeTypeDatabase import MimeType, MimeTypeDatabase
from UM.Settings.ContainerStack import ContainerStack
from UM.Settings.ContainerRegistry import ContainerRegistry
class ExtruderStack(ContainerStack):
    """Container stack representing a single extruder train's settings."""

    def __init__(self, container_id, *args, **kwargs):
        super().__init__(container_id, *args, **kwargs)


# Register the MIME type used for serialized extruder stacks
# (*.extruder.cfg) so the container registry can recognise and load them.
extruder_stack_mime = MimeType(
    name = "application/x-cura-extruderstack",
    comment = "Cura Extruder Stack",
    suffixes = [ "extruder.cfg" ]
)

MimeTypeDatabase.addMimeType(extruder_stack_mime)
ContainerRegistry.addContainerTypeByName(ExtruderStack, "extruder_stack", extruder_stack_mime.name)
| agpl-3.0 | Python | |
1e996e3cf1c8e067bbbb8bf23f93b34202b4cd44 | add 401 | aenon/OnlineJudge,aenon/OnlineJudge | leetcode/1.Array_String/401.BinaryWatch.py | leetcode/1.Array_String/401.BinaryWatch.py | # 401. Binary Watch
# A binary watch has 4 LEDs on the top which represent the hours (0-11), and the 6 LEDs on the bottom represent the minutes (0-59).
# Each LED represents a zero or one, with the least significant bit on the right.
# off off on on
# off on on off off on
# For example, the above binary watch reads "3:25".
# Given a non-negative integer n which represents the number of LEDs that are currently on, return all possible times the watch could represent.
# Example:
# Input: n = 1
# Return: ["1:00", "2:00", "4:00", "8:00", "0:01", "0:02", "0:04", "0:08", "0:16", "0:32"]
# Note:
# The order of output does not matter.
# The hour must not contain a leading zero, for example "01:00" is not valid, it should be "1:00".
# The minute must be consist of two digits and may contain a leading zero, for example "10:2" is not valid, it should be "10:02".
class Solution(object):
    def readBinaryWatch(self, num):
        """Return every "h:mm" time whose hour+minute set bits total *num*.

        :type num: int
        :rtype: List[str]

        Counts set bits with ``bin()``.  The original wrapped the count in
        a redundant ``int()`` and folded the two counts through
        ``sum(map(lambda ...))`` over a two-item list; adding the two
        counts directly is equivalent and far clearer.
        """
        times = []
        for hour in range(12):
            for minute in range(60):
                # bin(x) is '0b...'; counting '1' chars counts set bits.
                if bin(hour).count('1') + bin(minute).count('1') == num:
                    times.append(str(hour) + ":" + str(minute).zfill(2))
        return times
class Solution1(object):
    def readBinaryWatch(self, num):
        """
        :type num: int
        :rtype: List[str]
        Kernighan's trick — x &= x - 1 clears the lowest set bit — counts
        how many LEDs are lit for a given value.
        """
        def popcount(value):
            lit = 0
            while value:
                value &= value - 1
                lit += 1
            return lit

        return [
            '{}:{}'.format(h, str(m).zfill(2))
            for h in range(12)
            for m in range(60)
            if popcount(h) + popcount(m) == num
        ]
class Solution2(object):
    def readBinaryWatch(self, num):
        """
        :type num: int
        :rtype: List[str]
        Same Kernighan bit count as Solution1, written with explicit
        loops instead of a single comprehension.
        """
        def bit_count(value):
            ones = 0
            while value:
                value &= value - 1
                ones += 1
            return ones

        result = []
        for hour in range(12):
            hour_bits = bit_count(hour)
            for minute in range(60):
                if hour_bits + bit_count(minute) == num:
                    result.append('{}:{}'.format(str(hour), str(minute).zfill(2)))
        return result
5712da6095594360be9010b0fe6b85606ec1e2d0 | Add regression test for #891 | oroszgy/spaCy.hu,recognai/spaCy,aikramer2/spaCy,spacy-io/spaCy,spacy-io/spaCy,aikramer2/spaCy,Gregory-Howard/spaCy,explosion/spaCy,oroszgy/spaCy.hu,oroszgy/spaCy.hu,oroszgy/spaCy.hu,honnibal/spaCy,recognai/spaCy,recognai/spaCy,aikramer2/spaCy,spacy-io/spaCy,honnibal/spaCy,Gregory-Howard/spaCy,explosion/spaCy,explosion/spaCy,recognai/spaCy,recognai/spaCy,aikramer2/spaCy,aikramer2/spaCy,spacy-io/spaCy,spacy-io/spaCy,Gregory-Howard/spaCy,oroszgy/spaCy.hu,explosion/spaCy,raphael0202/spaCy,recognai/spaCy,aikramer2/spaCy,raphael0202/spaCy,honnibal/spaCy,raphael0202/spaCy,oroszgy/spaCy.hu,raphael0202/spaCy,honnibal/spaCy,explosion/spaCy,raphael0202/spaCy,Gregory-Howard/spaCy,Gregory-Howard/spaCy,explosion/spaCy,raphael0202/spaCy,spacy-io/spaCy,Gregory-Howard/spaCy | spacy/tests/regression/test_issue891.py | spacy/tests/regression/test_issue891.py | # coding: utf8
from __future__ import unicode_literals
import pytest
@pytest.mark.xfail
@pytest.mark.parametrize('text', ["want/need"])
def test_issue891(en_tokenizer, text):
    """Test that / infixes are split correctly."""
    # Expected tokenization: "want", "/", "need".  Marked xfail: per the
    # marker, splitting on "/" is not produced by the tokenizer yet.
    tokens = en_tokenizer(text)
    assert len(tokens) == 3
    assert tokens[1].text == "/"
| mit | Python | |
d11ac35410252c108dcd7e8d2ae03df2abc4697b | add statsquid cli util | bcicen/statsquid | statsquid/statsquid.py | statsquid/statsquid.py | #!/usr/bin/env python
import os,sys,logging,signal
from argparse import ArgumentParser
#from . import __version__
from listener import StatListener
from collector import StatCollector
__version__ = 'alpha'
log = logging.getLogger('statsquid')
class StatSquid(object):
    """
    StatSquid
    params:
     - role(str): Role of this statsquid instance. Either master or agent.
     - options(dict): dictionary of options to start instance with
    """
    #TODO: improve graceful exiting, fix signal catching
    def __init__(self,role,options):
        # Register the SIGTERM handler before building the instance so a
        # shutdown request during startup is not missed.
        self.role = role
        signal.signal(signal.SIGTERM, self.sig_handler)
        # NOTE(review): if role is neither 'master' nor 'agent',
        # self.instance is never set and sig_handler would raise -- confirm
        # callers always validate the role first (main() does).
        if self.role == 'master':
            self.instance = self.start_master(options)
        if self.role == 'agent':
            self.instance = self.start_agent(options)
    def start_master(self,opts):
        # Master role: listen for stats arriving via redis.
        return StatListener(redis_host=opts['redis_host'],
                            redis_port=opts['redis_port'])
    def start_agent(self,opts):
        # Agent role: collect stats from the docker daemon and publish
        # them to redis.
        #format docker url
        docker_url = "tcp://" + opts['docker_host'] + \
                     ":" + str(opts['docker_port'])
        return StatCollector(docker_url,
                             redis_host=opts['redis_host'],
                             redis_port=opts['redis_port'])
    def sig_handler(self,signal,frame):
        # NOTE: the 'signal' parameter shadows the signal module inside
        # this method body.
        print('signal caught, exiting')
        self.instance.stop()
        sys.exit(0)
def main():
    """Parse the command line and run statsquid as master or agent."""
    commands = [ 'agent', 'master' ]
    parser = ArgumentParser(description='statsquid %s' % __version__)
    parser.add_argument('--docker-host',
                        dest='docker_host',
                        help='docker host to connect to (default: 127.0.0.1)',
                        default='127.0.0.1')
    parser.add_argument('--docker-port',
                        dest='docker_port',
                        help='docker port to connect on (default: 4243)',
                        default=4243)
    parser.add_argument('--redis-host',
                        dest='redis_host',
                        help='redis host to connect to (default: 127.0.0.1)',
                        default='127.0.0.1')
    # NOTE(review): the redis port defaults to the *string* '6379' while
    # the docker port defaults to the int 4243 -- confirm downstream
    # consumers accept a string here.
    parser.add_argument('--redis-port',
                        dest='redis_port',
                        help='redis port to connect on (default: 6379)',
                        default='6379')
    parser.add_argument('command',
                        help='Mode to run as or command to run (%s)' % \
                             ','.join(commands))
    args = parser.parse_args()
    if args.command not in commands:
        log.error('Unknown command %s' % args.command)
        exit(1)
    # The argparse namespace dict doubles as the options dict StatSquid expects.
    s = StatSquid(args.command,args.__dict__)
if __name__ == '__main__':
main()
| mit | Python | |
59160eeb24f6311dafce2db34a40f8ba879fd516 | Add test showing taint for attr store | github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql | python/ql/test/experimental/dataflow/tainttracking/defaultAdditionalTaintStep/test_attr.py | python/ql/test/experimental/dataflow/tainttracking/defaultAdditionalTaintStep/test_attr.py | # Add taintlib to PATH so it can be imported during runtime without any hassle
import sys; import os; sys.path.append(os.path.dirname(os.path.dirname((__file__))))
from taintlib import *
# This has no runtime impact, but allows autocomplete to work
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ..taintlib import *
# Actual tests
class Foo:
    """Holder with one constructor-supplied attribute and one fixed one."""
    def __init__(self, arg):
        self.other_arg = "other_arg"
        self.arg = arg
def test_tainted_attr():
    # The following demonstrates how tainting an attribute affected the taintedness of
    # the object.
    #
    # Previously we would (wrongly) treat the object as tainted if we noticed a write of
    # a tainted value to any of its' attributes. This lead to FP, highlighted in
    # https://github.com/github/codeql/issues/7786
    #
    # The trailing "# $ ..." markers appear to be inline expectations for the
    # taint-analysis test harness ("SPURIOUS" flags results that are reported
    # but should not be) -- do not edit them casually.
    f = Foo(TAINTED_STRING)
    ensure_not_tainted(f) # $ SPURIOUS: tainted
    ensure_tainted(f.arg) # $ tainted
    ensure_not_tainted(f.other_arg)
    x = Foo("x")
    ensure_not_tainted(x, x.arg, x.other_arg)
    x.arg = TAINTED_STRING
    ensure_not_tainted(x) # $ SPURIOUS: tainted
    ensure_tainted(x.arg) # $ tainted
    ensure_not_tainted(f.other_arg)
    b = Foo("bar")
    ensure_not_tainted(b, b.arg, b.other_arg)
| mit | Python | |
4dbeb72f8c07bcb5e91c71792651b448142beff9 | add script for wrapping shell commands and sending the result to datadog as events | DataDog/dogapi,DataDog/dogapi | src/dogapi/wrap.py | src/dogapi/wrap.py | import sys
import subprocess
import time
from StringIO import StringIO
from optparse import OptionParser
from dogapi import dog_http_api as dog
from dogapi.common import get_ec2_instance_id
class Timeout(Exception): pass  # raised by poll_proc when a process outlives its time budget
def poll_proc(proc, sleep_interval, timeout):
    """Poll *proc* until it reports an exit code, sleeping between polls.

    Raises Timeout once more than *timeout* seconds have elapsed -- even if
    an exit code was collected on that same iteration.  Note that one final
    ``sleep_interval`` still elapses after the process has exited.
    """
    started = time.time()
    status = None
    while status is None:
        status = proc.poll()
        if time.time() - started > timeout:
            raise Timeout()
        time.sleep(sleep_interval)
    return status
def execute(cmd, cmd_timeout, sigterm_timeout, sigkill_timeout):
    """Run *cmd* via the shell; return (returncode, stdout, stderr, duration).

    If the command outlives cmd_timeout it is sent SIGTERM, and if it still
    has not exited after sigterm_timeout it is killed outright, waiting up
    to sigkill_timeout for the kill to take effect.
    """
    start_time = time.time()
    returncode = -1
    stdout = ''
    stderr = ''
    try:
        # NOTE(review): shell=True with a joined string subjects cmd to shell
        # interpretation -- confirm callers never pass untrusted input here.
        proc = subprocess.Popen(' '.join(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    except Exception:
        print >> sys.stderr, "Failed to execute %s" % (repr(cmd))
        raise
    try:
        returncode = poll_proc(proc, 1, cmd_timeout)
        stdout, stderr = proc.communicate()
    except Exception:
        # Command (or polling) timed out: escalate TERM -> KILL.
        try:
            proc.terminate()
            try:
                returncode = poll_proc(proc, 1, sigterm_timeout)
                print >> sys.stderr, "SIGTERM"
            except Timeout:
                proc.kill()
                returncode = poll_proc(proc, 1, sigkill_timeout)
                print >> sys.stderr, "SIGKILL"
        except OSError, e:
            # Ignore OSError 3: no process found.
            if e.errno != 3:
                raise
    duration = time.time() - start_time
    return returncode, stdout, stderr, duration
def main():
    """Wrap a shell command and report its outcome to Datadog as an event."""
    parser = OptionParser()
    parser.add_option('-n', '--name', action='store', type='string')
    parser.add_option('-k', '--api_key', action='store', type='string')
    parser.add_option('-m', '--submit_mode', action='store', type='choice', default='errors', choices=['errors', 'all'])
    parser.add_option('-t', '--timeout', action='store', type='int', default=60*60*24)
    parser.add_option('--sigterm_timeout', action='store', type='int', default=60*2)
    parser.add_option('--sigkill_timeout', action='store', type='int', default=60)
    options, args = parser.parse_args()
    dog.api_key = options.api_key
    # Re-split the positionals so quoted multi-word arguments become
    # individual command tokens.
    cmd = []
    for part in args:
        cmd.extend(part.split(' '))
    returncode, stdout, stderr, duration = execute(cmd, options.timeout,
        options.sigterm_timeout, options.sigkill_timeout)
    host = get_ec2_instance_id()
    if returncode == 0:
        alert_type = 'success'
        event_title = '[%s] %s succeeded in %.2fs' % (host, options.name,
            duration)
    else:
        alert_type = 'error'
        event_title = '[%s] %s failed in %.2fs' % (host, options.name,
            duration)
    # Event body uses Datadog's %%% markdown fences.
    # NOTE(review): 'commmand' below is a typo, but it is a runtime string,
    # so it is deliberately left unchanged here.
    event_body = ['%%%\n',
        'commmand:\n```\n', ' '.join(cmd), '\n```\n',
        'exit code: %s\n\n' % returncode,
        ]
    if stdout:
        event_body.extend(['stdout:\n```\n', stdout, '\n```\n'])
    if stderr:
        event_body.extend(['stderr:\n```\n', stderr, '\n```\n'])
    event_body.append('%%%\n')
    event_body = ''.join(event_body)
    event = {
        'alert_type': alert_type,
        'aggregation_key': options.name,
        'host': host,
    }
    # Mirror the wrapped command's output on our own stdout/stderr.
    print >> sys.stderr, stderr.strip()
    print >> sys.stdout, stdout.strip()
    # Submit either always ('all') or only on failure ('errors', default).
    if options.submit_mode == 'all' or returncode != 0:
        dog.event(event_title, event_body, **event)
    sys.exit(returncode)
if __name__ == '__main__':
main()
| bsd-3-clause | Python | |
1f92af62d1a58e496c2ce4251676fca3b571e8f1 | Add missing specification model tests | ismailsunni/healthsites,ismailsunni/healthsites,ismailsunni/healthsites,ismailsunni/healthsites | django_project/localities/tests/test_model_Specification.py | django_project/localities/tests/test_model_Specification.py | # -*- coding: utf-8 -*-
from django.test import TestCase
from django.db import IntegrityError
from .model_factories import (
SpecificationF,
DomainF,
AttributeF
)
class TestModelSpecification(TestCase):
    """Model-level tests for Specification."""
    def test_model_repr(self):
        # unicode() of a specification combines the domain name with the
        # slugified attribute key ('An attribute' -> 'an_attribute').
        dom = DomainF.create(id=1, name='A domain')
        attr = AttributeF.create(key='An attribute')
        spec = SpecificationF.create(domain=dom, attribute=attr)
        self.assertEqual(unicode(spec), 'A domain an_attribute')
    def test_model_uniqueness(self):
        # A (domain, attribute) pair may only have one specification row.
        dom = DomainF.create(id=1)
        attr = AttributeF.create(id=1, key='An attribute')
        SpecificationF.create(domain=dom, attribute=attr)
        self.assertRaises(
            IntegrityError, SpecificationF.create,
            domain=dom, attribute=attr
        )
| bsd-2-clause | Python | |
15298dd59aabd817b3b160910b423d3448c9e189 | Test for overriding __import__. | pfalcon/micropython,pfalcon/micropython,pfalcon/micropython,pfalcon/micropython,pfalcon/micropython | tests/import/import_override.py | tests/import/import_override.py | import import1b
assert import1b.var == 123
import builtins
org_import = builtins.__import__
def my_import(*args):
    """Replacement for builtins.__import__ that logs (5-argument) calls."""
    # MicroPython currently doesn't pass globals/locals, so don't print them
    # CPython3.5 and lower for "from pkg.mod import foo" appear to call
    # __import__ twice - once with 5 args, and once with 1 (for "pkg")
    # CPython3.5 and MicroPython doesn't have such an artifact. So, to make
    # test pass on CPython3.5-, just don't print calls with less than 5 args
    if len(args) == 5:
        print("overriden import:", args[0], args[3], args[4])
    return org_import(*args)
try:
builtins.__import__ = my_import
except AttributeError:
print("SKIP")
raise SystemExit
# __import__ is called unconditionally on import, even if module is already
# imported (actually, runtime doesn't know or care if module is already
# imported, sys.modules caching is completely on the level of __import__
# itself).
import import1b
print(import1b.var)
from pkg.mod import foo
| mit | Python | |
02bf100a05ed6267ab3fb618c52150fc2d4884f2 | Add some basic tests around contact parsing | Eyepea/aiosip,sangoma/aiosip | tests/test_contact_parsing.py | tests/test_contact_parsing.py | import aiosip
def test_simple_header():
    """A bare <sip:...> contact parses with no display name and no params."""
    header = aiosip.Contact.from_header('<sip:pytest@127.0.0.1:7000>')
    assert not header['name']
    assert dict(header['params']) == {}
    assert dict(header['uri']) == {'scheme': 'sip',
                                   'user': 'pytest',
                                   'password': None,
                                   'host': '127.0.0.1',
                                   'port': 7000,
                                   'params': None,
                                   'headers': None}
def test_header_with_name():
    """A quoted display name is captured alongside the parsed URI."""
    header = aiosip.Contact.from_header('"Pytest" <sip:pytest@127.0.0.1:7000>')
    assert header['name'] == "Pytest"
    assert dict(header['params']) == {}
    assert dict(header['uri']) == {'scheme': 'sip',
                                   'user': 'pytest',
                                   'password': None,
                                   'host': '127.0.0.1',
                                   'port': 7000,
                                   'params': None,
                                   'headers': None}
def test_add_tag():
    """add_tag() inserts a 'tag' entry into the contact's params."""
    header = aiosip.Contact.from_header('<sip:pytest@127.0.0.1:7000>')
    assert dict(header['params']) == {}
    header.add_tag()
    assert 'tag' in header['params']
| apache-2.0 | Python | |
f5720f2609bcb19ffca308a3589c8e6171d1f8b7 | Add test cases for removepunctuation | sknorr/suse-doc-style-checker,sknorr/suse-doc-style-checker,sknorr/suse-doc-style-checker | tests/test_removepunctuation.py | tests/test_removepunctuation.py | #
import pytest
from sdsc.textutil import removepunctuation
@pytest.mark.parametrize("end", [True, False])
@pytest.mark.parametrize("start", [True, False])
@pytest.mark.parametrize("data", [
    # 0 - no quotes
    'word',
    # 1 - single quote at the start
    '¸word',
    # 2 - single quote at the end
    'word\'',
    # 3 - single quotes at both ends
    '\'word\'',
    # 4 - double quotes at the start
    "\"word",
    # 5 - double quotes at the end
    'word"',
    # 6 - double quotes at both ends
    '"word"',
])
def test_removepunctuation(data, start, end):
    """Check removepunctuation() against an expected value built from the
    same start/end rules it is documented to apply."""
    result = "word"
    # For the time being, we check with .isalpha() to cover
    # all punctuation. However, "@".isalpha() would return False
    if start and not data[0].isalpha():
        result = data[0] + result
    if end and not data[-1].isalpha():
        result = result + data[-1]
    # Bug fix: the comparison's result was previously discarded, so this
    # test could never fail.  Assert it.
    assert removepunctuation(data, start, end) == result
| lgpl-2.1 | Python | |
e8309903b54598358efc20092760fe933cbd8ce7 | check if a string is a permutation of anohter string | HeyIamJames/CodingInterviewPractice,HeyIamJames/CodingInterviewPractice | CrackingCodingInterview/1.3_string_permutation.py | CrackingCodingInterview/1.3_string_permutation.py | """
check if a string is a permutation of anohter string
"""
#utalize sorted, perhaps check length first to make faster
| mit | Python | |
6dde05fc401ff615b44dc101bfb7775c65535e79 | Create 2.6_circularlinkedlist.py | HeyIamJames/CodingInterviewPractice,HeyIamJames/CodingInterviewPractice | CrackingCodingInterview/2.6_circularlinkedlist.py | CrackingCodingInterview/2.6_circularlinkedlist.py | """
return node at begining of a cricularly linked list
"""
| mit | Python | |
3d64d0be14ea93f53303ead80dcb024c9f8d4b2d | Create save_course_source.py | StepicOrg/Stepic-API | examples/save_course_source.py | examples/save_course_source.py | # Run with Python 3
# Saves all step sources into foldered structure
import os
import json
import requests
import datetime
# Enter parameters below:
# 1. Get your keys at https://stepic.org/oauth2/applications/
# (client type = confidential, authorization grant type = client credentials)
client_id = "..."
client_secret = "..."
api_host = 'https://stepic.org'
course_id = 1
# 2. Get a token
auth = requests.auth.HTTPBasicAuth(client_id, client_secret)
response = requests.post('https://stepic.org/oauth2/token/',
data={'grant_type': 'client_credentials'},
auth=auth)
token = response.json().get('access_token', None)
if not token:
print('Unable to authorize with provided credentials')
exit(1)
# 3. Call API (https://stepic.org/api/docs/) using this token.
def fetch_object(obj_class, obj_id):
    """Fetch a single API object of *obj_class* by id and return it."""
    url = '{}/api/{}s/{}'.format(api_host, obj_class, obj_id)
    auth_headers = {'Authorization': 'Bearer ' + token}
    payload = requests.get(url, headers=auth_headers).json()
    key = '{}s'.format(obj_class)
    return payload[key][0]
def fetch_objects(obj_class, obj_ids):
    """Fetch many API objects by id, requesting them in batches of 30."""
    objs = []
    # Fetch objects by 30 items,
    # so we won't bump into HTTP request length limits
    step_size = 30
    for i in range(0, len(obj_ids), step_size):
        obj_ids_slice = obj_ids[i:i + step_size]
        # The query string repeats ids[]=<id> for each object in the batch.
        api_url = '{}/api/{}s?{}'.format(api_host, obj_class,
                                         '&'.join('ids[]={}'.format(obj_id)
                                                  for obj_id in obj_ids_slice))
        response = requests.get(api_url,
                                headers={'Authorization': 'Bearer ' + token}
                                ).json()
        objs += response['{}s'.format(obj_class)]
    return objs
course = fetch_object('course', course_id)
sections = fetch_objects('section', course['sections'])
# Walk course -> sections -> units -> lessons -> steps and save each
# step source as a JSON ".step" file in a per-course folder tree.
for section in sections:
    unit_ids = section['units']
    units = fetch_objects('unit', unit_ids)
    for unit in units:
        lesson_id = unit['lesson']
        lesson = fetch_object('lesson', lesson_id)
        step_ids = lesson['steps']
        steps = fetch_objects('step', step_ids)
        for step in steps:
            step_source = fetch_object('step-source', step['id'])
            # First three entries are folders, the last is the file name.
            path = [
                '{} {}'.format(str(course['id']).zfill(2), course['title']),
                '{} {}'.format(str(section['position']).zfill(2), section['title']),
                '{} {}'.format(str(unit['position']).zfill(2), lesson['title']),
                '{}_{}_{}.step'.format(lesson['id'], str(step['position']).zfill(2), step['block']['name'])
            ]
            # Create the folder hierarchy; tolerate it already existing.
            # (Previously a bare "except:" silently swallowed *every*
            # error here, not just "directory exists".)
            try:
                os.makedirs(os.path.join(os.curdir, *path[:-1]))
            except OSError:
                pass
            filename = os.path.join(os.curdir, *path)
            data = {
                'block': step_source['block'],
                'id': str(step['id']),
                'time': datetime.datetime.now().isoformat()
            }
            # Context manager guarantees the file is closed even on errors.
            with open(filename, 'w') as f:
                f.write(json.dumps(data))
            print(filename)
| mit | Python | |
cb2deafae258625f0c4ec8bb68713b391129a27c | add migration of help text changes | bruecksen/isimip,bruecksen/isimip,bruecksen/isimip,bruecksen/isimip | isi_mip/climatemodels/migrations/0085_auto_20180215_1105.py | isi_mip/climatemodels/migrations/0085_auto_20180215_1105.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-02-15 10:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('climatemodels', '0084_inputdata_protocol_relation'),
]
operations = [
migrations.AlterField(
model_name='impactmodel',
name='version',
field=models.CharField(blank=True, help_text='The model version with which these simulations were run. Please indicate if the model version used for ISIMIP2b can be evaluated based on comparison of the ISIMIP2a runs with observed impacts.', max_length=500, null=True, verbose_name='Model version'),
),
migrations.AlterField(
model_name='inputdata',
name='protocol_relation',
field=models.CharField(choices=[('P', 'Protocol'), ('S', 'Supplementary')], default='P', max_length=1),
),
migrations.AlterField(
model_name='inputdata',
name='variables',
field=models.ManyToManyField(blank=True, help_text='The variables are filtered based on the data type. To see variables of a different data type, please change and save data type first.', to='climatemodels.ClimateVariable'),
),
migrations.AlterField(
model_name='outputdata',
name='model',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='climatemodels.ImpactModel'),
),
]
| mit | Python | |
32a7839072f073268f1a90b3521847e59b8ed522 | add logging03.py | devlights/try-python | trypython/stdlib/logging03.py | trypython/stdlib/logging03.py | """
logging モジュールのサンプルです。
最も基本的な使い方について (フォーマッタの指定)
"""
import logging
from trypython.common.commoncls import SampleBase
class Sample(SampleBase):
    def exec(self):
        """Run the sample."""
        # -----------------------------------------------------------------------------------
        # The logging module is the standard library's logging facility,
        # comparable to log4j / log4net in other languages.  Official docs:
        #
        # https://docs.python.jp/3/library/logging.html
        #
        # The module is very feature rich, so two tutorials are provided:
        #
        # basic:    https://docs.python.jp/3/howto/logging.html#logging-basic-tutorial
        # advanced: https://docs.python.jp/3/howto/logging.html#logging-advanced-tutorial
        #
        # -----------------------------------------------------------------------------------
        # This sample again uses logging.basicConfig(), this time passing
        # the ``format`` keyword to change the layout of emitted records.
        # The attributes usable inside the format string are listed at:
        #
        # https://docs.python.jp/3/library/logging.html#logrecord-attributes
        #
        # Additional formatting options that can be applied are described at:
        #
        # https://docs.python.jp/3/library/string.html#formatstrings
        #
        # For example, to left-align a field within 8 characters write:
        #
        # %(levelname)-8s
        #
        # and to right-align it within 8 characters write:
        #
        # %(levelname)8s
        #
        # -----------------------------------------------------------------------------------
        fmt = '[%(asctime)s][%(levelname)-8s] %(name)s %(filename)s:%(funcName)s:%(lineno)d | %(message)s'
        logging.basicConfig(level=logging.WARNING, format=fmt)
        logger = logging.getLogger(__name__)
        # Emit one record per level (only WARNING and above will appear).
        logger.debug('debug')
        logger.info('info')
        logger.warning('warn')
        logger.error('error')
        logger.critical('critical')
        # Shut logging down.
        logging.shutdown()
def go():
    """Instantiate the sample and run it."""
    obj = Sample()
    obj.exec()
if __name__ == '__main__':
go()
| mit | Python | |
5fa3fc6ba78c3e6cf12a25bddb835e9d885bcbd3 | Create 0035_auto_20190712_2015.py | BirkbeckCTP/janeway,BirkbeckCTP/janeway,BirkbeckCTP/janeway,BirkbeckCTP/janeway | src/submission/migrations/0035_auto_20190712_2015.py | src/submission/migrations/0035_auto_20190712_2015.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.21 on 2019-07-12 19:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('submission', '0034_auto_20190416_1009'),
]
operations = [
migrations.AlterField(
model_name='article',
name='abstract',
field=models.TextField(blank=True, help_text='Please avoid pasting content from word processors as they can add unwanted styling to the abstract. You can retype the abstract here or copy and paste it into notepad/a plain text editor before pasting here.', null=True),
),
migrations.AlterField(
model_name='article',
name='stage',
field=models.CharField(choices=[('Unsubmitted', 'Unsubmitted'), ('Unassigned', 'Unassigned'), ('Assigned', 'Assigned to Editor'), ('Under Review', 'Peer Review'), ('Under Revision', 'Revision'), ('Rejected', 'Rejected'), ('Accepted', 'Accepted'), ('Editor Copyediting', 'Editor Copyediting'), ('Author Copyediting', 'Author Copyediting'), ('Final Copyediting', 'Final Copyediting'), ('Typesetting', 'Typesetting'), ('Proofing', 'Proofing'), ('pre_publication', 'Pre Publication'), ('Published', 'Published'), ('preprint_review', 'Preprint Review'), ('preprint_published', 'Preprint Published')], default='Unsubmitted', max_length=200),
),
]
| agpl-3.0 | Python | |
e0229179b01805ca7f7e23d3094737a4f366e162 | Add missing files for d8af78447f286ad07ad0736d4202e0becd0dd319 | devunt/hydrocarbon,devunt/hydrocarbon,devunt/hydrocarbon | board/migrations/0001_initial.py | board/migrations/0001_initial.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the UserProfile model."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
                ('nick', models.CharField(max_length=16)),
                ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| mit | Python | |
f8c79535d52384a4891ad56125033d69938d987d | Create get_url.py | a67878813/script,a67878813/script | get_url.py | get_url.py |
# coding: utf-8
# In[53]:
# Fetch the paginated video index and cache the raw HTML locally.
import requests
import re
import os
# sys was imported for encoding-conversion helpers in the notebook original.
import sys
# hea is a headers dict carrying a browser User-Agent so the target site
# treats this script as a regular browser rather than a crawler; the value
# was copied from a real request's headers (browser dev tools).
hea = {'User-Agent':'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.118 Safari/537.36'}
html_tempfile = 'temphtml.txt'
# Start from a clean cache; the previous unconditional os.remove() crashed
# on the first run, when the temp file did not exist yet.
if os.path.exists(html_tempfile):
    os.remove(html_tempfile)
for i in range(30,55):
    print(i,"..")
    url_request = 'http://ecchi.iwara.tv/videos?page=' + str(i)
    html = requests.get(url_request,headers = hea)
    html.encoding = 'utf-8'  # decode as UTF-8 so non-ASCII text is not garbled
    with open(html_tempfile,"a") as t:
        # ("t.close" without parentheses was a no-op and is redundant
        # inside a with-block; dropped.)
        t.write(html.text + "\n")
# In[80]:
# Parse the cached pages and extract the per-video links.
with open(html_tempfile,"r") as t:
    a = t.read()
    print(type(a))
print(type(a))
res = r'<a\ href="\/videos\/[^\s]+">'
m_tr = re.findall(res,a,re.S|re.M)
# De-duplicate: the same video link appears on several index pages.
endset = set(m_tr)
file = 'endlist.txt'
with open(file,"w") as f:
    for line in endset:
        # Strip the '<a href="' prefix and the '">' suffix, keeping the path.
        temp = line[9:-2]
        front = 'http://ecchi.iwara.tv'
        end = '\n'
        print(temp)
        f.write(front + temp + end)
# In[ ]:
# In[ ]:
| apache-2.0 | Python | |
2a502236de5c28d4f4e6626317565c7bb60ebb13 | Create NumberofDigitOne_001.py | cc13ny/algo,Chasego/codi,cc13ny/Allin,Chasego/codirit,cc13ny/Allin,cc13ny/Allin,Chasego/cod,Chasego/codi,Chasego/cod,Chasego/codirit,Chasego/cod,Chasego/codirit,cc13ny/algo,Chasego/cod,cc13ny/Allin,cc13ny/algo,Chasego/codirit,Chasego/codirit,Chasego/codi,cc13ny/algo,Chasego/codi,Chasego/cod,Chasego/codi,cc13ny/Allin,cc13ny/algo | leetcode/233-Number-of-Digit-One/NumberofDigitOne_001.py | leetcode/233-Number-of-Digit-One/NumberofDigitOne_001.py | class Solution:
# @param {integer} n
# @return {integer}
def countDigitOne(self, n):
res, d = 0, 10
while 10 * n >= d:
t = d / 10
r = n % d
res += n / d * t
if t - 1 < r < 2 * t - 1:
res += r - t + 1
elif 2 * t - 1 <= r:
res += t
d *= 10
return res
| mit | Python | |
052dbe05c0e1d3e2821857a035e469be2a1055ae | Add "what is my purpose in life" plugin | ratchetrobotics/espresso | plugins/pass_the_butter.py | plugins/pass_the_butter.py | from espresso.main import robot
@robot.respond(r"(?i)pass the butter")
def pass_the_butter(res):
    # Reply directly to the asking user with the butter-robot line.
    res.reply(res.msg.user, "What is my purpose in life?")
@robot.respond(r"(?i)you pass butter")
def you_pass_butter(res):
    # Send (not reply) the resigned punchline to the channel.
    res.send("Oh my god.")
| bsd-3-clause | Python | |
84a2e9db13b49d8afd1c1bcf5ec5ce9b92c14046 | Add a snippet. | jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets | python/pyside/pyside6/widget_QSqlTableModel_sqlite_from_file_with_sort_and_filter_plus_add_and_remove_rows.py | python/pyside/pyside6/widget_QSqlTableModel_sqlite_from_file_with_sort_and_filter_plus_add_and_remove_rows.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Ref: https://doc.qt.io/qtforpython/PySide6/QtSql/QSqlTableModel.html?highlight=qsqltablemodel
import sys
import sqlite3
from PySide6 import QtCore, QtWidgets
from PySide6.QtCore import Qt, QSortFilterProxyModel, QModelIndex
from PySide6.QtWidgets import QApplication, QWidget, QTableView, QLineEdit, QVBoxLayout, QAbstractItemView
from PySide6.QtGui import QAction
from PySide6.QtSql import QSqlDatabase, QSqlQuery, QSqlTableModel
# INIT THE DATABASE #############################
con = sqlite3.connect("employee.db")
cur = con.cursor()
cur.execute("DROP TABLE employee")
cur.execute("CREATE TABLE employee (id INTEGER PRIMARY KEY AUTOINCREMENT, first_name TEXT, last_name TEXT)")
params_list = [
("Jean", "Dupont"),
("Paul", "Dupond"),
("Jeanne", "Durand"),
("Anne", "Dupuit"),
]
cur.executemany("INSERT INTO employee (first_name, last_name) VALUES(?, ?)", params_list)
con.commit()
con.close()
# OPEN THE DATABASE #############################
db = QSqlDatabase.addDatabase("QSQLITE")
db.setDatabaseName("./employee.db")
assert db.open()
#################################################
app = QApplication(sys.argv)
window = QWidget()
# Make widgets ##############
edit = QLineEdit()
table_view = QTableView()
edit.setPlaceholderText("Filter text (on col. 1)")
# Set the layout ############
vbox = QVBoxLayout()
vbox.addWidget(edit)
vbox.addWidget(table_view)
window.setLayout(vbox)
#############################
model = QSqlTableModel()
model.setTable("employee")
#model.setEditStrategy(QSqlTableModel.OnManualSubmit)
model.select()
model.setHeaderData(0, Qt.Horizontal, "First Name")
model.setHeaderData(1, Qt.Horizontal, "Last Name")
table_view.setModel(model)
table_view.setSortingEnabled(True)
table_view.setSelectionBehavior(QAbstractItemView.SelectRows) # Select the full row when a cell is selected (See http://doc.qt.io/qt-5/qabstractitemview.html#selectionBehavior-prop )
table_view.hideColumn(0) # don't show the ID
# Set LineEdit slot #########################
def filter_callback():
    # Apply the line edit's text as a SQL LIKE filter on first_name;
    # empty text clears the filter.
    # NOTE(review): filter_str is interpolated into the filter string
    # unescaped -- fine for a local demo, confirm before reusing.
    filter_str = edit.text()
    if filter_str == '':
        model.setFilter("")
    else:
        model.setFilter("first_name LIKE '%{}%'".format(filter_str))
    print(filter_str)
edit.textChanged.connect(filter_callback)
#############################
def add_row_callback():
    # See https://doc.qt.io/qtforpython/overviews/sql-model.html#using-the-sql-model-classes
    # Insert an empty row at the top and fill the name columns with
    # placeholders; submitAll() writes the new record to the database.
    row = 0
    model.insertRows(row, 1)
    #model.setData(model.index(row, 0), 1013)
    model.setData(model.index(row, 1), "n/a")
    model.setData(model.index(row, 2), "n/a")
    model.submitAll()
    #model.select()
def remove_row_callback():
    # See https://doc.qt.io/qt-5/qsqltablemodel.html#removeRows
    # See https://doc.qt.io/qtforpython/overviews/sql-model.html#using-the-sql-model-classes
    # See http://doc.qt.io/qt-5/model-view-programming.html#handling-selections-in-item-views
    selection_proxy_index_list = table_view.selectionModel().selectedRows()
    selected_row_list = [source_index.row() for source_index in selection_proxy_index_list]
    # Iterate bottom-up so earlier removals don't shift the remaining indices.
    for row_index in sorted(selected_row_list, reverse=True):
        # Remove rows one by one to allow the removal of non-contiguously selected rows (e.g. "rows 0, 2 and 3")
        success = model.removeRow(row_index)
        if not success:
            raise Exception("Unknown error...") # TODO
    model.submitAll() # When you’re finished changing a record, you should always call submitAll() to ensure that the changes are written to the database
    model.select()
# Add row action
add_action = QAction(table_view)
add_action.setShortcut(Qt.CTRL | Qt.Key_N)
add_action.triggered.connect(add_row_callback)
table_view.addAction(add_action)
# Delete action
del_action = QAction(table_view)
del_action.setShortcut(Qt.Key_Delete)
del_action.triggered.connect(remove_row_callback)
table_view.addAction(del_action)
#############################
window.show()
# The mainloop of the application. The event handling starts from this point.
exit_code = app.exec()
# The sys.exit() method ensures a clean exit.
# The environment will be informed, how the application ended.
sys.exit(exit_code)
| mit | Python | |
5b57686868b595fb4e7b431822fe4c7bf2de6cfb | Add unittests for title handling methods | Commonists/MassUploadLibrary | test/test_uploadbot.py | test/test_uploadbot.py | #!/usr/bin/env python
# -*- coding: latin-1 -*-
"""Unit tests."""
import unittest
from uploadlibrary.UploadBot import _cut_title
class TestUploadBot(unittest.TestCase):
    """Testing UploadBot methods."""
    def test_cut_title_witout_cutting(self):
        """Test _cut_title() without cutting"""
        # (prefix, title, suffix) triples short enough to fit MAX_LENGTH:
        # the title is kept intact.
        inputs = [("", "ABC", ""),
                  ("", "ABC", " 123456789"),
                  ("1234 ", "ABC", ""),
                  ("1234 ", "ABC", " 123456789")]
        outputs = [_cut_title(*x, MAX_LENGTH=25) for x in inputs]
        expected_results = ['ABC',
                            'ABC 123456789',
                            '1234 ABC',
                            '1234 ABC 123456789']
        self.assertListEqual(outputs, expected_results)
    def test_cut_title_with_cutting(self):
        """Test _cut_title() with cutting."""
        # Titles too long for MAX_LENGTH: the middle part is shortened
        # and terminated with '...' while prefix and suffix are preserved.
        inputs = [("1234 ", "ABC DEF G H", " 123456789"),
                  ("1234 ", "ABC DE FG H", " 123456789"),
                  ("1234 ", "ABC D E FG H", " 123456789")]
        outputs = [_cut_title(*x, MAX_LENGTH=25) for x in inputs]
        expected_results = ['1234 ABC DEF... 123456789',
                            '1234 ABC DE... 123456789',
                            '1234 ABC D E... 123456789']
        self.assertListEqual(outputs, expected_results)
b15c7c044b0c514285bcb8c29b7bcfc8cf777c8b | Add tests for the signals | educreations/django-ormcache | ormcache/tests/test_signals.py | ormcache/tests/test_signals.py | from django.core.cache import cache
from django.test import SimpleTestCase
from ormcache.signals import cache_hit, cache_missed, cache_invalidated
from ormcache.tests.testapp.models import CachedDummyModel
class SignalsTestCase(SimpleTestCase):
    """The ormcache signals must fire on cache hit, miss and invalidation."""
    def setUp(self):
        self.signal_called = False
        self.instance_pk = CachedDummyModel.objects.create().pk
        # Start every test with a cold cache.
        cache.clear()
    def _signal_callback(self, sender, signal):
        # Connected as the signal receiver in the tests below; just records
        # that the signal fired.
        self.signal_called = True
    def test_cache_hit_signal(self):
        cache_hit.connect(self._signal_callback)
        CachedDummyModel.objects.get(pk=self.instance_pk)  # miss
        self.assertFalse(self.signal_called)
        CachedDummyModel.objects.get(pk=self.instance_pk)  # hit
        self.assertTrue(self.signal_called)
    def test_cache_missed_signal(self):
        cache_missed.connect(self._signal_callback)
        CachedDummyModel.objects.get(pk=self.instance_pk)  # miss
        self.assertTrue(self.signal_called)
    def test_cache_invalidated_signal(self):
        cache_invalidated.connect(self._signal_callback)
        instance = CachedDummyModel.objects.get(pk=self.instance_pk)  # miss
        self.assertFalse(self.signal_called)
        instance.title = "hello"
        instance.save()  # invalidate
        self.assertTrue(self.signal_called)
| mit | Python | |
f07cdf5bd22dd352122d679a6e8c4cc213aad013 | Create multiarm_selector.py | shauryashahi/final-year-project,shauryashahi/final-year-project | multiarm_selector.py | multiarm_selector.py | from __future__ import division
import random
class MultiarmSelector(object):
def __init__(self):
self.versions_served = []
self.clicks = 0
self.missed = 0
self.success_count = {
"A": 0,
"B": 0
}
self.total_count = {
"A": 0,
"B": 0
}
def handle_response_from_new_user(self, user_data):
selection, not_selected = self._get_selection()
self.versions_served.append(selection)
self._update_success_and_total(selection, user_data)
if user_data[selection]:
self.clicks += 1
return
if user_data[not_selected]:
self.missed += 1
return
def prepare_report(self):
return self.clicks, self.missed
def versions_served(self):
return self.versions_served
def did_give_correct_answer(self):
"We are assuming for test that B is always better than A"
expected_reward_A = self.success_count["A"] / self.total_count["A"]
expected_reward_B = self.success_count["B"] / self.total_count["B"]
if expected_reward_B > expected_reward_A:
return 1
else:
return 0
def _update_success_and_total(self, selection, user_data):
self.total_count[selection] += 1
if user_data[selection]:
self.success_count[selection] += 1
def _get_selection(self):
if random.random() < 0.1:
return self._get_random_selection()
if self.total_count["A"] == 0 or self.total_count["B"] == 0:
return self._get_random_selection()
expected_reward_A = self.success_count["A"] / self.total_count["A"]
expected_reward_B = self.success_count["B"] / self.total_count["B"]
if expected_reward_B > expected_reward_A:
return "B", "A"
else:
return "A", "B"
def _get_random_selection(self):
if random.random() < 0.5:
return "A", "B"
else:
return "B", "A"
| apache-2.0 | Python | |
bf2cc99162389c6b5c18051f01756e17d9d11ce6 | Add a test for rename. | stratis-storage/stratis-cli,stratis-storage/stratis-cli | tests/integration/test_rename.py | tests/integration/test_rename.py | """
Test 'rename'.
"""
import subprocess
import unittest
from ._constants import _CLI
from ._misc import Service
@unittest.skip("Wating for Rename")
class Rename1TestCase(unittest.TestCase):
    """Exercise 'rename' against a pool that does not exist."""

    _MENU = ['rename']
    _POOLNAME = 'deadpool'
    _NEW_POOLNAME = 'livepool'

    def setUp(self):
        """Start the stratisd daemon with the simulator."""
        self._service = Service()
        self._service.setUp()

    def tearDown(self):
        """Stop the stratisd simulator and daemon."""
        self._service.tearDown()

    def testRename(self):
        """Renaming a pool that was never created must fail."""
        command_line = ['python', _CLI] + self._MENU + \
            [self._POOLNAME, self._NEW_POOLNAME]
        try:
            subprocess.check_call(command_line)
            self.fail(
                "Should have failed because %s does not exist." % self._POOLNAME
            )
        except subprocess.CalledProcessError:
            pass
| apache-2.0 | Python | |
4d661b0fcb6f4b130370c010d16a2afec2449456 | Create mergesort.py | ueg1990/aids | aids/sorting_and_searching/mergesort.py | aids/sorting_and_searching/mergesort.py | '''
In this module, we implement merge sort
Time complexity: O(n * log n)
'''
def mergesort(arr):
    """Return a new list with the elements of *arr* sorted ascending.

    Classic top-down merge sort: O(n log n) time, O(n) extra space,
    stable. The input sequence is not modified.
    """
    if len(arr) <= 1:
        return list(arr)
    mid = len(arr) // 2
    return _merge(mergesort(arr[:mid]), mergesort(arr[mid:]))


def _merge(left, right):
    """Merge two already-sorted lists into one sorted list (stable)."""
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        # ``<=`` keeps equal elements from *left* first, preserving stability.
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
| mit | Python | |
def read_first_line():
    # First input line holds the number of test cases.
    return int(input())
def read_cases(number_of_cases):
    """Read *number_of_cases* stdin lines; return parsed value lines.

    Input alternates a length line with a values line, so only the
    odd-indexed lines (the values) are parsed into lists of ints.
    """
    cases = []
    for i in range(number_of_cases):
        line = input()
        if i % 2 == 1:
            case = [int(string) for string in line.strip().split(' ')]
            cases.append(case)
    return cases
def updateHistory(index, number, history):
    """Record that *number* appeared at *index*, tracking the smallest
    gap between consecutive occurrences of the same number."""
    entry = history.get(number)
    if entry is None:
        # First sighting: remember where, with no gap yet.
        history[number] = {"latestIndex": index, "minDifference": None}
        return
    gap = index - entry["latestIndex"]
    best = entry["minDifference"]
    if not best or gap < best:
        best = gap
    history[number] = {"latestIndex": index, "minDifference": best}
def solve(case):
    """Length of the shortest dominated subarray of *case*, or -1.

    The shortest such subarray is bounded by the closest pair of equal
    elements, so the answer is the minimum index gap plus one.
    """
    history = {}
    for position, value in enumerate(case):
        updateHistory(position, value, history)
    gaps = [entry["minDifference"] for entry in history.values()
            if entry["minDifference"]]
    if not gaps:
        return -1
    return min(gaps) + 1
if __name__ == "__main__":
test_cases = read_first_line()
lines_per_case = 2
cases = read_cases(test_cases * lines_per_case)
for case in cases:
solution = solve(case)
print(solution)
| mit | Python | |
6a8ff154b8468d61b18d390db9e710fc0b224ac7 | Add Left-Handed toons crawler | datagutten/comics,datagutten/comics,jodal/comics,klette/comics,jodal/comics,klette/comics,datagutten/comics,jodal/comics,datagutten/comics,klette/comics,jodal/comics | comics/comics/lefthandedtoons.py | comics/comics/lefthandedtoons.py |
from comics.aggregator.crawler import CrawlerBase, CrawlerResult
from comics.meta.base import MetaBase
class Meta(MetaBase):
    # Catalogue metadata for the Left-Handed Toons comic.
    name = 'Left-Handed Toons'
    language = 'en'
    url = 'http://www.lefthandedtoons.com/'
    start_date = '2007-01-14'
    rights = 'Justin & Drew'
class Crawler(CrawlerBase):
    """Finds the Left-Handed Toons strip published on a given date."""

    history_capable_days = 12
    schedule = 'Mo,Tu,We,Th,Fr'
    time_zone = -5

    def crawl(self, pub_date):
        feed = self.parse_feed(
            'http://feeds.feedburner.com/lefthandedtoons/awesome')
        for entry in feed.for_date(pub_date):
            image_url = entry.summary.src('img[src*="/toons/"]')
            strip_title = entry.title
            if image_url:
                return CrawlerResult(image_url, strip_title)
| agpl-3.0 | Python | |
59e8fe848da5cfa3874c82776205082764efbe63 | Enable Jenkins Python3 monster for i19 | xia2/i19 | tests/test_python3_regression.py | tests/test_python3_regression.py | from __future__ import absolute_import, division, print_function
def test_no_new_python3_incompatible_code_is_introduced_into_this_module():
    """Regression guard: fail if i19 gains new Python-3-incompatible code."""
    import i19
    import pytest
    import dials.test.python3_regression as py3test
    result = py3test.find_new_python3_incompatible_code(i19)
    if result is None:
        # The helper signals a missing python3 interpreter with None.
        pytest.skip('No python3 interpreter available')
    elif result:
        # A non-empty report lists the offending constructs.
        pytest.fail(result)
| bsd-3-clause | Python | |
0c3f3c444d863ec4acff704efee71a29ab8cdf34 | Add ip_reverse module | synthesio/infra-ovh-ansible-module | plugins/modules/ip_reverse.py | plugins/modules/ip_reverse.py | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
from ansible.module_utils.basic import AnsibleModule
__metaclass__ = type
DOCUMENTATION = '''
---
module: ip_reverse
short_description: Modify reverse on IP
description:
- Modify reverse on IP
author: Synthesio SRE Team
requirements:
- ovh >= 0.5.0
options:
ip:
required: true
description: The ip
reverse:
required: true
description: The reverse to assign
'''
EXAMPLES = '''
synthesio.ovh.ip_reverse:
ip: 192.0.2.1
reverse: host.domain.example.
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible_collections.synthesio.ovh.plugins.module_utils.ovh import ovh_api_connect, ovh_argument_spec
try:
from ovh.exceptions import APIError, ResourceNotFoundError
HAS_OVH = True
except ImportError:
HAS_OVH = False
def run_module():
    """Ansible entry point: ensure the reverse DNS record for an IP.

    Terminates via ``module.exit_json``/``module.fail_json``; idempotent
    when the desired reverse is already set.
    """
    module_args = ovh_argument_spec()
    module_args.update(dict(
        ip=dict(required=True),
        reverse=dict(required=True)
    ))
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True
    )
    client = ovh_api_connect(module)

    ip = module.params['ip']
    reverse = module.params['reverse']

    if module.check_mode:
        # Dry run: report the would-be change without calling the API.
        module.exit_json(msg="Reverse {} to {} succesfully set ! - (dry run mode)".format(ip, reverse), changed=True)

    result = {}
    try:
        # OVH route is /ip/{ip}/reverse/{ipReverse}; for a single address both
        # segments are the IP itself. NOTE(review): confirm this is intended
        # for range allocations as well.
        result = client.get('/ip/%s/reverse/%s' % (ip, ip))
    except ResourceNotFoundError:
        # No reverse record yet; use '' so the comparison below still works.
        result['reverse'] = ''

    if result['reverse'] == reverse:
        # Desired state already present: no change.
        module.exit_json(msg="Reverse {} to {} already set !".format(ip, reverse), changed=False)

    try:
        client.post(
            '/ip/%s/reverse' % ip,
            ipReverse=ip,
            reverse=reverse
        )
        module.exit_json(
            msg="Reverse {} to {} succesfully set !".format(ip, reverse),
            changed=True)
    except APIError as api_error:
        return module.fail_json(msg="Failed to call OVH API: {0}".format(api_error))
def main():
    # Thin wrapper so the module runs under Ansible or directly.
    run_module()


if __name__ == '__main__':
    main()
| mit | Python | |
c208263dcc40078e48f78565b37be7b601f0d817 | Add Python wrappers for the bibtex program. | live-clones/pybtex | pybtex/tests/run_bibtex.py | pybtex/tests/run_bibtex.py | #!/usr/bin/env python
# Copyright (C) 2006, 2007, 2008, 2009 Andrey Golovizin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import re
from os import path
from tempfile import mkdtemp
from shutil import rmtree
from subprocess import Popen, PIPE
from pybtex.database.output import bibtex
from pybtex.database import BibliographyData
from pybtex.core import Person, Entry
writer = bibtex.Writer(encoding='ascii')
def write_aux(filename, citations):
    """Write a minimal LaTeX .aux file citing every key in *citations*."""
    lines = ['\\citation{%s}\n' % citation for citation in citations]
    lines.append('\\bibdata{test}\n')
    lines.append('\\bibstyle{test}\n')
    with open(filename, 'w') as aux_file:
        aux_file.writelines(lines)
def write_bib(filename, database):
    # Serialize the BibliographyData with the module-level ASCII writer.
    writer.write_file(database, filename)
def write_bst(filename, style):
    """Write *style* to *filename*, terminated by a newline."""
    with open(filename, 'w') as bst_file:
        bst_file.write('%s\n' % style)
def run_bibtex(style, database, citations=None):
    """Run the external ``bibtex`` binary over *database* with *style* in a
    temporary directory and return the generated .bbl contents.

    Raises ValueError with bibtex's stdout when the process exits non-zero.
    """
    if citations is None:
        citations = database.entries.keys()
    tmpdir = mkdtemp(prefix='pybtex_test_')
    try:
        write_bib(path.join(tmpdir, 'test.bib'), database)
        write_aux(path.join(tmpdir, 'test.aux'), citations)
        write_bst(path.join(tmpdir, 'test.bst'), style)
        bibtex = Popen(('bibtex', 'test'), cwd=tmpdir, stdout=PIPE, stderr=PIPE)
        stdout, stderr = bibtex.communicate()
        if bibtex.returncode:
            # bibtex writes its diagnostics to stdout; surface them on failure.
            raise ValueError(stdout)
        with open(path.join(tmpdir, 'test.bbl')) as bbl_file:
            result = bbl_file.read()
        return result
    finally:
        # NOTE(review): the stray ``pass`` before rmtree looks like a leftover
        # debugging stub (indentation was lost in this copy) — confirm whether
        # cleanup was meant to be temporarily disabled.
        pass
        rmtree(tmpdir)
def format_name(name, format):
    """Format one *name* through bibtex's ``format.name$`` and return the
    single output line (used as a reference oracle for pybtex)."""
    entry = Entry('article', fields={'name': name, 'format': format})
    database = BibliographyData(entries={'test_entry': entry})
    # Minimal .bst that prints the name field formatted with the given spec.
    bst = """
ENTRY {name format} {} {}
FUNCTION {article}
{
name #1 format format.name$ write$ newline$
}
READ
ITERATE {call.type$}
""".strip()
    # Exactly one line is expected; unpacking asserts that.
    [result] = run_bibtex(bst, database).splitlines()
    return result
def parse_name(name):
    """Split *name* into first/von/last/junior via bibtex itself and build
    a Person from the pieces."""
    space = re.compile('[\s~]+')
    formatted_name = format_name(name, '{ff}|{vv}|{ll}|{jj}')
    # bibtex pads parts with spaces and ties (~); collapse before splitting.
    parts = [space.sub(' ', part.strip()) for part in formatted_name.split('|')]
    first, von, last, junior = parts
    return Person(first=first, prelast=von, last=last, lineage=junior)
def main():
    # Smoke test: show bibtex's formatting and parsing of sample names.
    names = [
        'Vasily Petrovitch Schubin',
        'Johan Peter Maria Mueller',
        r'Charles Louis Xavier Joseph de la Vall{\'e}e Poussin',
    ]
    for name in names:
        # Python 2 print statements: this module predates Python 3 support.
        print format_name(name, '{f.}{ ll~}')
        print unicode(parse_name(name))


if __name__ == '__main__':
    main()
| mit | Python | |
c5ac422ff1e4628ad8ea53e4f1442e6a70bf959f | add first command test | edouard-lopez/rangevoting,guillaumevincent/rangevoting,guillaumevincent/rangevoting,edouard-lopez/rangevoting,edouard-lopez/rangevoting,guillaumevincent/rangevoting | tests/test_commands.py | tests/test_commands.py | import unittest
class CreateRangeVotingCommand():
    """Value object carrying the data needed to create a range voting."""

    def __init__(self, question, choices):
        self.choices = choices
        self.question = question
class CreateRangeVotingCommandTestCase(unittest.TestCase):
    # Unit test for the command value object defined in this module.
    def test_has_choices_and_question(self):
        question = 'Question ?'
        choices = ['a', 'b']
        create_rangevoting_command = CreateRangeVotingCommand(question, choices)
        self.assertEqual(question, create_rangevoting_command.question)
        self.assertEqual(choices, create_rangevoting_command.choices)


if __name__ == '__main__':
    unittest.main()
| mit | Python | |
391e145b6e82aaa87e2ab23cfea53cb7ae98bc2a | Add a work-in-progress parser for the ClientHello message. | Ayrx/tlsenum,Ayrx/tlsenum | tlsenum/parse_hello.py | tlsenum/parse_hello.py | import construct
from tlsenum import hello_constructs
class ClientHello(object):
    """Builder for the protocol-version portion of a TLS ClientHello."""

    # TLS record-layer minor version for each supported version string
    # (the major version is always 3 for SSL 3.0 / TLS 1.0-1.2).
    _MINOR_VERSIONS = {"3.0": 0, "1.0": 1, "1.1": 2, "1.2": 3}

    @property
    def protocol_version(self):
        return self._protocol_version

    @protocol_version.setter
    def protocol_version(self, protocol_version):
        # Bug fix: validation used ``assert``, which is stripped when Python
        # runs with -O; raise ValueError so invalid input always fails.
        if protocol_version not in self._MINOR_VERSIONS:
            raise ValueError(
                "Unsupported protocol version: {0}".format(protocol_version)
            )
        self._protocol_version = protocol_version
        self._protocol_minor = self._MINOR_VERSIONS[protocol_version]

    def build(self):
        """Serialize the protocol version via the construct definitions."""
        return hello_constructs.ProtocolVersion.build(
            construct.Container(major=3, minor=self._protocol_minor)
        )
| mit | Python | |
764bad33b598841333d4d1674bf5667957ada551 | Add a no-op measurement | ondra-novak/chromium.src,patrickm/chromium.src,hgl888/chromium-crosswalk-efl,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,dushu1203/chromium.src,Pluto-tv/chromium-crosswalk,anirudhSK/chromium,chuan9/chromium-crosswalk,littlstar/chromium.src,hgl888/chromium-crosswalk-efl,ChromiumWebApps/chromium,mohamed--abdel-maksoud/chromium.src,bright-sparks/chromium-spacewalk,Pluto-tv/chromium-crosswalk,anirudhSK/chromium,axinging/chromium-crosswalk,M4sse/chromium.src,patrickm/chromium.src,Pluto-tv/chromium-crosswalk,ChromiumWebApps/chromium,Fireblend/chromium-crosswalk,ChromiumWebApps/chromium,ChromiumWebApps/chromium,hgl888/chromium-crosswalk,axinging/chromium-crosswalk,hgl888/chromium-crosswalk-efl,dushu1203/chromium.src,Just-D/chromium-1,chuan9/chromium-crosswalk,Jonekee/chromium.src,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Fireblend/chromium-crosswalk,littlstar/chromium.src,markYoungH/chromium.src,Jonekee/chromium.src,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,fujunwei/chromium-crosswalk,patrickm/chromium.src,ltilve/chromium,hgl888/chromium-crosswalk,patrickm/chromium.src,chuan9/chromium-crosswalk,dednal/chromium.src,M4sse/chromium.src,hgl888/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,krieger-od/nwjs_chromium.src,ltilve/chromium,hgl888/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,Chilledheart/chromium,bright-sparks/chromium-spacewalk,M4sse/chromium.src,krieger-od/nwjs_chromium.src,jaruba/chromium.src,dushu1203/chromium.src,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,M4sse/chromium.src,TheTypoMaster/chromium-crosswalk,ltilve/chromium,M4sse/chromium.src,fujunwei/chromium-crosswalk,jaruba/chromium.src,markYoungH/chromium.src,markYoungH/chromium.src,anirudhSK/chromium,hgl888/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,M4sse/chromium.src,markYoungH/chromiu
m.src,ChromiumWebApps/chromium,Fireblend/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,dednal/chromium.src,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,jaruba/chromium.src,Pluto-tv/chromium-crosswalk,ChromiumWebApps/chromium,fujunwei/chromium-crosswalk,Pluto-tv/chromium-crosswalk,anirudhSK/chromium,littlstar/chromium.src,krieger-od/nwjs_chromium.src,patrickm/chromium.src,krieger-od/nwjs_chromium.src,PeterWangIntel/chromium-crosswalk,bright-sparks/chromium-spacewalk,ltilve/chromium,dednal/chromium.src,ChromiumWebApps/chromium,PeterWangIntel/chromium-crosswalk,anirudhSK/chromium,bright-sparks/chromium-spacewalk,Chilledheart/chromium,littlstar/chromium.src,ChromiumWebApps/chromium,jaruba/chromium.src,ltilve/chromium,Chilledheart/chromium,bright-sparks/chromium-spacewalk,mohamed--abdel-maksoud/chromium.src,bright-sparks/chromium-spacewalk,crosswalk-project/chromium-crosswalk-efl,ondra-novak/chromium.src,ondra-novak/chromium.src,anirudhSK/chromium,crosswalk-project/chromium-crosswalk-efl,dushu1203/chromium.src,mohamed--abdel-maksoud/chromium.src,anirudhSK/chromium,dushu1203/chromium.src,ChromiumWebApps/chromium,ondra-novak/chromium.src,mohamed--abdel-maksoud/chromium.src,crosswalk-project/chromium-crosswalk-efl,Just-D/chromium-1,fujunwei/chromium-crosswalk,Pluto-tv/chromium-crosswalk,patrickm/chromium.src,dednal/chromium.src,littlstar/chromium.src,Jonekee/chromium.src,fujunwei/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,ondra-novak/chromium.src,krieger-od/nwjs_chromium.src,jaruba/chromium.src,krieger-od/nwjs_chromium.src,markYoungH/chromium.src,chuan9/chromium-crosswalk,dednal/chromium.src,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,dushu1203/chromium.src,krieger-od/nwjs_chromium.src,ondra-novak/chromium.src,crosswalk-project/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,hgl888/chromium-crosswalk,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,axinging/chromium-crosswalk
,Pluto-tv/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,krieger-od/nwjs_chromium.src,anirudhSK/chromium,patrickm/chromium.src,TheTypoMaster/chromium-crosswalk,Jonekee/chromium.src,crosswalk-project/chromium-crosswalk-efl,M4sse/chromium.src,fujunwei/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,jaruba/chromium.src,dushu1203/chromium.src,fujunwei/chromium-crosswalk,Chilledheart/chromium,Fireblend/chromium-crosswalk,Jonekee/chromium.src,dushu1203/chromium.src,axinging/chromium-crosswalk,hgl888/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,axinging/chromium-crosswalk,Just-D/chromium-1,anirudhSK/chromium,dushu1203/chromium.src,littlstar/chromium.src,patrickm/chromium.src,jaruba/chromium.src,chuan9/chromium-crosswalk,ltilve/chromium,krieger-od/nwjs_chromium.src,anirudhSK/chromium,hgl888/chromium-crosswalk,markYoungH/chromium.src,axinging/chromium-crosswalk,krieger-od/nwjs_chromium.src,Fireblend/chromium-crosswalk,dushu1203/chromium.src,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,littlstar/chromium.src,ltilve/chromium,krieger-od/nwjs_chromium.src,M4sse/chromium.src,ltilve/chromium,TheTypoMaster/chromium-crosswalk,chuan9/chromium-crosswalk,Just-D/chromium-1,hgl888/chromium-crosswalk,Jonekee/chromium.src,ondra-novak/chromium.src,patrickm/chromium.src,ondra-novak/chromium.src,jaruba/chromium.src,bright-sparks/chromium-spacewalk,ChromiumWebApps/chromium,Jonekee/chromium.src,ltilve/chromium,crosswalk-project/chromium-crosswalk-efl,Jonekee/chromium.src,Jonekee/chromium.src,M4sse/chromium.src,TheTypoMaster/chromium-crosswalk,markYoungH/chromium.src,dednal/chromium.src,markYoungH/chromium.src,Jonekee/chromium.src,M4sse/chromium.src,hgl888/chromium-crosswalk,dednal/chromium.src,Pluto-tv/chromium-crosswalk,jaruba/chromium.src,axinging/chromium-crosswalk,Just-D/chromium-1,mohamed--abdel-maksoud/chromium.src,crosswalk-project/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,markYoungH/chromium.src,anirudhSK/chromium,hgl888/chromium-crosswalk-efl,mark
YoungH/chromium.src,dushu1203/chromium.src,bright-sparks/chromium-spacewalk,Just-D/chromium-1,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Pluto-tv/chromium-crosswalk,ChromiumWebApps/chromium,ondra-novak/chromium.src,Just-D/chromium-1,axinging/chromium-crosswalk,dednal/chromium.src,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,bright-sparks/chromium-spacewalk,dednal/chromium.src,anirudhSK/chromium,PeterWangIntel/chromium-crosswalk,M4sse/chromium.src,Fireblend/chromium-crosswalk,littlstar/chromium.src,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,jaruba/chromium.src,markYoungH/chromium.src,fujunwei/chromium-crosswalk,ChromiumWebApps/chromium,chuan9/chromium-crosswalk,Chilledheart/chromium,mohamed--abdel-maksoud/chromium.src,crosswalk-project/chromium-crosswalk-efl | tools/perf/measurements/no_op.py | tools/perf/measurements/no_op.py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page_measurement
class NoOp(page_measurement.PageMeasurement):
    """Measurement that does no work; applies to pages declaring 'no_op'."""

    def __init__(self):
        super(NoOp, self).__init__('no_op')

    def CanRunForPage(self, page):
        # Only pages that define a 'no_op' action are eligible.
        return hasattr(page, 'no_op')

    def WillRunAction(self, page, tab, action):
        pass

    def DidRunAction(self, page, tab, action):
        pass

    def MeasurePage(self, page, tab, results):
        # Intentionally records no results.
        pass
| bsd-3-clause | Python | |
a704a1964659a45b007e696ed1547b563dcffa4f | create 2.py | wenpingzheng/new_test | 2.py | 2.py | # content
| mit | Python | |
a1db6c4379c787124d7ee825adbcc76d2069a3c6 | Add check to travis to make sure new boards are built, fix #1886 | adafruit/micropython,adafruit/circuitpython,adafruit/circuitpython,adafruit/circuitpython,adafruit/micropython,adafruit/micropython,adafruit/circuitpython,adafruit/circuitpython,adafruit/circuitpython,adafruit/micropython,adafruit/micropython | tools/travis_new_boards_check.py | tools/travis_new_boards_check.py | #! /usr/bin/env python3
import os
import re
import json
import build_board_info
# Get boards in json format
boards_info_json = build_board_info.get_board_mapping()
# print(boards_info_json)

# TODO (Carlos) Find all the boards on the json format

# We need to know the path of the .travis.yml file
base_path = os.path.dirname(__file__)
travis_path = os.path.abspath(os.path.join(base_path, '..', '.travis.yml'))

# Loading board list based on TRAVIS_BOARDS env variable on .travis.yml
travis_boards = []
with open(travis_path, 'r') as travis:
    # Bug fix: the original called travis.readline() inside
    # ``for line in travis``, advancing the file twice per iteration and
    # silently skipping every other line; iterate the file object directly.
    for line in travis:
        if 'TRAVIS_BOARDS' in line:
            print('TRAVIS_BOARDS found')
            print(line)
            # TODO (Carlos) Store the line content
        elif 'addons' in line:
            # We've reached the end of the env: section
            break

# TODO (Carlos) Getting all the boards on TRAVIS_BOARDS using regex matching
# Tranks sommersoft for the pattern
pattern = '(.+)'

# TODO (Carlos) Comparing boards listed in TRAVIS_BOARDS and boards got from get_board_mapping
| mit | Python | |
9a1c9e2cbe7f9b9decbe93d567458b6a6976e420 | complete 14 longest collatz sequence | dawran6/project-euler | 14-longest-collatz-sequence.py | 14-longest-collatz-sequence.py | from functools import lru_cache
def sequence(n):
    """Yield the Collatz sequence starting at *n*, ending with 1.

    Bug fixes: the original tested ``n is not 1`` (object identity, which
    fails once ``n`` becomes a float) and used true division ``n / 2``,
    which turned the terms into floats so ``1.0 is not 1`` held and the
    generator looped forever. Compare with ``!=`` and use floor division.
    """
    while n != 1:
        yield n
        n = 3 * n + 1 if n % 2 else n // 2
    yield n
def next_num(n):
    """Return the Collatz successor of *n*.

    Bug fix: the even branch used true division ``n / 2``, which returns a
    float in Python 3 and pollutes downstream arithmetic (and memoization
    keys) with floats; use floor division to stay in the integers.
    """
    if n % 2:
        return 3 * n + 1
    return n // 2
@lru_cache(None)
def collatz_length(n):
    """Number of terms in the Collatz sequence from *n* down to 1.

    Memoized across calls, so sweeping a range reuses shared suffixes.
    """
    return 1 if n == 1 else 1 + collatz_length(next_num(n))
if __name__ == '__main__':
    # Project Euler 14: starting number below one million with the
    # longest Collatz chain.
    best_start, best_length = 0, 0
    for start in range(1, 1_000_001):
        length = collatz_length(start)
        if length > best_length:
            best_start, best_length = start, length
    print(best_start, best_length)
| mit | Python | |
aa301d8eaa3c3f89154103bce882501164756017 | Implement the ADMIN command | Heufneutje/txircd,ElementalAlchemist/txircd | txircd/modules/rfc/cmd_admin.py | txircd/modules/rfc/cmd_admin.py | from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from zope.interface import implements
irc.RPL_ADMINLOC1 = "257"
irc.RPL_ADMINLOC2 = "258"
class AdminCommand(ModuleData):
    """Implements the IRC ADMIN command, including cross-server queries."""
    implements(IPlugin, IModuleData)

    name = "AdminCommand"
    core = True

    def hookIRCd(self, ircd):
        self.ircd = ircd

    def actions(self):
        # When an ADMIN numeric must reach a user on another server, wrap it
        # in a PUSH via pushMessage instead of delivering locally.
        return [ ("sendremoteusermessage-256", 1, lambda user, *params, **kw: self.pushMessage(user, irc.RPL_ADMINME, *params, **kw)),
            ("sendremoteusermessage-257", 1, lambda user, *params, **kw: self.pushMessage(user, irc.RPL_ADMINLOC1, *params, **kw)),
            ("sendremoteusermessage-258", 1, lambda user, *params, **kw: self.pushMessage(user, irc.RPL_ADMINLOC2, *params, **kw)),
            ("sendremoteusermessage-259", 1, lambda user, *params, **kw: self.pushMessage(user, irc.RPL_ADMINEMAIL, *params, **kw)) ]

    def userCommands(self):
        return [ ("ADMIN", 1, UserAdmin(self.ircd, self.sendAdminData)) ]

    def serverCommands(self):
        return [ ("ADMINREQ", 1, ServerAdmin(self.ircd, self.sendAdminData)) ]

    def sendAdminData(self, user, serverName):
        """Send the RPL_ADMIN* numerics for *serverName* to *user*.

        Each config value falls back to a default when missing or blank.
        """
        user.sendMessage(irc.RPL_ADMINME, serverName, ":Administrative info for {}".format(serverName))
        try:
            adminData = self.ircd.config["admin_server"]
        except KeyError:
            adminData = ""
        if not adminData: # If the line is blank, let's provide a default value
            adminData = "This server has no admins. Anarchy!"
        user.sendMessage(irc.RPL_ADMINLOC1, ":{}".format(adminData))
        try:
            adminData = self.ircd.config["admin_admin"]
        except KeyError:
            adminData = ""
        if not adminData:
            adminData = "Nobody configured the second line of this."
        user.sendMessage(irc.RPL_ADMINLOC2, ":{}".format(adminData))
        try:
            adminEmail = self.ircd.config["admin_email"]
        except KeyError:
            adminEmail = ""
        if not adminEmail:
            adminEmail = "No Admin <anarchy@example.com>"
        user.sendMessage(irc.RPL_ADMINEMAIL, ":{}".format(adminEmail))

    def pushMessage(self, user, numeric, *params, **kw):
        # Relay a numeric to a remote user through their home server (the
        # first three characters of a UUID identify the server).
        server = self.ircd.servers[user.uuid[:3]]
        server.sendMessage("PUSH", user.uuid, "::{} {} {}".format(kw["prefix"], numeric, " ".join(params)), prefix=self.ircd.serverID)
        return True
class UserAdmin(Command):
    """Handles ADMIN issued by a local user, locally or for a remote server."""
    implements(ICommand)

    def __init__(self, ircd, sendFunc):
        self.ircd = ircd
        self.sendFunc = sendFunc

    def parseParams(self, user, params, prefix, tags):
        # No argument (or our own server name) means "this server".
        if not params:
            return {}
        if params[0] == self.ircd.name:
            return {}
        if params[0] not in self.ircd.serverNames:
            user.sendSingleError("AdminServer", irc.ERR_NOSUCHSERVER, params[0], ":No such server")
            return None
        return {
            "server": self.ircd.servers[self.ircd.serverNames[params[0]]]
        }

    def execute(self, user, data):
        if "server" in data:
            # Forward the request to the remote server; the reply flows back
            # through the sendremoteusermessage actions.
            server = data["server"]
            server.sendMessage("ADMINREQ", server.serverID, prefix=user.uuid)
        else:
            self.sendFunc(user, self.ircd.name)
        return True
class ServerAdmin(Command):
    """Handles ADMINREQ arriving from a remote server on a user's behalf."""
    implements(ICommand)

    def __init__(self, ircd, sendFunc):
        self.ircd = ircd
        self.sendFunc = sendFunc

    def parseParams(self, server, params, prefix, tags):
        """Validate an ADMINREQ; *prefix* is the requesting user's UUID."""
        if len(params) != 1:
            return None
        if prefix not in self.ircd.users:
            return None
        if params[0] == self.ircd.serverID:
            return {
                "fromuser": self.ircd.users[prefix]
            }
        if params[0] not in self.ircd.servers:
            return None
        return {
            "fromuser": self.ircd.users[prefix],
            "server": self.ircd.servers[params[0]]
        }

    def execute(self, server, data):
        # Bug fix: the original referenced an undefined name ``user`` here
        # (copy/paste from UserAdmin.execute), raising NameError whenever an
        # ADMINREQ was processed. Use the requesting user from parseParams.
        user = data["fromuser"]
        if "server" in data:
            # Not addressed to us: relay the request toward its target server.
            target = data["server"]
            target.sendMessage("ADMINREQ", target.serverID, prefix=user.uuid)
        else:
            self.sendFunc(user, self.ircd.name)
        return True
adminCmd = AdminCommand() | bsd-3-clause | Python | |
3f5a752a7978c2432ce3106492d771c00a5f1279 | Create geo.py | voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts | geo.py | geo.py | import requests
def example():
    """Reverse-geocode a hard-coded lat/long pair via the Google Maps API
    and return the formatted address of the first result."""
    # grab some lat/long coords from wherever. For this example,
    # I just opened a javascript console in the browser and ran:
    #
    # navigator.geolocation.getCurrentPosition(function(p) {
    # console.log(p);
    # })
    #
    latitude = 35.1330343
    longitude = -90.0625056

    # Did the geocoding request comes from a device with a
    # location sensor? Must be either true or false.
    sensor = 'true'

    # Hit Google's reverse geocoder directly
    # NOTE: I *think* their terms state that you're supposed to
    # use google maps if you use their api for anything.
    base = "http://maps.googleapis.com/maps/api/geocode/json?"
    params = "latlng={lat},{lon}&sensor={sen}".format(
        lat=latitude,
        lon=longitude,
        sen=sensor
    )
    url = "{base}{params}".format(base=base, params=params)
    response = requests.get(url)
    # Bug fix: Response.json is a *method* in requests >= 1.0; accessing it
    # as an attribute returned the bound method, so the subscript failed.
    return response.json()['results'][0]['formatted_address']
| mit | Python | |
30debe34005280517f56795e1f0852dccf3cb7f2 | Add hip module computing hipster rank for a route | kynan/GetLost | hip.py | hip.py | import numpy as np
from numpy import sin, cos, sqrt
import pandas as pd
from sklearn.neighbors import KDTree
# Venue dump loaded once at import time. The lat/lng columns arrive as
# strings in the CSV, so coerce them to floats for the geometry below.
fs_df = pd.read_csv('fs.csv')
fs_df.lat = fs_df.lat.apply(float)
fs_df.lng = fs_df.lng.apply(float)
def get_nearby(start, end, dist_meters=50):
    """Boolean mask over ``fs_df`` rows lying inside a rectangular corridor
    of half-width *dist_meters* around the segment *start* -> *end*.

    *start*/*end* are (lat, lng) pairs in degrees; the corridor is bounded
    by lines parallel and perpendicular to the segment.
    """
    x0, y0 = start
    x1, y1 = end
    dx, dy = x1 - x0, y1 - y0
    # Line through start along the segment (slope m0, intercept c0) and the
    # perpendicular through start (slope m1, intercept c1).
    m0 = dy / dx
    c0 = y0 - m0 * x0
    m1 = - dx / dy
    c1 = (m0 - m1) * x0 + c0
    # Perpendicular through the end point (intercept c2); m0/c0 are rebuilt
    # relative to the end point here.
    m0 = dy / dx
    c0 = y1 - m0 * x1
    m1 = - dx / dy
    c2 = m0 * x1 - m1 * x1 + c0
    theta0 = np.arctan2(dy, dx)
    # Convert metres to degrees (~111 km per degree of latitude).
    t = dist_meters / 1000.0 / 111.0
    _y = fs_df.lng
    _x = fs_df.lat
    # The inequality directions flip with the sign of the segment's slope.
    if m0 > 0:
        idx = (_y > (m0*(_x - t*cos(theta0)) + c0 - t*sin(theta0))) &\
            (_y < (m0*(_x + t*cos(theta0)) + c0 + t*sin(theta0))) &\
            (_y < (m1*(_x - t*sin(theta0)) + c2 + t*cos(theta0))) &\
            (_y > m1*(_x + t*sin(theta0)) + c1 - t*cos(theta0))
    else:
        idx = (_y < (m0*(_x - t*cos(theta0)) + c0 - t*sin(theta0))) &\
            (_y > (m0*(_x + t*cos(theta0)) + c0 + t*sin(theta0))) &\
            (_y < (m1*(_x - t*sin(theta0)) + c2 + t*cos(theta0))) &\
            (_y > m1*(_x + t*sin(theta0)) + c1 - t*cos(theta0))
    return idx
def get_points(start, end, stride):
    """Return (x, y) sample points spaced *stride* apart along the segment
    from *start* to *end* (inclusive of the start point)."""
    x0, y0 = start
    x1, y1 = end
    span_x, span_y = x1 - x0, y1 - y0
    length = sqrt(span_x**2 + span_y**2)
    steps = np.arange(0.0, length, stride)
    theta = np.arctan(span_y / span_x)
    xs = steps * cos(theta) + x0
    ys = steps * sin(theta) + y0
    return zip(xs, ys)
def get_hip_rank(points, sub):
    """Score each point by the check-in counts of its 10 nearest venues in
    *sub*, weighted by inverse squared distance."""
    sub_coords = sub[['lat', 'lng']].values
    sub_scores = sub.checkinsCount.apply(int).values
    kdt = KDTree(sub_coords, metric='euclidean')
    d, i = kdt.query(np.array(points), k=10)
    # 1e-11 is an empirical scale factor — NOTE(review): confirm its origin.
    return (sub_scores[i] / d**2 * 1e-11).sum(axis=1)
def get_ranking_array(coords):
    """Per-point hipster scores along the polyline *coords* plus their sum."""
    indexes = []
    points = []
    for start_coord, end_coord in zip(coords[:-1], coords[1:]):
        indexes.append(get_nearby(start_coord, end_coord, dist_meters=300))
        points.extend(get_points(start_coord, end_coord, 1e-5))
    # Union of the per-segment row masks restricts venues to the corridor.
    # NOTE(review): bare ``reduce`` implies Python 2; Python 3 needs
    # functools.reduce here.
    sub = fs_df[reduce(lambda a, b: a | b, indexes)]
    hip_rank = get_hip_rank(points, sub)
    return hip_rank, hip_rank.sum()
| apache-2.0 | Python | |
1110311ef90a45497af4cdfb8558d1b05fc799d0 | add a script to run the server | DevMine/devmine-core | run.py | run.py | #!/usr/bin/env python
# coding: utf-8
import bottle
from logging import info
from devmine import Devmine
from devmine.config import (
environment,
settings
)
def main():
    """Resolve server settings and launch the Devmine bottle application."""
    info('Devmine server started')

    # Explicit settings win; fall back to environment defaults when unset.
    db_url = settings.db_url or environment.db_url
    server = settings.server or environment.server

    info('\nServer settings:\n'
         'server = %s\n'
         'host = %s\n'
         'port = %s\n'
         'db_url = %s\n'
         'db_echo = %s\n'
         'reloader = %s\n'
         'debug = %s\n',
         server,
         settings.host,
         settings.port,
         db_url,
         environment.db_echo,
         environment.reloader,
         environment.debug)

    devmine_app = Devmine(
        server=server,
        host=settings.host,
        port=settings.port,
        db_url=db_url,
        db_echo=environment.db_echo,
        reloader=environment.reloader,
        debug=environment.debug
    )

    bottle.run(
        devmine_app.app,
        server=devmine_app.server_type,
        reloader=devmine_app.reloader,
        host=devmine_app.host,
        port=devmine_app.port
    )


if __name__ == "__main__":
    main()
| bsd-3-clause | Python | |
e007695e38b2207c9229856c95f37a12e740cb91 | Add view demographics tests | renalreg/radar,renalreg/radar,renalreg/radar,renalreg/radar | radar/tests/permissions/test_can_user_view_demographics.py | radar/tests/permissions/test_can_user_view_demographics.py | from radar.permissions import can_user_view_demographics
from radar.roles import COHORT_RESEARCHER, COHORT_SENIOR_RESEARCHER, ORGANISATION_CLINICIAN
from helpers.permissions import make_cohorts, make_user, make_patient, make_organisations
def test_admin():
patient = make_patient()
user = make_user()
assert not can_user_view_demographics(user, patient)
user.is_admin = True
assert can_user_view_demographics(user, patient)
def test_intersecting_cohorts_with_view_demographics_permission():
cohorts = make_cohorts(3)
cohort_a, cohort_b, cohort_c = cohorts
patient = make_patient(cohorts=cohorts)
user = make_user(cohorts=[cohort_a, [cohort_b, COHORT_SENIOR_RESEARCHER], cohort_c])
assert can_user_view_demographics(user, patient)
def test_intersecting_cohorts_without_view_demographics_permission():
cohort_a, cohort_b = make_cohorts(2)
patient = make_patient(cohorts=[cohort_a])
user = make_user(cohorts=[[cohort_b, COHORT_RESEARCHER]])
assert not can_user_view_demographics(user, patient)
def test_disjoint_cohorts_with_view_demographics_permission():
cohort_a, cohort_b = make_cohorts(2)
patient = make_patient(cohorts=[cohort_a])
user = make_user(cohorts=[[cohort_b, COHORT_SENIOR_RESEARCHER]])
assert not can_user_view_demographics(user, patient)
def test_intersecting_organisations_with_view_demographics_permission():
organisations = make_organisations(3)
organisation_a, organisation_b, organisation_c = organisations
patient = make_patient(organisations=organisations)
user = make_user(organisations=[organisation_a, [organisation_b, ORGANISATION_CLINICIAN], organisation_c])
assert can_user_view_demographics(user, patient)
def test_intersecting_organisations_without_view_demographics_permission():
organisations = make_organisations(3)
patient = make_patient(organisations=organisations)
user = make_user(organisations=organisations)
assert not can_user_view_demographics(user, patient)
def test_disjoint_organisations_with_view_demographics_permission():
    """A clinician role in an unrelated organisation gives no access."""
    organisation_a, organisation_b = make_organisations(2)
    patient = make_patient(organisations=[organisation_a])
    # Bug fix: the clinician membership is an organisation, so it must be
    # passed via ``organisations=`` -- it was previously passed as ``cohorts=``,
    # which made the test exercise the wrong axis entirely.
    user = make_user(organisations=[[organisation_b, ORGANISATION_CLINICIAN]])
    assert not can_user_view_demographics(user, patient)
| agpl-3.0 | Python | |
a46f2b8e42852b3c51d31c9402328c82d5d1f78c | Create new package. (#8144) | mfherbst/spack,mfherbst/spack,tmerrick1/spack,iulian787/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,mfherbst/spack,mfherbst/spack,krafczyk/spack,matthiasdiener/spack,matthiasdiener/spack,iulian787/spack,iulian787/spack,tmerrick1/spack,matthiasdiener/spack,tmerrick1/spack,krafczyk/spack,krafczyk/spack,iulian787/spack,krafczyk/spack,matthiasdiener/spack,tmerrick1/spack,matthiasdiener/spack,mfherbst/spack,LLNL/spack,tmerrick1/spack,krafczyk/spack,iulian787/spack | var/spack/repos/builtin/packages/swap-assembler/package.py | var/spack/repos/builtin/packages/swap-assembler/package.py | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class SwapAssembler(MakefilePackage):
    """A scalable and fully parallelized genome assembler."""

    homepage = "https://sourceforge.net/projects/swapassembler/"
    url = "https://sourceforge.net/projects/swapassembler/files/SWAP_Assembler-0.4.tar.bz2/download"

    version('0.4', '944f2aeae4f451be81160bb625304fc3')

    depends_on('mpich')

    def edit(self, spec, prefix):
        """Add -pthread to the compiler invocation in the stock Makefile."""
        mk = FileFilter('Makefile')
        mk.filter('$(CC) -O2', '$(CC) -pthread -O2', string=True)

    def install(self, spec, prefix):
        """Copy the whole build tree into the installation prefix's bin dir."""
        install_tree('.', prefix.bin)
| lgpl-2.1 | Python | |
2904992eb431ac4a92442ccb1fcff5715ae8c7fa | add migrations for new policy parameters | OpenSourcePolicyCenter/webapp-public,OpenSourcePolicyCenter/webapp-public,OpenSourcePolicyCenter/webapp-public,OpenSourcePolicyCenter/PolicyBrain,OpenSourcePolicyCenter/PolicyBrain,OpenSourcePolicyCenter/webapp-public,OpenSourcePolicyCenter/PolicyBrain,OpenSourcePolicyCenter/PolicyBrain | webapp/apps/taxbrain/migrations/0035_auto_20161110_1624.py | webapp/apps/taxbrain/migrations/0035_auto_20161110_1624.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import webapp.apps.taxbrain.models
class Migration(migrations.Migration):
    """Add three nullable policy-parameter columns to TaxSaveInputs:
    CG_nodiff, CTC_new_refund_limit_rt and EITC_indiv."""

    dependencies = [
        ('taxbrain', '0034_auto_20161004_1953'),
    ]

    operations = [
        migrations.AddField(
            model_name='taxsaveinputs',
            name='CG_nodiff',
            field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),
        ),
        migrations.AddField(
            model_name='taxsaveinputs',
            name='CTC_new_refund_limit_rt',
            field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),
        ),
        migrations.AddField(
            model_name='taxsaveinputs',
            name='EITC_indiv',
            field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),
        ),
    ]
| mit | Python | |
b068e4f8c3e5e8d7a0f1c45d5f1b6ac424b44153 | Make validate recipients to ignore empty values | VinnieJohns/ggrc-core,kr41/ggrc-core,VinnieJohns/ggrc-core,josthkko/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,NejcZupec/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,VinnieJohns/ggrc-core,edofic/ggrc-core,j0gurt/ggrc-core,selahssea/ggrc-core,AleksNeStu/ggrc-core,NejcZupec/ggrc-core,NejcZupec/ggrc-core,andrei-karalionak/ggrc-core,edofic/ggrc-core,selahssea/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,plamut/ggrc-core,kr41/ggrc-core,plamut/ggrc-core | src/ggrc/models/comment.py | src/ggrc/models/comment.py | # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: andraz@reciprocitylabs.com
# Maintained By: andraz@reciprocitylabs.com
"""Module containing comment model and comment related mixins."""
from sqlalchemy.orm import validates
from ggrc import db
from ggrc.models.mixins import Base
from ggrc.models.mixins import Described
from ggrc.models.object_document import Documentable
from ggrc.models.relationship import Relatable
class Commentable(object):
  """Mixin for commentable objects.

  This is a mixin for adding default options to objects on which people can
  comment.

  recipients is used for setting who gets notified (Verifer, Requester, ...).
  send_by_default should be used for setting the "send notification" flag in
  the comment modal.
  """
  # pylint: disable=too-few-public-methods

  # Closed set of assignee-type names that may receive comment notifications.
  VALID_RECIPIENTS = frozenset([
      "Assessor",
      "Assignee",
      "Creator",
      "Requester",
      "Verifier",
  ])

  @validates("recipients")
  def validate_recipients(self, key, value):
    """
    Validate recipients list

    Args:
      value (string): Can be either empty, or
        list of comma separated `VALID_RECIPIENTS`
    """
    # pylint: disable=unused-argument
    if value:
      # Drop empty fragments so stray commas ("a,b," / "a,,b") and duplicate
      # names are tolerated rather than rejected.
      value = set(name for name in value.split(",") if name)
    if value and value.issubset(self.VALID_RECIPIENTS):
      # The validator is a bit more smart and also makes some filtering of the
      # given data - this is intended.
      return ",".join(value)
    elif not value:
      return ""
    else:
      raise ValueError(value,
                       'Value should be either empty ' +
                       'or comma separated list of ' +
                       ', '.join(sorted(self.VALID_RECIPIENTS))
                       )

  # Comma separated roles to notify; stored as "" when nobody is selected.
  recipients = db.Column(db.String, nullable=True)
  send_by_default = db.Column(db.Boolean)

  _publish_attrs = [
      "recipients",
      "send_by_default",
  ]
  _aliases = {
      "recipients": "Recipients",
      "send_by_default": "Send by default",
  }
class Comment(Relatable, Described, Documentable, Base, db.Model):
  """Basic comment model."""
  __tablename__ = "comments"

  # Role of the person the comment is addressed to (e.g. "Assignee").
  assignee_type = db.Column(db.String)

  # REST properties
  _publish_attrs = [
      "assignee_type",
  ]
  # description is free text entered by users, so sanitize it.
  _sanitize_html = [
      "description",
  ]
| # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: andraz@reciprocitylabs.com
# Maintained By: andraz@reciprocitylabs.com
"""Module containing comment model and comment related mixins."""
from sqlalchemy.orm import validates
from ggrc import db
from ggrc.models.mixins import Base
from ggrc.models.mixins import Described
from ggrc.models.object_document import Documentable
from ggrc.models.relationship import Relatable
class Commentable(object):
  """Mixin for commentable objects.

  This is a mixin for adding default options to objects on which people can
  comment.

  recipients is used for setting who gets notified (Verifer, Requester, ...).
  send_by_default should be used for setting the "send notification" flag in
  the comment modal.
  """
  # Closed set of assignee-type names that may receive comment notifications.
  VALID_RECIPIENTS = frozenset([
      "Assessor",
      "Assignee",
      "Creator",
      "Requester",
      "Verifier",
  ])

  @validates("recipients")
  def validate_recipients(self, key, value):
    """
    Validate recipients list

    Args:
      key: attribute name being validated (required by SQLAlchemy, unused).
      value (string): Can be either empty, or
        list of comma separated `VALID_RECIPIENTS`

    Returns:
      Normalized comma separated recipient list, or "" when empty.

    Raises:
      ValueError: if any non-empty name is not in `VALID_RECIPIENTS`.
    """
    # pylint: disable=unused-argument
    if value:
      # Bug fix: drop empty fragments so stray commas ("a,b," / "a,,b") and
      # duplicate names no longer make an otherwise valid list fail
      # validation with ValueError.
      value = set(name for name in value.split(",") if name)
    if value and value.issubset(self.VALID_RECIPIENTS):
      return ",".join(value)
    elif not value:
      return ""
    else:
      raise ValueError(value,
                       'Value should be either empty ' +
                       'or comma separated list of ' +
                       ', '.join(sorted(self.VALID_RECIPIENTS))
                       )

  recipients = db.Column(db.String, nullable=True)
  send_by_default = db.Column(db.Boolean)

  _publish_attrs = [
      "recipients",
      "send_by_default",
  ]
  _aliases = {
      "recipients": "Recipients",
      "send_by_default": "Send by default",
  }
class Comment(Relatable, Described, Documentable, Base, db.Model):
  """Basic comment model."""
  __tablename__ = "comments"

  # Role of the person the comment is addressed to (e.g. "Assignee").
  assignee_type = db.Column(db.String)

  # REST properties
  _publish_attrs = [
      "assignee_type",
  ]
  # description is free text entered by users, so sanitize it.
  _sanitize_html = [
      "description",
  ]
| apache-2.0 | Python |
8840340bbd8310cf03f12accbb51dd81921ccf86 | Fix use of `format` for unicode | kr41/ggrc-core,NejcZupec/ggrc-core,AleksNeStu/ggrc-core,vladan-m/ggrc-core,kr41/ggrc-core,prasannav7/ggrc-core,plamut/ggrc-core,jmakov/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,vladan-m/ggrc-core,jmakov/ggrc-core,vladan-m/ggrc-core,hasanalom/ggrc-core,vladan-m/ggrc-core,hasanalom/ggrc-core,hyperNURb/ggrc-core,uskudnik/ggrc-core,uskudnik/ggrc-core,uskudnik/ggrc-core,edofic/ggrc-core,prasannav7/ggrc-core,NejcZupec/ggrc-core,andrei-karalionak/ggrc-core,edofic/ggrc-core,selahssea/ggrc-core,jmakov/ggrc-core,hasanalom/ggrc-core,VinnieJohns/ggrc-core,vladan-m/ggrc-core,prasannav7/ggrc-core,AleksNeStu/ggrc-core,j0gurt/ggrc-core,hyperNURb/ggrc-core,edofic/ggrc-core,edofic/ggrc-core,hyperNURb/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,josthkko/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,NejcZupec/ggrc-core,prasannav7/ggrc-core,selahssea/ggrc-core,j0gurt/ggrc-core,uskudnik/ggrc-core,hasanalom/ggrc-core,jmakov/ggrc-core,selahssea/ggrc-core,andrei-karalionak/ggrc-core,plamut/ggrc-core,hyperNURb/ggrc-core,josthkko/ggrc-core,hyperNURb/ggrc-core,plamut/ggrc-core,hasanalom/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,j0gurt/ggrc-core,plamut/ggrc-core,jmakov/ggrc-core,uskudnik/ggrc-core,NejcZupec/ggrc-core,andrei-karalionak/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core | src/ggrc/models/request.py | src/ggrc/models/request.py | # Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: vraj@reciprocitylabs.com
from ggrc import db
from .mixins import deferred, Base, Described
class Request(Described, Base, db.Model):
  """An audit request assigned to a person, tracked from Draft to Accepted,
  together with the responses collected for it."""
  __tablename__ = 'requests'

  # Allowed values for the ``request_type`` and ``status`` enum columns.
  VALID_TYPES = (u'documentation', u'interview', u'population sample')
  VALID_STATES = (u'Draft', u'Requested', u'Responded', u'Amended Request',
      u'Updated Response', u'Accepted')

  # Person responsible for fulfilling the request.
  assignee_id = db.Column(db.Integer, db.ForeignKey('people.id'),
      nullable=False)
  assignee = db.relationship('Person')
  request_type = deferred(db.Column(db.Enum(*VALID_TYPES), nullable=False),
      'Request')
  status = deferred(db.Column(db.Enum(*VALID_STATES), nullable=False),
      'Request')
  requested_on = deferred(db.Column(db.Date, nullable=False), 'Request')
  due_on = deferred(db.Column(db.Date, nullable=False), 'Request')
  # Owning audit and the objective this request relates to.
  audit_id = db.Column(db.Integer, db.ForeignKey('audits.id'), nullable=False)
  objective_id = db.Column(db.Integer, db.ForeignKey('objectives.id'),
      nullable=False)
  gdrive_upload_path = deferred(db.Column(db.String, nullable=True),
      'Request')
  test = deferred(db.Column(db.Text, nullable=True), 'Request')
  notes = deferred(db.Column(db.Text, nullable=True), 'Request')
  auditor_contact = deferred(db.Column(db.String, nullable=True), 'Request')
  responses = db.relationship('Response', backref='request',
      cascade='all, delete-orphan')

  # Attributes exposed through the JSON REST API.
  _publish_attrs = [
      'assignee',
      'request_type',
      'gdrive_upload_path',
      'requested_on',
      'due_on',
      'status',
      'audit',
      'objective',
      'responses',
      'test',
      'notes',
      'auditor_contact',
  ]
  # Free-text attributes sanitized before storage/rendering.
  _sanitize_html = [
      'gdrive_upload_path',
      'test',
      'notes',
      'auditor_contact',
  ]

  def _display_name(self):
    # Unicode literal: audit.display_name may contain non-ASCII characters.
    return u'Request with id={0} for Audit "{1}"'.format(
        self.id, self.audit.display_name)

  @classmethod
  def eager_query(cls):
    """Query with audit/objective joined and responses pre-fetched, to avoid
    N+1 loading when serializing collections."""
    from sqlalchemy import orm
    query = super(Request, cls).eager_query()
    return query.options(
        orm.joinedload('audit'),
        orm.joinedload('objective'),
        orm.subqueryload('responses'))
| # Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: vraj@reciprocitylabs.com
from ggrc import db
from .mixins import deferred, Base, Described
class Request(Described, Base, db.Model):
  """An audit request assigned to a person, tracked from Draft to Accepted,
  together with the responses collected for it."""
  __tablename__ = 'requests'

  # Allowed values for the ``request_type`` and ``status`` enum columns.
  VALID_TYPES = (u'documentation', u'interview', u'population sample')
  VALID_STATES = (u'Draft', u'Requested', u'Responded', u'Amended Request',
      u'Updated Response', u'Accepted')

  # Person responsible for fulfilling the request.
  assignee_id = db.Column(db.Integer, db.ForeignKey('people.id'),
      nullable=False)
  assignee = db.relationship('Person')
  request_type = deferred(db.Column(db.Enum(*VALID_TYPES), nullable=False),
      'Request')
  status = deferred(db.Column(db.Enum(*VALID_STATES), nullable=False),
      'Request')
  requested_on = deferred(db.Column(db.Date, nullable=False), 'Request')
  due_on = deferred(db.Column(db.Date, nullable=False), 'Request')
  # Owning audit and the objective this request relates to.
  audit_id = db.Column(db.Integer, db.ForeignKey('audits.id'), nullable=False)
  objective_id = db.Column(db.Integer, db.ForeignKey('objectives.id'),
      nullable=False)
  gdrive_upload_path = deferred(db.Column(db.String, nullable=True),
      'Request')
  test = deferred(db.Column(db.Text, nullable=True), 'Request')
  notes = deferred(db.Column(db.Text, nullable=True), 'Request')
  auditor_contact = deferred(db.Column(db.String, nullable=True), 'Request')
  responses = db.relationship('Response', backref='request',
      cascade='all, delete-orphan')

  # Attributes exposed through the JSON REST API.
  _publish_attrs = [
      'assignee',
      'request_type',
      'gdrive_upload_path',
      'requested_on',
      'due_on',
      'status',
      'audit',
      'objective',
      'responses',
      'test',
      'notes',
      'auditor_contact',
  ]
  # Free-text attributes sanitized before storage/rendering.
  _sanitize_html = [
      'gdrive_upload_path',
      'test',
      'notes',
      'auditor_contact',
  ]

  def _display_name(self):
    # Bug fix: use a unicode literal -- ``audit.display_name`` may contain
    # non-ASCII characters, and formatting it into a Python 2 byte string
    # raises UnicodeEncodeError/UnicodeDecodeError.
    return u'Request with id={0} for Audit "{1}"'.format(
        self.id, self.audit.display_name)

  @classmethod
  def eager_query(cls):
    """Query with audit/objective joined and responses pre-fetched, to avoid
    N+1 loading when serializing collections."""
    from sqlalchemy import orm
    query = super(Request, cls).eager_query()
    return query.options(
        orm.joinedload('audit'),
        orm.joinedload('objective'),
        orm.subqueryload('responses'))
| apache-2.0 | Python |
dbc761530b77c606038f62ed498c192b67321e8f | Test co2 load_data for Python 3. | nvoron23/statsmodels,wwf5067/statsmodels,wkfwkf/statsmodels,hainm/statsmodels,jstoxrocky/statsmodels,musically-ut/statsmodels,musically-ut/statsmodels,ChadFulton/statsmodels,ChadFulton/statsmodels,ChadFulton/statsmodels,bert9bert/statsmodels,cbmoore/statsmodels,alekz112/statsmodels,bsipocz/statsmodels,bashtage/statsmodels,phobson/statsmodels,waynenilsen/statsmodels,gef756/statsmodels,saketkc/statsmodels,wdurhamh/statsmodels,ChadFulton/statsmodels,gef756/statsmodels,DonBeo/statsmodels,hlin117/statsmodels,bzero/statsmodels,YihaoLu/statsmodels,DonBeo/statsmodels,kiyoto/statsmodels,hlin117/statsmodels,bert9bert/statsmodels,hlin117/statsmodels,wzbozon/statsmodels,cbmoore/statsmodels,bzero/statsmodels,edhuckle/statsmodels,gef756/statsmodels,wzbozon/statsmodels,alekz112/statsmodels,jstoxrocky/statsmodels,wzbozon/statsmodels,yl565/statsmodels,bsipocz/statsmodels,astocko/statsmodels,detrout/debian-statsmodels,detrout/debian-statsmodels,saketkc/statsmodels,alekz112/statsmodels,hainm/statsmodels,kiyoto/statsmodels,waynenilsen/statsmodels,josef-pkt/statsmodels,ChadFulton/statsmodels,jseabold/statsmodels,phobson/statsmodels,jstoxrocky/statsmodels,adammenges/statsmodels,bashtage/statsmodels,adammenges/statsmodels,josef-pkt/statsmodels,bzero/statsmodels,kiyoto/statsmodels,phobson/statsmodels,YihaoLu/statsmodels,bashtage/statsmodels,statsmodels/statsmodels,musically-ut/statsmodels,statsmodels/statsmodels,detrout/debian-statsmodels,astocko/statsmodels,wkfwkf/statsmodels,jseabold/statsmodels,yl565/statsmodels,alekz112/statsmodels,jseabold/statsmodels,wzbozon/statsmodels,Averroes/statsmodels,hainm/statsmodels,statsmodels/statsmodels,musically-ut/statsmodels,nguyentu1602/statsmodels,yl565/statsmodels,phobson/statsmodels,wzbozon/statsmodels,nguyentu1602/statsmodels,josef-pkt/statsmodels,bsipocz/statsmodels,wkfwkf/statsmodels,rgommers/statsmodels,bert9bert/statsmodels,josef-pkt/statsmodels,gef756/statsmodels,ww
f5067/statsmodels,nguyentu1602/statsmodels,yl565/statsmodels,huongttlan/statsmodels,edhuckle/statsmodels,wdurhamh/statsmodels,gef756/statsmodels,wdurhamh/statsmodels,edhuckle/statsmodels,cbmoore/statsmodels,kiyoto/statsmodels,astocko/statsmodels,bashtage/statsmodels,josef-pkt/statsmodels,DonBeo/statsmodels,astocko/statsmodels,saketkc/statsmodels,bert9bert/statsmodels,nvoron23/statsmodels,bert9bert/statsmodels,wdurhamh/statsmodels,bzero/statsmodels,nvoron23/statsmodels,bsipocz/statsmodels,waynenilsen/statsmodels,cbmoore/statsmodels,hlin117/statsmodels,YihaoLu/statsmodels,statsmodels/statsmodels,yl565/statsmodels,Averroes/statsmodels,statsmodels/statsmodels,waynenilsen/statsmodels,rgommers/statsmodels,nvoron23/statsmodels,Averroes/statsmodels,rgommers/statsmodels,wdurhamh/statsmodels,YihaoLu/statsmodels,adammenges/statsmodels,statsmodels/statsmodels,saketkc/statsmodels,josef-pkt/statsmodels,edhuckle/statsmodels,phobson/statsmodels,rgommers/statsmodels,jseabold/statsmodels,saketkc/statsmodels,bashtage/statsmodels,rgommers/statsmodels,jstoxrocky/statsmodels,Averroes/statsmodels,hainm/statsmodels,detrout/debian-statsmodels,DonBeo/statsmodels,kiyoto/statsmodels,wwf5067/statsmodels,huongttlan/statsmodels,adammenges/statsmodels,huongttlan/statsmodels,bashtage/statsmodels,wwf5067/statsmodels,nvoron23/statsmodels,cbmoore/statsmodels,huongttlan/statsmodels,nguyentu1602/statsmodels,bzero/statsmodels,ChadFulton/statsmodels,wkfwkf/statsmodels,DonBeo/statsmodels,jseabold/statsmodels,YihaoLu/statsmodels,edhuckle/statsmodels,wkfwkf/statsmodels | statsmodels/datasets/tests/test_data.py | statsmodels/datasets/tests/test_data.py | from statsmodels.datasets import co2
def test_co2_python3():
    """Smoke test: loading the co2 dataset must not raise on Python 3.

    Regression test -- this failed in pd.to_datetime on Python 3 with
    pandas <= 0.12.0.
    """
    # The return value is deliberately discarded; the test only asserts that
    # loading succeeds (the unused local was removed).
    co2.load_pandas()
| bsd-3-clause | Python | |
299bb8bccf8485a12bc341cb45b2d2c82771f5dd | add pentagon import | xzrunner/easyeditor,xzrunner/easyeditor | pentagon_import/gen_pentagon.py | pentagon_import/gen_pentagon.py | import os
import math
SRC_POS = '46,562, 82,562, 82,526, 46,526'
FILE_NAME = '角色5围表.csv'
OUTPUT = 'pentagon.lua'
EDGE = 300
INDEX = 19
START_ID = 0
def rotate(src, rad, dst):
    """Rotate the 2-D point ``src`` by ``rad`` radians, writing x/y into
    ``dst`` in place (counter-clockwise rotation matrix)."""
    c, s = math.cos(rad), math.sin(rad)
    x, y = src[0], src[1]
    dst[0] = x * c - y * s
    dst[1] = x * s + y * c
def gen_vertex(edge, data, vertex):
    """Fill ``vertex`` (flat [x0, y0, ..., x4, y4] list) with the 5 corners
    of a pentagon of radius ``edge``, each corner scaled by the matching
    entry of ``data``; y is negated for screen coordinates."""
    step = math.pi * 2 / 5
    angle = math.pi * 0.5  # first corner points straight up
    base = [edge, 0]
    for idx in range(5):
        corner = [0, 0]
        rotate(base, angle, corner)
        scale = float(data[idx])
        vertex[idx * 2] = corner[0] * scale
        vertex[idx * 2 + 1] = -corner[1] * scale
        angle += step
def write2file(files, vertex):
    """Append one lua ``picture`` record to the open file ``files``.

    The pentagon is emitted as 5 quads, one per edge; consumes and then
    increments the module-global START_ID counter.
    """
    global START_ID
    files.write('picture {\n')
    files.write('\tid = '+str(START_ID)+',\n')
    START_ID += 1
    i = 0
    while i < 5:
        files.write('\t{ tex = 0, src = { '+SRC_POS+' }, screen = { ')
        # Each quad runs from corner i to corner i+1 (wrapping) with the two
        # remaining points at the origin.  Coordinates are multiplied by 16 --
        # presumably the runtime's 1/16-pixel fixed-point unit; confirm.
        files.write('%d, %d, %d, %d, %d, %d, %d, %d' % (vertex[i*2]*16, vertex[i*2+1]*16, vertex[((i+1)%5)*2]*16, vertex[((i+1)%5)*2+1]*16, 0, 0, 0, 0))
        files.write(" } },\n")
        i += 1
    files.write('}\n')
def write_anim(files, name, start_id, size):
    """Append a lua ``animation`` record exporting ``name``.

    Components reference the ``size`` picture ids starting at ``start_id``;
    one frame is emitted per component.  Consumes and then increments the
    module-global START_ID counter for the animation's own id.
    """
    global START_ID
    files.write('animation {\n')
    files.write('\texport = \"'+name+'\",\n')
    files.write('\tid = '+str(START_ID)+',\n')
    START_ID += 1
    # components
    files.write('\tcomponent = {\n')
    i = 0
    while i < size:
        files.write('\t\t{ id = '+str(start_id+i)+', },\n')
        i += 1
    files.write('\t},\n')
    # frames
    files.write('\t{\n')
    i = 0
    while i < size:
        files.write('\t\t{\n')
        # mat {1024, 0, 0, 1024, 0, 0} -- presumably an identity transform in
        # 1/1024 fixed-point units; confirm against the runtime's format.
        files.write('\t\t\t{index = '+str(i)+', mat = {1024, 0, 0, 1024, 0, 0}},\n')
        files.write('\t\t},\n')
        i += 1
    files.write('\t},\n')
    files.write('}\n')
def gen_pentagon():
    """Convert the per-character pentagon stats in FILE_NAME into lua
    picture/animation records written to OUTPUT.

    The csv is expected to have two header lines; column 0 carries the
    animation (export) name and columns INDEX..INDEX+4 the five per-corner
    scale factors.
    """
    out = open(OUTPUT, 'w')
    try:
        skip = 0
        name = ''
        start_id = 0
        for line in open(FILE_NAME):
            # skip the two header lines
            skip += 1
            if skip <= 2:
                continue
            # strip the trailing '\n' and split the csv row
            items = line[:-1].split(',')
            # a non-empty, changed name starts a new animation group; flush
            # the previous group first
            if items[0] != name and items[0] != '':
                if name != '':
                    write_anim(out, name, start_id, START_ID - start_id)
                name = items[0]
                start_id = START_ID
            # emit the picture record for this row
            data = [0] * 5
            i = 0
            while i < 5:
                data[i] = items[INDEX + i]
                i += 1
            vertex = [0] * 10
            gen_vertex(EDGE, data, vertex)
            write2file(out, vertex)
        # bug fix: flush the final animation group -- the loop above only
        # writes a group when it sees the *next* group's name, so the last
        # one was silently dropped
        if name != '':
            write_anim(out, name, start_id, START_ID - start_id)
    finally:
        # bug fix: the output file was previously never closed
        out.close()
gen_pentagon() | mit | Python | |
efc935b030750c26e24217d5f97dde1dc8a7ea66 | add script to clone mvn dependency to local from gradle | passos/scripts,passos/scripts,passos/scripts | python/mirror-mvn-dependency.py | python/mirror-mvn-dependency.py | #!/usr/bin/python
"""
This script is used to make a mirror maven repository from a gradle build
1. make sure your project can be built correctly
2. run this script in your project root directory
3. add the following code to your gradle file
buildscript {
repositories {
maven { url "file://${rootProject.projectDir}/maven_local/" }
}
dependencies {
classpath 'com.android.tools.build:gradle:2.1.3'
classpath 'io.dator:packageinfo:1.0-SNAPSHOT'
classpath 'io.dator:staticcheck:1.0-SNAPSHOT'
}
}
"""
import sys
import os
import subprocess
import glob
import shutil
def main(argv):
    """Build a local maven mirror from a gradle dependency cache.

    Runs gradle with a throwaway GRADLE_USER_HOME next to this script, then
    copies every cached artifact (group/artifact/version) into ./maven_local
    using the standard maven directory layout, and removes the temp home.
    Returns 0 on completion.
    """
    project_dir = os.path.dirname(os.path.realpath(__file__))
    repo_dir = os.path.join(project_dir, "maven_local")
    temp_home = os.path.join(project_dir, ".gradle_home")
    if not os.path.isdir(temp_home):
        os.makedirs(temp_home)
    # Start from a clean mirror directory every run.
    if os.path.isdir(repo_dir):
        shutil.rmtree(repo_dir)
    # Populate the throwaway cache by running the project's gradle build.
    subprocess.call(["gradle", "-g", temp_home, "-Dbuild.network_access=allow"])
    # Gradle cache layout: caches/modules-*/files-*/<group>/<artifact>/<version>/<sha1>/<file>
    cache_files = os.path.join(temp_home, "caches/modules-*/files-*")
    for cache_dir in glob.glob(cache_files):
        for cache_group_id in os.listdir(cache_dir):
            cache_group_dir = os.path.join(cache_dir, cache_group_id)
            # Maven layout uses the dotted group id as nested directories.
            repo_group_dir = os.path.join(repo_dir, cache_group_id.replace('.', '/'))
            for cache_artifact_id in os.listdir(cache_group_dir):
                cache_artifact_dir = os.path.join(cache_group_dir, cache_artifact_id)
                repo_artifact_dir = os.path.join(repo_group_dir, cache_artifact_id)
                for cache_version_id in os.listdir(cache_artifact_dir):
                    cache_version_dir = os.path.join(cache_artifact_dir, cache_version_id)
                    repo_version_dir = os.path.join(repo_artifact_dir, cache_version_id)
                    if not os.path.isdir(repo_version_dir):
                        os.makedirs(repo_version_dir)
                    # The extra "*/" skips gradle's per-file sha1 directory.
                    cache_items = os.path.join(cache_version_dir, "*/*")
                    for cache_item in glob.glob(cache_items):
                        cache_item_name = os.path.basename(cache_item)
                        repo_item_path = os.path.join(repo_version_dir, cache_item_name)
                        print "%s:%s:%s (%s)" % (cache_group_id, cache_artifact_id, cache_version_id, cache_item_name)
                        shutil.copyfile(cache_item, repo_item_path)
    shutil.rmtree(temp_home)
    print "repo location: %s" % (repo_dir)
    return 0

if __name__ == "__main__":
    sys.exit(main(sys.argv))
| apache-2.0 | Python | |
1b2c67a0d4a237ce56dc40616b1a023b515aee0f | add setup.py | waliens/sldc | sldc/setup.py | sldc/setup.py | from distutils.core import setup
# Package metadata for the SLDC (Segment Locate Dispatch Classify) workflow.
# NOTE(review): no ``packages``/``py_modules`` argument is given, so this
# script installs metadata only -- confirm whether ``packages=['sldc']``
# (or similar) is missing.
setup(name="sldc",
      version="1.0",
      description="Segment Locate Dispatch Classify workflow",
      author="Romain Mormont",
      author_email="romain.mormont@gmail.com",
      )
| mit | Python | |
379d8b1fb828918f8d77b0acf0e270eb94e650e5 | Add example for `adapt_rgb` | paalge/scikit-image,keflavich/scikit-image,warmspringwinds/scikit-image,oew1v07/scikit-image,paalge/scikit-image,michaelaye/scikit-image,ajaybhat/scikit-image,GaZ3ll3/scikit-image,Hiyorimi/scikit-image,WarrenWeckesser/scikits-image,michaelpacer/scikit-image,youprofit/scikit-image,emon10005/scikit-image,Britefury/scikit-image,juliusbierk/scikit-image,Britefury/scikit-image,Hiyorimi/scikit-image,pratapvardhan/scikit-image,rjeli/scikit-image,juliusbierk/scikit-image,warmspringwinds/scikit-image,vighneshbirodkar/scikit-image,michaelpacer/scikit-image,Midafi/scikit-image,chriscrosscutler/scikit-image,oew1v07/scikit-image,jwiggins/scikit-image,youprofit/scikit-image,vighneshbirodkar/scikit-image,vighneshbirodkar/scikit-image,ClinicalGraphics/scikit-image,bsipocz/scikit-image,ClinicalGraphics/scikit-image,dpshelio/scikit-image,GaZ3ll3/scikit-image,michaelaye/scikit-image,keflavich/scikit-image,robintw/scikit-image,jwiggins/scikit-image,ofgulban/scikit-image,bennlich/scikit-image,newville/scikit-image,rjeli/scikit-image,pratapvardhan/scikit-image,rjeli/scikit-image,bennlich/scikit-image,dpshelio/scikit-image,blink1073/scikit-image,paalge/scikit-image,blink1073/scikit-image,robintw/scikit-image,emon10005/scikit-image,Midafi/scikit-image,newville/scikit-image,ofgulban/scikit-image,bsipocz/scikit-image,ajaybhat/scikit-image,chriscrosscutler/scikit-image,WarrenWeckesser/scikits-image,ofgulban/scikit-image | doc/examples/plot_adapt_rgb.py | doc/examples/plot_adapt_rgb.py | """
=========================================
Adapting gray-scale filters to RGB images
=========================================
There are many filters that are designed to work with gray-scale images but not
color images. To simplify the process of creating functions that can adapt to
RGB images, scikit-image provides the ``adapt_rgb`` decorator.
To actually use the ``adapt_rgb`` decorator, you have to decide how you want to
adapt the RGB image for use with the gray-scale filter. There are two
pre-defined handlers:
``each_channel``
Pass each of the RGB channels to the filter one-by-one, and stitch the
results back into an RGB image.
``hsv_value``
Convert the RGB image to HSV and pass the value channel to the filter.
The filtered result is inserted back into the HSV image and converted
back to RGB.
Below, we demonstrate the use of ``adapt_rgb`` on a couple of gray-scale
filters:
"""
from skimage.color.adapt_rgb import adapt_rgb, each_channel, hsv_value
from skimage import filter
@adapt_rgb(each_channel)
def sobel_each(image):
    """Sobel filter applied to each RGB channel independently."""
    return filter.sobel(image)
@adapt_rgb(hsv_value)
def sobel_hsv(image):
    """Sobel filter applied to the HSV value channel only."""
    return filter.sobel(image)
"""
We can use these functions as we would normally use them, but now they work
with both gray-scale and color images. Let's plot the results with a color
image:
"""
from skimage import data
import matplotlib.pyplot as plt
# Side-by-side comparison of the two handlers on the same image.
image = data.lena()
fig, (ax_each, ax_hsv) = plt.subplots(ncols=2)
ax_each.imshow(sobel_each(image))
ax_hsv.imshow(sobel_hsv(image))
"""
.. image:: PLOT2RST.current_figure
Notice that the result for the value-filtered image preserves the color of the
original image, but channel filtered image combines in a more surprising way.
In other common cases, smoothing for example, the channel filtered image will
produce a better result than the value-filtered image.
You can also create your own handler functions for ``adapt_rgb``. To do so,
just create a function with the following signature::
def handler(image_filter, image, *args, **kwargs):
# Manipulate RGB image here...
image = image_filter(image, *args, **kwargs)
# Manipulate filtered image here...
return image
Note that ``adapt_rgb`` handlers are written for filters where the image is the
first argument.
As a very simple example, we can just convert any RGB image to grayscale and
then return the filtered result:
"""
from skimage.color import rgb2gray
def as_gray(image_filter, image, *args, **kwargs):
    """Custom adapt_rgb handler: convert to gray-scale before filtering."""
    gray_image = rgb2gray(image)
    return image_filter(gray_image, *args, **kwargs)
"""
It's important to create a signature that uses ``*args`` and ``**kwargs`` to
pass arguments along to the filter so that the decorated function is allowed to
have any number of positional and keyword arguments.
Finally, we can use this handler with ``adapt_rgb`` just as before:
"""
@adapt_rgb(as_gray)
def sobel_gray(image):
    """Sobel filter applied to the gray-scale conversion of the image."""
    return filter.sobel(image)
# The custom handler produces a single-channel (gray-scale) result.
fig, ax = plt.subplots()
ax.imshow(sobel_gray(image))
plt.show()
"""
.. image:: PLOT2RST.current_figure
.. note::
A very simple check of the array shape is used for detecting RGB images, so
``adapt_rgb`` is not recommended for functions that support 3D volumes or
color images in non-RGB spaces.
"""
| bsd-3-clause | Python | |
bb065a747215b6665eec78c5141b0a0d82296dac | Add migration to replace '<removed>' with '<removed>@{uuid}.com'.format(uuid=str(uuid4())) in contact_information.email to pass validation | alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api | migrations/versions/1400_repair_contact_information_emails_post_data_retention_removal.py | migrations/versions/1400_repair_contact_information_emails_post_data_retention_removal.py | """Replace '<removed>' with '<removed>@{uuid}.com'.format(uuid=str(uuid4())) in contact_information to pass validation.
Revision ID: 1400
Revises: 1390
Create Date: 2019-10-29 09:09:00.000000
"""
from uuid import uuid4
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
# revision identifiers, used by Alembic.
revision = '1400'
down_revision = '1390'
# Lightweight table construct (no model import) describing only the columns
# this migration reads and writes.
contact_information = table(
    'contact_information',
    column('id', sa.INTEGER),
    column('email', sa.VARCHAR),
)
def upgrade():
    """Update any contact_information rows where the email is set to '<removed>' to use the UUID email format we use
    on the user object in User.remove_personal_data
    Loop over the ids so we get a unique UUID for each update.
    """
    conn = op.get_bind()
    # SELECT id FROM contact_information WHERE email = '<removed>';
    query = contact_information.select().where(
        contact_information.c.email == '<removed>'
    ).with_only_columns(
        (contact_information.c.id,)
    )
    ci_ids = (ci_id for ci_id, in conn.execute(query).fetchall())
    for ci_id in ci_ids:
        # UPDATE contact_information SET email = '<removed>@uuid-etc.com' WHERE id = <ci_id>;
        # One UPDATE per row so each address gets its own fresh UUID.
        query = contact_information.update().where(
            contact_information.c.id == ci_id
        ).values(
            email='<removed>@{uuid}.com'.format(uuid=str(uuid4()))
        )
        conn.execute(query)
def downgrade():
    # Intentionally a no-op: the original addresses were removed for data
    # retention, so there is nothing to restore.
    pass
| mit | Python | |
434827540d4e11254615cd52b7efb36b746f9d0d | Create tf_simple_LR.py | maxlz/ML | tf_simple_LR.py | tf_simple_LR.py | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 1 19:50:54 2016
@author: max
"""
import tensorflow as tf
import numpy as np
import matplotlib.pylab as m
x_data = np.linspace(0.0,1.0,num = 500,dtype='float32')
x_data = np.reshape(x_data,(500,))
y_data = np.linspace(0.0,1.0,num = 500,dtype='float32')
y_data = y_data + np.random.rand(500,)/10+1
x = tf.placeholder(dtype='float32')
y = tf.placeholder(dtype='float32')
W = tf.Variable(tf.random_uniform((1,1),-1,1),name='W',dtype='float32')
b = tf.Variable(tf.random_uniform((1,1),-1,1),name='b',dtype='float32')
Y = W*x+b
loss = tf.reduce_mean(tf.square(Y-y))
opt = tf.train.RMSPropOptimizer(0.01).minimize(loss)
init = tf.initialize_all_variables()
ses = tf.Session()
ses.run(init)
for i in range(1000):
ses.run(opt,feed_dict={x:x_data,y:y_data})
if i%50 == True:
print loss.eval(session = ses,feed_dict={x:x_data,y:y_data})
B = b.eval(session = ses)
final_w = W.eval(session = ses)
print B,final_w
final_y = np.multiply(x_data,final_w) + B
final_y = np.reshape(final_y,(500,))
m.plot(x_data,final_y)
m.plot(x_data,y_data,'r+')
| apache-2.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.