commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
b67dd1f240e913c3423bd4c382852b7c234b487c | Support the 'localtime_into_day' type | dtcooper/python-fitparse,pR0Ps/python-fitparse | fitparse/processors.py | fitparse/processors.py | import datetime
class FitFileDataProcessor(object):
# TODO: Document API
#def process_type_<type_name> (field_data)
#def process_field_<field_name> (field_data) -- can be unknown_DD but NOT recommended
#def process_message_<mesg_name / mesg_type_num> (data_message)
def process_type_bool(self, field_data):
if field_data.value is not None:
field_data.value = bool(field_data.value)
def process_type_date_time(self, field_data):
value = field_data.value
if value is not None and value >= 0x10000000:
field_data.value = datetime.datetime.utcfromtimestamp(631065600 + value)
field_data.units = None # Units were 's', set to None
def process_type_local_date_time(self, field_data):
if field_data.value is not None:
field_data.value = datetime.datetime.fromtimestamp(631065600 + field_data.value)
field_data.units = None
def process_type_localtime_into_day(self, field_data):
if field_data.value is not None:
m, s = divmod(field_data.value, 60)
h, m = divmod(m, 60)
field_data.value = datetime.time(h, m, s)
field_data.units = None
class StandardUnitsDataProcessor(FitFileDataProcessor):
# Example use case
def process_field_distance(self, field_data):
if field_data.value is not None:
field_data.value /= 1000.0
field_data.units = 'km'
def process_field_speed(self, field_data):
if field_data.value is not None:
field_data.value *= 60.0 * 60.0 / 1000.0
field_data.units = 'km/h'
def process_units_semicircles(self, field_data):
if field_data.value is not None:
field_data.value *= 180.0 / (2 ** 31)
field_data.units = 'deg'
| import datetime
class FitFileDataProcessor(object):
# TODO: Document API
#def process_type_<type_name> (field_data)
#def process_field_<field_name> (field_data) -- can be unknown_DD but NOT recommended
#def process_message_<mesg_name / mesg_type_num> (data_message)
def process_type_bool(self, field_data):
if field_data.value is not None:
field_data.value = bool(field_data.value)
def process_type_date_time(self, field_data):
value = field_data.value
if value is not None and value >= 0x10000000:
field_data.value = datetime.datetime.utcfromtimestamp(631065600 + value)
field_data.units = None # Units were 's', set to None
def process_type_local_date_time(self, field_data):
if field_data.value is not None:
field_data.value = datetime.datetime.fromtimestamp(631065600 + field_data.value)
field_data.units = None
class StandardUnitsDataProcessor(FitFileDataProcessor):
# Example use case
def process_field_distance(self, field_data):
if field_data.value is not None:
field_data.value /= 1000.0
field_data.units = 'km'
def process_field_speed(self, field_data):
if field_data.value is not None:
field_data.value *= 60.0 * 60.0 / 1000.0
field_data.units = 'km/h'
def process_units_semicircles(self, field_data):
if field_data.value is not None:
field_data.value *= 180.0 / (2 ** 31)
field_data.units = 'deg'
| mit | Python |
09fafb87c4d1230f47900ed2029eeaee8e44f61d | Add dataset extraction from database. | daskol/mipt-classifier,daskol/mipt-classifier,daskol/mipt-classifier | miptclass/dataset.py | miptclass/dataset.py | #!/usr/bin/env python3
# encoding: utf8
# dataset.py
import logging
from itertools import count
from miptclass import models
from numpy import zeros
from operator import itemgetter
from os.path import realpath
from scipy.io import savemat
from scipy.sparse import csr_matrix, lil_matrix
from tqdm import tqdm
DATASET_FILENAME = 'dataset.mat'
def make_features_from_friend_list(friend_ids, freq_friends):
feat = zeros(len(freq_friends), dtype=int)
for friend_id in friend_ids:
idx = freq_friends.get(friend_id, None)
if idx is not None:
feat[idx] = True
return feat
def make_dataset(db, filename=DATASET_FILENAME):
logging.info('start making dataset')
cursor = db.execute("""
SELECT
id
FROM universities
WHERE name LIKE '%ะะคะขะ%';
""")
mipt_ids = frozenset(map(itemgetter('id'), cursor.fetchall()))
logging.info('MIPT university ids: %r', mipt_ids)
logging.info('build the most frequent friends')
cursor = db.execute("""
SELECT
friend_id
FROM user_friends
GROUP BY friend_id
HAVING COUNT(friend_id) > 4;
""")
freq_friend_ids = map(itemgetter('friend_id'), cursor.fetchall())
freq_friends = {
friend_id: i
for i, friend_id in zip(count(), freq_friend_ids)
}
freq_friend_count = len(freq_friends)
logging.info('total %d the most frequent friends', freq_friend_count)
uid_count = db.execute("""
SELECT
COUNT(u.id) AS cnt
FROM user_universities uu
JOIN users u ON u.id = uu.id;
""").fetchone()['cnt']
cursor = db.execute("""
SELECT
u.id AS id
FROM user_universities uu
JOIN users u ON u.id = uu.id;
""")
uids = map(itemgetter('id'), cursor.fetchall())
dataset = lil_matrix((uid_count, 1 + 1 + freq_friend_count), dtype=int)
for i, uid in enumerate(tqdm(uids, total=uid_count, unit='uid')):
cursor = db.execute("""
SELECT
friend_id
FROM user_friends
WHERE id = :uid;
""", dict(uid=uid))
friend_ids = map(itemgetter('friend_id'), cursor.fetchall())
cursor = db.execute("""
SELECT
university_id
FROM user_universities
WHERE id = :uid;
""", dict(uid=uid))
university_ids = map(itemgetter('university_id'), cursor.fetchall())
is_mipt = any([university_id in mipt_ids
for university_id in university_ids])
row = zeros(1 + freq_friend_count + 1, dtype=int)
row[0] = uid
row[1] = is_mipt
row[2:] = make_features_from_friend_list(friend_ids, freq_friends)
dataset[i, :] = row
logging.info('total %d non zero elements', dataset.nnz)
logging.info('converting lil to csr format')
dataset = csr_matrix(dataset)
filename = realpath(filename)
logging.info('store dataset matrix into `%s`', filename)
with open(filename, 'wb') as fout:
savemat(fout, dict(dataset=dataset.T))
logging.info('done')
return dataset
def test():
logging.basicConfig(
format='%(asctime)s : %(levelname)s : %(message)s',
level=logging.INFO)
dataset = make_dataset(models.db)
if __name__ == '__main__':
test()
| mit | Python | |
a50b96ece7db9b732a6dc96c6d981588a5760311 | Add script to convert tests from RethinkDB core. | grandquista/ReQL-Core,grandquista/ReQL-Core,grandquista/ReQL-Core,grandquista/ReQL-Core | test_builder.py | test_builder.py | import re
try:
import yaml
except:
print('PyYAML not installed')
from pathlib import Path
def mkdir(path):
try:
path.mkdir(parents=True)
except FileExistsError:
pass
def test_loop(path, c_path, cpp_path):
for file in path.glob('**/*.yaml'):
each_test(path, file, c_path, cpp_path)
def each_test(path, file, c_path, cpp_path):
with file.open() as istream:
test = yaml.load(istream)
test_keys = test.keys()
try:
tables = re.findall('([a-zA-Z\d\.\_\-]+)', test['table_variable_name'])
except KeyError:
pass
else:
test['table_variable_name'] = tables
test_file = file.relative_to(path)
c_file = (c_path / test_file).with_suffix('.cpp')
if not c_file.exists():
mkdir(c_file)
c_file.touch()
with c_file.open('w') as ostream:
yaml.dump(test, ostream)
cpp_file = (cpp_path / test_file).with_suffix('.cpp')
if not cpp_file.exists():
mkdir(cpp_file)
cpp_file.touch()
with cpp_file.open('w') as ostream:
yaml.dump(test, ostream)
def main():
cwd_path = Path('.')
base_path = cwd_path / 'rethinkdb' / 'test'
polyglot_path = base_path / 'rql_test' / 'src'
tests_path = cwd_path / 'test'
new_test_c_path = (tests_path / 'c' / 'polyglot')
mkdir(new_test_c_path)
new_test_c_path = new_test_c_path.resolve()
new_test_cpp_path = (tests_path / 'cpp' / 'polyglot')
mkdir(new_test_cpp_path)
new_test_cpp_path = new_test_cpp_path.resolve()
test_loop(polyglot_path)
main()
| apache-2.0 | Python | |
b7785c53dbf8bd07360fa2ae62590fb0fcd1012e | Add gpio test | iver56/auto-light | gpio_test.py | gpio_test.py | import RPi.GPIO as GPIO
from time import sleep
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7, GPIO.OUT)
for i in range(5):
GPIO.output(7, False)
sleep(2)
GPIO.output(7, True)
sleep(2)
GPIO.cleanup()
| mit | Python | |
f47a7678072471c7f78232138c850a2a7b5800a0 | add kattis/flexible | mjenrungrot/competitive_programming,mjenrungrot/competitive_programming,mjenrungrot/algorithm,mjenrungrot/competitive_programming,mjenrungrot/competitive_programming | Kattis/flexible.py | Kattis/flexible.py | """
Problem: flexible
Link: https://open.kattis.com/problems/flexible
Source: ACM ICPC 2014 North America Qualifier
"""
W, P = list(map(int, input().split()))
A = [0] + list(map(int, input().split())) + [W]
answer = set()
for i in range(len(A)-1):
for j in range(i+1,len(A)):
answer.add(A[j] - A[i])
space = False
answer = sorted(answer)
for i in range(len(answer)):
if(space): print(" ", end="")
space = True
print(answer[i], end="")
print("", end="\n")
| mit | Python | |
3b75bc7254dbccc139635c2b7ccf52b12a8eef19 | Add brain_curses.py for curses attributes defined at runtime (#456) | PyCQA/astroid | astroid/brain/brain_curses.py | astroid/brain/brain_curses.py | # Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
import astroid
def _curses_transform():
return astroid.parse('''
A_ALTCHARSET = 1
A_BLINK = 1
A_BOLD = 1
A_DIM = 1
A_INVIS = 1
A_ITALIC = 1
A_NORMAL = 1
A_PROTECT = 1
A_REVERSE = 1
A_STANDOUT = 1
A_UNDERLINE = 1
A_HORIZONTAL = 1
A_LEFT = 1
A_LOW = 1
A_RIGHT = 1
A_TOP = 1
A_VERTICAL = 1
A_CHARTEXT = 1
A_ATTRIBUTES = 1
A_CHARTEXT = 1
A_COLOR = 1
KEY_MIN = 1
KEY_BREAK = 1
KEY_DOWN = 1
KEY_UP = 1
KEY_LEFT = 1
KEY_RIGHT = 1
KEY_HOME = 1
KEY_BACKSPACE = 1
KEY_F0 = 1
KEY_Fn = 1
KEY_DL = 1
KEY_IL = 1
KEY_DC = 1
KEY_IC = 1
KEY_EIC = 1
KEY_CLEAR = 1
KEY_EOS = 1
KEY_EOL = 1
KEY_SF = 1
KEY_SR = 1
KEY_NPAGE = 1
KEY_PPAGE = 1
KEY_STAB = 1
KEY_CTAB = 1
KEY_CATAB = 1
KEY_ENTER = 1
KEY_SRESET = 1
KEY_RESET = 1
KEY_PRINT = 1
KEY_LL = 1
KEY_A1 = 1
KEY_A3 = 1
KEY_B2 = 1
KEY_C1 = 1
KEY_C3 = 1
KEY_BTAB = 1
KEY_BEG = 1
KEY_CANCEL = 1
KEY_CLOSE = 1
KEY_COMMAND = 1
KEY_COPY = 1
KEY_CREATE = 1
KEY_END = 1
KEY_EXIT = 1
KEY_FIND = 1
KEY_HELP = 1
KEY_MARK = 1
KEY_MESSAGE = 1
KEY_MOVE = 1
KEY_NEXT = 1
KEY_OPEN = 1
KEY_OPTIONS = 1
KEY_PREVIOUS = 1
KEY_REDO = 1
KEY_REFERENCE = 1
KEY_REFRESH = 1
KEY_REPLACE = 1
KEY_RESTART = 1
KEY_RESUME = 1
KEY_SAVE = 1
KEY_SBEG = 1
KEY_SCANCEL = 1
KEY_SCOMMAND = 1
KEY_SCOPY = 1
KEY_SCREATE = 1
KEY_SDC = 1
KEY_SDL = 1
KEY_SELECT = 1
KEY_SEND = 1
KEY_SEOL = 1
KEY_SEXIT = 1
KEY_SFIND = 1
KEY_SHELP = 1
KEY_SHOME = 1
KEY_SIC = 1
KEY_SLEFT = 1
KEY_SMESSAGE = 1
KEY_SMOVE = 1
KEY_SNEXT = 1
KEY_SOPTIONS = 1
KEY_SPREVIOUS = 1
KEY_SPRINT = 1
KEY_SREDO = 1
KEY_SREPLACE = 1
KEY_SRIGHT = 1
KEY_SRSUME = 1
KEY_SSAVE = 1
KEY_SSUSPEND = 1
KEY_SUNDO = 1
KEY_SUSPEND = 1
KEY_UNDO = 1
KEY_MOUSE = 1
KEY_RESIZE = 1
KEY_MAX = 1
ACS_BBSS = 1
ACS_BLOCK = 1
ACS_BOARD = 1
ACS_BSBS = 1
ACS_BSSB = 1
ACS_BSSS = 1
ACS_BTEE = 1
ACS_BULLET = 1
ACS_CKBOARD = 1
ACS_DARROW = 1
ACS_DEGREE = 1
ACS_DIAMOND = 1
ACS_GEQUAL = 1
ACS_HLINE = 1
ACS_LANTERN = 1
ACS_LARROW = 1
ACS_LEQUAL = 1
ACS_LLCORNER = 1
ACS_LRCORNER = 1
ACS_LTEE = 1
ACS_NEQUAL = 1
ACS_PI = 1
ACS_PLMINUS = 1
ACS_PLUS = 1
ACS_RARROW = 1
ACS_RTEE = 1
ACS_S1 = 1
ACS_S3 = 1
ACS_S7 = 1
ACS_S9 = 1
ACS_SBBS = 1
ACS_SBSB = 1
ACS_SBSS = 1
ACS_SSBB = 1
ACS_SSBS = 1
ACS_SSSB = 1
ACS_SSSS = 1
ACS_STERLING = 1
ACS_TTEE = 1
ACS_UARROW = 1
ACS_ULCORNER = 1
ACS_URCORNER = 1
ACS_VLINE = 1
COLOR_BLACK = 1
COLOR_BLUE = 1
COLOR_CYAN = 1
COLOR_GREEN = 1
COLOR_MAGENTA = 1
COLOR_RED = 1
COLOR_WHITE = 1
COLOR_YELLOW = 1
''')
astroid.register_module_extender(astroid.MANAGER, 'curses', _curses_transform)
| lgpl-2.1 | Python | |
42dfe9e99f24c0e05b2f411ea48ccee03612c711 | Add hfsm.py | rokujyouhitoma/tips,rokujyouhitoma/tips,rokujyouhitoma/tips,rokujyouhitoma/tips | hfsm/hfsm.py | hfsm/hfsm.py | # -*- coding: utf-8 -*-
#TODO
class State(object):
def __init__(self, name, parent):
self.name = name
self.parent = parent
def entry(self):
print('%s entry' % (self.name))
def execute(self):
print('%s execute' % (self.name))
def exit(self):
print('%s exit' % (self.name))
def __str__(self):
return '<State %s>' % self.name
#TODO
class StateMachine(object):
def __init__(self):
self.startState = None
self.currentState = None
def add_transition(self, current_state, event_name, next_state, action):
pass
def set_start(self, state):
self.startState = state
def run(self):
while True:
break
class HFSM(StateMachine):
def __init__(self):
super(HFSM, self).__init__()
if __name__ == '__main__':
#state
start = State('start', None)
state_4 = State('state_4', None)
state_0 = State('state_0', state_4)
state_1 = State('state_1', state_4)
state_2 = State('state_2', state_1)
state_3 = State('state_3', state_2)
end = State('end', None)
#action
action_0 = lambda: 1
m = HFSM()
#state transition table
m.add_transition(start, None, state_0, None)
m.add_transition(state_0, 'event_0', state_3, action_0)
m.add_transition(state_1, 'event_1', state_0, None)
m.add_transition(state_0, 'event_1', end, None)
m.add_transition(end, None, None, None)
m.set_start(start)
| mit | Python | |
d99f5fd775d0ab57e964d8403266fc1adc7a4004 | add a script to print a json summary of our jenkins configs | kplus/devstack-moonshot,yamt/devstack,avvocatodemarchis/devstack,sstrato/devstack,avvocatodemarchis/devstack,neerja28/Devstack_GlusterFS,mc2014/devstack,LoHChina/devstack,nawawi/openstack,jamielennox/devstack,williamthegrey/devstack,bljgaurav/openstack-test,liuquansheng47/devstack,samgoon/devstack,srics/devstack,vsham20/devstack,mmasaki/devstack,richliu/devstack,sc68cal/devstack,bigswitch/devstack,group-policy/devstack,parul24/devstack,nttmcl/devstack_public,liuquansheng47/devstack,costingalan/devstack,pombredanne/devstack,deepakselvaraj/devstack,prithivm/branch01,inkerra/devstack,dirkmueller/devstack,citrix-openstack-build/devstack,andrewrothstein/devstack,thomasem/devstack,savi-dev/devstack,noironetworks/devstack,kplus/devstack-moonshot,wenhuizhang/devstack,HPCHub/devstack,pczerkas/devstack,nati/devstack_public,atulpatil301/devstack,atulpatil301/devstack,upadhyay-prashant/devstack,armando-migliaccio/devstack,bq-xiao/devstack,olivierlemasle/devstack,tianweizhang/devstack,dtroyer/devstack,pczerkas/devstack,r-mibu/devstack,rushiagr/devstack,eshnil2000/devstack,parul24/devstack,herrsechs/devstack,flavio-fernandes/devstack,TwinkleChawla/devstack,eharney/devstack,openstack-dev/devstack,Mahito/devstack,mmasaki/devstack,LoHChina/devstack,dtroyer/devstack,nawawi/openstack,pacinete/devstack,JioCloud/devstack,Yuriy-Leonov/devstack,vishnugonela/devstack,andrewrothstein/devstack,olivierlemasle/devstack,TwinkleChawla/devstack,Millnert/contrail-devstack,bswartz/devstack,mssumanth/devstack,nati/devstack_public,innodee/openstackk,mssumanth/devstack,sequenceiq/devstack,virtualopensystems/devstack,ewindisch/raspi-devstack,upadhyay-prashant/devstack,Mahito/devstack,prithivm/branch01,citrix-openstack-build/devstack,affo/devstack,pombredanne/devstack,ykaneko/devstack,NaokiMiyata/devstack,varunarya10/devstack,thomasem/devstack,NaokiMiyata/devstack,ewindisch/raspi-devstack,rushiagr/de
vstack,vsham20/devstack,pacinete/devstack,nuxeh/devstack,ozamiatin/devstack,flavio-fernandes/devstack,CloudServer/devstack,cloudbuilders/devstack,promptworks/devstack,nuxeh/devstack,bljgaurav/openstack-test,openstack-dev/devstack,costingalan/devstack,sequenceiq/devstack,j4/devstack,eharney/devstack,nttmcl/devstack_public,Juniper/devstack,CloudServer/devstack,affo/devstack,samgoon/devstack,group-policy/devstack,cloudbuilders/devstack,BSJAIN92/OpenStack,herrsechs/devstack,bq-xiao/devstack,varunarya10/devstack,j4/devstack,noironetworks/devstack,bigswitch/devstack,dirkmueller/devstack,wenhuizhang/devstack,promptworks/devstack,BSJAIN92/OpenStack,mc2014/devstack,savi-dev/devstack,tianweizhang/devstack,CiscoSystems/devstack,ozamiatin/devstack,r-mibu/devstack,ykaneko/devstack,vishnugonela/devstack,sc68cal/devstack,bswartz/devstack,richliu/devstack,HPCHub/devstack,neerja28/Devstack_GlusterFS,shootstar/devstacktest | tools/jenkins/jenkins_home/print_summary.py | tools/jenkins/jenkins_home/print_summary.py | #!/usr/bin/python
import urllib
import json
import sys
def print_usage():
print "Usage: %s [jenkins_url (eg. http://50.56.12.202:8080/)]"\
% sys.argv[0]
sys.exit()
def fetch_blob(url):
return json.loads(urllib.urlopen(url + '/api/json').read())
if len(sys.argv) < 2:
print_usage()
BASE_URL = sys.argv[1]
root = fetch_blob(BASE_URL)
results = {}
for job_url in root['jobs']:
job = fetch_blob(job_url['url'])
if job.get('activeConfigurations'):
(tag, name) = job['name'].split('-')
if not results.get(tag):
results[tag] = {}
if not results[tag].get(name):
results[tag][name] = []
for config_url in job['activeConfigurations']:
config = fetch_blob(config_url['url'])
log_url = ''
if config.get('lastBuild'):
log_url = config['lastBuild']['url'] + 'console'
results[tag][name].append({'test': config['displayName'],
'status': config['color'],
'logUrl': log_url,
'healthReport': config['healthReport']})
print json.dumps(results)
| apache-2.0 | Python | |
9819658d9cc343d67c1b2c438853c6f065394751 | Add Flask example. | rduplain/wsgi_party,rduplain/wsgi_party | examples/flask/flask_party.py | examples/flask/flask_party.py | from flask import Flask, request
from wsgi_party import WSGIParty, PartylineConnector
class PartylineFlask(Flask, PartylineConnector):
def __init__(self, import_name, *args, **kwargs):
super(PartylineFlask, self).__init__(import_name, *args, **kwargs)
self.add_url_rule(WSGIParty.invite_path, endpoint='partyline',
view_func=self.join_party_wrapper)
def join_party_wrapper(self, request=request):
"""A simple wrapper to support Flask's request pattern."""
return self.join_party(request)
# Demonstrate.
root = PartylineFlask(__name__)
one = PartylineFlask(__name__)
two = PartylineFlask(__name__)
root.debug = True
one.debug = True
two.debug = True
one.config['APPLICATION_ROOT'] = '/one'
two.config['APPLICATION_ROOT'] = '/two'
application = WSGIParty(root, {
'/one': one,
'/two': two,
})
if __name__ == '__main__':
import os
from werkzeug.serving import run_simple
# Bind to PORT if defined, otherwise default to 5000.
port = int(os.environ.get('PORT', 5000))
run_simple('0.0.0.0', port, application, use_reloader=True)
| bsd-3-clause | Python | |
69b1e1bdf50991e609b548c02dd14225642cae85 | Add focus-last example | xenomachina/i3ipc-python,chrsclmn/i3ipc-python,acrisci/i3ipc-python,nicoe/i3ipc-python | examples/focus-last.py | examples/focus-last.py | #!/usr/bin/env python3
import os
import socket
import selectors
from argparse import ArgumentParser
from multiprocessing import Process, Value
from gi.repository import i3ipc
SOCKET_FILE = '/tmp/i3_focus_last'
class FocusWatcher:
def __init__(self):
self.window_id = Value('i', 0)
self.old_window_id = Value('i', 0)
self.i3 = i3ipc.Connection()
self.i3.on('window::focus', self.on_window_focus)
self.listening_socket = socket.socket(socket.AF_UNIX,
socket.SOCK_STREAM)
if os.path.exists(SOCKET_FILE):
os.remove(SOCKET_FILE)
self.listening_socket.bind(SOCKET_FILE)
self.listening_socket.listen(1)
def on_window_focus(self, i3conn, event):
if event.change == 'focus':
self.old_window_id.value = self.window_id.value
self.window_id.value = event.container.props.id
def launch_i3(self):
self.i3.main()
def launch_server(self):
selector = selectors.DefaultSelector()
def accept(sock):
conn, addr = sock.accept()
selector.register(conn, selectors.EVENT_READ, read)
def read(conn):
data = conn.recv(1024)
if data == b'switch':
window_id = self.old_window_id.value
if window_id:
self.i3.command('[con_id=%s] focus' %
self.old_window_id.value)
elif not data:
selector.unregister(conn)
conn.close()
selector.register(self.listening_socket, selectors.EVENT_READ, accept)
while True:
for key, event in selector.select():
callback = key.data
callback(key.fileobj)
def run(self):
p_i3 = Process(target=self.launch_i3)
p_server = Process(target=self.launch_server)
for p in (p_i3, p_server):
p.start()
if __name__ == '__main__':
parser = ArgumentParser(prog='focus-last.py',
description='''
Focus last focused window.
''')
parser.add_argument('--switch', dest='switch', action='store_true',
help='Switch to the previous window', default=False)
args = parser.parse_args()
if not args.switch:
focus_watcher = FocusWatcher()
focus_watcher.run()
else:
client_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
client_socket.connect(SOCKET_FILE)
client_socket.send('switch'.encode('utf-8'))
client_socket.close()
| bsd-3-clause | Python | |
54f3c05d369a3256d1adac1118ca6b2a8cd9b77e | add proximity example | francois-berder/PyLetMeCreate | examples/proximity_example.py | examples/proximity_example.py | #!/usr/bin/env python3
"""This example shows how to read a measure from the Proximity Click inserted
in Mikrobus 1.
"""
from letmecreate.core import i2c
from letmecreate.core.common import MIKROBUS_1
from letmecreate.click import proximity
# Initialise I2C on Mikrobus 1
i2c.init()
i2c.select_bus(MIKROBUS_1)
# Read measure from Proximity Click
proximity.enable()
print('measure: {}'.format(proximity.get_measure()))
proximity.disable()
# Release I2C
i2c.release()
| bsd-3-clause | Python | |
e5ad0f3029df610a308c107a640de438f62eb00b | Add test for early stopping trigger | rezoo/chainer,keisuke-umezawa/chainer,keisuke-umezawa/chainer,aonotas/chainer,chainer/chainer,hvy/chainer,niboshi/chainer,ktnyt/chainer,tkerola/chainer,niboshi/chainer,chainer/chainer,niboshi/chainer,anaruse/chainer,wkentaro/chainer,okuta/chainer,ktnyt/chainer,ronekko/chainer,hvy/chainer,hvy/chainer,jnishi/chainer,okuta/chainer,chainer/chainer,niboshi/chainer,okuta/chainer,keisuke-umezawa/chainer,hvy/chainer,ktnyt/chainer,wkentaro/chainer,chainer/chainer,wkentaro/chainer,jnishi/chainer,wkentaro/chainer,jnishi/chainer,jnishi/chainer,keisuke-umezawa/chainer,okuta/chainer,ktnyt/chainer,pfnet/chainer | tests/chainer_tests/training_tests/triggers_tests/test_early_stopping_trigger.py | tests/chainer_tests/training_tests/triggers_tests/test_early_stopping_trigger.py | import unittest
import chainer
import numpy
from chainer import testing
from chainer import training
from chainer.training import triggers
from chainer.training import util
class DummyUpdater(training.Updater):
def __init__(self):
self.iteration = 0
def finalize(self):
pass
def get_all_optimizers(self):
return {}
def update(self):
self.iteration += 1
@property
def epoch(self):
return 1
@property
def is_new_epoch(self):
return False
def _test_trigger(self, trigger, key, accuracies, expected):
updater = DummyUpdater()
trainer = training.Trainer(updater)
for accuracy, expected in zip(accuracies, expected):
updater.update()
trainer.observation = {key: accuracy}
self.assertEqual(trigger(trainer), expected)
class TestEarlyStoppingTrigger(unittest.TestCase):
def test_early_stopping_trigger(self):
key = 'main/accuracy'
trigger = triggers.EarlyStoppingTrigger(monitor=key, patients=3,
trigger=(1, 'iteration'),
verbose=False)
trigger = util.get_trigger(trigger)
accuracies = [0.5, 0.5, 0.6, 0.7, 0.6, 0.4, 0.3, 0.2]
accuracies = numpy.asarray([
chainer.Variable(numpy.asarray(acc, dtype=numpy.float32))
for acc in accuracies])
expected = [False, False, False, False, False, False, False, True]
_test_trigger(self, trigger, key, accuracies, expected)
testing.run_module(__name__, __file__)
| mit | Python | |
ca8d539f39015b043d51eff8c1359dca0818f348 | Include scratch script to duplicate cropped dataset | seung-lab/Julimaps,seung-lab/Julimaps | src/tasks/python/create_test_cutout.py | src/tasks/python/create_test_cutout.py | from cloudvolume import CloudVolume
image_in = 'gs://neuroglancer/pinky100_v0/image_single_slices'
image_out = 'gs://neuroglancer/pinky100_v0/test_image'
image_mip = 0
roi_in = 'gs://neuroglancer/pinky100_v0/image_single_slices/roicc'
roi_out = 'gs://neuroglancer/pinky100_v0/test_image/roicc'
roi_mip = 6
cfsplit_in = 'gs://neuroglancer/pinky100_v0/image_single_slices/cfsplit'
cfsplit_out = 'gs://neuroglancer/pinky100_v0/test_image/cfsplit'
cfsplit_mip = 2
match_in = 'gs://neuroglancer/pinky100_v0/image_single_slices/nccnet'
match_out = 'gs://neuroglancer/pinky100_v0/test_image/nccnet'
match_mip = 2
dst_in = 'gs://neuroglancer/pinky100_v0/aligned_test_v5'
dst_mip = 0
src_dst = [(cfsplit_in, cfsplit_out, cfsplit_mip),
(match_in, match_out, match_mip)]
z_slice = slice(199, 208)
src_mip = 0
def scale_slice(s, src_mip, dst_mip):
scale = 1/2**(dst_mip - src_mip)
return slice(int(s.start*scale), int(s.stop*scale))
def scale_slices(x_slice, y_slice, z_slice, src_mip, dst_mip):
return (scale_slice(x_slice, src_mip, dst_mip),
scale_slice(y_slice, src_mip, dst_mip),
scale_slice(z_slice, src_mip, dst_mip))
def get_cloudvolume(path, mip):
return CloudVolume(path, mip=mip)
def update_info_mips(cv, no_of_mips=6):
print("updating info mips")
for mip in range(1,no_of_mips+1):
factor = (2**mip, 2**mip, 1)
cv.add_scale(factor)
cv.commit_info()
def get_xy_slice(cv):
o = cv.voxel_offset
s = cv.shape
return slice(o[0], o[0]+s[0]), slice(o[1], o[1]+s[1])
for (src_path, dst_path, mip) in src_dst:
print(src_path)
print(dst_path)
print(mip)
cv = get_cloudvolume(dst_path, 0)
update_info_mips(cv, 6)
dst_cv = get_cloudvolume(dst_path, mip)
src_cv = get_cloudvolume(src_path, mip)
sl = get_xy_slice(dst_cv) + (z_slice,)
print(sl)
dst_cv[sl] = src_cv[sl] | mit | Python | |
b28e8b94191752a92de24845f99db1f59da32a9b | Add module | dgu-dna/DNA-Bot | apps/word.py | apps/word.py | from apps.decorators import on_command
from bs4 import BeautifulSoup
from urllib.request import urlopen, quote
import json
import re
CACHE_DEFAULT_URL = './apps/game_cache/relay.json'
NAVER_DICTIONARY_URL = 'http://krdic.naver.com/search.nhn?query=%s&kind=keyword'
@on_command(['!๋จ์ด'])
def run(robot, channel, tokens, user, command):
''''''
msg = '๋จ์ด๋ฅผ ๋งํด์ค์ผ ํ์ง'
is_word = False
if len(tokens) < 1:
return channel, msg
html = urlopen(quote((NAVER_DICTIONARY_URL % tokens[0]).encode('utf-8'), '/:&?='))
if len(tokens[0]) < 2:
msg = '๋ ๊ธ์ ์ด์์ ๋จ์ด๋ง ๊ฐ๋ฅํจ'
return channel, msg
soup = BeautifulSoup(html, 'html.parser')
s = soup.find_all('a', {'class': 'fnt15'})
if s:
for ss in s:
if re.sub(r'[^๊ฐ-ํฃ]', '', str(ss)) == tokens[0]:
is_word = True
break
#for ss in s:
# print(re.sub(r'[^ใฑ-ใ
๊ฐ-ํฃ]', '', str(ss)))
#print(s)
#print(soup.find_all('ul', {'class': 'lst3'})[0].find_all('li'))
if is_word:
wdat = json.loads(open(CACHE_DEFAULT_URL).read())
if tokens[0] not in wdat:
wdat[tokens[0]] = 0
wdat[tokens[0]] += 1
with open(CACHE_DEFAULT_URL, 'w') as fp:
json.dump(wdat, fp, indent=4)
msg = tokens[0] + ' ์(๋) ๋จ์ด์'
else:
msg = tokens[0] + ' ์(๋) ๋จ์ด๊ฐ ์๋'
return channel, msg
| mit | Python | |
9f088ee18bafb3b3d3fbc445bdc46298dea4850c | Create v2 of PWMController | thelonious/g2x,gizmo-cda/g2x,gizmo-cda/g2x,gizmo-cda/g2x,gizmo-cda/g2x,thelonious/g2x | app_v2/pwm_controller.py | app_v2/pwm_controller.py | import Adafruit_PCA9685
class Device:
def __init__(self, parent, name, channel, on, off):
self.parent = parent
self.name = name
self.channel = channel
self._on = on
self._off = off
self.initial_on = on
self.initial_off = off
@property
def on(self):
return self._on
@on.setter
def on(self, value):
value = max(0, min(value, 4095))
if self._on != value:
self._on = value
self.parent.set_pwm(self.channel, self.on, self.off)
@property
def off(self):
return self._off
@off.setter
def off(self, value):
value = max(0, min(value, 4095))
if self._off != value:
self._off = value
self.parent.set_pwm(self.channel, self.on, self.off)
@property
def duty_cycle(self):
on_duration = abs(self.off - self.on)
return round(100.0 * on_duration / 4096, 2)
@property
def on_duration(self):
one_cycle = 1.0 / self.parent.frequency
on_percent = abs(self.off - self.on) / 4096.0
return round(1000000 * one_cycle * on_percent, 2)
@property
def off_duration(self):
one_cycle = 1.0 / self.parent.frequency
off_percent = 1.0 - (abs(self.off - self.on) / 4096.0)
return round(1000000 * one_cycle * off_percent, 2)
def reset(self):
self.on = self.initial_on
self.off = self.initial_off
class PWMController:
def __init__(self):
self.pwm = Adafruit_PCA9685.PCA9685()
self._frequency = 60
self.pwm.set_pwm_freq(self._frequency)
self.devices = []
self.current_device_index = 0
@property
def frequency(self):
return self._frequency
@frequency.setter
def frequency(self, freq):
freq = max(40, min(freq, 1000))
if self._frequency != freq:
self._frequency = freq
self.pwm.set_pwm_freq(self._frequency)
@property
def current_device(self):
if 0 <= self.current_device_index < len(self.devices):
return self.devices[self.current_device_index]
else:
return None
@property
def device_count(self):
return len(self.devices)
def add_device(self, name, channel, on, off):
device = Device(self, name, channel, on, off)
self.current_device_index = len(self.devices)
self.devices.append(device)
self.pwm.set_pwm(device.channel, device.on, device.off)
return device
def previous_device(self):
if self.device_count > 0:
self.current_device_index = (self.current_device_index - 1) % len(self.devices)
return True
def next_device(self):
if self.device_count > 0:
self.current_device_index = (self.current_device_index + 1) % len(self.devices)
return True
# on/off are in ticks (based on freq)
def set_pwm(self, channel, on, off):
# [0,15] comes from Adafruit docs
channel = max(0, min(channel, 15))
# [0,4095] comes from Adafruit docs
on = max(0, min(on, 4095))
off = max(0, min(off, 4095))
self.pwm.set_pwm(channel, on, off)
| mit | Python | |
f1ba28d900e6a06aeab34fa67ef8d4552f438c99 | Create ImageNormalize.py | lorensen/VTKExamples,lorensen/VTKExamples,lorensen/VTKExamples,lorensen/VTKExamples,lorensen/VTKExamples,lorensen/VTKExamples,lorensen/VTKExamples | src/Python/ImageData/ImageNormalize.py | src/Python/ImageData/ImageNormalize.py | #!/usr/bin/env python
import vtk
def main():
colors = vtk.vtkNamedColors()
# Create an image
source = vtk.vtkImageSinusoidSource()
source.Update()
normalizeFilter = vtk.vtkImageNormalize()
normalizeFilter.SetInputConnection(source.GetOutputPort())
normalizeFilter.Update()
inputCastFilter = vtk.vtkImageCast()
inputCastFilter.SetInputConnection(source.GetOutputPort())
inputCastFilter.SetOutputScalarTypeToUnsignedChar()
inputCastFilter.Update()
normalizeCastFilter = vtk.vtkImageCast()
normalizeCastFilter.SetInputConnection(normalizeFilter.GetOutputPort())
normalizeCastFilter.SetOutputScalarTypeToUnsignedChar()
normalizeCastFilter.Update()
# Create actors
inputActor = vtk.vtkImageActor()
inputActor.GetMapper().SetInputConnection(inputCastFilter.GetOutputPort())
normalizedActor = vtk.vtkImageActor()
normalizedActor.GetMapper().SetInputConnection(normalizeCastFilter.GetOutputPort())
# There will be one render window
renderWindow = vtk.vtkRenderWindow()
renderWindow.SetSize(600, 300)
# And one interactor
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetRenderWindow(renderWindow)
# Define viewport ranges
# (xmin, ymin, xmax, ymax)
leftViewport = [0.0, 0.0, 0.5, 1.0]
rightViewport = [0.5, 0.0, 1.0, 1.0]
# Setup both renderers
leftRenderer = vtk.vtkRenderer()
renderWindow.AddRenderer(leftRenderer)
leftRenderer.SetViewport(leftViewport)
leftRenderer.SetBackground(colors.GetColor3d("lemon_chiffon"))
rightRenderer = vtk.vtkRenderer()
renderWindow.AddRenderer(rightRenderer)
rightRenderer.SetViewport(rightViewport)
rightRenderer.SetBackground(colors.GetColor3d("gray"))
leftRenderer.AddActor(inputActor)
rightRenderer.AddActor(normalizedActor)
leftRenderer.ResetCamera()
rightRenderer.ResetCamera()
renderWindow.Render()
interactor.Start()
if __name__ == '__main__':
main()
| apache-2.0 | Python | |
dc7b2e1674806511c0ec117b5d13f9e4c4f8e5b6 | Bump version to 0.3.2 | pekermert/django-socketio,clarkperkins/django-socketio,DESHRAJ/django-socketio,freylis/django-socketio,freylis/django-socketio,Solution4Future/django-socketio,stephenmcd/django-socketio,pekermert/django-socketio,kostyll/django-socketio,stephenmcd/django-socketio,Solution4Future/django-socketio,Solution4Future/django-socketio,clarkperkins/django-socketio,vetional/django-socketio,vetional/django-socketio,stephenmcd/django-socketio,kostyll/django-socketio,vetional/django-socketio,kostyll/django-socketio,clarkperkins/django-socketio,freylis/django-socketio,DESHRAJ/django-socketio,DESHRAJ/django-socketio,pekermert/django-socketio | django_socketio/__init__.py | django_socketio/__init__.py |
from django_socketio.utils import NoSocket, send, broadcast, broadcast_channel
__version__ = "0.3.2"
|
from django_socketio.utils import NoSocket, send, broadcast, broadcast_channel
__version__ = "0.3.1"
| bsd-2-clause | Python |
748f335059ce25d1e8177cfbd42c075c51fc450e | Create ifonemail.py | ioangogo/ifon | ifonemail.py | ifonemail.py | import os
import time
from subprocess import Popen, PIPE
import re
import os
import socket
import stmplib
from email.mime.text import MIMEText
def sendemail(msg, email, personin):
msg=MIMEText(msg)
msg['Subject'] = personin + "Is home"
msg['From'] = "ioan.loosley@loosleyweb.co.uk"
msg['To'] = email
s = smtplib.SMTP('mail.loosleyweb.co.uk')
s.sendmail("ioan.loosley@loosleyweb.co.uk", email, msg.as_string())
s.quit
if os.name != "nt":
import fcntl
import struct
def get_interface_ip(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, struct.pack('256s',
ifname[:15]))[20:24])
def get_lan_ip():
ip = socket.gethostbyname(socket.gethostname())
if ip.startswith("127.") and os.name != "nt":
interfaces = [
"eth0",
"eth1",
"eth2",
"wlan0",
"wlan1",
"wifi0",
"ath0",
"ath1",
"ppp0",
]
for ifname in interfaces:
try:
ip = get_interface_ip(ifname)
break
except IOError:
pass
return ip
os.system("clear")
uip=[]
macar=[]
lanip=get_lan_ip()
notorm=re.search(r"([1-9])\d+$", lanip)
notorm=notorm.group(0)
lanip=lanip.replace(str(notorm),"")
knowmac={"Carys":"ac:bc:32:0f:e2:22"}
def rescan():
devnull = open(os.devnull, 'wb')
p = [] # ip -> process
for n in range(1, 100): # start ping processes
ip = str(lanip) + "%d" % n
p.append((ip, Popen(['ping', '-c', '3', ip], stdout=devnull)))
#NOTE: you could set stderr=subprocess.STDOUT to ignore stderr also
os.system("clear")
while p:
for i, (ip, proc) in enumerate(p[:]):
if proc.poll() is not None: # ping finished
p.remove((ip, proc)) # this makes it O(n**2)
if proc.returncode == 0:
uip.append(ip)
devnull.close()
os.system("clear")
for ipaddr in uip:
pid = Popen(["arp", "-n", ipaddr], stdout=PIPE)
s = pid.communicate()[0]
mac = re.search(r"(([a-f\d]{1,2}\:){5}[a-f\d]{1,2})", s)
macar.append(mac.group(0) if mac else "")
while True:
os.system("clear")
rescan()
for dev in macar:
if str(dev) in knowmac:
print knowmac[dev], "is on the network"
txt=knowmac[dev] + "is on the network"
sendemail(txt, "legit.ioangogo@gmail.com", knowmac[dev])
del macar[:]
time.sleep(30)
os.system("clear")
print "Refreshing"
| unlicense | Python | |
c4a05cfc469793fccb4bb958e51ef536a8f8e983 | Add Support Vector regression with Python | a-holm/MachinelearningAlgorithms,a-holm/MachinelearningAlgorithms | Regression/SupportVectorRegression/regularSVMRegression.py | Regression/SupportVectorRegression/regularSVMRegression.py | # -*- coding: utf-8 -*-
"""Support Vector regression for machine learning.
Support Vector Machine can also be used as a regression method, maintaining all
the main features that characterize the algorithm (maximal margin). The Support
Vector Regression (SVR) uses the same principles as the SVM for classification,
with only a few minor differences. First of all, because output is a real
number it becomes very difficult to predict the information at hand, which has
infinite possibilities. In the case of regression, a margin of tolerance is set
in approximation to the SVM which would have already requested from the
problem. But besides this fact, there is also a more complicated reason, the
algorithm is more complicated therefore to be taken in consideration. However,
the main idea is always the same: to minimize error, individualizing the
hyperplane which maximizes the margin, keeping in mind that part of the error
is tolerated.
Example:
$ python regularSVMRegression.py
Todo:
*
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.svm import SVR
from sklearn.preprocessing import StandardScaler
# from sklearn.model_selection import train_test_split
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
features = dataset.iloc[:, 1:2].values
labels = dataset.iloc[:, 2].values
# Splitting the Dataset into a Training set and a Test set
"""feature_train, feature_test, label_train, label_test = train_test_split(
features, labels, test_size=0.2)"""
# Feature scaling, normalize scale is important. Especially on algorithms
# involving euclidian distance. Two main feature scaling formulas are:
# Standardisation: x_stand = (x-mean(x))/(standard_deviation(x))
# Normalisation: x_norm = (x-min(x))/(max(x)-min(x))
sc_features = StandardScaler()
sc_labels = StandardScaler()
features = sc_features.fit_transform(features)
labels = sc_labels.fit_transform(labels.reshape(-1, 1))
# Fit the SVR regression model to the dataset
regressor = SVR(kernel='rbf')
regressor.fit(features, labels)
# Predict new result with the SVR regression model
# y_pred = sc_labels.inverse_transform(regressor.predict(
# sc_features.transform(np.array([65]).reshape(-1, 1))))
x_pred = sc_features.transform(np.array([6.5]).reshape(-1, 1))
y_pred = regressor.predict(x_pred)
# Visualising the regression results with smoother curve
x_grid = np.arange(min(features), max(features), 0.1)
x_grid = x_grid.reshape((len(x_grid), 1))
plt.scatter(features, labels, color='r')
plt.scatter(x_pred, y_pred, color='c')
plt.plot(x_grid, regressor.predict(x_grid), color='b')
plt.title('Truth or Bluff (SVR regression Model)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
| mit | Python | |
f6d3594042f41866b7b590539700d000c01d0e91 | add script to integrate disambiguated results | yngcan/patentprocessor,funginstitute/patentprocessor,funginstitute/patentprocessor,funginstitute/patentprocessor,nikken1/patentprocessor,nikken1/patentprocessor,nikken1/patentprocessor,yngcan/patentprocessor,yngcan/patentprocessor | integrate.py | integrate.py | #!/usr/bin/env python
"""
Takes in a CSV file that represents the output of the disambiguation engine:
Patent Number, Firstname, Lastname, Unique_Inventor_ID
Groups by Unique_Inventor_ID and then inserts them into the Inventor table using
lib.alchemy.match
"""
import sys
import lib.alchemy as alchemy
from lib.util.csv_reader import read_file
from lib.handlers.xml_util import normalize_document_identifier
from collections import defaultdict
import cPickle as pickle
def integrate(filename):
blocks = defaultdict(list)
for line in read_file(filename):
patent_number, name_first, name_last, unique_inventor_id = line
patent_number = normalize_document_identifier(patent_number)
rawinventors = alchemy.session.query(alchemy.RawInventor).filter_by(
patent_id = patent_number,
name_first = name_first,
name_last = name_last).all()
blocks[unique_inventor_id].extend(rawinventors)
pickle.dump(blocks, open('integrate.db', 'wb'))
for block in blocks.itervalues():
alchemy.match(block)
def main():
if len(sys.argv) <= 1:
print 'USAGE: python integrate.py <path-to-csv-file>'
sys.exit()
filename = sys.argv[1]
integrate(filename)
if __name__ == '__main__':
main()
| bsd-2-clause | Python | |
3dd99c1e3f2776a44ff9d9354d15e209fccbffbb | Add simple script to send order with the joystick | baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite | Motors/motorJoy.py | Motors/motorJoy.py | #!/usr/bin/env python
# -*- coding: utf8 -*-
#
# Copyright (c) 2013 Nautilabs
#
# Licensed under the MIT License,
# https://github.com/baptistelabat/robokite
# Authors: Baptiste LABAT
import time
import serial
import numpy as np
import pygame
from pygame.locals import *
pygame.init()
fenetre = pygame.display.set_mode((300,300))
pygame.joystick.init()
nb_joysticks = pygame.joystick.get_count()
mon_joystick = pygame.joystick.Joystick(0)
mon_joystick.init() #Initialisation
def computeXORChecksum(chksumdata):
# Inspired from http://doschman.blogspot.fr/2013/01/calculating-nmea-sentence-checksums.html
# Initializing XOR counter
csum = 0
# For each char in chksumdata, XOR against the previous
# XOR char. The final XOR of the last char will be the
# checksum
for c in chksumdata:
# Makes XOR value of counter with the next char in line
# and stores the new XOR value in csum
csum ^= ord(c)
h = hex(csum)
return h[2:]#get hex data without 0x prefix
dt = 0.01
locations=['/dev/ttyACM0','/dev/ttyACM1','/dev/ttyACM2','/dev/ttyACM3','/dev/ttyACM4','/dev/ttyACM5','/dev/ttyUSB0','/dev/ttyUSB1','/dev/ttyUSB2','/dev/ttyUSB3','/dev/ttyS0','/dev/ttyS1','/dev/ttyS2','/dev/ttyS3']
for device in locations:
try:
print "Trying...", device
ser = serial.Serial(device, baudrate=19200, timeout=1)
print "Connected on ", device
break
except:
print "Failed to connect on ", device
time.sleep(1.5)
ser.write('i1')
t0 = time.time()
msg1 = "ORPW1"+","+str(0.00)
msg1 = "$"+msg1 +"*"+ computeXORChecksum(msg1) + chr(13).encode('ascii')
msg2 = "ORPW2"+","+str(0.00)
msg2 = "$"+msg2 +"*"+ computeXORChecksum(msg2) + chr(13).encode('ascii')
while True:
for event in pygame.event.get():
if event.type == JOYAXISMOTION:
if event.axis == 2:
#print "direction control ", event.value
alpha2 = np.round(event.value, 2)
msg2 = "ORPW2"+","+str(alpha2)
msg2 = "$"+msg2 +"*"+ computeXORChecksum(msg2) + chr(13).encode('ascii')
if event.axis == 3:
#print "power control ", event.value
alpha1 = np.round(event.value, 2)
msg1 = "ORPW1"+","+str(alpha1)
msg1 = "$"+msg1 +"*"+ computeXORChecksum(msg1) + chr(13).encode('ascii')
if time.time()-t0 > dt:
ser.write(msg1)
print msg1
ser.write(msg2)
print msg2
t0 = time.time()
try: #The ressource can be temporarily unavailable
line = ser.readline()
print "Received from arduino: ", line
except Exception, e:
print("Error reading from serial port" + str(e))
ser.close()
| mit | Python | |
704f3441de39a07901faaf8b0622de77aa3d0f86 | Fix error in loader.discover() call | rakeshmi/tempest,xbezdick/tempest,vedujoshi/tempest,alinbalutoiu/tempest,cisco-openstack/tempest,flyingfish007/tempest,bigswitch/tempest,Juraci/tempest,pandeyop/tempest,zsoltdudas/lis-tempest,dkalashnik/tempest,NexusIS/tempest,Juniper/tempest,masayukig/tempest,Tesora/tesora-tempest,sebrandon1/tempest,tonyli71/tempest,pandeyop/tempest,cisco-openstack/tempest,xbezdick/tempest,JioCloud/tempest,pczerkas/tempest,sebrandon1/tempest,Juraci/tempest,varunarya10/tempest,akash1808/tempest,LIS/lis-tempest,roopali8/tempest,bigswitch/tempest,manasi24/tempest,tudorvio/tempest,JioCloud/tempest,manasi24/jiocloud-tempest-qatempest,redhat-cip/tempest,hayderimran7/tempest,masayukig/tempest,flyingfish007/tempest,tudorvio/tempest,openstack/tempest,hayderimran7/tempest,zsoltdudas/lis-tempest,openstack/tempest,manasi24/jiocloud-tempest-qatempest,vedujoshi/tempest,dkalashnik/tempest,rakeshmi/tempest,Tesora/tesora-tempest,roopali8/tempest,alinbalutoiu/tempest,pczerkas/tempest,akash1808/tempest,varunarya10/tempest,manasi24/tempest,nunogt/tempest,LIS/lis-tempest,Juniper/tempest,NexusIS/tempest,izadorozhna/tempest,nunogt/tempest,tonyli71/tempest,izadorozhna/tempest,redhat-cip/tempest | tempest/test_discover/test_discover.py | tempest/test_discover/test_discover.py | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from tempest.test_discover import plugins
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
def load_tests(loader, tests, pattern):
ext_plugins = plugins.TempestTestPluginManager()
suite = unittest.TestSuite()
base_path = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]
base_path = os.path.split(base_path)[0]
# Load local tempest tests
for test_dir in ['tempest/api', 'tempest/scenario',
'tempest/thirdparty']:
full_test_dir = os.path.join(base_path, test_dir)
if not pattern:
suite.addTests(loader.discover(full_test_dir,
top_level_dir=base_path))
else:
suite.addTests(loader.discover(full_test_dir, pattern=pattern,
top_level_dir=base_path))
plugin_load_tests = ext_plugins.get_plugin_load_tests_tuple()
if not plugin_load_tests:
return suite
# Load any installed plugin tests
for plugin in plugin_load_tests:
test_dir, top_path = plugin_load_tests[plugin]
if not pattern:
suite.addTests(loader.discover(test_dir, top_level_dir=top_path))
else:
suite.addTests(loader.discover(test_dir, pattern=pattern,
top_level_dir=top_path))
return suite
| # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from tempest.test_discover import plugins
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
def load_tests(loader, tests, pattern):
ext_plugins = plugins.TempestTestPluginManager()
suite = unittest.TestSuite()
base_path = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]
base_path = os.path.split(base_path)[0]
# Load local tempest tests
for test_dir in ['tempest/api', 'tempest/scenario',
'tempest/thirdparty']:
full_test_dir = os.path.join(base_path, test_dir)
if not pattern:
suite.addTests(loader.discover(full_test_dir,
top_level_dir=base_path))
else:
suite.addTests(loader.discover(full_test_dir, pattern=pattern,
top_level_dir=base_path))
plugin_load_tests = ext_plugins.get_plugin_load_tests_tuple()
if not plugin_load_tests:
return suite
# Load any installed plugin tests
for plugin in plugin_load_tests:
test_dir, top_path = plugin_load_tests[plugin]
if not pattern:
suite.addTests(loader.discover(test_dir, top_level=top_path))
else:
suite.addTests(loader.discover(test_dir, pattern=pattern,
top_level=top_path))
return suite
| apache-2.0 | Python |
0483be7fc08f429461d2901d22ef220ab9ee59e5 | allow to run tox as 'python -m tox', which is handy on Windoze | msabramo/tox,msabramo/tox | tox/__main__.py | tox/__main__.py | from tox._cmdline import main
main()
| mit | Python | |
91f961fa73bc193ba72700814fd8cec0c81168b4 | add iris classification example | ramon-oliveira/aorun | examples/classification.py | examples/classification.py | import os
import sys
sys.path.insert(0, os.path.abspath('..'))
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
import torch
import numpy as np
from aorun.models import Model
from aorun.layers import Dense
from aorun.layers import Activation
from aorun.optimizers import SGD
torch.manual_seed(42)
X, y = datasets.load_iris(return_X_y=True)
X = X.astype('float32')
y = np.eye(y.max()+1)[y].astype('float32')
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
print(X_train.shape, y_train.shape)
X_train = torch.from_numpy(X_train)
y_train = torch.from_numpy(y_train)
X_test = torch.from_numpy(X_test)
y_test = torch.from_numpy(y_test)
model = Model(
Dense(10, input_dim=X_train.size()[-1]),
Activation('relu'),
Dense(10),
Activation('relu'),
Dense(y_test.size()[-1]),
Activation('softmax')
)
sgd = SGD(lr=0.5)
history = model.fit(X_train, y_train, n_epochs=100,
loss='categorical_crossentropy', optimizer=sgd)
y_test = y_test.numpy()
y_pred = model.forward(X_test).data.numpy()
acc = metrics.accuracy_score(y_test.argmax(axis=1), y_pred.argmax(axis=1))
print('Accuracy:', acc)
| mit | Python | |
3644df1b645d4fd607f22b24c5e676644be4a9da | Test script for the bsddb C extension module. | sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator | Lib/test/test_bsddb.py | Lib/test/test_bsddb.py | #! /usr/bin/env python
"""Test script for the bsddb C module
Roger E. Masse
"""
import bsddb
import tempfile
from test_support import verbose
def test(openmethod, what):
if verbose:
print '\nTesting: ', what
fname = tempfile.mktemp()
f = openmethod(fname, 'c')
if verbose:
print 'creation...'
f['0'] = ''
f['a'] = 'Guido'
f['b'] = 'van'
f['c'] = 'Rossum'
f['d'] = 'invented'
f['f'] = 'Python'
if verbose:
print '%s %s %s' % (f['a'], f['b'], f['c'])
if what == 'BTree' :
if verbose:
print 'key ordering...'
f.set_location(f.first()[0])
while 1:
try:
rec = f.next()
except KeyError:
if rec <> f.last():
print 'Error, last <> last!'
f.previous()
break
if verbose:
print rec
if not f.has_key('a'):
print 'Error, missing key!'
f.sync()
f.close()
if verbose:
print 'modification...'
f = openmethod(fname, 'w')
f['d'] = 'discovered'
if verbose:
print 'access...'
for key in f.keys():
word = f[key]
if verbose:
print word
f.close()
types = [(bsddb.btopen, 'BTree'),
(bsddb.hashopen, 'Hash Table'),
# (bsddb.rnopen,'Record Numbers'), 'put' for RECNO for bsddb 1.85
# appears broken... at least on
# Solaris Intel - rmasse 1/97
]
for type in types:
test(type[0], type[1])
| mit | Python | |
a99cf844a8a50a70a65347dad0d656763bc8a408 | Add tests to validate SystemML's deep learning APIs. | niketanpansare/incubator-systemml,apache/incubator-systemml,nakul02/incubator-systemml,deroneriksson/systemml,nakul02/incubator-systemml,deroneriksson/incubator-systemml,deroneriksson/systemml,nakul02/systemml,niketanpansare/systemml,dusenberrymw/incubator-systemml,nakul02/incubator-systemml,gweidner/incubator-systemml,nakul02/systemml,apache/incubator-systemml,deroneriksson/incubator-systemml,gweidner/incubator-systemml,deroneriksson/incubator-systemml,apache/incubator-systemml,dusenberrymw/incubator-systemml,niketanpansare/incubator-systemml,dusenberrymw/systemml,deroneriksson/systemml,niketanpansare/systemml,dusenberrymw/systemml,deroneriksson/systemml,gweidner/incubator-systemml,deroneriksson/incubator-systemml,dusenberrymw/systemml,niketanpansare/systemml,dusenberrymw/incubator-systemml,deroneriksson/incubator-systemml,dusenberrymw/incubator-systemml,dusenberrymw/incubator-systemml,gweidner/systemml,niketanpansare/systemml,nakul02/incubator-systemml,dusenberrymw/incubator-systemml,nakul02/incubator-systemml,nakul02/systemml,gweidner/systemml,niketanpansare/incubator-systemml,dusenberrymw/systemml,gweidner/systemml,gweidner/systemml,gweidner/incubator-systemml,nakul02/systemml,nakul02/systemml,nakul02/systemml,niketanpansare/incubator-systemml,apache/incubator-systemml,apache/incubator-systemml,nakul02/incubator-systemml,deroneriksson/systemml,apache/incubator-systemml,deroneriksson/incubator-systemml,gweidner/systemml,gweidner/systemml,gweidner/incubator-systemml,dusenberrymw/systemml,deroneriksson/systemml,niketanpansare/systemml,dusenberrymw/systemml,gweidner/incubator-systemml,niketanpansare/systemml | src/main/python/tests/test_nn_numpy.py | src/main/python/tests/test_nn_numpy.py | #!/usr/bin/python
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# Assumption: pip install keras
#
# This test validates SystemML's deep learning APIs (Keras2DML, Caffe2DML and nn layer) by comparing the results with that of keras.
#
# To run:
# - Python 2: `PYSPARK_PYTHON=python2 spark-submit --master local[*] --driver-memory 10g --driver-class-path SystemML.jar,systemml-*-extra.jar test_nn_numpy.py`
# - Python 3: `PYSPARK_PYTHON=python3 spark-submit --master local[*] --driver-memory 10g --driver-class-path SystemML.jar,systemml-*-extra.jar test_nn_numpy.py`
# Make the `systemml` package importable
import os
import sys
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../")
sys.path.insert(0, path)
import unittest
import numpy as np
from keras.models import Sequential
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout,Flatten
from keras import backend as K
from keras.models import Model
from systemml.mllearn import Keras2DML
from pyspark.sql import SparkSession
batch_size = 32
input_shape = (3,64,64)
K.set_image_data_format("channels_first")
# K.set_image_dim_ordering("th")
keras_tensor = np.random.rand(batch_size,input_shape[0], input_shape[1], input_shape[2])
sysml_matrix = keras_tensor.reshape((batch_size, -1))
tmp_dir = 'tmp_dir'
spark = SparkSession.builder.getOrCreate()
def are_predictions_all_close(keras_model):
sysml_model = Keras2DML(spark, keras_model, input_shape=input_shape, weights=tmp_dir)
keras_preds = keras_model.predict(keras_tensor).flatten()
sysml_preds = sysml_model.predict_proba(sysml_matrix).flatten()
#print(str(keras_preds))
#print(str(sysml_preds))
return np.allclose(keras_preds, sysml_preds)
class TestNNLibrary(unittest.TestCase):
def test_1layer_cnn_predictions(self):
keras_model = Sequential()
keras_model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape, padding='valid'))
keras_model.add(Flatten())
keras_model.add(Dense(10, activation='softmax'))
self.failUnless(are_predictions_all_close(keras_model))
def test_multilayer_cnn_predictions(self):
keras_model = Sequential()
keras_model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape, padding='valid'))
keras_model.add(MaxPooling2D(pool_size=(2, 2)))
keras_model.add(Conv2D(64, (3, 3), activation='relu'))
keras_model.add(MaxPooling2D(pool_size=(2, 2)))
keras_model.add(Flatten())
keras_model.add(Dense(256, activation='softmax'))
keras_model.add(Dropout(0.25))
keras_model.add(Dense(10, activation='softmax'))
self.failUnless(are_predictions_all_close(keras_model))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | Python | |
7cb4a7290b2af1983e1e291073c0a740d9e1334e | Add some useful finder tests | harlowja/failure | failure/tests/test_finders.py | failure/tests/test_finders.py | # -*- coding: utf-8 -*-
# Copyright (C) 2016 GoDaddy Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslotest import base
class FindersTest(base.BaseTestCase):
pass
| apache-2.0 | Python | |
0f5716b10afff9ccbc17fb595cd7cc2f85b45f8f | Add name attribute to each Page in ServiceWorkerPageSet | Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Pluto-tv/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk,Chilledheart/chromium,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,axinging/chromium-crosswalk,Just-D/chromium-1,Fireblend/chromium-crosswalk,Fireblend/chromium-crosswalk,Just-D/chromium-1,chuan9/chromium-crosswalk,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,Just-D/chromium-1,hgl888/chromium-crosswalk,Chilledheart/chromium,chuan9/chromium-crosswalk,chuan9/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,Just-D/chromium-1,chuan9/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,Just-D/chromium-1,Fireblend/chromium-crosswalk,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Just-D/chromium-1,axinging/chromium-crosswalk,Chilledheart/chromium,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Just-D/chromium-
1,Just-D/chromium-1,TheTypoMaster/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk,axinging/chromium-crosswalk,Just-D/chromium-1,Fireblend/chromium-crosswalk,Chilledheart/chromium,Pluto-tv/chromium-crosswalk | tools/perf/page_sets/service_worker.py | tools/perf/page_sets/service_worker.py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page
from telemetry.page import page_set as page_set
archive_data_file_path = 'data/service_worker.json'
class ServiceWorkerPageSet(page_set.PageSet):
"""Page set of applications using ServiceWorker"""
def __init__(self):
super(ServiceWorkerPageSet, self).__init__(
archive_data_file=archive_data_file_path,
bucket=page_set.PARTNER_BUCKET)
# Why: the first application using ServiceWorker
# 1st time: registration
self.AddUserStory(page.Page(
'https://jakearchibald.github.io/trained-to-thrill/', self,
name='first_load', make_javascript_deterministic=False))
# 2st time: 1st onfetch with caching
self.AddUserStory(page.Page(
'https://jakearchibald.github.io/trained-to-thrill/', self,
name='second_load', make_javascript_deterministic=False))
# 3rd time: 2nd onfetch from cache
self.AddUserStory(page.Page(
'https://jakearchibald.github.io/trained-to-thrill/', self,
name='third_load', make_javascript_deterministic=False))
| # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page
from telemetry.page import page_set as page_set
archive_data_file_path = 'data/service_worker.json'
class ServiceWorkerPageSet(page_set.PageSet):
"""Page set of applications using ServiceWorker"""
def __init__(self):
super(ServiceWorkerPageSet, self).__init__(
archive_data_file=archive_data_file_path,
bucket=page_set.PARTNER_BUCKET)
# Why: the first application using ServiceWorker
# 1st time: registration
self.AddUserStory(page.Page(
'https://jakearchibald.github.io/trained-to-thrill/', self,
make_javascript_deterministic=False))
# 2st time: 1st onfetch with caching
self.AddUserStory(page.Page(
'https://jakearchibald.github.io/trained-to-thrill/', self,
make_javascript_deterministic=False))
# 3rd time: 2nd onfetch from cache
self.AddUserStory(page.Page(
'https://jakearchibald.github.io/trained-to-thrill/', self,
make_javascript_deterministic=False))
| bsd-3-clause | Python |
b071df5de1316807e955e9ec6b4463ec44203fdf | add libhessian.gyp | pmq20/libhessian,pmq20/libhessian | libhessian.gyp | libhessian.gyp | # Copyright (c) 2017 Minqi Pan <pmq2001@gmail.com>
#
# This file is part of libhessian, distributed under the MIT License
# For full terms see the included LICENSE file
{
'targets': [
{
'target_name': 'libhessian',
'type': 'static_library',
'sources': [
'include/hessian.h',
'include/hessian/encode.h',
'include/hessian/decode.h',
'src/date.c',
'src/int.c',
'src/long.c',
'src/string.c',
],
'include_dirs': [
'include',
],
},
],
}
| mit | Python | |
41d0716d9e8fc16df2c67bd0ec12be706d41c678 | add an examples folder, has waterfall to start | matthewperkins/abf_reader | examples/waterfall_plot.py | examples/waterfall_plot.py | from abf_reader import *
import matplotlib.pyplot as plt
from abf_epoch import epoch, waterfall
import matplotlib.gridspec as gridspec
from scale_bars import *
if __name__=='__main__':
import os
labdir = os.environ.get("LABDIR")
# change some legend plot stuff
plt.rcParams.update(\
{'legend.frameon':False,
'legend.numpoints':1,
'legend.loc':'upper left',
'legend.fontsize':12})
# organize info for files / conditions / plot labels into dict
files_dict = [{'abf':abf_reader('2012_08_24_0000_cntrl.abf'),
'color':'black',
'label':'control'},
{'abf':abf_reader('2012_08_24_0004_frf.abf'),
'color':'red',
'label':'frf'}]
# pre plot grid
plt.figure()
gs = gridspec.GridSpec(2, 1, hspace = 0, wspace = 0,
left = 0, right = 1.)
# plot in loop
xlims = []
for i, fdict in enumerate(files_dict):
plt.subplot(gs[i,0])
epch = epoch(fdict.pop('abf'), 1, 1)
epch.set_pading(left = 1000, right = 2000)
wf = waterfall(epch)
wf.set_range() # this will change the xlim
xlims.append(wf._xlim)
for i, (xs,ys) in enumerate(wf):
plt.plot(xs,ys, linewidth = 0.5, **fdict)
if i==0:
fdict.pop('label')
plt.show()
# post plotting adjust axis so grids are comparable, maybe can do
# this with some shared axis object? Also hide axis ticks, labels
# and spines.
xmin = min([x[0] for x in xlims])
xmax = max([x[1] for x in xlims])
for ax in plt.gcf().axes:
ax.set_xlim((xmin, xmax))
ax.set_ylim((-60,30))
ax.legend()
# add a scale bar
add_scalebar(ax, matchx = False, matchy = False,
sizex = 1, sizey = 30,
labelx = '1 sec', labely = '30 mV',
bbox_to_anchor = (0.3,0.4),
sep = 1, pad = 1,
bbox_transform = ax.transAxes, borderpad=0)
plt.draw()
| mit | Python | |
b2a0247746756cc86074754bc993a757d6702b12 | Add coin flip simulator (hw02) | JMill/edX-Learning-From-Data-Programming | hw02/exercise-02-01.py | hw02/exercise-02-01.py | '''
For Homework 02, Exercieses 01-02. EdX Learning From Data course.
Jonathan Miller
'''
import random
# FUNCTIONS ###########################
def runTrial(numCoins, numFlips):
def flipCoin():
if random.random() > 0.5:
return head
else:
return tail
def findv1(vList):
return vList[0]
def findvrand(vList):
return random.choice(vList)
def findvmin(vList):
vmin = 1.
for v in vList:
if v < vmin:
vmin = v
return vmin
def sequencesToRatios(flipSequences):
v1 = 0
vrand = 0
vmin = 0
vList = []
for sequence in flipSequences:
numHeads = 0
#print sequence
for flip in sequence:
if flip == head:
numHeads += 1.
vList.append( numHeads / numFlips)
#print vList
v1 = findv1(vList)
vrand = findvrand(vList)
vmin = findvmin(vList)
return v1, vrand, vmin
flipSequences = []
v1 = 0
vrand = 0
vmin = 0
for coin in range(numCoins):
coinFlipResults = ""
for flip in range(numFlips):
coinFlipResults += flipCoin()
flipSequences.append(coinFlipResults)
v1, vrand, vmin = sequencesToRatios(flipSequences)
return v1, vrand, vmin
# MAIN ###########################
# Monte-Carlo estimate of E[v1], E[vrand], E[vmin] over numTrials trials.
# Python 2 script: note the print statement on the final line.
numTrials = 100000
#numTrials = 1
numCoins = 1000
numFlips = 10
v1Exp = 0
vrandExp = 0
vminExp = 0
# Marker characters consumed by runTrial's flipCoin()/sequencesToRatios().
head = "H"
tail = 't'
for trial in range(numTrials):
    v1Trial, vrandTrial, vminTrial = runTrial(numCoins,numFlips)
    #print v1Trial, vrandTrial, vminTrial
    v1Exp += v1Trial
    vrandExp += vrandTrial
    vminExp += vminTrial
# Average the per-trial statistics to obtain the expectations.
v1Exp /= numTrials
vrandExp /= numTrials
vminExp /= numTrials
print v1Exp, vrandExp, vminExp
| apache-2.0 | Python | |
c67dc16e73eea093befaa03790bd8d6f1b452c9a | Add simple test for FormDesignerPlugin | kcsry/django-form-designer,andersinno/django-form-designer,kcsry/django-form-designer,andersinno/django-form-designer,andersinno/django-form-designer-ai,andersinno/django-form-designer-ai | form_designer/tests/test_cms_plugin.py | form_designer/tests/test_cms_plugin.py | import pytest
from cms import api
from cms.page_rendering import render_page
from django.contrib.auth.models import AnonymousUser
from django.utils.crypto import get_random_string
from form_designer.contrib.cms_plugins.form_designer_form.cms_plugins import FormDesignerPlugin
from form_designer.models import FormDefinition, FormDefinitionField
@pytest.mark.django_db
def test_cms_plugin_renders_in_cms_page(rf):
    """End-to-end check that FormDesignerPlugin renders its form inside a CMS page."""
    # Minimal form definition with one CharField whose label is random,
    # so finding the label in the rendered HTML proves this form rendered.
    fd = FormDefinition.objects.create(
        mail_to='test@example.com',
        mail_subject='Someone sent you a greeting: {{ test }}'
    )
    field = FormDefinitionField.objects.create(
        form_definition=fd,
        name='test',
        label=get_random_string(),
        field_class='django.forms.CharField',
    )
    # Attach the plugin to the first placeholder of a fresh page.
    page = api.create_page("test", "page.html", "en")
    ph = page.get_placeholders()[0]
    api.add_plugin(ph, FormDesignerPlugin, "en", form_definition=fd)
    # Render the page for an anonymous visitor.
    request = rf.get("/")
    request.user = AnonymousUser()
    request.current_page = page
    # NOTE(review): rendered with language "fi" although page/plugin were
    # created for "en" -- presumably exercising language fallback; confirm.
    response = render_page(request, page, "fi", "test")
    response.render()
    content = response.content.decode("utf8")
    assert field.label in content
    assert "<form" in content
| bsd-3-clause | Python | |
a7220b46393bac832c9a922afaa125f0512bf53e | add handmappedtrack migration | brki/rpspot,brki/rpspot,brki/rpspot,brki/rpspot | trackmap/migrations/0003_handmappedtrack.py | trackmap/migrations/0003_handmappedtrack.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Adds the HandmappedTrack model: a hand-curated link from a Radio
    Paradise song to a Spotify track id, with a processed flag."""
    dependencies = [
        ('rphistory', '0001_initial'),
        ('trackmap', '0002_trackavailability_score'),
    ]
    operations = [
        migrations.CreateModel(
            name='HandmappedTrack',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
                ('spotify_track_id', models.CharField(max_length=120, help_text='Spotify track id')),
                ('info_url', models.CharField(max_length=255, help_text='URL to resource with information about album', blank=True)),
                ('processed', models.BooleanField(default=False)),
                ('rp_song', models.OneToOneField(related_name='handmapped_track', to='rphistory.Song')),
            ],
        ),
    ]
| bsd-2-clause | Python | |
f908dd5fda528b4ce6ebaed082050348bf6f23a5 | Test the console scripts entry point | gbenson/i8c | i8c/tests/test_entry_point.py | i8c/tests/test_entry_point.py | from i8c.tests import TestCase
import i8c
import sys
class TestEntryPoint(TestCase):
    """Test the console scripts entry point."""
    def setUp(self):
        # Save the process-global state that the tests mutate.
        self.saved_argv = sys.argv
        self.saved_stderr = sys.stderr
    def tearDown(self):
        # Restore the globals so later tests see a clean environment.
        sys.argv = self.saved_argv
        sys.stderr = self.saved_stderr
    def test_success_path(self):
        """Check the console scripts entry point success path."""
        # --version is handled successfully; run_compiler signals that with None.
        sys.argv[1:] = ["--version"]
        self.assertIs(i8c.run_compiler(), None)
    def test_failure_path(self):
        """Check the console scripts entry point failure path."""
        # An unknown flag makes run_compiler return exit status 1; its error
        # output is diverted to stdout to keep stderr quiet during the run.
        sys.argv[1:] = ["-x"]
        sys.stderr = sys.stdout
        self.assertEqual(i8c.run_compiler(), 1)
| lgpl-2.1 | Python | |
f7b875bb3d4b313e9c1e22297918d33e67633104 | Add test trek_dtail_pdf language none | makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek | geotrek/altimetry/tests/test_models.py | geotrek/altimetry/tests/test_models.py | import os
from django.test import TestCase
from django.conf import settings
from geotrek.trekking.factories import TrekFactory
from geotrek.trekking.models import Trek
class AltimetryMixinTest(TestCase):
    def test_get_elevation_chart_none(self):
        """A trek without a path still gets an elevation-profile PNG written."""
        trek = TrekFactory.create(no_path=True)
        trek.get_elevation_chart_path()
        basefolder = os.path.join(settings.MEDIA_ROOT, 'profiles')
        print(Trek._meta.model_name, trek.pk, 'en')
        # NOTE(review): the expected filename hard-codes pk '1' instead of
        # using trek.pk (printed above) -- fragile if the id sequence moves.
        self.assertTrue(os.path.exists(os.path.join(basefolder,
                                                    '%s-%s-%s.png' % (Trek._meta.model_name, '1', 'en'))))
| bsd-2-clause | Python | |
4a96826205a628d9b3215418841a9ad552d8f2c2 | Add Climbing the leaderboard solution in Python | julianespinel/training,julianespinel/training,julianespinel/training,julianespinel/training | hackerrank/climbing_the_leaderboard.py | hackerrank/climbing_the_leaderboard.py | '''
https://www.hackerrank.com/challenges/climbing-the-leaderboard
# Climbing the leaderboard
The function that solves the problem is:
```python
get_positions_per_score(ranks, scores)
```
The solution uses `deque` instead of lists. Why?
To have O(1) in appends and pops from either side of the deque.
See: https://docs.python.org/3/library/collections.html#collections.deque
The complexity of this solution is:
- Time: O(ranks) + O(scores) = O(max(ranks,scores))
- Space: O(ranks) + O(scores) = O(max(ranks,scores))
'''
import sys
from collections import deque
def read_values() -> list[int]:
    """Read one space-separated line of integers from stdin.

    The first stdin line (an element count) is discarded unread.
    """
    sys.stdin.readline()  # leading count line; not needed
    raw_line = sys.stdin.readline()
    return [int(token) for token in raw_line.split(' ')]
def remove_duplicates(ranks: list[int]) -> list[int]:
    """Return *ranks* with duplicates dropped, keeping first-occurrence order.

    Runs in O(n): a set remembers which values were already emitted.
    The input is expected to be sorted in descending order, so the output
    is the strictly-decreasing leaderboard.
    """
    seen = set()
    unique = []
    for value in ranks:
        if value not in seen:
            seen.add(value)
            unique.append(value)
    return unique
def move_to_next_rank(index: int, ranks: deque[int], score: int) -> tuple[int, deque[int]]:
    """Pop every rank (from the low/right end) that *score* beats.

    *index* tracks the position of the deque's last element and is kept
    in sync with each pop.  If the loop stops on a rank the score does
    not beat, that rank is pushed back.  Returns (index, ranks).
    """
    if not ranks:
        return index, ranks
    current = ranks.pop()  # O(1)
    index -= 1
    while ranks and score > current:  # each rank is popped at most once overall
        current = ranks.pop()  # O(1)
        index -= 1
    if score <= current:
        # The loop ended on a rank the score does not beat; restore it.
        ranks.append(current)  # O(1)
        index += 1
    return index, ranks
def get_positions_per_score(ranks: deque[int], scores: deque[int]) -> deque[int]:
    """Return the zero-based leaderboard position for each score.

    Ranks are consumed right-to-left at most once across all scores, so
    the whole pass costs O(len(ranks) + len(scores)).
    """
    positions = deque()  # deque so every append is O(1)
    cursor = len(ranks) - 1
    for score in scores:
        # Advance past every remaining rank this score beats.
        cursor, ranks = move_to_next_rank(cursor, ranks, score)
        if not ranks:
            # Score beats every remaining rank: top of the board.
            positions.append(0)
        elif score == ranks[-1]:
            positions.append(cursor)  # ties share the existing position
        elif score < ranks[-1]:
            positions.append(cursor + 1)  # slots in just below the cursor rank
    return positions
def print_positions(positions: deque[int]) -> None:
    """Print each position on its own line, converted to one-based indexing.

    The answer format requires one-based positions, while the rest of the
    pipeline works with zero-based indices, hence the +1.
    """
    for zero_based in positions:
        one_based = zero_based + 1
        print(one_based)
if __name__ == '__main__':
    # Read the existing leaderboard then the player's scores from stdin,
    # dedupe the board, and print one one-based position per score.
    ranks_input = read_values()
    scores_input = read_values()
    ranks_list = remove_duplicates(ranks_input)  # O(ranks)
    ranks = deque(ranks_list)  # O(ranks)
    scores = deque(scores_input)  # O(scores)
    positions = get_positions_per_score(ranks, scores)  # O(ranks) + O(scores)
    print_positions(positions)  # O(positions)
| mit | Python | |
05471fc9d02335915d3697f92189f33c2e557624 | add missing coordinate_space.py file | google/neuroglancer,janelia-flyem/neuroglancer,janelia-flyem/neuroglancer,janelia-flyem/neuroglancer,google/neuroglancer,google/neuroglancer,google/neuroglancer,google/neuroglancer,google/neuroglancer,janelia-flyem/neuroglancer,google/neuroglancer,google/neuroglancer,janelia-flyem/neuroglancer | python/neuroglancer/coordinate_space.py | python/neuroglancer/coordinate_space.py | # coding=utf-8
# @license
# Copyright 2019-2020 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers for representing a Neuroglancer coordinate space."""
import collections
import numpy as np
__all__ = []
def export(obj):
__all__.append(obj.__name__)
return obj
# SI metric prefix -> power-of-ten exponent.
si_prefixes = {
    'Y': 24,
    'Z': 21,
    'E': 18,
    'P': 15,
    'T': 12,
    'G': 9,
    'M': 6,
    'k': 3,
    'h': 2,
    '': 0,
    'c': -2,
    'm': -3,
    'u': -6,
    'ยต': -6,  # NOTE(review): looks like a mis-encoded micro sign (ยต) -- confirm source encoding
    'n': -9,
    'p': -12,
    'f': -15,
    'a': -18,
    'z': -21,
    'y': -24,
}
# Base units accepted by the coordinate space.
si_units = ['m', 's', 'rad/s', 'Hz']
# Every prefixed unit string -> (base unit, exponent), e.g. 'nm' -> ('m', -9).
si_units_with_prefixes = {
    '%s%s' % (prefix, unit): (unit, exponent)
    for (prefix, exponent) in si_prefixes.items() for unit in si_units
}
# The empty string denotes a dimensionless axis.
si_units_with_prefixes[''] = ('', 0)
def parse_unit(scale, unit):
    """Normalize a (scale, prefixed-unit) pair to (scale, base-unit).

    E.g. (4, 'nm') -> (4e-9, 'm').  Negative exponents divide rather than
    multiply by a fractional power so float behaviour matches exactly.
    """
    base_unit, exponent = si_units_with_prefixes[unit]
    if exponent < 0:
        return (scale / 10 ** (-exponent), base_unit)
    return (scale * 10 ** exponent, base_unit)
@export
class DimensionScale(collections.namedtuple('DimensionScale', ['scale', 'unit'])):
    """A (scale, unit) pair describing one coordinate-space dimension.

    Defaults to a dimensionless unit scale: DimensionScale() == (1, '').
    """
    __slots__ = ()  # keep instances as lean as the underlying tuple
    def __new__(cls, scale=1, unit=''):
        # Overridden only to supply default values for both fields.
        return super().__new__(cls, scale, unit)
@export
class CoordinateSpace(object):
    """An ordered set of dimensions, each with a name, scale and base unit.

    Built either from a JSON dict of ``name: [scale, unit]`` pairs or from
    parallel ``names``/``scales``/``units`` sequences.
    """
    __slots__ = ('names', 'scales', 'units')
    def __init__(self, json=None, names=None, scales=None, units=None):
        if json is None:
            if names is not None:
                self.names = tuple(names)
                scales = np.array(scales, dtype=np.float64)
                if isinstance(units, str):
                    # A single unit string applies to every dimension.
                    units = tuple(units for _ in names)
                # Normalize SI prefixes, e.g. (4, 'nm') -> (4e-9, 'm').
                scales_and_units = tuple(
                    parse_unit(scale, unit) for scale, unit in zip(scales, units))
                scales = np.array([s[0] for s in scales_and_units], dtype=np.float64)
                units = tuple(s[1] for s in scales_and_units)
                self.units = units
                self.scales = scales
            else:
                # No arguments: the empty (rank-0) coordinate space.
                self.names = ()
                self.scales = np.zeros(0, dtype=np.float64)
                self.units = ()
        else:
            if not isinstance(json, dict): raise TypeError
            # NOTE(review): on this path scales/units are taken as-is; SI
            # prefixes are NOT normalized -- confirm inputs are pre-normalized.
            self.names = tuple(json.keys())
            self.scales = np.array([json[k][0] for k in self.names], dtype=np.float64)
            self.units = tuple(json[k][1] for k in self.names)
        # Freeze the scales array so instances behave as immutable values.
        self.scales.setflags(write=False)
    @property
    def rank(self):
        # Number of dimensions.
        return len(self.names)
    def __getitem__(self, i):
        # Supports lookup by dimension name, by slice, or by integer index.
        if isinstance(i, str):
            idx = self.names.index(i)
            return DimensionScale(scale=self.scales[idx], unit=self.units[idx])
        if isinstance(i, slice):
            idxs = range(self.rank)[i]
            return [DimensionScale(scale=self.scales[j], unit=self.units[j]) for j in idxs]
        return DimensionScale(scale=self.scales[i], unit=self.units[i])
    def __repr__(self):
        return 'CoordinateSpace(%r)' % (self.to_json(), )
    def to_json(self):
        # Inverse of the JSON constructor path: name -> [scale, unit].
        d = collections.OrderedDict()
        for name, scale, unit in zip(self.names, self.scales, self.units):
            d[name] = [scale, unit]
        return d
| apache-2.0 | Python | |
cea956d06e63e2f4c63a35e72a3eaf4861394671 | Create RotateArray.py | lingcheng99/LeetCode | RotateArray.py | RotateArray.py | """
Rotate Array
Rotate an array of n elements to the right by k steps.
For example, with n = 7 and k = 3, the array [1,2,3,4,5,6,7] is rotated to [5,6,7,1,2,3,4].
Note:
Try to come up as many solutions as you can, there are at least 3 different ways to solve this problem.
"""
#First solution with slicing
class Solution(object):
    def rotate(self, nums, k):
        """Rotate ``nums`` right by ``k`` steps in place (concatenation approach).

        :type nums: List[int]
        :type k: int
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        n = len(nums)
        if n == 1 or k == 0:
            return
        k %= n
        # Append the first n-k elements after the original list, then keep
        # the last n elements of that extended view.
        extended = nums + nums[:n - k]
        nums[:] = extended[n - k:]
#Second solution, another kind of slicing
#Also slicing, shorter code
class Solution(object):
    def rotate(self, nums, k):
        """Rotate ``nums`` right by ``k`` steps in place via one slice swap.

        :type nums: List[int]
        :type k: int
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        if len(nums) == 1 or k == 0:
            return
        k %= len(nums)
        # The last k elements move to the front; the rest shift right.
        tail = nums[len(nums) - k:]
        head = nums[:len(nums) - k]
        nums[:] = tail + head
#Third solution, with insert/pop
class Solution(object):
    def rotate(self, nums, k):
        """Rotate ``nums`` right by ``k`` steps in place, one element at a time.

        O(n*k) pop/insert approach, kept as a deliberately different strategy.

        :type nums: List[int]
        :type k: int
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        if len(nums) == 1 or k == 0:
            return
        steps = k % len(nums)
        while steps > 0:
            # Move the last element to the front; repeat k (mod n) times.
            last = nums.pop()
            nums.insert(0, last)
            steps -= 1
| mit | Python | |
b3dc26a391fac6de26140c0ff6590a85820a8091 | Update hyperparameters for sklearn model | lilleswing/deepchem,ktaneishi/deepchem,ktaneishi/deepchem,rbharath/deepchem,rbharath/deepchem,deepchem/deepchem,Agent007/deepchem,lilleswing/deepchem,miaecle/deepchem,Agent007/deepchem,lilleswing/deepchem,joegomes/deepchem,ktaneishi/deepchem,miaecle/deepchem,miaecle/deepchem,Agent007/deepchem,deepchem/deepchem,joegomes/deepchem,peastman/deepchem,peastman/deepchem | examples/gdb7/gdb7_sklearn.py | examples/gdb7/gdb7_sklearn.py | """
Script that trains Sklearn singletask models on GDB7 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import deepchem as dc
import numpy as np
import shutil
from sklearn.kernel_ridge import KernelRidge
# Fixed seed so featurization/splitting is reproducible between runs.
np.random.seed(123)
# Working directories; any previous run under base_dir is wiped below.
base_dir = "/tmp/gdb7_sklearn"
data_dir = os.path.join(base_dir, "dataset")
model_dir = os.path.join(base_dir, "model")
train_dir = os.path.join(base_dir, "train")
test_dir = os.path.join(base_dir, "test")
if os.path.exists(base_dir):
  shutil.rmtree(base_dir)
os.makedirs(base_dir)
# Coulomb-matrix eigenvalue featurizer sized for GDB7's largest molecule.
max_num_atoms = 23
featurizers = dc.feat.CoulombMatrixEig(max_num_atoms)
input_file = "gdb7.sdf"
tasks = ["u0_atom"]
smiles_field = "smiles"
mol_field = "mol"
featurizer = dc.data.SDFLoader(tasks, smiles_field=smiles_field, mol_field=mol_field, featurizer=featurizers)
dataset = featurizer.featurize(input_file, data_dir)
random_splitter = dc.splits.RandomSplitter()
train_dataset, test_dataset = random_splitter.train_test_split(dataset, train_dir, test_dir)
# Normalize both features (X) and targets (y) using train-set statistics.
transformers = [dc.trans.NormalizationTransformer(transform_X=True, dataset=train_dataset), dc.trans.NormalizationTransformer(transform_y=True, dataset=train_dataset)]
#transformers = [dc.trans.NormalizationTransformer(transform_y=True, dataset=train_dataset)]
for transformer in transformers:
  train_dataset = transformer.transform(train_dataset)
for transformer in transformers:
  test_dataset = transformer.transform(test_dataset)
regression_metric = dc.metrics.Metric(dc.metrics.mean_absolute_error, mode="regression")
def model_builder(model_dir):
  # One kernel-ridge regressor per task, wrapped for the deepchem API.
  sklearn_model = KernelRidge(
      kernel="laplacian", alpha=0.05, gamma=0.1)
  return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(tasks, model_builder, model_dir)
# Fit trained model
model.fit(train_dataset)
model.save()
train_evaluator = dc.utils.evaluate.Evaluator(model, train_dataset, transformers)
train_scores = train_evaluator.compute_model_performance([regression_metric])
print("Train scores [kcal/mol]")
print(train_scores)
test_evaluator = dc.utils.evaluate.Evaluator(model, test_dataset, transformers)
test_scores = test_evaluator.compute_model_performance([regression_metric])
print("Validation scores [kcal/mol]")
print(test_scores)
| """
Script that trains Sklearn singletask models on GDB7 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import deepchem as dc
import numpy as np
import shutil
from sklearn.kernel_ridge import KernelRidge
# Fixed seed so featurization/splitting is reproducible between runs.
np.random.seed(123)
# Working directories; any previous run under base_dir is wiped below.
base_dir = "/tmp/gdb7_sklearn"
data_dir = os.path.join(base_dir, "dataset")
model_dir = os.path.join(base_dir, "model")
train_dir = os.path.join(base_dir, "train")
test_dir = os.path.join(base_dir, "test")
if os.path.exists(base_dir):
  shutil.rmtree(base_dir)
os.makedirs(base_dir)
# Coulomb-matrix eigenvalue featurizer sized for GDB7's largest molecule.
max_num_atoms = 23
featurizers = dc.feat.CoulombMatrixEig(max_num_atoms)
input_file = "gdb7.sdf"
tasks = ["u0_atom"]
smiles_field = "smiles"
mol_field = "mol"
featurizer = dc.data.SDFLoader(tasks, smiles_field=smiles_field, mol_field=mol_field, featurizer=featurizers)
dataset = featurizer.featurize(input_file, data_dir)
random_splitter = dc.splits.RandomSplitter()
train_dataset, test_dataset = random_splitter.train_test_split(dataset, train_dir, test_dir)
# Normalize both features (X) and targets (y) using train-set statistics.
transformers = [dc.trans.NormalizationTransformer(transform_X=True, dataset=train_dataset), dc.trans.NormalizationTransformer(transform_y=True, dataset=train_dataset)]
for transformer in transformers:
  train_dataset = transformer.transform(train_dataset)
for transformer in transformers:
  test_dataset = transformer.transform(test_dataset)
regression_metric = dc.metrics.Metric(dc.metrics.mean_absolute_error, mode="regression")
def model_builder(model_dir):
  # One kernel-ridge regressor per task, wrapped for the deepchem API.
  sklearn_model = KernelRidge(
      kernel="laplacian", alpha=0.0001, gamma=0.0001)
  return dc.models.SklearnModel(sklearn_model, model_dir)
model = dc.models.SingletaskToMultitask(tasks, model_builder, model_dir)
# Fit trained model
model.fit(train_dataset)
model.save()
train_evaluator = dc.utils.evaluate.Evaluator(model, train_dataset, transformers)
train_scores = train_evaluator.compute_model_performance([regression_metric])
print("Train scores [kcal/mol]")
print(train_scores)
test_evaluator = dc.utils.evaluate.Evaluator(model, test_dataset, transformers)
test_scores = test_evaluator.compute_model_performance([regression_metric])
print("Validation scores [kcal/mol]")
| mit | Python |
8a0d40f9874119084f5f7a1471cb565bb85d6938 | Add match to wd script. | lawlesst/c4l16-idhub | match_to_wd.py | match_to_wd.py | import csv
import pickle
import sys
def load_index():
    """Load the ISSN -> Wikidata-id mapping pickled at data/wd_issn.pkl."""
    # Open in binary mode: pickle streams are bytes, and a text-mode read
    # corrupts them (it fails outright on Python 3).
    with open('data/wd_issn.pkl', 'rb') as inf:
        data = pickle.load(inf)
    return data
wd_idx = load_index()
def match(issn, eissn):
    """Return the Wikidata id for the first of (issn, eissn) present in the
    module-level wd_idx index, or None when neither matches."""
    for isn in [issn, eissn]:
        # Fix: consult the loop variable. The original tested and looked up
        # ``issn`` both times, so the eissn fallback was never tried.
        if isn != "":
            wd = wd_idx.get(isn)
            if wd is not None:
                return wd
def main():
    """
    Check the WD index for each row.

    Reads the input CSV (argv[1]), writes each row plus a ``wikidata``
    column to argv[2], and prints match statistics (Python 2 script).
    """
    matches = 0
    with open(sys.argv[2], 'wb') as csvfile:
        fields = ['wosid', 'title', 'issn', 'eissn', 'wikidata']
        jwriter = csv.DictWriter(csvfile, fieldnames=fields)
        jwriter.writeheader()
        with open(sys.argv[1]) as infile:
            for n, row in enumerate(csv.DictReader(infile)):
                issn = row.get('issn')
                eissn = row.get('eissn')
                wd = match(issn, eissn)
                row['wikidata'] = wd
                jwriter.writerow(row)
                if wd is not None:
                    matches += 1
    print
    print '-' * 25
    print "Total journals", n + 1
    # NOTE(review): the total counts n + 1 rows but the percentage divides
    # by n -- looks like an off-by-one; confirm which is intended.
    print "Matches ", round(matches / float(n) * 100, 2), "%"
    print
if __name__ == "__main__":
main() | mit | Python | |
b6d829177391f59d32614c54670fa993b93a64ee | add simple menu for first iteration of program | marshki/pyWipe,marshki/pyWipe | menu_simple.py | menu_simple.py | #!/usr/bin/env
def menu():
    """Prompt until the user selects a valid program option.

    Returns:
        str: the chosen option, one of '1', '2' or '3'.
    """
    while True:
        # Consistent Python 3 print() calls: the original mixed Python 2
        # print statements with a bare print(), which cannot run on either
        # interpreter (SyntaxError on 3.x, and py2 input() would eval).
        print('1. Overwrite all sectors with zeros (Faster, less secure).')
        print('2. Overwrite all sectors with random data (Slower, more secure).')
        print('3. I want to quit.')
        print()
        choice = input('Select an option (1, 2 or 3): ')
        if choice in ('1', '2', '3'):
            return choice
| mit | Python | |
7716818beb0dba581cd3536e321676d756e282d9 | Remove the lms_comerce_api_url field from partners object | edx/course-discovery,edx/course-discovery,edx/course-discovery,edx/course-discovery | course_discovery/apps/core/migrations/0011_remove_partner_lms_commerce_api_url.py | course_discovery/apps/core/migrations/0011_remove_partner_lms_commerce_api_url.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-04-12 17:31
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the unused lms_commerce_api_url
    field from the Partner model."""
    dependencies = [
        ('core', '0010_partner_lms_coursemode_api_url'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='partner',
            name='lms_commerce_api_url',
        ),
    ]
| agpl-3.0 | Python | |
e95daed610d840fe2230c3ca515dea1b0a6f4f27 | Add CN client with simple async wrappers for two APIs | DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python | gmn/src/d1_gmn/app/management/commands/async_client.py | gmn/src/d1_gmn/app/management/commands/async_client.py | # -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aiohttp
import asyncio
import ssl
import d1_common.url
class AsyncCoordinatingNodeClient_2_0:
    """Minimal asynchronous client for a DataONE Coordinating Node (API v2).

    Wraps a single aiohttp.ClientSession; call close() when finished to
    release the connection pool.
    """
    def __init__(self, base_url, timeout_sec=None, cert_pub_path=None, cert_key_path=None, disable_server_side_cert_validation=False):
        """Create the client.

        Args:
            base_url: Root URL of the CN, e.g. "https://cn.dataone.org/cn".
            timeout_sec: Total timeout for each request (None = aiohttp default).
            cert_pub_path: Path to a PEM client certificate (optional).
            cert_key_path: Path to the matching private key (optional).
            disable_server_side_cert_validation: NOTE(review): currently not
                consulted -- when a client certificate is supplied, an
                unverified SSL context is always used, so the server's
                certificate is never validated. Confirm intent.
        """
        self._base_url = base_url
        if cert_pub_path and cert_key_path:
            # Client-side certificate authentication over an unverified
            # context (server certificate is NOT checked -- see note above).
            ssl_ctx = ssl._create_unverified_context()
            ssl_ctx.load_cert_chain(cert_pub_path, cert_key_path)
            client_side_cert_connector = aiohttp.TCPConnector(ssl_context=ssl_ctx)
        else:
            client_side_cert_connector = None
        self._session = aiohttp.ClientSession(
            connector=client_side_cert_connector,
            timeout=aiohttp.ClientTimeout(total=timeout_sec),
        )
    async def close(self):
        """Close the underlying HTTP session and its connections."""
        await self._session.close()
    async def describe(self, pid):
        """Get headers describing an object (HEAD /v2/object/{pid}).

        Args:
            pid: Persistent identifier of the object.

        Returns:
            int: HTTP status code of the response.
        """
        async with self._session.head(
            self._prep_url(["v2", "object", pid])
        ) as response:
            return response.status
    async def synchronize(self, pid):
        """Send an object synchronization request to the CN
        (POST /v2/synchronize).

        Args:
            pid: Persistent identifier of the object to synchronize.

        Returns:
            int: HTTP status code of the response.
        """
        # Bug fix: the original referenced an undefined global ``session``
        # (NameError at runtime); the request must go through this client's
        # own session.
        async with self._session.post(
            self._prep_url(["v2", "synchronize"]), data={"pid": pid}
        ) as response:
            return response.status
    def _prep_url(self, rest_path_list):
        """Join the base URL with one or more path elements.

        Args:
            rest_path_list: A single path string or a list of path elements.
        """
        if isinstance(rest_path_list, str):
            rest_path_list = [rest_path_list]
        return d1_common.url.joinPathElements(
            self._base_url, *rest_path_list
        )
    def _encode_path_elements(self, path_element_list):
        """Percent-encode each element for safe use in a URL path.

        Currently unused by _prep_url (elements are joined unencoded);
        kept for callers that need pre-encoded elements.

        Args:
            path_element_list: Elements that are str/int or expose .value().
        """
        return [
            d1_common.url.encodePathElement(v)
            if isinstance(v, (int, str))
            else d1_common.url.encodePathElement(v.value())
            for v in path_element_list
        ]
| apache-2.0 | Python | |
bb1aafd72899d35bfcad5a84373281f732ad01ab | Add integration test for minerva-dump script | hendrikx-itc/minerva,hendrikx-itc/minerva | integration_tests/test_minerva_dump.py | integration_tests/test_minerva_dump.py | from contextlib import closing
import subprocess
import unittest
from nose.tools import eq_
from minerva.test import connect
class MinervaDump(unittest.TestCase):
    """
    Use standard Python unittest TestCase here because of the assertMultiLineEqual
    function.
    """
    def test_run(self):
        # Seed one trendstore and one attributestore, then check that the
        # minerva-dump CLI reproduces the matching creation statements.
        self.maxDiff = None
        with closing(connect()) as conn:
            with closing(conn.cursor()) as cursor:
                # Start from a clean slate so the dump output is deterministic.
                cursor.execute("DELETE FROM trend.trendstore")
                cursor.execute("DELETE FROM attribute_directory.attributestore")
                cursor.execute(
                    "SELECT trend.create_trendstore("
                    "    'test-datasource',"
                    "    'test-entitytype',"
                    "    '900',"
                    "    ARRAY["
                    "        ('x', 'integer', 'test trend'),"
                    "        ('y', 'double precision', 'another test trend')"
                    "    ]::trend.trend_descr[]"
                    ")")
                cursor.execute(
                    "SELECT attribute_directory.create_attributestore("
                    "    'test-datasource',"
                    "    'test-entitytype',"
                    "    ARRAY["
                    "        ('height', 'double precision', 'fictive attribute'),"
                    "        ('power', 'integer', 'another fictive attribute')"
                    "    ]::attribute_directory.attribute_descr[]"
                    ")"
                )
            conn.commit()
        process = subprocess.Popen(['minerva-dump'], stdout=subprocess.PIPE)
        out, err = process.communicate()
        # NOTE(review): the dump emits empty trend descriptions ('') although
        # non-empty ones were stored -- apparently the expected behaviour.
        self.assertMultiLineEqual(out, """\
SELECT trend.create_trendstore(
    'test-datasource',
    'test-entitytype',
    '900',
    ARRAY[
        ('x', 'integer', ''),
        ('y', 'double precision', '')
    ]::trend.trend_descr[]
);
SELECT attribute_directory.create_attributestore(
    'test-datasource',
    'test-entitytype',
    ARRAY[
        ('height', 'double precision', 'fictive attribute'),
        ('power', 'integer', 'another fictive attribute')
    ]::attribute_directory.attribute_descr[]
);
""")
| agpl-3.0 | Python | |
5c6b0c5e070ee9c67fa00bb3bc17c0b5dfec0bd0 | Create get_started_with_syntax.py | Soyofuki/python-playyard | get_started_with_syntax.py | get_started_with_syntax.py | # ๆฌๆ็ฎ็๏ผๆป่ง Python ็ไธป่ฆ่ฏญๆณ๏ผๅนถไพๆฅๅๅคๅฟใ
# ้
่ฏปๆๅ๏ผๆฌๆไธๆฏๆๆกฃใ้่ฟ้่ฏปๆฌๆ๏ผๅฏไปฅไบ่งฃ Python ๅบๆฌ็่ฏญๆณๅไธไบๅธธ็จ็ๆนๆณใ่ฏป่
ๅฏไปฅๅฐๅ
ถไธๅ
ถไป่ฏญ่จๆฏ่พ๏ผ็่งฃๅ
ถๆๅพไธๅซไนใ
# ็ฎๆ ่ฏป่
๏ผๅ
ทๆ็จๅบ่ฎพ่ฎกๅบ็ก็ฅ่ฏ๏ผ็่งฃ้ขๅๅฏน่ฑก็จๅบ่ฎพ่ฎกๅบๆฌๆฆๅฟต็็จๅบๅ
# ๆจกๅๅฏผๅ
ฅ
from math import pi, sqrt # ๅๆถๅฏผๅ
ฅๅคไธชๆจกๅ
from re import match as re_match # ้ๅฝๅ
# ็ฑป็ๅฎไนไธๅณๆ
class Animal:
__character = "Positive" # ไธไผ็ป่ฟ "from module_name import *" ๅฏผๅ
ฅ
_gender = "Unknown" # ไป
่กจ็คบๅปบ่ฎฎไธ่ฎฟ้ฎ
"""
ๆๆกฃๆณจ้๏ผๅฐๅจ่ฟ่กๆถ่พๅบ
"""
def __init__(self, name, age):
self.name = name
self.age = age
def shout(self): # ๅฎไน็ฑป็ๆนๆณ
print("Sound")
def live(self):
print("Alive!")
def train(behavior): # ไฟฎ้ฅฐๅจ
def wrap():
behavior()
return wrap
@property # ๅฑๆง๏ผๅธธ็จไบๅฎ็ฐๅช่ฏป็นๆง
def gender(self):
return self._gender
@gender.setter # ๅฎ็ฐๅฑๆง็่ตๅผ
def gender(self, value):
if value:
self._gender = value
else:
print("Gender not set")
class Wolf(Animal): # ็ปงๆฟไบ Animal ็ฑป
def shout(self, default = "Woof!", *sounds, **tones):
print(default)
if len(sounds) > 0:
print(sounds[0])
print(tones) # {'key':value,}
def bite(self):
print("Bite!")
class Dog(Wolf):
def shout(self):
print("Woo")
def bite(self):
print("Bite?")
super().shout()
def __add__(self, other): # ๆนๅ + ่ฟ็ฎ็ฌฆใๅ
ถไป่ฟ็ฎ็ฌฆไนๅฏไปฅ็ฑปไผผๅฝขๅผๆนๅ
return Dog(self.name + " and " + other.name, self.age + other.age)
def __specialBehavior(self): # ่ฏฅๆนๆณไธไผ็ป่ฟ "from module_name import *" ๅฏผๅ
ฅ
print("Read books!")
@classmethod # ๅฎไน็ฑปๆนๆณ
def find_a_puppy(cls, name):
return cls(name, 0)
@staticmethod # ๅฎไน้ๆๆนๆณ
def find(times)
print((self.name + "!") * 3)
@train
def run():
print("train running")
# ่ฎฟ้ฎ็ฑป
puppy = Dog("Puppy", 5)
print(puppy.age) # 5
puppy.live() # Alive!
puppy.shout() # Woo
puppy.bite() # Bite? Woof!
print(_Animal__character) # Positive
# ๆฐ็ป็ธๅ
ณ
def add_one(x):
return x + 1
nums = [1,2,3]
resultMap = list(map(add_one,nums)) # [2,3,4]
resultFilter = list(filter(lambda x:x%2==0,nums)) # [1,3]
nums[0:2:1] # [1,2,3]
nums[1:-1] # [2,3]
first = nums[0]
print(list(range(3))) # [0,1,2]
# Generator ไธๅพช็ฏ
def infinitive_loop():
i = 1
while True:
yield i
if True:
i += 1
elif False:
i = 0
else
i = 1
for i in infinitive_loop():
print(i) # 1 2 3...
if False:
break
else:
print("finished without break")
# ้ๅ
num_set = {1,2,3}
print(3 in num_set) # True
# ๅญๅ
ธ
age_dictionary = {"Alice":17, "Bob":18}
print(age_dictionary["Alice"]
# ๅคๅ
็ป
immutable_tuple = ("A","B") # tuple ๅง็ปๆ ๆณ่ขซๆดๆน
simple_tuple = 1,2,3
a,b = immutable_tuple
a, *b, c = [1,2,3,4] # a ไธบ 1, b ไธบ [2,3]๏ผc ไธบ 4
b = 5 if a == 2 else 3 # b ไธบ 3
# ๅญ็ฌฆไธฒๆ ผๅผ
stringFormat = "String: {0} {1} {x}".format("A","B",x="C")
# ๅผๅธธๅค็
try:
print(1/0)
assert (2 + 2 == 6), "wrong answer" # AssertionError:wrong answer
with open("file.txt") as f:
print(f.read())
file = open(file.txt)
print("A" * 3) # AAA
n = int("9")
except ZeroDivisionError:
print("Devided by zero"
except (ValueError, TypeError):
print("Error")
finally:
file.close()
print("Finally")
raise
input("Enter a number: ")
if __name__=="__main__":
print("Won't be printed if it is imported")
| mit | Python | |
186b231b7149b52dc95837aabd5f44b1c02c8e41 | Add PyQtGraph random walk without datetime | scls19fr/numpy-buffer | samples/sample_pyqtgraph_no_datetime.py | samples/sample_pyqtgraph_no_datetime.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This example demonstrates a random walk with pyqtgraph.
"""
import sys
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
from numpy_buffer import RingBuffer # https://github.com/scls19fr/numpy-buffer
class RandomWalkPlot:
    """Live-updating pyqtgraph plot of a 1-D random walk.

    A QTimer appends one new sample every 50 ms into a fixed-size ring
    buffer and redraws the curve.
    """
    def __init__(self, win):
        #self.plot = pg.plot()
        self.plot = win.addPlot(title="Updating plot")
        self.ptr = 0
        #pen = 'r'
        pen = pg.mkPen('b', style=QtCore.Qt.SolidLine)
        self.curve = self.plot.plot(pen=pen, symbol='+')
        # Fire update() every 50 ms for as long as this object lives.
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.update)
        self.timer.start(50)
        self.value = 1000 # initial value
        N = 100 # number of elements into circular buffer
        self.data_y = RingBuffer(N, self.value)
    def update(self):
        """Advance the walk by one uniform step and refresh the curve."""
        self.value += np.random.uniform(-1, 1)
        self.data_y.append(self.value)
        self.curve.setData(y=self.data_y) # size is increasing up to N
        #self.curve.setData(y=self.data_y.all[::-1]) # size is always N
        #if self.ptr == 0:
        #    self.plot.enableAutoRange('xy', False) ## stop auto-scaling after the first data set is plotted
        #self.ptr += 1
def main():
    """Build the window, start the random-walk plot and run the Qt loop."""
    #QtGui.QApplication.setGraphicsSystem('raster')
    app = QtGui.QApplication(sys.argv)
    #mw = QtGui.QMainWindow()
    #mw.resize(800,800)
    # Colour options must be set before any widgets are created.
    pg.setConfigOption('background', 'w')
    pg.setConfigOption('foreground', 'k')
    win = pg.GraphicsWindow(title="Basic plotting examples")
    win.resize(1000, 600)
    win.setWindowTitle('plot')
    # Enable antialiasing for prettier plots
    pg.setConfigOptions(antialias=True)
    # Keep a reference so the plot (and its QTimer) is not garbage collected.
    upl = RandomWalkPlot(win)
    ## Start Qt event loop unless running in interactive mode or using pyside.
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
if __name__ == '__main__':
    main()
| bsd-3-clause | Python | |
fc5714951bac61f17509eacf8ec2413e14a79ddc | Add a snomask for OPER attempts | ElementalAlchemist/txircd,Heufneutje/txircd | txircd/modules/core/sno_oper.py | txircd/modules/core/sno_oper.py | from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class SnoOper(ModuleData):
    """Server-notice provider for the "oper" snomask: notifies subscribed
    opers about successful and failed OPER attempts."""
    implements(IPlugin, IModuleData)
    name = "ServerNoticeOper"
    core = True
    def hookIRCd(self, ircd):
        self.ircd = ircd
    def actions(self):
        # operreport fires on OPER attempts; servernoticetype decides which
        # subscribers receive notices carrying the "oper" mask.
        return [ ("operreport", 1, self.sendOperNotice),
            ("servernoticetype", 1, self.checkSnoType) ]
    def sendOperNotice(self, user, reason):
        # ``reason`` is falsy for a successful oper-up and carries the
        # failure description otherwise.
        if reason:
            message = "Failed OPER attempt from {} ({}).".format(user.nick, reason)
        else:
            message = "{} has opered.".format(user.nick)
        snodata = {
            "mask": "oper",
            "message": message
        }
        self.ircd.runActionProcessing("sendservernotice", snodata)
    def checkSnoType(self, user, typename):
        # Deliver these notices only to subscribers of the "oper" mask.
        return typename == "oper"
snoOper = SnoOper() | bsd-3-clause | Python | |
e02a633ef268a58a0054c0f9ab1a03dacdb3919f | Add preferences converter | senttech/Cura,Curahelper/Cura,hmflash/Cura,ynotstartups/Wanhao,totalretribution/Cura,fieldOfView/Cura,fieldOfView/Cura,totalretribution/Cura,hmflash/Cura,senttech/Cura,ynotstartups/Wanhao,Curahelper/Cura | plugins/VersionUpgrade/VersionUpgrade21to22/Preferences.py | plugins/VersionUpgrade/VersionUpgrade21to22/Preferences.py | # Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
import configparser #To read config files.
import io #To output config files to string.
import UM.VersionUpgrade #To indicate that a file is of the wrong format.
## Creates a new preferences instance by parsing a serialised preferences file
# in version 1 of the file format.
#
# \param serialised The serialised form of a preferences file in version 1.
# \return A representation of those preferences, or None if the file format is
# incorrect.
def importFrom(serialised):
    """Parse a version-2 preferences file; return None if it is malformed."""
    recognised_failures = (configparser.Error, UM.VersionUpgrade.FormatException, UM.VersionUpgrade.InvalidVersionException)
    try:
        return Preferences(serialised)
    except recognised_failures:
        return None
## A representation of preferences files as intermediary form for conversion
# from one format to the other.
class Preferences:
    ## Reads version 2 of the preferences file format, storing it in memory.
    #
    #   \param serialised A serialised version 2 preferences file.
    #   \raises UM.VersionUpgrade.FormatException if required sections/options
    #   are missing, UM.VersionUpgrade.InvalidVersionException on a wrong version.
    def __init__(self, serialised):
        self._config = configparser.ConfigParser(interpolation = None)
        self._config.read_string(serialised)

        #Checking file correctness.
        if not self._config.has_section("general"):
            raise UM.VersionUpgrade.FormatException("No \"general\" section.")
        if not self._config.has_option("general", "version"):
            raise UM.VersionUpgrade.FormatException("No \"version\" in \"general\" section.")
        if int(self._config.get("general", "version")) != 2: # Explicitly hard-code version 2, since if this number changes the programmer MUST change this entire function.
            raise UM.VersionUpgrade.InvalidVersionException("The version of this preferences file is wrong. It must be 2.")

    ## Serialises these preferences as a preferences file of version 3.
    #
    #   This is where the actual translation happens.
    #
    #   \return A serialised version of a preferences file in version 3.
    def export(self):
        #Reset the cura/categories_expanded property since it works differently now.
        if self._config.has_section("cura") and self._config.has_option("cura", "categories_expanded"):
            self._config.remove_option("cura", "categories_expanded")

        #Translate the setting names in the visible settings.
        if self._config.has_section("machines") and self._config.has_option("machines", "setting_visibility"):
            visible_settings = self._config.get("machines", "setting_visibility")
            visible_settings = visible_settings.split(",")
            import VersionUpgrade21to22 #Import here to prevent a circular dependency.
            visible_settings = VersionUpgrade21to22.translateSettingNames(visible_settings)
            #BUGFIX: str.join is called on the separator with the iterable as
            #argument; the old code called .join on the list itself, which
            #raises AttributeError because lists have no join method.
            visible_settings = ",".join(visible_settings)
            self._config.set("machines", "setting_visibility", value = visible_settings)

        #Output the result as a string.
        output = io.StringIO()
        self._config.write(output)
        return output.getvalue()
5dde2f07399e09fd9e15467b5df78447ae6b2404 | Create add.py | WebClub-NITK/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17,WebClub-NITK/Hacktoberfest-2k17 | add2numbers/add.py | add2numbers/add.py | # Python Program - Add Two Numbers
def main():
    """Repeatedly read two integers and print their sum.

    Entering 0 as the FIRST number exits the loop.
    """
    while True:
        print("Enter '0' for exit.")
        print("Enter two numbers: ")
        val1 = int(input())
        val2 = int(input())
        if val1 == 0:
            # NOTE(review): only the first value triggers exit; a 0 second
            # value is treated as an ordinary operand (unchanged behaviour).
            break
        # Use 'total' rather than 'sum' so the builtin sum() is not shadowed.
        total = val1 + val2
        print("Sum of the given two number:", total, "\n")

# Guard the interactive loop so importing this module has no side effects.
if __name__ == '__main__':
    main()
| mit | Python | |
af43e5009e6f21e75ee7244d13a2a80e88bf5288 | add add_grid_config.py | tahoe-lafs/perf-tests,tahoe-lafs/perf-tests,tahoe-lafs/perf-tests | add_grid_config.py | add_grid_config.py | from gcloud import datastore
key = datastore.Key("GridConfig")
c = datastore.Entity(key)
c.update({
"num_server_instances": 3,
"server_instance_types": ["n1-standard-1"]*3,
"num_servers": 6,
"server_versions": ["1.10.0"]*6,
"server_latencies": [0]*6,
"client_instance_type": "n1-standard-1",
"client_version": "1.10.0",
})
datastore.put(c)
print "configid:", c.key.id
| mit | Python | |
9df2c8ad6208d89fc1865b36a94115731492f902 | test code for resize_to_yolo | Swall0w/clib | tests/converts/test_format_image_size.py | tests/converts/test_format_image_size.py | import unittest
from clib.converts import resize_to_yolo
from skimage import data
class ResizeYoloTest(unittest.TestCase):
    """Unit tests for clib.converts.resize_to_yolo."""

    def setUp(self):
        # Sample images bundled with scikit-image: grayscale coins, RGB astronaut.
        self.grayimg = data.coins()
        self.rgbimg = data.astronaut()

    def test_resize_to_yolo(self):
        resized_shape = resize_to_yolo(self.rgbimg).shape
        # YOLO networks take a fixed 448x448 RGB input.
        self.assertEqual(resized_shape, (448, 448, 3))
        # TODO(review): behaviour for grayscale input is still undecided;
        # add a ValueError test once resize_to_yolo defines that contract.
| mit | Python | |
8f3c5dc924b0a8ad35d99d66eb809a51b49fc178 | Add a management command to rename a stream. | eeshangarg/zulip,dhcrzf/zulip,wavelets/zulip,hengqujushi/zulip,praveenaki/zulip,armooo/zulip,JanzTam/zulip,glovebx/zulip,amanharitsh123/zulip,dxq-git/zulip,xuxiao/zulip,stamhe/zulip,pradiptad/zulip,akuseru/zulip,bitemyapp/zulip,JPJPJPOPOP/zulip,zorojean/zulip,kokoar/zulip,zwily/zulip,LAndreas/zulip,christi3k/zulip,natanovia/zulip,dwrpayne/zulip,ashwinirudrappa/zulip,dxq-git/zulip,lfranchi/zulip,susansls/zulip,dattatreya303/zulip,ryanbackman/zulip,codeKonami/zulip,ufosky-server/zulip,eastlhu/zulip,nicholasbs/zulip,shaunstanislaus/zulip,zhaoweigg/zulip,vikas-parashar/zulip,Vallher/zulip,andersk/zulip,pradiptad/zulip,peiwei/zulip,dotcool/zulip,johnny9/zulip,jackrzhang/zulip,udxxabp/zulip,saitodisse/zulip,ipernet/zulip,jonesgithub/zulip,xuanhan863/zulip,zorojean/zulip,easyfmxu/zulip,arpith/zulip,Frouk/zulip,Diptanshu8/zulip,saitodisse/zulip,sonali0901/zulip,dnmfarrell/zulip,themass/zulip,dawran6/zulip,RobotCaleb/zulip,deer-hope/zulip,ikasumiwt/zulip,DazWorrall/zulip,suxinde2009/zulip,fw1121/zulip,DazWorrall/zulip,cosmicAsymmetry/zulip,vikas-parashar/zulip,ufosky-server/zulip,sup95/zulip,akuseru/zulip,gigawhitlocks/zulip,shubhamdhama/zulip,zulip/zulip,levixie/zulip,noroot/zulip,aliceriot/zulip,aps-sids/zulip,Jianchun1/zulip,paxapy/zulip,dwrpayne/zulip,guiquanz/zulip,brainwane/zulip,souravbadami/zulip,udxxabp/zulip,ipernet/zulip,MayB/zulip,brainwane/zulip,RobotCaleb/zulip,littledogboy/zulip,kokoar/zulip,themass/zulip,Diptanshu8/zulip,samatdav/zulip,shaunstanislaus/zulip,blaze225/zulip,m1ssou/zulip,ryansnowboarder/zulip,sharmaeklavya2/zulip,Gabriel0402/zulip,wangdeshui/zulip,KJin99/zulip,nicholasbs/zulip,gkotian/zulip,isht3/zulip,EasonYi/zulip,ryansnowboarder/zulip,sonali0901/zulip,mansilladev/zulip,gigawhitlocks/zulip,pradiptad/zulip,j831/zulip,MariaFaBella85/zulip,MayB/zulip,developerfm/zulip,wangdeshui/zulip,souravbadami/zulip,MariaFaBella85/zulip,blaze225/zulip,shubhamdhama/zulip,
aps-sids/zulip,MayB/zulip,synicalsyntax/zulip,dawran6/zulip,ryansnowboarder/zulip,gkotian/zulip,bitemyapp/zulip,hj3938/zulip,so0k/zulip,zacps/zulip,esander91/zulip,luyifan/zulip,littledogboy/zulip,ashwinirudrappa/zulip,JPJPJPOPOP/zulip,niftynei/zulip,huangkebo/zulip,adnanh/zulip,vakila/zulip,xuanhan863/zulip,zorojean/zulip,zachallaun/zulip,gkotian/zulip,seapasulli/zulip,Gabriel0402/zulip,shrikrishnaholla/zulip,joshisa/zulip,Vallher/zulip,PaulPetring/zulip,DazWorrall/zulip,johnny9/zulip,aps-sids/zulip,jessedhillon/zulip,JPJPJPOPOP/zulip,sup95/zulip,blaze225/zulip,fw1121/zulip,sonali0901/zulip,arpith/zulip,swinghu/zulip,TigorC/zulip,KingxBanana/zulip,Suninus/zulip,EasonYi/zulip,reyha/zulip,itnihao/zulip,wavelets/zulip,mansilladev/zulip,armooo/zulip,hengqujushi/zulip,Galexrt/zulip,ryanbackman/zulip,zulip/zulip,jrowan/zulip,jphilipsen05/zulip,shrikrishnaholla/zulip,udxxabp/zulip,ApsOps/zulip,joyhchen/zulip,zofuthan/zulip,hj3938/zulip,Drooids/zulip,showell/zulip,LeeRisk/zulip,ericzhou2008/zulip,kaiyuanheshang/zulip,hayderimran7/zulip,Diptanshu8/zulip,avastu/zulip,jeffcao/zulip,reyha/zulip,bitemyapp/zulip,moria/zulip,zachallaun/zulip,esander91/zulip,Drooids/zulip,dnmfarrell/zulip,ApsOps/zulip,xuxiao/zulip,littledogboy/zulip,Frouk/zulip,xuanhan863/zulip,ahmadassaf/zulip,guiquanz/zulip,wdaher/zulip,proliming/zulip,ryanbackman/zulip,amanharitsh123/zulip,mahim97/zulip,huangkebo/zulip,amallia/zulip,mdavid/zulip,schatt/zulip,jrowan/zulip,zachallaun/zulip,tbutter/zulip,rishig/zulip,itnihao/zulip,grave-w-grave/zulip,deer-hope/zulip,zhaoweigg/zulip,yuvipanda/zulip,samatdav/zulip,rishig/zulip,yuvipanda/zulip,natanovia/zulip,firstblade/zulip,punchagan/zulip,akuseru/zulip,synicalsyntax/zulip,kou/zulip,peiwei/zulip,jeffcao/zulip,eastlhu/zulip,mdavid/zulip,yocome/zulip,LAndreas/zulip,swinghu/zulip,itnihao/zulip,MayB/zulip,bssrdf/zulip,gkotian/zulip,zulip/zulip,Batterfii/zulip,tommyip/zulip,codeKonami/zulip,shrikrishnaholla/zulip,kokoar/zulip,m1ssou/zulip,proliming/zulip,glovebx/zulip,y
uvipanda/zulip,luyifan/zulip,seapasulli/zulip,swinghu/zulip,jainayush975/zulip,Jianchun1/zulip,vaidap/zulip,showell/zulip,aakash-cr7/zulip,timabbott/zulip,hafeez3000/zulip,hengqujushi/zulip,aliceriot/zulip,aps-sids/zulip,punchagan/zulip,ericzhou2008/zulip,jimmy54/zulip,jessedhillon/zulip,willingc/zulip,eastlhu/zulip,brockwhittaker/zulip,jackrzhang/zulip,babbage/zulip,jimmy54/zulip,timabbott/zulip,jonesgithub/zulip,amyliu345/zulip,yuvipanda/zulip,he15his/zulip,timabbott/zulip,rht/zulip,tdr130/zulip,jerryge/zulip,noroot/zulip,zofuthan/zulip,SmartPeople/zulip,armooo/zulip,kou/zulip,jerryge/zulip,voidException/zulip,hj3938/zulip,nicholasbs/zulip,blaze225/zulip,kaiyuanheshang/zulip,andersk/zulip,firstblade/zulip,LAndreas/zulip,Drooids/zulip,christi3k/zulip,cosmicAsymmetry/zulip,seapasulli/zulip,hackerkid/zulip,johnnygaddarr/zulip,ApsOps/zulip,christi3k/zulip,shaunstanislaus/zulip,proliming/zulip,verma-varsha/zulip,dwrpayne/zulip,zhaoweigg/zulip,calvinleenyc/zulip,EasonYi/zulip,PhilSk/zulip,Juanvulcano/zulip,aliceriot/zulip,KJin99/zulip,jonesgithub/zulip,bastianh/zulip,voidException/zulip,brockwhittaker/zulip,EasonYi/zulip,stamhe/zulip,punchagan/zulip,dnmfarrell/zulip,hengqujushi/zulip,tiansiyuan/zulip,levixie/zulip,Juanvulcano/zulip,Juanvulcano/zulip,bowlofstew/zulip,ryanbackman/zulip,aakash-cr7/zulip,zulip/zulip,sharmaeklavya2/zulip,cosmicAsymmetry/zulip,zachallaun/zulip,cosmicAsymmetry/zulip,christi3k/zulip,dhcrzf/zulip,krtkmj/zulip,Cheppers/zulip,thomasboyt/zulip,umkay/zulip,so0k/zulip,Cheppers/zulip,lfranchi/zulip,technicalpickles/zulip,so0k/zulip,Cheppers/zulip,rht/zulip,developerfm/zulip,dwrpayne/zulip,natanovia/zulip,stamhe/zulip,jonesgithub/zulip,codeKonami/zulip,shubhamdhama/zulip,jeffcao/zulip,ericzhou2008/zulip,jessedhillon/zulip,peiwei/zulip,pradiptad/zulip,andersk/zulip,Cheppers/zulip,luyifan/zulip,vakila/zulip,dhcrzf/zulip,peiwei/zulip,avastu/zulip,he15his/zulip,zorojean/zulip,udxxabp/zulip,synicalsyntax/zulip,punchagan/zulip,bastianh/zulip,JanzTam/zulip,Ti
gorC/zulip,aliceriot/zulip,zulip/zulip,tbutter/zulip,dxq-git/zulip,rishig/zulip,PaulPetring/zulip,mdavid/zulip,Jianchun1/zulip,JPJPJPOPOP/zulip,adnanh/zulip,peiwei/zulip,kou/zulip,amallia/zulip,qq1012803704/zulip,firstblade/zulip,umkay/zulip,huangkebo/zulip,vabs22/zulip,karamcnair/zulip,ufosky-server/zulip,ahmadassaf/zulip,JanzTam/zulip,nicholasbs/zulip,shubhamdhama/zulip,swinghu/zulip,seapasulli/zulip,timabbott/zulip,aps-sids/zulip,xuanhan863/zulip,noroot/zulip,hackerkid/zulip,jerryge/zulip,wdaher/zulip,jeffcao/zulip,shubhamdhama/zulip,willingc/zulip,grave-w-grave/zulip,wweiradio/zulip,itnihao/zulip,karamcnair/zulip,themass/zulip,dhcrzf/zulip,proliming/zulip,lfranchi/zulip,jphilipsen05/zulip,themass/zulip,kaiyuanheshang/zulip,jimmy54/zulip,moria/zulip,KJin99/zulip,Drooids/zulip,cosmicAsymmetry/zulip,noroot/zulip,karamcnair/zulip,seapasulli/zulip,j831/zulip,krtkmj/zulip,hayderimran7/zulip,bluesea/zulip,ApsOps/zulip,yocome/zulip,dattatreya303/zulip,aakash-cr7/zulip,Qgap/zulip,xuanhan863/zulip,AZtheAsian/zulip,guiquanz/zulip,technicalpickles/zulip,yocome/zulip,dattatreya303/zulip,dwrpayne/zulip,hackerkid/zulip,susansls/zulip,so0k/zulip,RobotCaleb/zulip,m1ssou/zulip,wweiradio/zulip,gkotian/zulip,JanzTam/zulip,suxinde2009/zulip,Galexrt/zulip,stamhe/zulip,peguin40/zulip,amyliu345/zulip,bluesea/zulip,tdr130/zulip,ApsOps/zulip,jerryge/zulip,ryansnowboarder/zulip,armooo/zulip,amyliu345/zulip,ericzhou2008/zulip,ryanbackman/zulip,itnihao/zulip,hafeez3000/zulip,calvinleenyc/zulip,hustlzp/zulip,proliming/zulip,DazWorrall/zulip,firstblade/zulip,littledogboy/zulip,ahmadassaf/zulip,bssrdf/zulip,jeffcao/zulip,praveenaki/zulip,paxapy/zulip,reyha/zulip,mdavid/zulip,Frouk/zulip,synicalsyntax/zulip,peiwei/zulip,showell/zulip,vakila/zulip,deer-hope/zulip,johnny9/zulip,voidException/zulip,bastianh/zulip,codeKonami/zulip,pradiptad/zulip,xuxiao/zulip,PhilSk/zulip,bowlofstew/zulip,babbage/zulip,saitodisse/zulip,calvinleenyc/zulip,esander91/zulip,schatt/zulip,reyha/zulip,souravbadami/zulip,R
obotCaleb/zulip,verma-varsha/zulip,ikasumiwt/zulip,eastlhu/zulip,zulip/zulip,amallia/zulip,huangkebo/zulip,avastu/zulip,deer-hope/zulip,vaidap/zulip,dxq-git/zulip,Gabriel0402/zulip,verma-varsha/zulip,easyfmxu/zulip,vakila/zulip,stamhe/zulip,luyifan/zulip,sharmaeklavya2/zulip,zacps/zulip,vabs22/zulip,cosmicAsymmetry/zulip,natanovia/zulip,wangdeshui/zulip,dhcrzf/zulip,MayB/zulip,KJin99/zulip,littledogboy/zulip,jessedhillon/zulip,alliejones/zulip,so0k/zulip,huangkebo/zulip,luyifan/zulip,ikasumiwt/zulip,PaulPetring/zulip,DazWorrall/zulip,zorojean/zulip,zofuthan/zulip,grave-w-grave/zulip,bssrdf/zulip,avastu/zulip,lfranchi/zulip,yuvipanda/zulip,xuanhan863/zulip,yocome/zulip,praveenaki/zulip,dattatreya303/zulip,schatt/zulip,jrowan/zulip,hengqujushi/zulip,paxapy/zulip,ufosky-server/zulip,saitodisse/zulip,schatt/zulip,mansilladev/zulip,jrowan/zulip,joshisa/zulip,zachallaun/zulip,susansls/zulip,adnanh/zulip,udxxabp/zulip,atomic-labs/zulip,Jianchun1/zulip,jimmy54/zulip,easyfmxu/zulip,avastu/zulip,jphilipsen05/zulip,lfranchi/zulip,punchagan/zulip,atomic-labs/zulip,joyhchen/zulip,niftynei/zulip,stamhe/zulip,johnnygaddarr/zulip,jimmy54/zulip,Batterfii/zulip,amanharitsh123/zulip,rht/zulip,arpitpanwar/zulip,jimmy54/zulip,deer-hope/zulip,ahmadassaf/zulip,mansilladev/zulip,bowlofstew/zulip,j831/zulip,hafeez3000/zulip,bluesea/zulip,zhaoweigg/zulip,MariaFaBella85/zulip,PhilSk/zulip,akuseru/zulip,LAndreas/zulip,Qgap/zulip,LeeRisk/zulip,jimmy54/zulip,hackerkid/zulip,jessedhillon/zulip,rishig/zulip,dhcrzf/zulip,amallia/zulip,levixie/zulip,shubhamdhama/zulip,brockwhittaker/zulip,mohsenSy/zulip,wdaher/zulip,Gabriel0402/zulip,DazWorrall/zulip,vikas-parashar/zulip,sonali0901/zulip,vikas-parashar/zulip,voidException/zulip,dotcool/zulip,andersk/zulip,ericzhou2008/zulip,technicalpickles/zulip,joyhchen/zulip,ikasumiwt/zulip,hj3938/zulip,niftynei/zulip,rht/zulip,dawran6/zulip,ryansnowboarder/zulip,so0k/zulip,hj3938/zulip,technicalpickles/zulip,eastlhu/zulip,jackrzhang/zulip,zwily/zulip,dhcrzf/zuli
p,amanharitsh123/zulip,udxxabp/zulip,niftynei/zulip,wweiradio/zulip,alliejones/zulip,verma-varsha/zulip,alliejones/zulip,brockwhittaker/zulip,akuseru/zulip,thomasboyt/zulip,SmartPeople/zulip,karamcnair/zulip,technicalpickles/zulip,shaunstanislaus/zulip,themass/zulip,zorojean/zulip,PhilSk/zulip,susansls/zulip,samatdav/zulip,mahim97/zulip,guiquanz/zulip,arpith/zulip,amyliu345/zulip,LeeRisk/zulip,adnanh/zulip,xuxiao/zulip,peguin40/zulip,fw1121/zulip,brockwhittaker/zulip,brainwane/zulip,firstblade/zulip,punchagan/zulip,pradiptad/zulip,zofuthan/zulip,mdavid/zulip,dattatreya303/zulip,MayB/zulip,SmartPeople/zulip,ahmadassaf/zulip,vakila/zulip,sharmaeklavya2/zulip,hafeez3000/zulip,praveenaki/zulip,babbage/zulip,timabbott/zulip,j831/zulip,tdr130/zulip,lfranchi/zulip,tbutter/zulip,glovebx/zulip,itnihao/zulip,jackrzhang/zulip,arpitpanwar/zulip,atomic-labs/zulip,dawran6/zulip,KingxBanana/zulip,adnanh/zulip,qq1012803704/zulip,dotcool/zulip,moria/zulip,qq1012803704/zulip,zulip/zulip,wweiradio/zulip,Cheppers/zulip,dawran6/zulip,synicalsyntax/zulip,udxxabp/zulip,wweiradio/zulip,kokoar/zulip,Vallher/zulip,gigawhitlocks/zulip,LAndreas/zulip,qq1012803704/zulip,esander91/zulip,moria/zulip,KingxBanana/zulip,zacps/zulip,isht3/zulip,johnnygaddarr/zulip,codeKonami/zulip,dnmfarrell/zulip,gigawhitlocks/zulip,jessedhillon/zulip,codeKonami/zulip,zhaoweigg/zulip,wangdeshui/zulip,Gabriel0402/zulip,dawran6/zulip,mahim97/zulip,JanzTam/zulip,showell/zulip,natanovia/zulip,m1ssou/zulip,wangdeshui/zulip,timabbott/zulip,sonali0901/zulip,TigorC/zulip,glovebx/zulip,moria/zulip,wweiradio/zulip,arpitpanwar/zulip,ipernet/zulip,bitemyapp/zulip,Galexrt/zulip,willingc/zulip,easyfmxu/zulip,schatt/zulip,suxinde2009/zulip,umkay/zulip,souravbadami/zulip,brainwane/zulip,sup95/zulip,zwily/zulip,dnmfarrell/zulip,yocome/zulip,avastu/zulip,huangkebo/zulip,littledogboy/zulip,jerryge/zulip,swinghu/zulip,christi3k/zulip,reyha/zulip,PaulPetring/zulip,Suninus/zulip,sup95/zulip,zofuthan/zulip,jackrzhang/zulip,ashwinirudrappa
/zulip,samatdav/zulip,alliejones/zulip,Galexrt/zulip,joshisa/zulip,noroot/zulip,ashwinirudrappa/zulip,luyifan/zulip,tommyip/zulip,tbutter/zulip,bssrdf/zulip,showell/zulip,EasonYi/zulip,LAndreas/zulip,stamhe/zulip,PaulPetring/zulip,hj3938/zulip,tiansiyuan/zulip,MariaFaBella85/zulip,umkay/zulip,levixie/zulip,guiquanz/zulip,levixie/zulip,KingxBanana/zulip,showell/zulip,suxinde2009/zulip,levixie/zulip,johnnygaddarr/zulip,ipernet/zulip,eeshangarg/zulip,MariaFaBella85/zulip,punchagan/zulip,MariaFaBella85/zulip,ipernet/zulip,bastianh/zulip,amyliu345/zulip,ipernet/zulip,nicholasbs/zulip,PhilSk/zulip,kou/zulip,dattatreya303/zulip,mahim97/zulip,tbutter/zulip,bastianh/zulip,alliejones/zulip,aakash-cr7/zulip,yocome/zulip,wdaher/zulip,bowlofstew/zulip,qq1012803704/zulip,hackerkid/zulip,fw1121/zulip,developerfm/zulip,mahim97/zulip,brainwane/zulip,RobotCaleb/zulip,TigorC/zulip,kou/zulip,bastianh/zulip,arpitpanwar/zulip,vaidap/zulip,Qgap/zulip,hustlzp/zulip,andersk/zulip,calvinleenyc/zulip,guiquanz/zulip,Juanvulcano/zulip,thomasboyt/zulip,dnmfarrell/zulip,willingc/zulip,vaidap/zulip,joshisa/zulip,tommyip/zulip,jerryge/zulip,firstblade/zulip,ashwinirudrappa/zulip,synicalsyntax/zulip,eastlhu/zulip,proliming/zulip,amanharitsh123/zulip,peiwei/zulip,ashwinirudrappa/zulip,vikas-parashar/zulip,atomic-labs/zulip,arpith/zulip,LeeRisk/zulip,ericzhou2008/zulip,jrowan/zulip,babbage/zulip,mansilladev/zulip,rishig/zulip,rishig/zulip,m1ssou/zulip,arpith/zulip,calvinleenyc/zulip,gigawhitlocks/zulip,sharmaeklavya2/zulip,vikas-parashar/zulip,kokoar/zulip,kaiyuanheshang/zulip,amallia/zulip,he15his/zulip,dotcool/zulip,RobotCaleb/zulip,zofuthan/zulip,eeshangarg/zulip,kou/zulip,sonali0901/zulip,hackerkid/zulip,synicalsyntax/zulip,Batterfii/zulip,eeshangarg/zulip,tbutter/zulip,brockwhittaker/zulip,amanharitsh123/zulip,hustlzp/zulip,shrikrishnaholla/zulip,brainwane/zulip,gkotian/zulip,ufosky-server/zulip,vabs22/zulip,umkay/zulip,tommyip/zulip,ufosky-server/zulip,glovebx/zulip,arpith/zulip,blaze225/zulip,h
ackerkid/zulip,niftynei/zulip,kou/zulip,yuvipanda/zulip,swinghu/zulip,johnny9/zulip,hj3938/zulip,bluesea/zulip,hayderimran7/zulip,zachallaun/zulip,pradiptad/zulip,umkay/zulip,Gabriel0402/zulip,wavelets/zulip,joshisa/zulip,shrikrishnaholla/zulip,ryansnowboarder/zulip,zacps/zulip,voidException/zulip,DazWorrall/zulip,Galexrt/zulip,akuseru/zulip,shrikrishnaholla/zulip,Frouk/zulip,levixie/zulip,mohsenSy/zulip,glovebx/zulip,isht3/zulip,qq1012803704/zulip,Jianchun1/zulip,bowlofstew/zulip,wavelets/zulip,glovebx/zulip,johnnygaddarr/zulip,dwrpayne/zulip,hengqujushi/zulip,KingxBanana/zulip,hayderimran7/zulip,eeshangarg/zulip,PhilSk/zulip,atomic-labs/zulip,bluesea/zulip,voidException/zulip,peguin40/zulip,dwrpayne/zulip,bitemyapp/zulip,susansls/zulip,jphilipsen05/zulip,dnmfarrell/zulip,KJin99/zulip,dotcool/zulip,krtkmj/zulip,PaulPetring/zulip,huangkebo/zulip,alliejones/zulip,mohsenSy/zulip,tdr130/zulip,zwily/zulip,sup95/zulip,eeshangarg/zulip,TigorC/zulip,developerfm/zulip,KJin99/zulip,isht3/zulip,KingxBanana/zulip,niftynei/zulip,peguin40/zulip,Batterfii/zulip,bssrdf/zulip,atomic-labs/zulip,mahim97/zulip,zwily/zulip,zhaoweigg/zulip,voidException/zulip,dxq-git/zulip,MayB/zulip,PaulPetring/zulip,Jianchun1/zulip,vaidap/zulip,tiansiyuan/zulip,moria/zulip,rht/zulip,bluesea/zulip,Drooids/zulip,bowlofstew/zulip,grave-w-grave/zulip,RobotCaleb/zulip,jphilipsen05/zulip,saitodisse/zulip,xuxiao/zulip,hengqujushi/zulip,jonesgithub/zulip,technicalpickles/zulip,Qgap/zulip,aps-sids/zulip,ApsOps/zulip,suxinde2009/zulip,aliceriot/zulip,AZtheAsian/zulip,tbutter/zulip,Batterfii/zulip,jessedhillon/zulip,zofuthan/zulip,guiquanz/zulip,bluesea/zulip,paxapy/zulip,kaiyuanheshang/zulip,he15his/zulip,Juanvulcano/zulip,jerryge/zulip,praveenaki/zulip,babbage/zulip,LeeRisk/zulip,hayderimran7/zulip,xuanhan863/zulip,ahmadassaf/zulip,MariaFaBella85/zulip,johnny9/zulip,joshisa/zulip,mohsenSy/zulip,fw1121/zulip,johnnygaddarr/zulip,natanovia/zulip,Qgap/zulip,akuseru/zulip,samatdav/zulip,babbage/zulip,Drooids/zulip,
kokoar/zulip,babbage/zulip,wavelets/zulip,christi3k/zulip,SmartPeople/zulip,arpitpanwar/zulip,isht3/zulip,calvinleenyc/zulip,jainayush975/zulip,mohsenSy/zulip,adnanh/zulip,AZtheAsian/zulip,bastianh/zulip,Diptanshu8/zulip,zwily/zulip,Gabriel0402/zulip,jackrzhang/zulip,avastu/zulip,hafeez3000/zulip,verma-varsha/zulip,wdaher/zulip,Frouk/zulip,Vallher/zulip,Qgap/zulip,amyliu345/zulip,vabs22/zulip,Vallher/zulip,JPJPJPOPOP/zulip,esander91/zulip,aliceriot/zulip,suxinde2009/zulip,mansilladev/zulip,j831/zulip,joyhchen/zulip,deer-hope/zulip,Suninus/zulip,jphilipsen05/zulip,karamcnair/zulip,so0k/zulip,adnanh/zulip,gigawhitlocks/zulip,amallia/zulip,rht/zulip,sharmaeklavya2/zulip,tommyip/zulip,wavelets/zulip,xuxiao/zulip,zorojean/zulip,isht3/zulip,paxapy/zulip,xuxiao/zulip,easyfmxu/zulip,EasonYi/zulip,ahmadassaf/zulip,krtkmj/zulip,tdr130/zulip,Cheppers/zulip,yocome/zulip,hayderimran7/zulip,jainayush975/zulip,wangdeshui/zulip,shaunstanislaus/zulip,jainayush975/zulip,timabbott/zulip,Batterfii/zulip,hustlzp/zulip,themass/zulip,noroot/zulip,tdr130/zulip,deer-hope/zulip,aakash-cr7/zulip,ikasumiwt/zulip,Galexrt/zulip,kaiyuanheshang/zulip,bssrdf/zulip,arpitpanwar/zulip,he15his/zulip,wangdeshui/zulip,wavelets/zulip,krtkmj/zulip,joyhchen/zulip,nicholasbs/zulip,bitemyapp/zulip,jonesgithub/zulip,wdaher/zulip,Juanvulcano/zulip,souravbadami/zulip,zacps/zulip,tiansiyuan/zulip,andersk/zulip,bitemyapp/zulip,schatt/zulip,Suninus/zulip,ipernet/zulip,m1ssou/zulip,jrowan/zulip,zacps/zulip,easyfmxu/zulip,atomic-labs/zulip,thomasboyt/zulip,krtkmj/zulip,ikasumiwt/zulip,ufosky-server/zulip,itnihao/zulip,AZtheAsian/zulip,developerfm/zulip,hafeez3000/zulip,shaunstanislaus/zulip,thomasboyt/zulip,yuvipanda/zulip,Cheppers/zulip,ryansnowboarder/zulip,johnny9/zulip,willingc/zulip,LeeRisk/zulip,jackrzhang/zulip,mansilladev/zulip,mdavid/zulip,developerfm/zulip,schatt/zulip,easyfmxu/zulip,shaunstanislaus/zulip,technicalpickles/zulip,AZtheAsian/zulip,Batterfii/zulip,noroot/zulip,blaze225/zulip,armooo/zulip,hustlz
p/zulip,joyhchen/zulip,fw1121/zulip,souravbadami/zulip,jeffcao/zulip,Frouk/zulip,firstblade/zulip,samatdav/zulip,umkay/zulip,joshisa/zulip,SmartPeople/zulip,eeshangarg/zulip,codeKonami/zulip,verma-varsha/zulip,arpitpanwar/zulip,JanzTam/zulip,zachallaun/zulip,aps-sids/zulip,showell/zulip,tommyip/zulip,rht/zulip,andersk/zulip,ApsOps/zulip,ryanbackman/zulip,ikasumiwt/zulip,ericzhou2008/zulip,hustlzp/zulip,JPJPJPOPOP/zulip,tiansiyuan/zulip,esander91/zulip,dotcool/zulip,tdr130/zulip,jainayush975/zulip,peguin40/zulip,wweiradio/zulip,KJin99/zulip,grave-w-grave/zulip,vaidap/zulip,armooo/zulip,EasonYi/zulip,johnny9/zulip,praveenaki/zulip,Diptanshu8/zulip,moria/zulip,fw1121/zulip,AZtheAsian/zulip,brainwane/zulip,dotcool/zulip,eastlhu/zulip,bssrdf/zulip,mdavid/zulip,paxapy/zulip,dxq-git/zulip,shrikrishnaholla/zulip,Frouk/zulip,jonesgithub/zulip,zhaoweigg/zulip,aakash-cr7/zulip,m1ssou/zulip,reyha/zulip,vabs22/zulip,wdaher/zulip,littledogboy/zulip,amallia/zulip,Diptanshu8/zulip,jainayush975/zulip,lfranchi/zulip,TigorC/zulip,natanovia/zulip,developerfm/zulip,gkotian/zulip,Suninus/zulip,aliceriot/zulip,johnnygaddarr/zulip,he15his/zulip,hayderimran7/zulip,qq1012803704/zulip,vakila/zulip,karamcnair/zulip,nicholasbs/zulip,hafeez3000/zulip,grave-w-grave/zulip,bowlofstew/zulip,kaiyuanheshang/zulip,SmartPeople/zulip,sup95/zulip,themass/zulip,armooo/zulip,Suninus/zulip,seapasulli/zulip,dxq-git/zulip,saitodisse/zulip,jeffcao/zulip,luyifan/zulip,LeeRisk/zulip,tiansiyuan/zulip,gigawhitlocks/zulip,thomasboyt/zulip,ashwinirudrappa/zulip,seapasulli/zulip,thomasboyt/zulip,vakila/zulip,Galexrt/zulip,willingc/zulip,shubhamdhama/zulip,kokoar/zulip,tiansiyuan/zulip,willingc/zulip,Suninus/zulip,swinghu/zulip,zwily/zulip,susansls/zulip,tommyip/zulip,JanzTam/zulip,he15his/zulip,suxinde2009/zulip,rishig/zulip,Vallher/zulip,mohsenSy/zulip,krtkmj/zulip,Drooids/zulip,hustlzp/zulip,Vallher/zulip,praveenaki/zulip,j831/zulip,peguin40/zulip,alliejones/zulip,proliming/zulip,LAndreas/zulip,karamcnair/zulip,vabs
22/zulip,Qgap/zulip,saitodisse/zulip,esander91/zulip | zerver/management/commands/rename-stream.py | zerver/management/commands/rename-stream.py | from __future__ import absolute_import
from django.core.management.base import BaseCommand, CommandError

from zerver.lib.actions import do_rename_stream
from zerver.models import Realm, get_realm
class Command(BaseCommand):
    help = """Change the stream name for a realm.
Usage: python manage.py rename-stream <domain> <old name> <new name>"""

    def handle(self, *args, **options):
        """Rename a stream identified by realm domain and its current name.

        Raises CommandError (which Django reports and turns into a non-zero
        exit status) on a bad argument count or an unknown realm domain --
        the idiomatic replacement for the old print-then-exit(1) pattern.
        """
        if len(args) != 3:
            raise CommandError("Please provide a domain and the old and new names.")
        domain, old_name, new_name = args
        try:
            realm = get_realm(domain)
        except Realm.DoesNotExist:
            raise CommandError("Unknown domain %s" % (domain,))
        do_rename_stream(realm, old_name, new_name)
| apache-2.0 | Python | |
9d7acb2b629667e166464bb9b8c43922ed21c1d9 | add utility to copy events to another ES server | mozilla/MozDef,mpurzynski/MozDef,gsssrao/MozDef,eXcomm/MozDef,netantho/MozDef,triplekill/MozDef,abhijithch/MozDef,mpurzynski/MozDef,serbyy/MozDef,netantho/MozDef,DarkPrince304/MozDef,jeffbryner/MozDef,eXcomm/MozDef,netantho/MozDef,Phrozyn/MozDef,jeffbryner/MozDef,jeffbryner/MozDef,526avijitgupta/MozDef,jvehent/MozDef,triplekill/MozDef,jvehent/MozDef,eXcomm/MozDef,DarkPrince304/MozDef,Phrozyn/MozDef,abhijithch/MozDef,ameihm0912/MozDef,jvehent/MozDef,gdestuynder/MozDef,ameihm0912/MozDef,eXcomm/MozDef,gdestuynder/MozDef,jvehent/MozDef,mozilla/MozDef,mozilla/MozDef,serbyy/MozDef,526avijitgupta/MozDef,gsssrao/MozDef,triplekill/MozDef,gdestuynder/MozDef,526avijitgupta/MozDef,mozilla/MozDef,gsssrao/MozDef,Phrozyn/MozDef,gdestuynder/MozDef,526avijitgupta/MozDef,gsssrao/MozDef,abhijithch/MozDef,mpurzynski/MozDef,serbyy/MozDef,jeffbryner/MozDef,ameihm0912/MozDef,DarkPrince304/MozDef,ameihm0912/MozDef,Phrozyn/MozDef,DarkPrince304/MozDef,mpurzynski/MozDef,abhijithch/MozDef,serbyy/MozDef,netantho/MozDef,triplekill/MozDef | mq/cpEvents.py | mq/cpEvents.py | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import sys
import pyes
from kombu import Connection,Queue,Exchange
from kombu.mixins import ConsumerMixin
from configlib import getConfig,OptionParser
def esConnect():
    '''Open (or re-open) a connection to the configured elasticsearch servers.'''
    serverList = list('{0}'.format(server) for server in options.esservers)
    return pyes.ES(serverList)
class eventConsumer(ConsumerMixin):
    '''kombu mixin that copies received events to an elastic search server.
    Helpful when testing new clusters, for failover, etc.
    Does not ack messages and deletes its queue on exit, so it is not
    guaranteed to copy every message.
    '''
    def __init__(self, mqConnection,eventQueue,eventExchange,esConnection):
        # ConsumerMixin requires the attribute to be named "connection".
        self.connection = mqConnection
        self.esConnection=esConnection
        self.eventQueue=eventQueue
        self.eventExchange=eventExchange

    def get_consumers(self, Consumer, channel):
        # no_ack=True: the broker considers messages delivered on receipt.
        consumer=Consumer(self.eventQueue, no_ack=True,callbacks=[self.on_message], accept=['json'])
        consumer.qos(prefetch_count=options.prefetch)
        return [consumer]

    def on_message(self, body, message):
        """Index one event into ES; errors are logged, never re-raised."""
        try:
            # NOTE(review): this prints every event received; consider removing
            # or gating behind a debug option in production.
            print("RECEIVED MESSAGE: %r" % (body, ))
            # copy event to es cluster (unused return value no longer bound)
            try:
                self.esConnection.index(index='events',doc_type='event',doc=body)
            except (pyes.exceptions.NoServerAvailable,pyes.exceptions.InvalidIndexNameException):
                # best effort: tolerate loss of server or a race condition with
                # index rotation/creation/aliasing
                pass
        except Exception as e:
            sys.stderr.write("exception in events queue %r\n"%e)
def main():
    """Declare the MQ objects and relay events from the exchange into ES."""
    # Connect and declare the message queue/kombu objects.
    amqpUrl='amqp://{0}:{1}@{2}:{3}//'.format(options.mquser,options.mqpassword,options.mqserver,options.mqport)
    mqConn=Connection(amqpUrl)
    # Topic exchange for listening to mozdef.event.
    eventExchange=Exchange(name=options.eventexchange,type='topic',durable=False,delivery_mode=1)
    eventExchange(mqConn).declare()
    # Exclusive, auto-deleting queue bound to that exchange.
    eventQueue=Queue('',exchange=eventExchange,routing_key=options.routingkey,durable=False,exclusive=True,auto_delete=True)
    # Consume the queue and copy each event onto the target ES cluster.
    eventConsumer(mqConn,eventQueue,eventExchange,es).run()
def initConfig():
    """Populate the global options from the config file, with defaults."""
    # AMQP broker hostname to consume events from.
    options.mqserver=getConfig('mqserver','localhost',options.configfile)
    # Exchange name and routing key to bind to for mozdef events.
    options.eventexchange=getConfig('eventexchange','events',options.configfile)
    options.routingkey=getConfig('routingkey','mozdef.event',options.configfile)
    # Comma-separated list of target elasticsearch server URLs.
    options.esservers=list(getConfig('esservers','http://localhost:9200',options.configfile).split(','))
    #how many messages to ask for at once.
    options.prefetch=getConfig('prefetch',1,options.configfile)
    # Broker credentials and port.
    options.mquser=getConfig('mquser','guest',options.configfile)
    options.mqpassword=getConfig('mqpassword','guest',options.configfile)
    options.mqport=getConfig('mqport',5672,options.configfile)
if __name__ == '__main__':
    # Single -c option: config file path, defaulting to <scriptname>.conf.
    parser=OptionParser()
    parser.add_option("-c", dest='configfile' , default=sys.argv[0].replace('.py','.conf'), help="configuration file to use")
    (options,args) = parser.parse_args()
    initConfig()
    #open ES connection globally so we don't waste time opening it per message
    es=esConnect()
main() | mpl-2.0 | Python | |
9f3b3f068bfb53b00a2ec8420816c6deb02729e3 | Add a configuration to fetch "ios_internal" project. | primiano/depot_tools,primiano/depot_tools,CoherentLabs/depot_tools,CoherentLabs/depot_tools,primiano/depot_tools | fetch_configs/ios_internal.py | fetch_configs/ios_internal.py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import config_util # pylint: disable=F0401
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=W0232
class IOSInternal(config_util.Config):
  """Basic Config class for Chrome on iOS."""

  @staticmethod
  def fetch_spec(props):
    """Build the gclient spec for checking out src/ios_internal."""
    solution = {
        'name': 'src/ios_internal',
        'url': 'https://chrome-internal.googlesource.com/chrome/ios_internal.git',
        'deps_file': 'DEPS',
        'managed': False,
        'custom_deps': {},
        'safesync_url': '',
    }
    spec = {'solutions': [solution]}
    # target_os may arrive as a comma-separated list; default to iOS only.
    requested_os = props.get('target_os')
    spec['target_os'] = requested_os.split(',') if requested_os else ['ios']
    if props.get('target_os_only'):
      spec['target_os_only'] = props['target_os_only']
    return {
        'type': 'gclient_git',
        'gclient_git_spec': spec,
    }

  @staticmethod
  def expected_root(_props):
    """The fetched checkout is rooted at src/."""
    return 'src'
def main(argv=None):
  """Entry point: let the IOSInternal config handle the command line."""
  config = IOSInternal()
  return config.handle_args(argv)
if __name__ == '__main__':
  # Invoked directly (e.g. by fetch.py); exit with handle_args' return code.
  sys.exit(main(sys.argv))
| bsd-3-clause | Python | |
583155db6a85808c69fa25ba4959ebd370aa2fba | Add script to check if modules up | dimkarakostas/unimeet,dimkarakostas/unimeet,dimkarakostas/unimeet,dimkarakostas/unimeet | etc/config/check_modules.py | etc/config/check_modules.py | from subprocess import call
import os
# Service endpoints to probe.
modules = [
    'backend.unichat.eu',
    'realtime.unichat.eu',
    'presence.unichat.eu',
    'matchmaker.unichat.eu',
]


def _ping(host):
    """Fire a 1-second curl at `host`, discarding all output.

    The exit status is ignored, matching the original best-effort behaviour.
    NOTE(review): to actually report up/down status, inspect call()'s return
    value (0 means curl reached the host).
    """
    with open(os.devnull, 'w') as devnull:
        call(
            ['curl', '-m', '1', host],
            stdout=devnull,
            stderr=devnull
        )


# Guard so importing this module does not trigger network probes.
if __name__ == '__main__':
    for m in modules:
        _ping(m)
| mit | Python | |
c641456dc339169eb7bb4fb6ddc6e5e14cfba80e | add ROACH monitor | HERA-Team/hera_mc,HERA-Team/hera_mc,HERA-Team/Monitor_and_Control | scripts/mc_monitor_roach_temps.py | scripts/mc_monitor_roach_temps.py | #! /usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
# Copyright 2017 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""Gather temperatures from the correlator ROACH devices and log them into M&C
The temperatures cycle out of the Redis store every minute, so cron can't
sample quickly enough (and starting a new process every minute feels like a
bit much).
"""
from __future__ import absolute_import, division, print_function
import sqlalchemy.exc
import sys
import time
import traceback
from hera_mc import mc
# Seconds between successive Redis polls (shorter than the 60s Redis expiry).
MONITORING_INTERVAL = 45

parser = mc.get_mc_argument_parser()
args = parser.parse_args()
db = mc.connect_to_mc_db(args)

with db.sessionmaker() as session:
    try:
        while True:
            time.sleep(MONITORING_INTERVAL)

            # Pull the latest ROACH temperatures out of Redis into the session;
            # log and keep looping on any failure.
            try:
                session.add_roach_temperature_from_redis()
            except Exception:
                print('%s -- error adding ROACH temperatures' % time.asctime(),
                      file=sys.stderr)
                traceback.print_exc(file=sys.stderr)
                continue

            # Persist; roll back on SQL errors so the session stays usable.
            try:
                session.commit()
            except sqlalchemy.exc.SQLAlchemyError:
                print('%s -- SQL error committing new temperature data' % time.asctime(),
                      file=sys.stderr)
                traceback.print_exc(file=sys.stderr)
                session.rollback()
                continue
            except Exception:
                print('%s -- error committing new temperature data' % time.asctime(),
                      file=sys.stderr)
                traceback.print_exc(file=sys.stderr)
                continue
    except KeyboardInterrupt:
        pass
| bsd-2-clause | Python | |
fbf6542af6001e385612d0e2e98dbae357a52e77 | use setuptools to dynamic load freeze pyzmq egg | Mustard-Systems-Ltd/pyzmq,caidongyun/pyzmq,swn1/pyzmq,caidongyun/pyzmq,swn1/pyzmq,dash-dash/pyzmq,dash-dash/pyzmq,dash-dash/pyzmq,swn1/pyzmq,ArvinPan/pyzmq,caidongyun/pyzmq,yyt030/pyzmq,ArvinPan/pyzmq,Mustard-Systems-Ltd/pyzmq,yyt030/pyzmq,Mustard-Systems-Ltd/pyzmq,yyt030/pyzmq,ArvinPan/pyzmq | zmq/__init__.py | zmq/__init__.py | """Python bindings for 0MQ."""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2012 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import sys
import glob
# load bundled libzmq, if there is one:
# NOTE(review): suite indentation was lost in this extraction; the bodies
# below must be re-indented against the original file before running.
here = os.path.dirname(__file__)
bundled = []
# Collect any bundled libzmq binaries shipped next to this package
# (Windows .pyd/.dll, Linux .so, macOS .dylib).
for ext in ('pyd', 'so', 'dll', 'dylib'):
bundled.extend(glob.glob(os.path.join(here, 'libzmq*.%s*' % ext)))
if bundled:
import ctypes
if bundled[0].endswith('.pyd'):
# a Windows Extension
_libzmq = ctypes.cdll.LoadLibrary(bundled[0])
else:
# RTLD_GLOBAL so extension modules loaded later can resolve libzmq symbols.
_libzmq = ctypes.CDLL(bundled[0], mode=ctypes.RTLD_GLOBAL)
del ctypes
else:
import imp
pkg_resources = None
ext_type = 0
ext_path = ''
try:
# Otherwise try a libzmq packaged as the 'zmq.libzmq' egg resource;
# suffix type 3 is imp.C_EXTENSION.
import pkg_resources
for ext, _, ext_type in imp.get_suffixes():
if ext_type == 3:
ext_path = pkg_resources.resource_filename('zmq.libzmq', 'libzmq'+ext)
imp.load_dynamic('zmq.libzmq', ext_path)
break
except ImportError as e:
# No setuptools / no bundled egg -- fall back to a system libzmq.
pass
finally:
del imp, pkg_resources, ext_type, ext_path
# init Python threads
if 'PyPy' not in sys.version:
try:
from zmq.utils import initthreads # initialize threads
except ImportError as e:
raise ImportError("%s\nAre you trying to `import zmq` from the pyzmq source dir?" % e)
else:
initthreads.init_threads()
del os, sys, glob, here, bundled, ext
# zmq top-level imports
from zmq.backend import *
from zmq import sugar
from zmq.sugar import *
from zmq import devices
def get_includes():
    """Return a list of directories to include for linking against pyzmq with cython."""
    from os.path import abspath, dirname, join, pardir
    base = dirname(__file__)
    parent = abspath(join(base, pardir))
    include_dirs = [parent]
    include_dirs.extend(join(parent, base, sub) for sub in ('utils',))
    return include_dirs
__all__ = ['get_includes'] + sugar.__all__ + backend.__all__
| """Python bindings for 0MQ."""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2012 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import sys
import glob
# load bundled libzmq, if there is one:
here = os.path.dirname(__file__)
bundled = []
for ext in ('pyd', 'so', 'dll', 'dylib'):
bundled.extend(glob.glob(os.path.join(here, 'libzmq*.%s*' % ext)))
if bundled:
import ctypes
if bundled[0].endswith('.pyd'):
# a Windows Extension
_libzmq = ctypes.cdll.LoadLibrary(bundled[0])
else:
_libzmq = ctypes.CDLL(bundled[0], mode=ctypes.RTLD_GLOBAL)
del ctypes
else:
import imp
pkg_resources = None
try:
import pkg_resources
ext = next(ext for ext, _, _type in imp.get_suffixes() if _type == 3)
imp.load_dynamic('zmq.libzmq', pkg_resources.resource_filename('zmq.libzmq', 'libzmq'+ext))
except ImportError as e:
pass
finally:
del imp, pkg_resources
# init Python threads
if 'PyPy' not in sys.version:
try:
from zmq.utils import initthreads # initialize threads
except ImportError as e:
raise ImportError("%s\nAre you trying to `import zmq` from the pyzmq source dir?" % e)
else:
initthreads.init_threads()
del os, sys, glob, here, bundled, ext
# zmq top-level imports
from zmq.backend import *
from zmq import sugar
from zmq.sugar import *
from zmq import devices
def get_includes():
"""Return a list of directories to include for linking against pyzmq with cython."""
from os.path import join, dirname, abspath, pardir
base = dirname(__file__)
parent = abspath(join(base, pardir))
return [ parent ] + [ join(parent, base, subdir) for subdir in ('utils',) ]
__all__ = ['get_includes'] + sugar.__all__ + backend.__all__
| bsd-3-clause | Python |
50db2fa37aab219c9273cf3f76269de11e2dc86b | Add a migration to allow Auditor role to see mappings defined in the Audit context. | andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,prasannav7/ggrc-core,josthkko/ggrc-core,hyperNURb/ggrc-core,jmakov/ggrc-core,josthkko/ggrc-core,edofic/ggrc-core,selahssea/ggrc-core,hyperNURb/ggrc-core,kr41/ggrc-core,NejcZupec/ggrc-core,jmakov/ggrc-core,prasannav7/ggrc-core,edofic/ggrc-core,VinnieJohns/ggrc-core,edofic/ggrc-core,uskudnik/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,jmakov/ggrc-core,hasanalom/ggrc-core,VinnieJohns/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,vladan-m/ggrc-core,NejcZupec/ggrc-core,VinnieJohns/ggrc-core,hasanalom/ggrc-core,josthkko/ggrc-core,vladan-m/ggrc-core,josthkko/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,jmakov/ggrc-core,AleksNeStu/ggrc-core,uskudnik/ggrc-core,prasannav7/ggrc-core,plamut/ggrc-core,uskudnik/ggrc-core,hasanalom/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,uskudnik/ggrc-core,vladan-m/ggrc-core,kr41/ggrc-core,plamut/ggrc-core,vladan-m/ggrc-core,hasanalom/ggrc-core,uskudnik/ggrc-core,vladan-m/ggrc-core,hyperNURb/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,kr41/ggrc-core,AleksNeStu/ggrc-core,jmakov/ggrc-core,NejcZupec/ggrc-core,selahssea/ggrc-core,hasanalom/ggrc-core,hyperNURb/ggrc-core,AleksNeStu/ggrc-core,j0gurt/ggrc-core,hyperNURb/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,prasannav7/ggrc-core | src/ggrc_basic_permissions/migrations/versions/20131204014446_54b6efd65a93_add_mappings_to_audi.py | src/ggrc_basic_permissions/migrations/versions/20131204014446_54b6efd65a93_add_mappings_to_audi.py |
"""Add mappings to Auditor role
Revision ID: 54b6efd65a93
Revises: 13b49798db19
Create Date: 2013-12-04 01:44:46.023974
"""
# revision identifiers, used by Alembic.
revision = '54b6efd65a93'
down_revision = '13b49798db19'
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column, select
import json
# Minimal table stub: only the columns this migration touches.
roles_table = table(
    'roles',
    column('id', sa.Integer),
    column('name', sa.String),
    column('permissions_json', sa.String),
)

# Mapping object types the Auditor role gains read access to.
mapping_types = [
    'ObjectControl', 'ObjectDocument', 'ObjectObjective', 'ObjectPerson',
    'ObjectSection', 'Relationship',
]
def get_auditor_permissions():
    """Fetch and decode the permissions JSON stored on the 'Auditor' role."""
    connection = op.get_bind()
    row = connection.execute(
        select([roles_table.c.id, roles_table.c.permissions_json])
        .where(roles_table.c.name == 'Auditor')).fetchone()
    return json.loads(row.permissions_json)
def update_auditor_permissions(permissions):
    """Serialize *permissions* back onto the 'Auditor' role row."""
    op.execute(
        roles_table.update()
        .values(permissions_json=json.dumps(permissions))
        .where(roles_table.c.name == 'Auditor'))
def upgrade():
    """Grant the Auditor role read access to the mapping types."""
    permissions = get_auditor_permissions()
    permissions['read'].extend(mapping_types)
    update_auditor_permissions(permissions)
def downgrade():
    """Revoke the mapping-type read access granted by upgrade()."""
    permissions = get_auditor_permissions()
    for mapping_type in mapping_types:
        permissions['read'].remove(mapping_type)
    update_auditor_permissions(permissions)
| apache-2.0 | Python | |
17ae440f509ada010ee3e3b84f0a2c50b196ff82 | Add test for pickle | spacy-io/thinc,explosion/thinc,explosion/thinc,explosion/thinc,spacy-io/thinc,spacy-io/thinc,explosion/thinc | thinc/tests/integration/test_pickle.py | thinc/tests/integration/test_pickle.py | from __future__ import unicode_literals
import pytest
import pickle
from ...api import with_flatten
from ...v2v import Affine
@pytest.fixture
def affine():
    """Small Affine layer (nO=5, nI=3) shared by the tests below."""
    return Affine(5, 3)
def test_pickle_with_flatten(affine):
    """A with_flatten-wrapped model must survive a pickle round trip."""
    inputs = [affine.ops.allocate((2, 3)),
              affine.ops.allocate((4, 3))]
    model = with_flatten(affine)
    restored = pickle.loads(pickle.dumps(model))
    outputs = restored(inputs)
    assert len(outputs) == 2
    assert outputs[0].shape == (inputs[0].shape[0], affine.nO)
    assert outputs[1].shape == (inputs[1].shape[0], affine.nO)
| mit | Python | |
6be2917b4f46c55eefcbfe57bdc517f9316fe897 | Add the generated version file | jupyter/jupyterlab,charnpreetsingh185/jupyterlab,charnpreetsingh185/jupyterlab,eskirk/jupyterlab,eskirk/jupyterlab,eskirk/jupyterlab,jupyter/jupyterlab,charnpreetsingh185/jupyterlab,jupyter/jupyterlab,eskirk/jupyterlab,jupyter/jupyterlab,jupyter/jupyterlab,charnpreetsingh185/jupyterlab,eskirk/jupyterlab,charnpreetsingh185/jupyterlab | jupyterlab/_version.py | jupyterlab/_version.py | # This file is auto-generated, do not edit!
# Package version string; regenerate rather than hand-edit (file is generated).
__version__ = "0.7.0"
| bsd-3-clause | Python | |
c74ce3d4561a7367903863aaabe1af113d43aa0c | Add base model obect to project. | 0xporky/mgnemu-python | mgnemu/models/BaseModel.py | mgnemu/models/BaseModel.py | # -*- coding: utf-8 -*-
"""
Base model of project.
Contains to basic methods:
dumps(object_data) - writes object into json.
loads(json_data) - converts json into model object.
"""
import json
class BaseModel():
    """Base model of the project: JSON (de)serialization helpers.

    The original methods were declared without ``self``, so calling them on
    an instance passed the instance as the data argument and failed.  They
    are now static methods, which keeps ``BaseModel.dumps(...)`` working and
    additionally allows ``instance.dumps(...)``.
    """

    @staticmethod
    def dumps(object_data):
        """Serialize *object_data* to a JSON string."""
        return json.dumps(object_data)

    @staticmethod
    def loads(json_data):
        """Parse a JSON string into Python objects."""
        return json.loads(json_data)
| mit | Python | |
4bbdf36d7f001d9dc7f6e451bd733698895646b6 | Create carpenter.py | 11harrisonh/school-stuff | carpenter.py | carpenter.py | # -*- coding: cp1252 -*-
print("Welcome to Harry Harrison's Carpentization IV: Beyond the Window\n'Build a Window to Stand the Test of Time'")
raw_input("Press enter to start the program")
priceOfWood = int( input("How expensive is the wood per metre? (in pounds): ") * 100)
windows = int( input("How many windows would you like?: "))
firstHeight = int( input("How high is your first window? (in meters): "))
firstWidth = int( input("How wide is your first window? (in meters): "))
heights = [firstHeight]
widths = [firstWidth]
totalCost = priceOfWood * firstHeight * firstWidth
windowsDone = 1
while windowsDone != windows:
heights.extend([int(input("(Window %d) How high is this next window?:" % windowsDone + 1))])
widths.extend([int(input("How wide is this window?: "))])
totalCost = (totalCost + heights[windowsDone] * widths[windowsDone] * priceOfWood)
windowsDone = windowsDone + 1
time = int( input("How long will it take to make a window? (hours): "))
labor = int( input("What are the labor costs per hour? (pounds): "))
totalCost = totalCost + (windows * time * labor)
tC_float = float(totalCost / 100)
print("The total cost is: %f pounds" % tC_float)
| mit | Python | |
a574db21148de297970647d137d0f9b094a4dc46 | add local storage driver for grypedb files | anchore/anchore-engine,anchore/anchore-engine,anchore/anchore-engine | anchore_engine/services/policy_engine/engine/feeds/storage.py | anchore_engine/services/policy_engine/engine/feeds/storage.py | import hashlib
import io
import tempfile
from contextlib import contextmanager
from os import path
from types import TracebackType
from typing import Generator, Optional, Type
class ChecksumMismatchError(Exception):
    """Raised when file data is corrupt (computed checksum != expected).

    :param expected_checksum: the checksum the data was supposed to have
    :param actual_checksum: the checksum actually computed from the data
    """

    def __init__(self, expected_checksum: str, actual_checksum: str) -> None:
        message = (
            "GrypeDB Checksum does not match! "
            "Expected: {}, Actual: {}".format(expected_checksum, actual_checksum)
        )
        super().__init__(message)
class GrypeDBFile:
    """Reader/writer for a single grype DB archive inside a temp directory.

    Instantiated by GrypeDBStorage with the path of the temporary parent
    directory that caches the Grype DB.
    """

    @classmethod
    def verify_integrity(cls, file_data: bytes, expected_checksum: str) -> None:
        """Compare the sha256 of *file_data* against *expected_checksum*.

        Raises ChecksumMismatchError when they differ.
        """
        computed = hashlib.sha256(file_data).hexdigest()
        if computed != expected_checksum:
            raise ChecksumMismatchError(expected_checksum, computed)

    def __init__(self, parent_directory_path: str) -> None:
        self.root_directory = parent_directory_path
        self._file_path: Optional[str] = None

    @contextmanager
    def create_file(self, checksum: str) -> Generator[io.BufferedIOBase, None, None]:
        """Yield a writable handle for '<checksum>.tar.gz' under root_directory.

        The handle is closed on context exit and the written bytes are then
        checked against *checksum* (ChecksumMismatchError on mismatch).
        """
        self._file_path = path.join(self.root_directory, "%s.tar.gz" % checksum)
        handle = open(self._file_path, "wb")
        try:
            yield handle
        finally:
            handle.close()
            self._verify_integrity(checksum)

    def _verify_integrity(self, expected_checksum: str) -> None:
        """Read back the created file and validate its sha256 checksum."""
        with open(self._file_path, "rb") as handle:
            self.verify_integrity(handle.read(), expected_checksum)

    @property
    def path(self) -> Optional[str]:
        """Path of the file created by create_file(), or None if not created."""
        return self._file_path
class GrypeDBStorage:
    """Context-managed disk cache for Grype DB.

    Wraps tempfile.TemporaryDirectory: the directory is created lazily on
    context entry, a GrypeDBFile bound to it is handed to the caller, and
    the directory is removed on exit (or when this object is deleted).
    """

    def __init__(self):
        self.directory: Optional[tempfile.TemporaryDirectory] = None
        self.grypedbfile: Optional[GrypeDBFile] = None

    def _create(self) -> None:
        """Allocate the temp directory and its GrypeDBFile wrapper."""
        self.directory = tempfile.TemporaryDirectory()
        self.grypedbfile = GrypeDBFile(self.directory.name)

    def __enter__(self) -> GrypeDBFile:
        """Create the directory if it does not exist yet; return the file helper."""
        if not self.directory:
            self._create()
        return self.grypedbfile

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> None:
        """Tear down the temp directory on context exit."""
        self._delete()

    def _delete(self) -> None:
        """Remove the temp directory; safe to call repeatedly."""
        if self.directory:
            self.directory.cleanup()
            self.directory = None

    def __del__(self) -> None:
        """Ensure the temp directory is cleaned up before garbage collection."""
        self._delete()
| apache-2.0 | Python | |
4f105f48b20e70415ede60c19cd7d6cdea07fc28 | Add tests for monitors | google/openhtf,fahhem/openhtf,grybmadsci/openhtf,ShaperTools/openhtf,ShaperTools/openhtf,google/openhtf,grybmadsci/openhtf,fahhem/openhtf,jettisonjoe/openhtf,jettisonjoe/openhtf,amyxchen/openhtf,ShaperTools/openhtf,google/openhtf,amyxchen/openhtf,jettisonjoe/openhtf,grybmadsci/openhtf,fahhem/openhtf,google/openhtf,jettisonjoe/openhtf,fahhem/openhtf,grybmadsci/openhtf,ShaperTools/openhtf,ShaperTools/openhtf | test/util/monitors_test.py | test/util/monitors_test.py | # Copyright 2016 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import time
import mock
from openhtf import plugs
from openhtf.util import monitors
class EmptyPlug(plugs.BasePlug):
    """Do-nothing plug used only to exercise plug injection in these tests."""
    pass
class TestMonitors(unittest.TestCase):
    """Tests for the openhtf.util.monitors.monitors decorator."""

    def setUp(self):
        self.phase_data = mock.MagicMock()

        def provide_plugs(plug_specs):
            # Instantiate each requested plug class by name.
            return {name: cls() for name, cls in plug_specs}

        self.phase_data.plug_manager.ProvidePlugs = provide_plugs

    def testBasics(self):
        def monitor_func(test):
            return 1

        @monitors.monitors('meas', monitor_func, poll_interval_ms=10)
        def phase(test):
            time.sleep(0.01)

        phase(self.phase_data)
        _, first_meas, _ = (
            self.phase_data.measurements.meas.__setitem__.mock_calls[0])
        assert first_meas[0] == 0, 'At time 0, there should be a call made.'
        assert first_meas[1] == 1, "And it should be the monitor func's return val"

    def testPlugs(self):
        @plugs.plug(empty=EmptyPlug)
        def monitor(test, empty):
            return 2

        @monitors.monitors('meas', monitor, poll_interval_ms=10)
        def phase(test):
            time.sleep(0.01)

        phase(self.phase_data)
        _, first_meas, _ = (
            self.phase_data.measurements.meas.__setitem__.mock_calls[0])
        assert first_meas[0] == 0, 'At time 0, there should be a call made.'
        assert first_meas[1] == 2, "And it should be the monitor func's return val"
| apache-2.0 | Python | |
51ee3dae7cab41ff46cbd1cde87db9cb7997e5f9 | Add a separate file for loading app views | gr1d99/shopping-list,gr1d99/shopping-list,gr1d99/shopping-list | views.py | views.py | from functools import wraps
from flask import flash, redirect, render_template, request, session, url_for
from flask.views import View
def check_shoppinglist(func):
    """Decorator: flash a hint when the session holds no shopping list."""
    @wraps(func)
    def _wrapped(view):
        if not session.get('shopping_list', {}):
            flash('You do not have any shopping list, add some through your dashboard')
        return func(view)
    return _wrapped
class IndexView(View):
    """Landing page: renders index.html with the session's shopping lists."""
    methods = ['GET', ]

    @check_shoppinglist
    def dispatch_request(self):
        current_lists = session.get('shopping_list', {})
        return render_template('index.html', shopping_list=current_lists)
class DashboardView(View):
    """User dashboard: shows the shopping lists stored in the session."""
    methods = ['GET', ]

    @check_shoppinglist
    def dispatch_request(self):
        current_lists = session.get('shopping_list', {})
        return render_template('dashboard.html', shopping_list=current_lists)
class LoginView(View):
    """Login page: stores the posted credentials in the session and redirects."""
    methods = ['GET', 'POST']

    def dispatch_request(self):
        if request.method == 'POST':
            username = request.form.get('username')
            password = request.form.get('password')
            # NOTE(review): credentials go straight into the plain session with
            # no verification -- presumably placeholder auth; confirm intent.
            session['users'] = {}
            session.get('users').update({'username': username, 'password': password})
            flash('You are logged in')
            return redirect(url_for('index'))
        return render_template('login.html')
class RegisterView(View):
    """Registration page: simply renders the registration template."""

    def dispatch_request(self):
        return render_template('register.html')
class AddItemsView(View):
    """Create a named shopping list from the three posted item fields."""
    methods = ['GET', 'POST']

    def dispatch_request(self):
        if request.method == 'POST':
            if 'shopping_list' not in session:
                session['shopping_list'] = {}
            shopping_list_name = request.form.get('name')
            # The form exposes exactly three fixed item fields.
            item_names = ['item1', 'item2', 'item3']
            items = [request.form.get(item) for item in item_names]
            session['shopping_list'].update({shopping_list_name: items})
            # Fixed the user-facing message (was "Sucess!, you items are ...").
            flash('Success! Your items are %(items)s, to view them go to your dashboard'
                  % dict(items=session['shopping_list']))
            return redirect(url_for('create-shopping-list'))
        return render_template('shopping_list/create_shopping_list.html')
class ShoppingListDetail(View):
    """Render one shopping list, looked up by its ?name= query argument."""
    methods = ['GET', ]

    def dispatch_request(self):
        if 'shopping_list' not in session:
            flash('There seems not to be anything in your shopping list')
            return redirect(url_for('dashboard'))
        list_name = request.args.get('name')
        items = session['shopping_list'].get(list_name, None)
        if not items:
            flash('Oops it looks like your shopping list does not exist.')
            return redirect(url_for('dashboard'))
        return render_template('shopping_list/shopping_list_detail.html',
                               list_name=list_name, obj=items)
class RemoveSingleItem(View):
    """Remove a single item from a named shopping list held in the session.

    The original implementation looked the target list up but only flashed
    it, never removing the posted item; the removal is now performed.
    """
    methods = ['POST', ]

    def dispatch_request(self):
        shopping_list = request.form['shopping_list_name']
        item_name = request.form['item_name']
        user_shopping_list = session['shopping_list']
        target_shopping_list = user_shopping_list.get(shopping_list)
        if target_shopping_list and item_name in target_shopping_list:
            target_shopping_list.remove(item_name)
            # A nested session structure was mutated in place.
            session.modified = True
            flash('Removed %s from %s' % (item_name, shopping_list))
        else:
            flash('Could not find %s in %s' % (item_name, shopping_list))
return redirect(url_for('dashboard')) | mit | Python | |
ffb69023f1399d345a4d389f6864bd25c9285c18 | add if_for_while.py | hewentian/python-learning | src/python27/basic/if_for_while.py | src/python27/basic/if_for_while.py | # -*- coding: utf-8 -*-
# NOTE(review): Python 2 tutorial script (print statements, raw_input); the
# indentation of every if/for/while body was lost in this extraction and must
# be restored from the original file before running.
age = 20
# if/else basics.
if age >= 18:
print 'your age is', age
print 'adult'
else:
print 'your age is', age
print 'teenager'
age = 3
# if/elif/else chain.
if age >= 18:
print 'adult'
elif age >= 6:
print 'teenage'
else:
print 'kid'
# Any non-empty string is truthy.
if 'non null':
print 'True'
names = ['Michael', 'Bob', 'Tracy']
for name in names:
print name
# Sum 1..10 with an explicit loop.
sum1 = 0
for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
sum1 = sum1 + x;
print sum1
print range(5)
# Sum 0..100 using range().
sum1 = 0
for x in range(101):
sum1 = sum1 + x;
print sum1
# Sum of odd numbers 99, 97, ..., 1 with while.
sum1 = 0
n = 99
while n > 0:
sum1 = sum1 + n
n = n - 2
print sum1
# NOTE(review): raw_input returns a str, so this first comparison against the
# int 2000 uses Python 2's arbitrary mixed-type ordering, not numeric order;
# the int() variant below is the corrected version.
birth = raw_input('birth:')
if birth < 2000:
print '00ๅ'
else:
print '00ๅ'
birth = int(raw_input('birth:'))
if birth < 2000:
print '00ๅ'
else:
print '00ๅ'
| apache-2.0 | Python | |
cead4c6e8508f1504c57e3dfdc919ee88ee4cbbb | mark unuploaded | commaai/openpilot,commaai/openpilot,commaai/openpilot,commaai/openpilot,commaai/openpilot,commaai/openpilot | selfdrive/loggerd/tools/mark_unuploaded.py | selfdrive/loggerd/tools/mark_unuploaded.py | #!/usr/bin/env python3
import sys
from common.xattr import removexattr
from selfdrive.loggerd.uploader import UPLOAD_ATTR_NAME
# Strip the "uploaded" marker attribute from every log file given on argv.
for log_path in sys.argv[1:]:
    print("unmarking %s" % log_path)
    removexattr(log_path, UPLOAD_ATTR_NAME)
| mit | Python | |
acd843632e7e8608bef2d56eb2c805acf08602d2 | add phase estimation routine for pulse calibration | BBN-Q/Auspex,BBN-Q/Auspex,BBN-Q/Auspex,BBN-Q/Auspex | src/auspex/pulsecal/phase_estimation.py | src/auspex/pulsecal/phase_estimation.py | # Copyright 2016 Raytheon BBN Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
import numpy as np
import matplotlib.pyplot as plt
def restrict(phase):
out = np.mod( phase + np.pi, 2*np.pi, ) - np.pi
return out
def phase_estimation( data_in, vardata_in, verbose=False):
"""Estimates pulse rotation angle from a sequence of P^k experiments, where
k is of the form 2^n. Uses the modified phase estimation algorithm from
Kimmel et al, quant-ph/1502.02677 (2015). Every experiment i doubled.
vardata should be the variance of the mean"""
#average together pairs of data points
avgdata = (data_in[0::2] + data_in[1::2])/2
# normalize data using the first two pulses to calibrate the "meter"
data = 1 + np.divide( 2*(avgdata[2:] - avgdata[0]), (avgdata[0] - avgdata[1]))
zdata = data[0::2]
xdata = data[1::2]
# similar scaling with variances
vardata = (vardata_in[0::2] + vardata_in[1::2])/2
vardata = np.divide( vardata[2:] * 2, abs(avgdata[0] - avgdata[1])**2)
zvar = vardata[0::2]
xvar = vardata[1::2]
phases = np.arctan2(xdata, zdata)
print(phases)
distances = np.sqrt(xdata**2 + zdata**2);
print(distances)
curGuess = phases[0]
phase = curGuess
sigma = np.pi
if verbose == True:
print('Current Guess: %f'%(curGuess))
for k in np.arange(1,len(phases)):
if verbose == True:
print('k: %d'%(k))
# Each step of phase estimation needs to assign the measured phase to
# the correct half circle. We will conservatively require that the
# (x,z) tuple is long enough that we can assign it to the correct
# quadrant of the circle with 2ฯ confidence
if distances[k] < 2*np.sqrt(xvar[k] + zvar[k]):
print('Phase estimation terminated at %dth pulse because the (x,z) vector is too short'%(k));
break
lowerBound = restrict(curGuess - np.pi/2**(k));
upperBound = restrict(curGuess + np.pi/2**(k));
possiblesTest = [ restrict((phases[k] + 2*n*np.pi)/2**(k)) for n in np.arange(0,2**(k)+1)]
if verbose == True:
print('Lower Bound: %f'%lowerBound)
print('Upper Bound: %f'%upperBound)
possibles=[]
for p in possiblesTest:
# NOTE: previous code did not handle upperbound == lowerBound
if lowerBound >= upperBound:
satisfiesLB = p > lowerBound or p < 0.
satisfiesUP = p < upperBound or p > 0.
else:
satisfiesLB = p > lowerBound
satisfiesUP = p < upperBound
if satisfiesLB == True and satisfiesUP == True:
possibles.append(p)
curGuess = possibles[0];
if verbose == True:
print('Current Guess: %f'%(curGuess))
phase = curGuess;
sigma = np.max(np.abs(restrict(curGuess - lowerBound)), np.abs(restrict(curGuess - upperBound)));
return phase,sigma
def simulate_measurement(amp, target, numPulses):
idealAmp = 0.34
noiseScale = 0.05
polarization = 0.99 # residual polarization after each pulse
# data representing over/under rotation of pi/2 pulse
# theta = pi/2 * (amp/idealAmp);
theta = target * (amp/idealAmp)
ks = [ 2**k for k in np.arange(0,numPulses+1)]
xdata = [ polarization**x * np.sin(x*theta) for x in ks];
xdata = np.insert(xdata,0,-1.0)
zdata = [ polarization**x * np.cos(x*theta) for x in ks];
zdata = np.insert(zdata,0,1.0)
data = np.array([zdata,xdata]).flatten('F')
data = np.tile(data,(2,1)).flatten('F')
# add noise
#data += noiseScale * np.random.randn(len(data));
vardata = noiseScale**2 * np.ones((len(data,)));
return data, vardata
def main():
numPulses = 9
amp = .55
direction = 'X'
target = np.pi
data, vardata = simulate_measurement(amp, target, numPulses)
#print(data,vardata)
#plt.figure()
#plt.plot(data)
#plt.show()
phase, sigma = phase_estimation(data, vardata, verbose=True)
print(phase,sigma)
if __name__ == '__main__':
main()
| apache-2.0 | Python | |
9d667d2d194a9bb7a3c166e47b2c32cf33c7b19c | add code samples for recipe 1.19 | ordinary-developer/book_python_cookbook_3_ed_d_beazley_b_k_jones | code/ch_1-DATA_STRUCTURES_AND_ALGORITHMS/19-transforming_and_reducing_data_at_the_same_time/main.py | code/ch_1-DATA_STRUCTURES_AND_ALGORITHMS/19-transforming_and_reducing_data_at_the_same_time/main.py | def example_1():
# Body of example_1 (its `def` line is fused into the dataset metadata above):
# sum of squares via a generator expression fed straight to sum(); prints 55.
nums = [1, 2, 3, 4, 5]
s = sum(x * x for x in nums)
print(s)
def example_2():
    """Use any() with a generator to test whether the cwd holds .py files."""
    import os
    files = os.listdir('./')
    if any(name.endswith('.py') for name in files):
        # Fixed message typo ("Ther" -> "There").
        print('There be python!')
    else:
        print('Sorry, no python.')
def example_3():
    """Comma-join the str() of every element in a mixed-type tuple."""
    row = ('ACME', 50, 123.45)
    print(','.join(str(field) for field in row))
def example_4():
    """min() over a generator of values vs. min() with a key function."""
    portfolio = [
        {'name': 'GOOG', 'shares': 50},
        {'name': 'YHOO', 'shares': 75},
        {'name': 'AOL', 'shares': 20},
        {'name': 'SCOX', 'shares': 65},
    ]
    # Smallest share count alone...
    print(min(s['shares'] for s in portfolio))
    # ...versus the whole record holding it.
    print(min(portfolio, key=lambda d: d['shares']))
def example_5():
    """Three equivalent argument forms for sum() over squares (all print 55)."""
    nums = [1, 2, 3, 4, 5]
    total_a = sum((x * x for x in nums))   # explicit generator argument
    total_b = sum(x * x for x in nums)     # implicit generator argument
    total_c = sum([x * x for x in nums])   # materialized list
    print(total_a)
    print(total_b)
    print(total_c)
if __name__ == '__main__':
    # Run every example in order when executed as a script.
    example_1()
    example_2()
    example_3()
    example_4()
    example_5()
| mit | Python | |
d5c96dbd94119d10bd8fcf506ba389d56b5e0fca | Add new package py-libensemble (#6525) | matthiasdiener/spack,mfherbst/spack,mfherbst/spack,krafczyk/spack,mfherbst/spack,LLNL/spack,krafczyk/spack,mfherbst/spack,matthiasdiener/spack,EmreAtes/spack,krafczyk/spack,tmerrick1/spack,matthiasdiener/spack,iulian787/spack,iulian787/spack,tmerrick1/spack,LLNL/spack,tmerrick1/spack,LLNL/spack,EmreAtes/spack,LLNL/spack,mfherbst/spack,krafczyk/spack,tmerrick1/spack,EmreAtes/spack,krafczyk/spack,EmreAtes/spack,matthiasdiener/spack,iulian787/spack,EmreAtes/spack,tmerrick1/spack,iulian787/spack,iulian787/spack,LLNL/spack,matthiasdiener/spack | var/spack/repos/builtin/packages/py-libensemble/package.py | var/spack/repos/builtin/packages/py-libensemble/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyLibensemble(PythonPackage):
"""Library for managing ensemble-like collections of computations."""
# NOTE(review): class-body indentation was lost in this extraction; every
# directive below belongs inside the class suite.
homepage = "https://libensemble.readthedocs.io"
url = "https://pypi.io/packages/source/l/libensemble/libensemble-0.1.0.tar.gz"
# Track upstream master plus the one released sdist (md5 checksum).
version('develop', git='https://github.com/Libensemble/libensemble.git', branch='master')
version('0.1.0', '0c3d45dd139429de1a5273e5bd8e46ec')
# Build/runtime requirements; type tuples follow Spack's (build, run) convention.
depends_on('python@2.7:2.8,3.3:')
depends_on('py-setuptools', type='build')
depends_on('mpi')
depends_on('py-mpi4py@2.0:', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-petsc4py@3.5:', type=('build', 'run'))
depends_on('nlopt', type=('build', 'run'))
| lgpl-2.1 | Python | |
17fe9d01b6771888a44d6a039b337a84c32e64e8 | Add test case for interval_sum | JaviMerino/bart,ARM-software/bart | tests/test_common_utils.py | tests/test_common_utils.py | from bart.common import Utils
import unittest
import pandas as pd
class TestCommonUtils(unittest.TestCase):
    """Tests for bart.common.Utils."""

    def __init__(self, *args, **kwargs):
        super(TestCommonUtils, self).__init__(*args, **kwargs)

    def test_interval_sum(self):
        """Test Utils Function: interval_sum"""
        cases = [
            ([0, 0, 1, 1, 1, 1, 0, 0], (1,), 3),
            ([False, False, True, True, True, True, False, False], (), 3),
            ([0, 0, 1, 0, 0, 0], (1,), 0),
            ([0, 0, 1, 0, 1, 1], (1,), 1),
        ]
        for values, extra_args, expected in cases:
            series = pd.Series(values)
            self.assertEqual(Utils.interval_sum(series, *extra_args), expected)
| apache-2.0 | Python | |
5ac6848633d9eb373cf44aed77645449a5a926cf | Create session class | trond-snekvik/pyUV | session.py | session.py | import hashlib
import binascii
import json
import os
from os import path, environ
class Session:
    """Per-directory session options persisted as JSON under $TEMP.

    The store filename is derived from a SHA-1 of the directory path, so
    each directory gets a stable, private options file across runs.

    Fixes vs. the original: removed a stray Python-2-only debug ``print``
    statement, made the hashing/hexlify path work on both Python 2 and 3
    (``hashlib`` needs bytes and ``hexlify`` returns bytes on Py3), and
    collapsed ``store()``'s two divergent write branches into one merge.
    """

    def __init__(self, directory):
        self.directory = directory
        self.options = {}
        # Hash the directory path to a short, filesystem-safe file name.
        digest = hashlib.sha1(self.directory.encode("utf-8")).digest()
        name = binascii.hexlify(digest).decode("ascii")[:16] + ".json"
        self.tempstore = path.join(environ["TEMP"], name)
        if path.exists(self.tempstore):
            with open(self.tempstore, "r") as f:
                self.options = json.load(f)

    def set(self, key, value):
        """Set a single option and persist immediately."""
        self.options[key] = value
        self.store()

    def store(self):
        """Write the current options to the session file.

        Keys already on disk but absent from ``self.options`` are kept;
        in-memory values win on conflict (same merge order as before).
        """
        merged = {}
        if path.exists(self.tempstore):
            with open(self.tempstore, "r") as f:
                merged = json.load(f)
        merged.update(self.options)
        with open(self.tempstore, "w") as f:
            json.dump(merged, f)
| mit | Python | |
c10244d72051325659a85ac46207d25102ef225b | Create HBV_APRI_FIB4.py | kyokyos/bioinform | HBV_APRI_FIB4.py | HBV_APRI_FIB4.py | # -*- coding: utf-8 -*-
"""
Spyder Editor
APRIๅFIB4ๆจๆต่็บค็ปดๅๆ่็กฌๅๆ
ๅต
This is a temporary script file.
"""
import math
#APRI็ผฉๅ๏ผAST to Platelet Ratio Index
#ASTๅไฝiu/l
#PRIๅไฝ10**9/L
#ๅฆๆAPRI>2๏ผๅฏ่ฝๆ่็กฌๅ
def APRI(AST, upper_AST, PRI):
    """AST-to-Platelet Ratio Index.

    AST: aspartate aminotransferase (iu/l); upper_AST: its upper
    reference limit (iu/l); PRI: platelet count (10**9/L).
    An APRI above 2 suggests possible cirrhosis.
    """
    ast_percent_of_upper = (AST * 1.0 / upper_AST) * 100
    return ast_percent_of_upper / PRI
#FIB-4็ผฉๅFibrosis-4
#ageๅไฝ๏ผๅนด
#ASTๅALTๅไฝ๏ผU/L๏ผ๏ผU/Lๅiu/Lไธ่ฌๅฏไปฅ้็จ๏ผๅ่
ๆฏไธญๅฝๅไฝ๏ผๅ่
ๆฏๅฝ้
ๅไฝ๏ผ
def FIB4(age, AST, ALT, PRI):
    """Fibrosis-4 score: age * AST / (platelets * sqrt(ALT)).

    age in years; AST and ALT in U/L; PRI (platelet count) in 10**9/L.
    """
    denominator = PRI * math.sqrt(ALT)
    return (age * AST) / denominator
#่ๆ
ๅตๆจๆต
def Liver_condition(apri,fib4):
if apri>2:
print ("ๅฏ่ฝๅ็่็กฌๅ")
print("ๅฆๆๆฏๆ
ขๆงไน่ๆๆ่
๏ผ้่ฆ่่ๆ็
ๆฏ่ฏ็ฉๆฒป็")
if fib4<1.45:
print("ๆ ๆๆพ่็บค็ปดๅๆ2็บงไปฅไธ่็บค็ปดๅ๏ผ่ฝปๅบฆ็บค็ปดๅ๏ผ")
if fib4>3.25:
print("่็บค็ปดๅ็จๅบฆไธบ3๏ฝ4็บงๆไปฅไธ")
#ๆ็คบ
def Print_warming():
print("ๅ ็ฎๆณไธๆญๆน่ฟ๏ผ่ฎก็ฎ็ปๆไป
ไพๅ่ใ่ฏท้่ฎฟๆๆ็งๆ่็
็งไธไธๅป็")
def Print_unit():
print("็ๅๆๆ ๆฅ่ช่ๅๆฃๆตๅ่กๅธธ่งๆฃๆต")
print("ASTๅไฝ๏ผiu/l")
print("ALTๅไฝ๏ผU/L")
print("PRIๅไฝ๏ผ10**9/L")
print("ๅนด้พๅไฝ๏ผๅนด")
print("U/Lๅiu/Lไธ่ฌๅฏไปฅ้็จ๏ผๅ่
ๆฏไธญๅฝๅไฝ๏ผๅ่
ๆฏๅฝ้
ๅไฝ")
#ๆ็คบ
Print_warming()
#่พๅบ็ๅๅผๅไฝ
print("-"*30)
Print_unit()
print("-"*30)
print("")
print("")
#่พๅ
ฅๅๆฐ
print("่ฏท่พๅ
ฅไปฅไธๅๆฐ๏ผไพๅฆ10,23.5็ญ็ญ๏ผ๏ผ")
AST=float(input("ๅคฉ้จๅฌๆฐจ้
ธ่ฝฌ็งป้
ถๅผ๏ผAST๏ผ:"))
upper_AST=float(input("ๅคฉ้จๅฌๆฐจ้
ธ่ฝฌ็งป้
ถ๏ผAST๏ผไธ้ๅผ:"))
ALT=float(input("ไธๆฐจ้
ธๆฐจๅบ่ฝฌ็งป้
ถๅผ๏ผALT๏ผ:"))
PRI=float(input("่กๅฐๆฟ่ฎกๆฐๅผ๏ผPRI๏ผ:"))
age=float(input("ๅนด้พ:"))
apri=APRI(AST,upper_AST,PRI)
fib4=FIB4(age,AST,ALT,PRI)
print("-"*30)
print("")
print("")
print("ๆจๆต็ปๆ:")
#่ๆ
ๅตๆจๆต
Liver_condition(apri,fib4)
| unlicense | Python | |
91347ba6d18706cb791ea3d9063392671c9f653f | add tests for load utils | robinandeer/chanjo | tests/load/test_load_utils.py | tests/load/test_load_utils.py | # -*- coding: utf-8 -*-
from chanjo.load.utils import exon, _exon_kwargs
DATA = {'chrom': 'chr1', 'chromStart': 100, 'chromEnd': 220, 'name': 'exon1',
'score': 0, 'strand': '+', 'sampleName': 'sample1', 'readCount': 10,
'meanCoverage': 6.341, 'thresholds': {10: 95.421, 20: 86.21, 100: 10.21}}
def test_exon():
    """exon() builds a record carrying the BED chrom/name fields."""
    record = exon(DATA)
    assert (record.chromosome, record.exon_id) == ('chr1', 'exon1')
def test__exon_kwargs():
    """_exon_kwargs() maps BED keys onto the exon model's kwargs."""
    mapped = _exon_kwargs(DATA)
    assert (mapped['chromosome'], mapped['exon_id']) == ('chr1', 'exon1')
| mit | Python | |
8b0a2abaf9c942f2fd49827e898700de54fdb8af | Add failing 2.6 test | pre-commit/pre-commit,philipgian/pre-commit,pre-commit/pre-commit,chriskuehl/pre-commit-1,chriskuehl/pre-commit-1,Teino1978-Corp/pre-commit,Teino1978-Corp/pre-commit,barrysteyn/pre-commit,philipgian/pre-commit,chriskuehl/pre-commit,philipgian/pre-commit,beni55/pre-commit,philipgian/pre-commit,pre-commit/pre-commit,Lucas-C/pre-commit,chriskuehl/pre-commit,pre-commit/pre-commit,chriskuehl/pre-commit,pre-commit/pre-commit,philipgian/pre-commit,barrysteyn/pre-commit,Lucas-C/pre-commit,pre-commit/pre-commit,dnephin/pre-commit,chriskuehl/pre-commit-1,Teino1978-Corp/pre-commit,Lucas-C/pre-commit,Lucas-C/pre-commit,dnephin/pre-commit,chriskuehl/pre-commit,Teino1978-Corp/pre-commit,pre-commit/pre-commit,pre-commit/pre-commit,pre-commit/pre-commit,beni55/pre-commit,barrysteyn/pre-commit,beni55/pre-commit,dnephin/pre-commit,pre-commit/pre-commit,barrysteyn/pre-commit,pre-commit/pre-commit,dnephin/pre-commit,Lucas-C/pre-commit,chriskuehl/pre-commit-1,beni55/pre-commit,Lucas-C/pre-commit,philipgian/pre-commit | tests/logging_handler_test.py | tests/logging_handler_test.py | import __builtin__
import mock
import pytest
from pre_commit import color
from pre_commit.logging_handler import LoggingHandler
@pytest.yield_fixture
def print_mock():
    # Patch the builtin ``print`` (Python 2 lives in __builtin__) so each
    # test can assert exactly what LoggingHandler writes to the console.
    with mock.patch.object(__builtin__, 'print', autospec=True) as print_mock:
        yield print_mock
class FakeLogRecord(object):
    """Bare-bones stand-in for logging.LogRecord used by the handler tests."""

    def __init__(self, message, levelname, levelno):
        # Only the three attributes the handler reads are modelled.
        self.message = message
        self.levelname = levelname
        self.levelno = levelno

    def getMessage(self):
        """Return the pre-formatted message, like LogRecord.getMessage()."""
        return self.message
def test_logging_handler_color(print_mock):
    # With color enabled, a WARNING record (level 30) is printed with the
    # '[WARNING]' tag wrapped in yellow ANSI escape codes.
    handler = LoggingHandler(True)
    handler.emit(FakeLogRecord('hi', 'WARNING', 30))
    print_mock.assert_called_once_with(
        color.YELLOW + '[WARNING]' + color.NORMAL + ' hi',
    )
def test_logging_handler_no_color(print_mock):
    # With color disabled, the same record is printed as plain text with
    # no ANSI escape codes.
    handler = LoggingHandler(False)
    handler.emit(FakeLogRecord('hi', 'WARNING', 30))
    print_mock.assert_called_once_with(
        '[WARNING] hi',
    )
| mit | Python | |
5c282b901986e689c68f0a43dbf2cf37b977d1b7 | fix tests | sumit12dec/pyquora,iammxt/pyquora,rohithpr/pyquora | tests/test_user_statistics.py | tests/test_user_statistics.py | from quora import User
expected_user_stat_keys = ['answers',
'edits',
'followers',
'following',
'questions',
'name',
'username'
]
class TestUserStatistics:
    """Smoke-tests quora.User.stats against a live profile (network I/O)."""

    # Stats are fetched once, at class-definition (import) time.
    test_stats = []
    test_stats.append(User('Christopher-J-Su').stats)
    # test_stats.append(User('Aaron-Ounn').stats())
    # test_stats.append(User('Elynn-Lee').stats())
    # test_stats.append(User('Jennifer-Apacible-1').stats)
    # TODO: add tests for nonexistant users and other edge cases

    def test_exists(self):
        """Every expected key is present and populated."""
        for stat in self.test_stats:
            for key in expected_user_stat_keys:
                # 'is not None' instead of truthiness: a count of 0 (e.g.
                # a user with zero answers) is legitimate and must not
                # fail the existence check. A missing key still raises
                # KeyError, which is the failure we want here.
                assert stat[key] is not None

    def test_type(self):
        """Counts are integers and identifying fields are strings."""
        # NOTE(review): 'posts' is checked here but missing from
        # expected_user_stat_keys -- confirm which list is correct.
        for stat in self.test_stats:
            assert isinstance(stat['answers'], (int, long))
            assert isinstance(stat['edits'], (int, long))
            assert isinstance(stat['followers'], (int, long))
            assert isinstance(stat['following'], (int, long))
            assert isinstance(stat['posts'], (int, long))
            assert isinstance(stat['questions'], (int, long))
            assert isinstance(stat['name'], str)
            assert isinstance(stat['username'], str)
| from quora import User
expected_user_stat_keys = ['answers',
'edits',
'followers',
'following',
'questions',
'name',
'username'
]
class TestUserStatistics:
    """Smoke-tests quora.User.stats against live profiles (network I/O)."""

    # Stats are fetched once, at class-definition (import) time.
    test_stats = []
    test_stats.append(User('Christopher-J-Su').stats)
    # test_stats.append(User('Aaron-Ounn').stats())
    # test_stats.append(User('Elynn-Lee').stats())
    test_stats.append(User('Jennifer-Apacible-1').stats)
    # TODO: add tests for nonexistant users and other edge cases

    def test_exists(self):
        # NOTE(review): truthiness assert fails for a legitimate count of
        # 0 (e.g. zero answers) -- consider 'is not None' instead.
        for stat in self.test_stats:
            for key in expected_user_stat_keys:
                assert stat[key]

    def test_type(self):
        # NOTE(review): 'posts' is asserted below but absent from
        # expected_user_stat_keys -- confirm which list is correct.
        # Python 2 only: 'long' is undefined on Python 3.
        for stat in self.test_stats:
            assert isinstance(stat['answers'], (int, long))
            assert isinstance(stat['edits'], (int, long))
            assert isinstance(stat['followers'], (int, long))
            assert isinstance(stat['following'], (int, long))
            assert isinstance(stat['posts'], (int, long))
            assert isinstance(stat['questions'], (int, long))
            assert isinstance(stat['name'], str)
            assert isinstance(stat['username'], str)
| agpl-3.0 | Python |
1f0e023e972954a2f654705f2c7b596fc56e90b8 | Add missing __init__.py file for proper PyPi inclusion of task. | google/starthinker,google/starthinker,google/starthinker | starthinker/task/drive/__init__.py | starthinker/task/drive/__init__.py | ###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
| apache-2.0 | Python | |
6e083bce7328dba9e84f16e9c12e0e129658176a | Create config618.py | davidfergusonaz/davidtest,davidfergusonaz/davidtest | config618.py | config618.py | provider "aws" {
access_key = "AKIAIDPZGSRRD6Q6XFLA"
secret_key = "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"
region = "${var.region}"
}
| mit | Python | |
689083a97625dd99633c3116565cdb498d63abd2 | Add functions to crosswalk file to select checkboxes | RyanJennings1/crosswalk | crosswalk.py | crosswalk.py | #!/usr/bin/python
from sys import argv
from selenium import webdriver
from bs4 import BeautifulSoup
import requests, time, time
class Crosswalk(object):
    """Automates subscribing an email address to crosswalk.com newsletters
    by ticking every newsletter checkbox with Selenium.

    NOTE(review): Python 2 code (raw_input, print statements). The email
    address is prompted for three separate times (getEmail is unused,
    openBrowser prompts, and enterEmail prompts again); only the last
    prompt's value is actually typed into the form -- confirm intent.
    """

    def getEmail(self):
        # Select email address to use
        # NOTE(review): currently unused; openBrowser prompts on its own.
        email = raw_input("Enter email address: ")
        return email

    def openBrowser(self):
        #email = self.getEmail()
        email = str(raw_input("Enter email address: "))
        # Open up webpage using selenium
        driver = webdriver.Firefox()
        driver.get("http://www.crosswalk.com/newsletters/")
        list1 = []
        list2 = self.bSoup()
        self.getCheckboxes(list1, list2)
        self.adjustCheckboxes(list1, driver)
        # NOTE(review): 'email' captured here is ignored by enterEmail,
        # which prompts again.
        self.enterEmail(driver, email)

    def bSoup(self):
        # Get the source text and create a list of
        # checkboxes using BeautifulSoup
        url = "http://www.crosswalk.com/newsletters/"
        source_code = requests.get(url)
        plain_text = source_code.text
        soup = BeautifulSoup(plain_text, "html.parser")
        list2 = soup.findAll("input", {"type":"checkbox"})
        return list2

    def getCheckboxes(self, list1, list2):
        # For each <input> create a string of the
        # checkbox number
        # NOTE(review): assumes the page lists exactly 185 checkboxes and
        # that slicing str(tag)[11:22] yields the id value -- both are
        # brittle against page/markup changes; confirm before relying on.
        for i in range(185):
            alpha = list2[i]
            beta = str(alpha)
            gamma = beta[11:22]
            list1.append(gamma)

    def adjustCheckboxes(self, list1, driver):
        # Adjust the checkbox number if there
        # is a space or quote mark in string
        for i in range(len(list1)):
            lst = list(list1[i])
            # NOTE(review): the inner loop recomputes completed_item from
            # the unmodified lst each pass, so only the final iteration's
            # trim takes effect -- effectively a single-pass strip.
            for j in range(len(lst)):
                completed_item = "".join(lst)
                if completed_item[-1] == '"':
                    completed_item = completed_item[0:-1]
                elif completed_item[-2] == '"':
                    completed_item = completed_item[0:-2]
                elif completed_item[-1] == ' ':
                    completed_item = completed_item[0:-1]
                elif completed_item[-2] == ' ':
                    completed_item = completed_item[0:-2]
            # Selected checkbox and click using selenium
            checkbox = driver.find_element_by_id(completed_item)
            checkbox.click()
        print "Box checking complete"

    def enterEmail(self, driver, email):
        # Select and print email using selenium
        # NOTE(review): re-prompts instead of using the 'email' argument.
        muhemail = str(raw_input("Enter email address: "))
        emailAddress = driver.find_element_by_class_name("emailAddress")
        emailAddress.send_keys(muhemail)
        print "Email address submitted"
| mit | Python | |
2acaec94042d1a7db93d0ccc0ed06672174f73fc | add tf dataset examples | jeffzhengye/pylearn,jeffzhengye/pylearn,jeffzhengye/pylearn,jeffzhengye/pylearn | tensorflow_learning/tf2/dataset.py | tensorflow_learning/tf2/dataset.py | # -*- coding: utf-8 -*-
'''
@author: jeffzhengye
@contact: yezheng@scuec.edu.cn
@file: dataset.py
@time: 2021/1/7 14:20
@desc:
'''
import tensorflow as tf
def test_cache_dataset():
    """Demonstrate tf.data on-disk caching of a mapped dataset."""
    def double(x):
        # Runs once per element on the first (uncached) pass.
        print(x, type(x))
        return 2 * x

    ds = tf.data.Dataset.range(5).map(double).cache("test.tfdataset")
    # Iterate once so the cache file is actually populated.
    for _ in ds:
        pass


if __name__ == '__main__':
    test_cache_dataset()
e6a3e2ac8267ae3a0f361138bd8cb25f82b12b9d | Create a tool-info module for AVR | sosy-lab/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,sosy-lab/benchexec | benchexec/tools/avr.py | benchexec/tools/avr.py | # This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.result as result
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool2):
"""
Tool info for AVR -- Abstractly Verifying Reachability
URL: https://github.com/aman-goel/avr
"""
def executable(self, tool_locator):
return tool_locator.find_executable("avr.py")
def name(self):
return "AVR"
def cmdline(self, executable, options, task, rlimits):
return [executable] + options + [task.single_input_file]
def determine_result(self, run):
"""
@return: status of AVR after executing a run
"""
if run.was_timeout:
return "TIMEOUT"
status = None
for line in run.output:
if "avr-h" in line:
status = result.RESULT_TRUE_PROP
if "avr-v" in line:
status = result.RESULT_FALSE_PROP
if not status:
status = result.RESULT_ERROR
return status
| apache-2.0 | Python | |
270261ea47190e411076a7216190e1a488e7e7db | Update the extract_lhl_structures_in_a_cluster.py script | Kortemme-Lab/protein_feature_analysis,Kortemme-Lab/protein_feature_analysis | inputs/loop_helix_loop/extract_lhl_structures_in_a_cluster.py | inputs/loop_helix_loop/extract_lhl_structures_in_a_cluster.py | #!/usr/bin/env python3
'''Extract LHL structures in a cluster
Save the structures into a directory called clustered_lhl_structure.
Also save the information of insertion points.
Usage:
./extract_lhl_structures_in_a_cluster.py pdbs_path cluster_file
'''
import os
import sys
import json
import pyrosetta
from pyrosetta import rosetta
def slice_peptide(pose, start, stop):
'''Slice a peptide from a pose into a new pose.'''
seqposes = rosetta.utility.vector1_unsigned_long()
for seqpos in range(start, stop + 1):
seqposes.append(seqpos)
pp_pose = rosetta.core.pose.Pose()
rosetta.core.pose.pdbslice(pp_pose, pose, seqposes)
return pp_pose
def find_ss_region_of_position_pose(pose, seqpos):
'''Find the start and stop of the secondary structure
element that a seqpos is within.
'''
dssp_str = rosetta.core.scoring.dssp.Dssp(pose).get_dssp_secstruct()
start = seqpos
while start > 1 and dssp_str[start - 1] == dssp_str[start - 2]:
start -= 1
stop = seqpos
while stop < pose.size() and dssp_str[stop - 1] == dssp_str[stop]:
stop += 1
return start, stop
def extract_lhl_structures_in_a_cluster(pdbs_path, cluster_file):
'''Get LHL distributions'''
# Load the cluster
with open(cluster_file, 'r') as f:
cluster = json.load(f)
# Dump the lhl units in the cluster
#print(len(cluster))
flanking_residues = []
os.makedirs('clustered_lhl_structure', exist_ok=True)
for i, lhl_unit in enumerate(cluster):
pose = rosetta.core.import_pose.pose_from_file(os.path.join(pdbs_path, lhl_unit['pdb_file']))
pre_ss_start = find_ss_region_of_position_pose(pose, lhl_unit['start'] - 1)[0]
post_ss_stop = find_ss_region_of_position_pose(pose, lhl_unit['stop'] + 1)[1]
pp_pose = slice_peptide(pose, pre_ss_start, post_ss_stop)
pp_pose.dump_pdb(os.path.join('clustered_lhl_structure', 'model_{0}.pdb.gz'.format(i)))
with open(os.path.join('clustered_lhl_structure', 'insertion_points_{0}.json'.format(i)), 'w') as f:
json.dump([{'start':lhl_unit['start'] - pre_ss_start + 1, 'stop':lhl_unit['stop'] - pre_ss_start + 1}], f)
for j in range(pre_ss_start, lhl_unit['start']):
flanking_residues.append((i, pose.pdb_info().pose2pdb(j).split(' ')[0]))
for j in range(lhl_unit['stop'] + 1, post_ss_stop + 1):
flanking_residues.append((i, pose.pdb_info().pose2pdb(j).split(' ')[0]))
# Print the PyMol selection command for the flanking residues
pymol_str = 'sele flanking_residues,'
for i, j in flanking_residues:
pymol_str += ' (model_{0} and res {1})'.format(i, j)
print(pymol_str)
if __name__ == '__main__':
pdbs_path = sys.argv[1]
cluster_file = sys.argv[2]
pyrosetta.init(options='-ignore_unrecognized_res true')
extract_lhl_structures_in_a_cluster(pdbs_path, cluster_file)
| mit | Python | |
e2994c09ecf0e5b18ad587cb656ce014e009d99f | fix library name on macos | freedomtan/tensorflow,arborh/tensorflow,adit-chandra/tensorflow,ghchinoy/tensorflow,tensorflow/tensorflow-pywrap_saved_model,ppwwyyxx/tensorflow,adit-chandra/tensorflow,cxxgtxy/tensorflow,adit-chandra/tensorflow,xzturn/tensorflow,davidzchen/tensorflow,arborh/tensorflow,alsrgv/tensorflow,gunan/tensorflow,petewarden/tensorflow,renyi533/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,jhseu/tensorflow,annarev/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,aldian/tensorflow,petewarden/tensorflow,chemelnucfin/tensorflow,gautam1858/tensorflow,renyi533/tensorflow,arborh/tensorflow,karllessard/tensorflow,petewarden/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,freedomtan/tensorflow,petewarden/tensorflow,alsrgv/tensorflow,ghchinoy/tensorflow,ppwwyyxx/tensorflow,yongtang/tensorflow,ppwwyyxx/tensorflow,paolodedios/tensorflow,adit-chandra/tensorflow,gautam1858/tensorflow,jhseu/tensorflow,petewarden/tensorflow,tensorflow/tensorflow,davidzchen/tensorflow,gautam1858/tensorflow,frreiss/tensorflow-fred,jhseu/tensorflow,paolodedios/tensorflow,freedomtan/tensorflow,adit-chandra/tensorflow,chemelnucfin/tensorflow,gunan/tensorflow,ghchinoy/tensorflow,annarev/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,xzturn/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,arborh/tensorflow,xzturn/tensorflow,DavidNorman/tensorflow,freedomtan/tensorflow,frreiss/tensorflow-fred,gunan/tensorflow,chemelnucfin/tensorflow,DavidNorman/tensorflow,aam-at/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,cxxgtxy/tensorflow,alsrgv/tensorflow,freedomtan/tensorflow,gunan/tensorflow,sarvex/tensorflow,xzturn/tensorflow,annarev/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow-pywrap_saved_model,DavidNorman/tensorflow,gunan/tensorflow,davidzchen/tensorflow,aam-at/tensorflow,paolodedios/ten
sorflow,frreiss/tensorflow-fred,sarvex/tensorflow,davidzchen/tensorflow,gautam1858/tensorflow,Intel-Corporation/tensorflow,xzturn/tensorflow,arborh/tensorflow,DavidNorman/tensorflow,gunan/tensorflow,ghchinoy/tensorflow,DavidNorman/tensorflow,DavidNorman/tensorflow,annarev/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,frreiss/tensorflow-fred,renyi533/tensorflow,Intel-tensorflow/tensorflow,petewarden/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,renyi533/tensorflow,ppwwyyxx/tensorflow,petewarden/tensorflow,ghchinoy/tensorflow,adit-chandra/tensorflow,Intel-Corporation/tensorflow,arborh/tensorflow,frreiss/tensorflow-fred,alsrgv/tensorflow,DavidNorman/tensorflow,ppwwyyxx/tensorflow,yongtang/tensorflow,karllessard/tensorflow,alsrgv/tensorflow,petewarden/tensorflow,davidzchen/tensorflow,gautam1858/tensorflow,Intel-tensorflow/tensorflow,chemelnucfin/tensorflow,davidzchen/tensorflow,jhseu/tensorflow,Intel-Corporation/tensorflow,ppwwyyxx/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,adit-chandra/tensorflow,karllessard/tensorflow,arborh/tensorflow,tensorflow/tensorflow-pywrap_saved_model,ppwwyyxx/tensorflow,xzturn/tensorflow,xzturn/tensorflow,renyi533/tensorflow,chemelnucfin/tensorflow,alsrgv/tensorflow,ghchinoy/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,ghchinoy/tensorflow,alsrgv/tensorflow,annarev/tensorflow,jhseu/tensorflow,alsrgv/tensorflow,xzturn/tensorflow,petewarden/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,alsrgv/tensorflow,aam-at/tensorflow,annarev/tensorflow,renyi533/tensorflow,tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,jhseu/tensorflow,arborh/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,aldian/tensorflow,petewarden/tensorflow,cxxgtxy/tensorflow,tensorflow/tensorflow-pywra
p_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,aam-at/tensorflow,davidzchen/tensorflow,ghchinoy/tensorflow,karllessard/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,freedomtan/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,gunan/tensorflow,Intel-Corporation/tensorflow,jhseu/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,adit-chandra/tensorflow,ppwwyyxx/tensorflow,gautam1858/tensorflow,DavidNorman/tensorflow,ghchinoy/tensorflow,annarev/tensorflow,gunan/tensorflow,chemelnucfin/tensorflow,tensorflow/tensorflow-pywrap_saved_model,DavidNorman/tensorflow,aldian/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,renyi533/tensorflow,adit-chandra/tensorflow,sarvex/tensorflow,sarvex/tensorflow,gautam1858/tensorflow,sarvex/tensorflow,ghchinoy/tensorflow,arborh/tensorflow,aldian/tensorflow,xzturn/tensorflow,gunan/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,renyi533/tensorflow,DavidNorman/tensorflow,aldian/tensorflow,Intel-Corporation/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,freedomtan/tensorflow,Intel-tensorflow/tensorflow,gunan/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,annarev/tensorflow,chemelnucfin/tensorflow,chemelnucfin/tensorflow,petewarden/tensorflow,Intel-tensorflow/tensorflow,xzturn/tensorflow,karllessard/tensorflow,adit-chandra/tensorflow,freedomtan/tensorflow,ghchinoy/tensorflow,chemelnucfin/tensorflow,arborh/tensorflow,Intel-Corporation/tensorflow,Intel-tensorflow/tensorflow,aam-at/tensorflow,Intel-tensorflow/tensorflow,annarev/tensorflow,gautam1858/tensorflow,jhseu/tensorflow,ppwwyyxx/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,DavidNorman/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,davidzchen/tensorfl
ow,alsrgv/tensorflow,arborh/tensorflow,davidzchen/tensorflow,frreiss/tensorflow-fred,cxxgtxy/tensorflow,annarev/tensorflow,freedomtan/tensorflow,paolodedios/tensorflow,ppwwyyxx/tensorflow,adit-chandra/tensorflow,gautam1858/tensorflow,aldian/tensorflow,jhseu/tensorflow,renyi533/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,aam-at/tensorflow,renyi533/tensorflow,aam-at/tensorflow,Intel-Corporation/tensorflow,arborh/tensorflow,ppwwyyxx/tensorflow,alsrgv/tensorflow,karllessard/tensorflow,petewarden/tensorflow,sarvex/tensorflow,aldian/tensorflow,frreiss/tensorflow-fred,ppwwyyxx/tensorflow,aam-at/tensorflow,jhseu/tensorflow,paolodedios/tensorflow,frreiss/tensorflow-fred,davidzchen/tensorflow,karllessard/tensorflow,sarvex/tensorflow,gunan/tensorflow,renyi533/tensorflow,xzturn/tensorflow,paolodedios/tensorflow,chemelnucfin/tensorflow,xzturn/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,chemelnucfin/tensorflow,cxxgtxy/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gunan/tensorflow,frreiss/tensorflow-fred,karllessard/tensorflow,Intel-tensorflow/tensorflow,aldian/tensorflow,Intel-Corporation/tensorflow,karllessard/tensorflow,yongtang/tensorflow,cxxgtxy/tensorflow,alsrgv/tensorflow,jhseu/tensorflow,cxxgtxy/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,aam-at/tensorflow,davidzchen/tensorflow,renyi533/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,ghchinoy/tensorflow,aam-at/tensorflow,freedomtan/tensorflow,gautam1858/tensorflow,aam-at/tensorflow,frreiss/tensorflow-fred,sarvex/tensorflow,tensorflow/tensorflow,jhseu/tensorflow,DavidNorman/tensorflow,gautam1858/tensorflow,chemelnucfin/tensorflow,adit-chandra/tensorflow,cxxgtxy/tensorflow,aam-at/tensorflow,annarev/tensorflow,paolodedios/
tensorflow | tensorflow/python/platform/sysconfig.py | tensorflow/python/platform/sysconfig.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""System configuration library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as _os_path
import platform as _platform
from tensorflow.python.framework.versions import CXX11_ABI_FLAG as _CXX11_ABI_FLAG
from tensorflow.python.framework.versions import MONOLITHIC_BUILD as _MONOLITHIC_BUILD
from tensorflow.python.framework.versions import VERSION as _VERSION
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=g-import-not-at-top
@tf_export('sysconfig.get_include')
def get_include():
  """Get the directory containing the TensorFlow C++ header files.

  The headers ship inside the installed package under '<pkg>/include'.

  Returns:
    The directory as string.
  """
  # Import inside the function.
  # sysconfig is imported from the tensorflow module, so having this
  # import at the top would cause a circular import, resulting in
  # the tensorflow module missing symbols that come after sysconfig.
  import tensorflow as tf
  return _os_path.join(_os_path.dirname(tf.__file__), 'include')
@tf_export('sysconfig.get_lib')
def get_lib():
  """Get the directory containing the TensorFlow framework library.

  Returns:
    The directory as string.
  """
  # Deferred import to avoid a circular import with the tensorflow
  # package; the library sits next to the package's __init__.
  import tensorflow as tf
  return _os_path.join(_os_path.dirname(tf.__file__))
@tf_export('sysconfig.get_compile_flags')
def get_compile_flags():
  """Get the compilation flags for custom operators.

  Returns:
    The compilation flags.
  """
  # Header search path for the bundled TF headers, plus the C++ ABI flag
  # the wheel was built with so custom ops match the core library's ABI.
  return [
      '-I%s' % get_include(),
      '-D_GLIBCXX_USE_CXX11_ABI=%d' % _CXX11_ABI_FLAG,
  ]
@tf_export('sysconfig.get_link_flags')
def get_link_flags():
  """Get the link flags for custom operators.

  Returns:
    The link flags.
  """
  is_mac = _platform.system() == 'Darwin'
  ver = _VERSION.split('.')[0]
  flags = []
  if not _MONOLITHIC_BUILD:
    flags.append('-L%s' % get_lib())
    if is_mac:
      # Apple's ld64 does not support the GNU '-l:<filename>' syntax, so
      # link by library name and let the linker resolve
      # 'libtensorflow_framework.<ver>.dylib' itself.
      flags.append('-ltensorflow_framework.%s' % ver)
    else:
      # GNU ld: '-l:' links the exact file name, needed because the
      # versioned .so carries no plain 'libtensorflow_framework.so'
      # symlink in the pip package.
      flags.append('-l:libtensorflow_framework.so.%s' % ver)
  return flags
| # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""System configuration library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as _os_path
from tensorflow.python.framework.versions import CXX11_ABI_FLAG as _CXX11_ABI_FLAG
from tensorflow.python.framework.versions import MONOLITHIC_BUILD as _MONOLITHIC_BUILD
from tensorflow.python.framework.versions import VERSION as _VERSION
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=g-import-not-at-top
@tf_export('sysconfig.get_include')
def get_include():
"""Get the directory containing the TensorFlow C++ header files.
Returns:
The directory as string.
"""
# Import inside the function.
# sysconfig is imported from the tensorflow module, so having this
# import at the top would cause a circular import, resulting in
# the tensorflow module missing symbols that come after sysconfig.
import tensorflow as tf
return _os_path.join(_os_path.dirname(tf.__file__), 'include')
@tf_export('sysconfig.get_lib')
def get_lib():
"""Get the directory containing the TensorFlow framework library.
Returns:
The directory as string.
"""
import tensorflow as tf
return _os_path.join(_os_path.dirname(tf.__file__))
@tf_export('sysconfig.get_compile_flags')
def get_compile_flags():
"""Get the compilation flags for custom operators.
Returns:
The compilation flags.
"""
flags = []
flags.append('-I%s' % get_include())
flags.append('-D_GLIBCXX_USE_CXX11_ABI=%d' % _CXX11_ABI_FLAG)
return flags
@tf_export('sysconfig.get_link_flags')
def get_link_flags():
  """Get the link flags for custom operators.

  Returns:
    The link flags.
  """
  flags = []
  if not _MONOLITHIC_BUILD:
    flags.append('-L%s' % get_lib())
    # NOTE(review): '-l:<exact filename>' is a GNU ld extension; Apple's
    # ld64 does not support it, so this flag breaks custom-op linking on
    # macOS (where the library is a .dylib, not a .so, anyway).
    flags.append('-l:libtensorflow_framework.so.%s' % _VERSION.split('.')[0])
  return flags
| apache-2.0 | Python |
4a10969e62475bba4bbf7fe441a0880dd8842bc2 | Add simple type system | Inaimathi/pykit,flypy/pykit,flypy/pykit,Inaimathi/pykit,ContinuumIO/pykit,ContinuumIO/pykit | pykit/types.py | pykit/types.py | from collections import namedtuple, defaultdict, deque, Set, Mapping
from pykit.ir import parser
# Names of the core type kinds; every typetuple gets a False
# ``is_<kind>`` predicate for each of these, plus a True flag for its
# own kind, so kind tests never need isinstance checks.
# Fixes vs. the original: the loop below lowercases the attribute names
# (previously 'is_Int' was set False while 'is_int' was set True, so
# cross-kind checks raised AttributeError), and 'Bool' is spelled
# 'Boolean' to match the typetuple's own name/flag.
# NOTE(review): kinds defined below but absent from this set (Pointer,
# Dict, SumType, Partial, Function) only expose their own is_* flag --
# extend this set if cross-kind queries on them are needed.
alltypes = frozenset(['Boolean', 'Int', 'Real', 'Complex', 'Array', 'Struct',
                      'Typedef', 'Object', 'Tuple', 'List'])

def typetuple(name, elems):
    """Build a namedtuple-based type with ``is_<kind>`` predicates.

    The generated class carries ``is_<kind> = False`` for every kind in
    ``alltypes`` and ``is_<name.lower()> = True`` for its own kind.
    """
    ty = namedtuple(name, elems)
    for tyname in alltypes:
        setattr(ty, 'is_' + tyname.lower(), False)
    setattr(ty, 'is_' + name.lower(), True)
    return ty

Boolean = typetuple('Boolean', [])
Int = typetuple('Int', ['bits', 'signed'])
Real = typetuple('Real', ['bits'])
Complex = typetuple('Complex', ['base'])
Array = typetuple('Array', ['base', 'ndim', 'order']) # order in 'C', 'F', 'A'
Struct = typetuple('Struct', ['types'])
Pointer = typetuple('Pointer', ['base'])
ObjectT = typetuple('Object', [])
Tuple = typetuple('Tuple', ['base', 'count']) # count == -1 if unknown
List = typetuple('List', ['base', 'count'])
Dict = typetuple('Dict', ['key', 'value', 'count'])
SumType = typetuple('SumType', ['types'])
Partial = typetuple('Partial', ['fty', 'bound']) # bound = { 'myparam' }
Function = typetuple('Function', ['res', 'argtypes', 'argnames'])
Typedef = typetuple('Typedef', ['type', 'name'])

# ______________________________________________________________________
# Types

Bool = Boolean()

# Fixed-width integers: Int(bits, signed). Fixes vs. the original:
# every width was constructed with 8 bits, and the signed flag was
# inverted (Int* carried signed=False while UInt* carried signed=True).
Int8 = Int(8, True)
Int16 = Int(16, True)
Int32 = Int(32, True)
Int64 = Int(64, True)
UInt8 = Int(8, False)
UInt16 = Int(16, False)
UInt32 = Int(32, False)
UInt64 = Int(64, False)

Float32 = Real(32)
Float64 = Real(64)
Float128 = Real(128)

Complex64 = Complex(Float32)
Complex128 = Complex(Float64)
Complex256 = Complex(Float128)

# ______________________________________________________________________
# Convenience sets for kind/membership tests

signed_set = frozenset([Int8, Int16, Int32, Int64])
unsigned_set = frozenset([UInt8, UInt16, UInt32, UInt64])
int_set = signed_set | unsigned_set
float_set = frozenset([Float32, Float64, Float128])
complex_set = frozenset([Complex64, Complex128, Complex256])
bool_set = frozenset([Bool])
numeric_set = int_set | float_set | complex_set
scalar_set = numeric_set | bool_set

# ______________________________________________________________________
# Internal

VirtualTable = typetuple('VirtualTable', ['obj_type'])
VirtualMethod = typetuple('VirtualMethod', ['obj_type'])
# ______________________________________________________________________
# Parsing
def _from_ast(ty):
    """Convert a pykit.ir.parser Type AST node to a Type."""
    if isinstance(ty, parser.Struct):
        # Struct has a single 'types' field holding the sequence of
        # element types. The original 'Struct(*map(...))' unpacked each
        # element as a separate positional argument, which raises
        # TypeError for any struct without exactly one field.
        return Struct([_from_ast(t) for t in ty.types])
    elif isinstance(ty, parser.Pointer):
        return Pointer(_from_ast(ty.base))
    else:
        # Leaf type: resolve the name among the module-level instances
        # (e.g. 'Int32', 'Float64', 'Bool').
        return globals()[ty.name]
def parse_type(s):
    """Parse a single pykit type from its assembly representation *s*."""
    (ty_ast,) = parser.from_assembly(s, parser.type_parser)
    return _from_ast(ty_ast)
e0fccab95662dfca2c0f84b946517df4e85e2c34 | Add config for L/32 BatchEnsemble model. | google/uncertainty-baselines | baselines/jft/experiments/vit_be/jft300m_be_vit_large_32.py | baselines/jft/experiments/vit_be/jft300m_be_vit_large_32.py | # coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
r"""ViT + BatchEnsemble.
"""
# pylint: enable=line-too-long
import ml_collections
def get_config():
  """Return the ml_collections config for ViT-L/32 + BatchEnsemble on JFT-300M.

  NOTE(review): the original docstring said "Batch size 4096 fits on DF4x4",
  but config.batch_size below is 1024 -- confirm which is intended.
  """
  config = ml_collections.ConfigDict()
  config.seed = 0
  # JFT parameters.
  config.dataset = 'jft/entity:1.0.0'
  config.val_split = 'test[:49511]' # aka tiny_test/test[:5%] in task_adapt
  config.train_split = 'train' # task_adapt used train+validation so +64167
  config.num_classes = 18291
  config.init_head_bias = -10.0 # ~= ln(1/18k) ~= ln(1/num_classes)
  config.loss_to_apply = 'softmax_xent'
  # Preprocessing pipeline shared by train and eval (big_vision pp spec).
  pp_common = '|value_range(-1, 1)'
  pp_common += f'|onehot({config.num_classes})'
  # To use ancestor 'smearing', use this line instead:
  # pp_common += f'|onehot({config.num_classes}, key='labels_extended', key_result='labels') # pylint: disable=line-too-long
  pp_common += '|keep(["image", "labels"])'
  config.pp_train = 'decode_jpeg_and_inception_crop(224)|flip_lr' + pp_common
  config.pp_eval = 'decode|resize_small(256)|central_crop(224)' + pp_common
  config.shuffle_buffer_size = 250_000 # Per host, so small-ish is ok.
  # Model parameters (ViT-Large: 24 layers, 1024 hidden, 16 heads, 32px patch).
  config.model_name = 'PatchTransformerBE'
  config.model = ml_collections.ConfigDict()
  config.model.patch_size = (32, 32)
  config.model.hidden_size = 1024
  config.model.representation_size = 1024
  config.model.classifier = 'token'
  config.model.transformer = ml_collections.ConfigDict()
  config.model.transformer.num_layers = 24
  config.model.transformer.dropout_rate = 0.0
  config.model.transformer.mlp_dim = 4096
  config.model.transformer.num_heads = 16
  config.model.transformer.attention_dropout_rate = 0.0
  # BatchEnsemble parameters.
  config.model.transformer.be_layers = (21, 23)
  config.model.transformer.ens_size = 4
  config.model.transformer.random_sign_init = 0.5
  # Multiplier applied to the learning rate of the BatchEnsemble fast weights.
  config.fast_weight_lr_multiplier = 1.0
  # Optimizer parameters.
  config.optim_name = 'Adam'
  config.optim = ml_collections.ConfigDict(dict(beta1=0.9, beta2=0.999))
  config.weight_decay = 0.1
  config.clip_grad_norm = None
  # Learning-rate schedule: linear warmup then linear decay to linear_end.
  config.lr = ml_collections.ConfigDict()
  config.lr.base = 1e-3 # LR likely has to be lower for larger models!
  config.lr.warmup_steps = 10_000
  config.lr.decay_type = 'linear'
  config.lr.linear_end = 1e-5
  config.batch_size = 1024 # Global batch size.
  config.batch_size_eval = 1024 # Global batch size.
  config.num_epochs = 5
  # Logging / checkpointing cadence (in steps).
  config.log_training_steps = 50
  config.log_eval_steps = 1000
  config.checkpoint_steps = 5000
  config.checkpoint_timeout = 10
  config.prefetch_to_device = 2
  config.trial = 0
  config.args = {}
  return config
def get_sweep(hyper):
  """Return the hyperparameter sweep: the product of the axes listed below."""
  axes = [
      # Use this as a sensible sweep over other hyperparameters.
      hyper.sweep('config.model.transformer.ens_size', [3]),
      hyper.sweep('config.num_epochs', [7]),
      # Apply BatchEnsemble to the last two encoder blocks only.
      hyper.sweep('config.model.transformer.be_layers', [(21, 23)]),
      hyper.sweep('config.model.transformer.random_sign_init', [-0.5, 0.5]),
      hyper.sweep('config.fast_weight_lr_multiplier', [0.5, 1.0, 2.0]),
      hyper.sweep('config.lr.base', [6e-4]),
  ]
  return hyper.product(axes)
| apache-2.0 | Python | |
61ee58ae699c83574538ffedc4b4965acdb27f0c | Add basic Markov Chain generator. | Fifty-Nine/github_ebooks | Markov.py | Markov.py | from collections import deque, defaultdict
from random import choice
class SequenceGenerator:
    """Order-N Markov chain over arbitrary hashable items.

    The chain state is the tuple of the last ``order`` items seen; ``None``
    serves both as start-of-sequence padding and as the end-of-sequence
    marker stored in the transition table.
    """

    def __init__(self, order):
        # Number of preceding items that make up one chain state.
        self.order = order
        # Maps state tuple -> list of observed successors (may contain None).
        self.table = defaultdict(list)

    def addSample(self, sequence):
        """Record every transition of *sequence* into the chain table."""
        # Removed the unused running-length counter, which also shadowed the
        # builtin len().
        state = deque([None] * self.order, self.order)
        for item in sequence:
            self.table[tuple(state)].append(item)
            state.append(item)
        # Terminal entry: from the final state the chain may end.
        self.table[tuple(state)].append(None)

    def next(self, state):
        """Pick a random successor of *state*; KeyError if state is unseen."""
        # BUG FIX: dict.get() returned None for an unseen state, so
        # random.choice raised an opaque TypeError; index explicitly so the
        # error is a clear KeyError instead.
        return choice(self.table[tuple(state)])

    def generate(self):
        """Yield one randomly generated sequence, item by item."""
        state = deque([None] * self.order, self.order)
        while True:
            item = self.next(state)
            if item is None:
                # BUG FIX: raising StopIteration inside a generator is
                # converted to RuntimeError on Python 3.7+ (PEP 479);
                # 'return' is the correct way to finish.
                return
            state.append(item)
            yield item
| mit | Python | |
3df411232c6dda6692118bc4348099778c4b681a | Create db.py | Python-IoT/Smart-IoT-Planting-System,Python-IoT/Smart-IoT-Planting-System | cloud/db/db.py | cloud/db/db.py | #!/usr/bin/env python
import pymysql #Python3
# Smoke test at import time: connect to the local MySQL server and print its
# version, then close the connection.
# NOTE(review): pymysql.connect positional args are (host, user, password,
# database) -- 'sips' as user and 'root' as password looks swapped; confirm.
db = pymysql.connect("localhost","sips","root","zaijian" )
cursor = db.cursor()
cursor.execute("SELECT VERSION()")
data = cursor.fetchone()
print ("Database version : %s " % data)
db.close()
def create_table():
    """(Re)create the EMPLOYEE demo table, dropping any existing copy.

    The connection is always closed, even if a statement raises.
    """
    db = pymysql.connect("localhost","sips","root","zaijian" )
    try:
        cursor = db.cursor()
        cursor.execute("DROP TABLE IF EXISTS EMPLOYEE")
        sql = """CREATE TABLE EMPLOYEE (
                 FIRST_NAME  CHAR(20) NOT NULL,
                 LAST_NAME  CHAR(20),
                 AGE INT,
                 SEX CHAR(1),
                 INCOME FLOAT )"""
        cursor.execute(sql)
    finally:
        # BUG FIX: the connection leaked if DROP/CREATE raised.
        db.close()
def db_insert():
    """Insert one demo row into EMPLOYEE, rolling back on failure.

    The connection is always closed, even if commit/rollback raises.
    """
    db = pymysql.connect("localhost","sips","root","zaijian" )
    try:
        cursor = db.cursor()
        sql = """INSERT INTO EMPLOYEE(FIRST_NAME,
                 LAST_NAME, AGE, SEX, INCOME)
                 VALUES ('Mac', 'Mohan', 20, 'M', 2000)"""
        try:
            cursor.execute(sql)
            db.commit()
        except Exception:
            # BUG FIX: the original bare 'except:' also swallowed
            # KeyboardInterrupt/SystemExit; catch Exception and undo the
            # partial transaction.
            db.rollback()
    finally:
        # BUG FIX: close the connection even if connect-time setup raised.
        db.close()
| mit | Python | |
b373eaab5918488292075c962f4374dc8815c395 | Add script to generate partition ids. | medallia/voldemort,HB-SI/voldemort,voldemort/voldemort,HB-SI/voldemort,cshaxu/voldemort,squarY/voldemort,medallia/voldemort,cshaxu/voldemort,stotch/voldemort,rickbw/voldemort,birendraa/voldemort,voldemort/voldemort,birendraa/voldemort,PratikDeshpande/voldemort,HB-SI/voldemort,null-exception/voldemort,mabh/voldemort,jalkjaer/voldemort,jalkjaer/voldemort,FelixGV/voldemort,bitti/voldemort,voldemort/voldemort,LeoYao/voldemort,squarY/voldemort,cshaxu/voldemort,arunthirupathi/voldemort,jeffpc/voldemort,dallasmarlow/voldemort,jeffpc/voldemort,birendraa/voldemort,bitti/voldemort,arunthirupathi/voldemort,bitti/voldemort,LeoYao/voldemort,arunthirupathi/voldemort,null-exception/voldemort,null-exception/voldemort,LeoYao/voldemort,dallasmarlow/voldemort,cshaxu/voldemort,squarY/voldemort,PratikDeshpande/voldemort,jeffpc/voldemort,voldemort/voldemort,dallasmarlow/voldemort,mabh/voldemort,stotch/voldemort,mabh/voldemort,jwlent55/voldemort,arunthirupathi/voldemort,dallasmarlow/voldemort,birendraa/voldemort,jwlent55/voldemort,medallia/voldemort,rickbw/voldemort,jalkjaer/voldemort,gnb/voldemort,stotch/voldemort,jalkjaer/voldemort,jalkjaer/voldemort,HB-SI/voldemort,bhasudha/voldemort,bhasudha/voldemort,cshaxu/voldemort,squarY/voldemort,jeffpc/voldemort,PratikDeshpande/voldemort,jalkjaer/voldemort,voldemort/voldemort,null-exception/voldemort,rickbw/voldemort,dallasmarlow/voldemort,arunthirupathi/voldemort,medallia/voldemort,medallia/voldemort,gnb/voldemort,birendraa/voldemort,FelixGV/voldemort,HB-SI/voldemort,medallia/voldemort,null-exception/voldemort,arunthirupathi/voldemort,bitti/voldemort,rickbw/voldemort,bhasudha/voldemort,mabh/voldemort,stotch/voldemort,jeffpc/voldemort,FelixGV/voldemort,PratikDeshpande/voldemort,gnb/voldemort,bhasudha/voldemort,jwlent55/voldemort,mabh/voldemort,squarY/voldemort,LeoYao/voldemort,jwlent55/voldemort,bitti/voldemort,FelixGV/voldemort,PratikDeshpande/voldemort,dallasmar
low/voldemort,FelixGV/voldemort,jalkjaer/voldemort,squarY/voldemort,gnb/voldemort,LeoYao/voldemort,PratikDeshpande/voldemort,birendraa/voldemort,LeoYao/voldemort,bhasudha/voldemort,bitti/voldemort,stotch/voldemort,gnb/voldemort,stotch/voldemort,FelixGV/voldemort,bitti/voldemort,arunthirupathi/voldemort,mabh/voldemort,jwlent55/voldemort,cshaxu/voldemort,bhasudha/voldemort,jeffpc/voldemort,HB-SI/voldemort,voldemort/voldemort,null-exception/voldemort,gnb/voldemort,squarY/voldemort,rickbw/voldemort,rickbw/voldemort,jwlent55/voldemort,voldemort/voldemort,FelixGV/voldemort | test/integration/generate_partitions.py | test/integration/generate_partitions.py | import sys
import random
# Emit <partitions> blocks for a Voldemort cluster.xml.  Partition ids
# 0 .. nodes*partitions-1 are shuffled with a fixed seed (so the output is
# reproducible) and dealt out contiguously, 'partitions' ids per node.
# NOTE: this is Python 2 code (print statements, xrange).
if len(sys.argv) != 3:
    print >> sys.stderr, "USAGE: python generate_partitions.py nodes partitions_per_node"
    sys.exit()
# Number of partition ids printed per output row.
FORMAT_WIDTH = 10
nodes = int(sys.argv[1])
partitions = int(sys.argv[2])
# One global partition id per (node, partition) slot.
ids = range(nodes * partitions)
# use known seed so this is repeatable
random.seed(92873498274)
random.shuffle(ids)
# Node i receives the sorted slice ids[i*partitions:(i+1)*partitions].
for i in xrange(nodes):
    print
    print 'node', i
    print '<partitions>'
    print '  ',
    node_ids = sorted(ids[i*partitions:(i+1)*partitions])
    for j in xrange(len(node_ids)):
        print str(node_ids[j]) + ',',
        # Wrap the line after every FORMAT_WIDTH ids.
        if j % FORMAT_WIDTH == FORMAT_WIDTH - 1:
            print
            print '  ',
    print '</partitions>'
| apache-2.0 | Python | |
805f5abc98864d5543f9a5d94f9279926a8730fc | Add pppd.py back | frank-deng/retro-works,frank-deng/retro-works,frank-deng/retro-works,frank-deng/retro-works,frank-deng/retro-works,frank-deng/retro-works | misc/pppd.py | misc/pppd.py | #!/usr/bin/env python3
import os, sys, time, subprocess, pty, fcntl, socket, select, argparse;
# Command-line interface: bind host/port for the TCP listener, plus any
# remaining arguments which are forwarded verbatim to pppd.
parser = argparse.ArgumentParser();
parser.add_argument(
    '--host',
    '-H',
    help='Specify binding host for the PPP server.',
    default=''
);
parser.add_argument(
    '--port',
    '-P',
    # Default 23 is the telnet port, so stock telnet/dial-up clients connect
    # without extra configuration.
    help='Specify port for the PPP server.',
    type=int,
    default=23
);
# Everything after the recognized options is passed straight to pppd.
parser.add_argument('pppd_options', nargs=argparse.REMAINDER, help='Options for pppd');
args = parser.parse_args();
class Terminal:
    """A pppd process attached to a pseudo-terminal.

    One Terminal exists per connected client; bytes from the client are
    written to the pty master and pppd's output is read back from it.
    """
    # Becomes True after the first write(); see write() below.
    __active = False;
    def __init__(self):
        # Create the pty pair; the master side is ours, the slave side is
        # handed to pppd via its /proc/<pid>/fd path.
        self.__master, self.__slave = pty.openpty();
        # Non-blocking reads so read() can return b'' instead of stalling.
        fcntl.fcntl(self.__master, fcntl.F_SETFL, fcntl.fcntl(self.__master, fcntl.F_GETFL) | os.O_NONBLOCK);
        self.__startProc();
    def __startProc(self):
        # (Re)spawn pppd attached to our pty slave.
        global args;
        ptyPath=f"/proc/{os.getpid()}/fd/{self.__slave}";
        self.__proc=subprocess.Popen(['pppd', ptyPath]+args.pppd_options);
    def close(self):
        # NOTE(review): wait() blocks until pppd exits on its own; there is
        # no terminate()/kill() here -- confirm pppd exits when the pty
        # closes, otherwise close() can hang.
        self.__active = False;
        self.__proc.wait();
        os.close(self.__slave);
        os.close(self.__master);
    def read(self):
        """Return pending pppd output, restarting pppd if it has exited."""
        if (None != self.__proc.poll()):
            self.__startProc();
        try:
            return os.read(self.__master, 65536);
        except BlockingIOError:
            # Nothing buffered on the pty right now.
            return b'';
    def write(self, data):
        """Forward client bytes to pppd, restarting pppd if it has exited."""
        if (None != self.__proc.poll()):
            self.__startProc();
        if self.__active:
            os.write(self.__master, data);
        else:
            # The very first chunk from a client is deliberately dropped --
            # presumably telnet negotiation bytes that would confuse pppd.
            # TODO(review): confirm this is intentional.
            self.__active = True;
# select() bookkeeping: sockets to poll for reads/writes, and the Terminal
# backing each client connection, keyed by str(fileno).
inputs = [];
outputs = [];
terms = {};
try:
    # Non-blocking listening socket; SO_REUSEADDR allows quick restarts.
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM);
    server.setblocking(0);
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1);
    server.bind((args.host, args.port));
    server.listen(5);
    inputs.append(server);
except OSError as e:
    # Typically EADDRINUSE or a permission error on a privileged port.
    print(str(e));
    exit(1);
def _drop_client(client):
    """Tear down one client: close its Terminal, stop polling, close socket."""
    key = str(client.fileno())
    if key in terms:
        terms.pop(key).close()
    if client in inputs:
        inputs.remove(client)
    if client in outputs:
        outputs.remove(client)
    client.close()

try:
    # Main event loop: accept new clients, shuttle bytes between each client
    # socket and its pppd Terminal.
    while True:
        time.sleep(0.1)
        readable, writable, exceptional = select.select(inputs, outputs, inputs)
        for s in readable:
            if s is server:
                # New client: poll it and give it a fresh pppd Terminal.
                conn, addr = s.accept()
                conn.setblocking(0)
                inputs.append(conn)
                terms[str(conn.fileno())] = Terminal()
            else:
                try:
                    data = s.recv(1024)
                    if data:
                        terms[str(s.fileno())].write(data)
                        if s not in outputs:
                            outputs.append(s)
                    else:
                        # BUG FIX: an empty recv means the peer closed its
                        # end; the original left the socket in 'inputs',
                        # busy-looping forever.
                        _drop_client(s)
                except (ConnectionAbortedError, ConnectionResetError, KeyError):
                    _drop_client(s)
        for s in writable:
            try:
                s.sendall(terms[str(s.fileno())].read())
            except (BrokenPipeError, KeyError):
                _drop_client(s)
        for s in exceptional:
            # BUG FIX: the original called terms.pop() with no key, which is
            # a TypeError on a dict; _drop_client pops the entry for this
            # socket's fd.
            _drop_client(s)
except KeyboardInterrupt:
    pass
finally:
    # Shut down every remaining pppd instance before exiting.
    for key, term in terms.items():
        term.close()
| mit | Python | |
28897fa74e1883388712704233e85ab60fe4d823 | Create Paddle.py | petehopkins/Untitled-CSET1100-Project | Paddle.py | Paddle.py | import pygame
class Paddle(pygame.sprite.Sprite):
    """Horizontal player paddle rendered as a solid rectangle."""

    def __init__(self, window):
        pygame.sprite.Sprite.__init__(self)
        # Horizontal movement limits derived from the window width.
        # NOTE(review): the limits are computed but move() does not clamp to
        # them -- confirm whether clamping belongs here or in the caller.
        self.limitLeft = 20
        self.limitRight = window.get_width() - self.limitLeft
        self.width = 75
        self.height = 25
        self.color = (0, 0 , 96)
        self.image = pygame.Surface((self.width, self.height))
        self.image.fill(self.color)
        self.rect = self.image.get_rect()

    def getPosition(self):
        """Return the paddle's top-left corner as an (x, y) tuple."""
        return (self.rect.x, self.rect.y)

    def move(self, x):
        """Move the paddle so its left edge sits at *x* (no clamping)."""
        self.rect.x = x

    def drawPaddle(self, window):
        """Draw the paddle onto *window* at its current position.

        BUG FIX: Surface.blit() takes (source_surface, dest); passing the
        sprite itself raised a TypeError.
        """
        window.blit(self.image, self.rect)
| mit | Python | |
759dbe3f6b601be7d5560b610b6b429fdca4d8b8 | add gevent echo | supercocoa/HelloBackend,supercocoa/HelloBackend | net/python/gevent/simple/echo/echo.py | net/python/gevent/simple/echo/echo.py | import socket
import gevent
# Bind address and port for the echo server.
HOST = 'localhost'
PORT = 50009
def handleReq(conn, addr):
    # Echo loop for one client: send every received chunk straight back
    # until recv() returns '' (peer closed), then close our side.
    # NOTE: Python 2 code (print statement).
    print 'handleReq'
    while 1:
        data = conn.recv(1024)
        if not data:
            # Empty recv => peer closed its end; stop echoing.
            break
        conn.sendall(data)
    conn.close()
def createSvr():
    # Accept loop: one gevent greenlet per client, all running handleReq.
    # NOTE(review): no SO_REUSEADDR, so quick restarts may fail with
    # EADDRINUSE; also the blocking accept() only cooperates with gevent if
    # sockets are monkey-patched -- confirm how this is launched.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind((HOST, PORT))
    sock.listen(1)
    while True:
        conn, addr = sock.accept()
        print 'conn by ', addr
        gevent.spawn(handleReq, conn, addr)
# Run the echo server only when executed as a script (not on import).
if __name__ == '__main__':
    createSvr()
| apache-2.0 | Python | |
11b6764eb4acef700e81a07ba7d68327374c8f20 | Test inactive_users job. | PyBossa/pybossa,jean/pybossa,OpenNewsLabs/pybossa,stefanhahmann/pybossa,jean/pybossa,geotagx/pybossa,inteligencia-coletiva-lsd/pybossa,stefanhahmann/pybossa,OpenNewsLabs/pybossa,PyBossa/pybossa,Scifabric/pybossa,geotagx/pybossa,Scifabric/pybossa,inteligencia-coletiva-lsd/pybossa | test/test_jobs/test_engage_old_users.py | test/test_jobs/test_engage_old_users.py | # -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2014 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from pybossa.jobs import get_inactive_users_jobs
from default import Test, with_context
from factories import TaskRunFactory
from pybossa.core import user_repo
# from mock import patch, MagicMock
class TestEngageUsers(Test):
    """Tests for the get_inactive_users_jobs() job generator."""

    @with_context
    def test_get_inactive_users_jobs(self):
        """Test JOB get inactive users returns empty list."""
        jobs = get_inactive_users_jobs()
        msg = "There should not be any job."
        assert len(jobs) == 0, msg

    @with_context
    def test_get_inactive_users_returns_jobs(self):
        """Test JOB get inactive users returns a list of jobs."""
        # A task run finished years in the past marks its user as inactive.
        tr = TaskRunFactory.create(finish_time="2010-07-07T17:23:45.714210")
        user = user_repo.get(tr.user_id)
        jobs = get_inactive_users_jobs()
        # BUG FIX: the failure message said "There should not be one job.",
        # contradicting the len(jobs) == 1 assertion it annotates.
        msg = "There should be exactly one job."
        assert len(jobs) == 1, msg
        job = jobs[0]
        args = job['args'][0]
        # NOTE(review): 'quaterly' mirrors the queue name used by
        # pybossa.jobs -- if that spelling is ever corrected, update both.
        assert job['queue'] == 'quaterly', job['queue']
        assert len(args['recipients']) == 1
        assert args['recipients'][0] == user.email_addr, args['recipients'][0]
| agpl-3.0 | Python | |
5caf67f27d82689babf5dcf0a234dfe3c261ff9c | Implement a framework for data retention policies | akuseru/zulip,AZtheAsian/zulip,KJin99/zulip,EasonYi/zulip,rishig/zulip,seapasulli/zulip,gigawhitlocks/zulip,dotcool/zulip,wdaher/zulip,dattatreya303/zulip,arpith/zulip,aps-sids/zulip,natanovia/zulip,wangdeshui/zulip,fw1121/zulip,JanzTam/zulip,natanovia/zulip,mansilladev/zulip,littledogboy/zulip,bowlofstew/zulip,dhcrzf/zulip,blaze225/zulip,kokoar/zulip,bowlofstew/zulip,ericzhou2008/zulip,he15his/zulip,Suninus/zulip,Galexrt/zulip,kaiyuanheshang/zulip,developerfm/zulip,kokoar/zulip,Suninus/zulip,shaunstanislaus/zulip,nicholasbs/zulip,jainayush975/zulip,stamhe/zulip,amallia/zulip,avastu/zulip,tiansiyuan/zulip,bitemyapp/zulip,luyifan/zulip,zachallaun/zulip,pradiptad/zulip,luyifan/zulip,krtkmj/zulip,stamhe/zulip,m1ssou/zulip,timabbott/zulip,fw1121/zulip,johnny9/zulip,gigawhitlocks/zulip,tommyip/zulip,hayderimran7/zulip,ericzhou2008/zulip,seapasulli/zulip,Cheppers/zulip,johnnygaddarr/zulip,souravbadami/zulip,tbutter/zulip,aakash-cr7/zulip,zacps/zulip,nicholasbs/zulip,Juanvulcano/zulip,swinghu/zulip,umkay/zulip,aliceriot/zulip,johnnygaddarr/zulip,Galexrt/zulip,timabbott/zulip,ApsOps/zulip,PaulPetring/zulip,reyha/zulip,qq1012803704/zulip,pradiptad/zulip,SmartPeople/zulip,kou/zulip,hayderimran7/zulip,cosmicAsymmetry/zulip,jerryge/zulip,PaulPetring/zulip,jeffcao/zulip,sonali0901/zulip,Qgap/zulip,tiansiyuan/zulip,avastu/zulip,karamcnair/zulip,Galexrt/zulip,amanharitsh123/zulip,Frouk/zulip,Frouk/zulip,zofuthan/zulip,schatt/zulip,Qgap/zulip,so0k/zulip,umkay/zulip,ashwinirudrappa/zulip,zorojean/zulip,wdaher/zulip,Juanvulcano/zulip,thomasboyt/zulip,dotcool/zulip,ahmadassaf/zulip,zofuthan/zulip,lfranchi/zulip,jphilipsen05/zulip,hj3938/zulip,joyhchen/zulip,shaunstanislaus/zulip,babbage/zulip,bssrdf/zulip,Vallher/zulip,suxinde2009/zulip,shrikrishnaholla/zulip,ikasumiwt/zulip,huangkebo/zulip,hengqujushi/zulip,Juanvulcano/zulip,codeKonami/zulip,MariaFaBella85/zulip,akuseru/zulip,wangdeshui/zul
ip,guiquanz/zulip,jessedhillon/zulip,jeffcao/zulip,brockwhittaker/zulip,LeeRisk/zulip,umkay/zulip,natanovia/zulip,jimmy54/zulip,sup95/zulip,brainwane/zulip,stamhe/zulip,johnny9/zulip,esander91/zulip,jimmy54/zulip,suxinde2009/zulip,zwily/zulip,adnanh/zulip,KingxBanana/zulip,brockwhittaker/zulip,jrowan/zulip,willingc/zulip,Batterfii/zulip,PhilSk/zulip,Cheppers/zulip,kaiyuanheshang/zulip,pradiptad/zulip,ikasumiwt/zulip,seapasulli/zulip,aakash-cr7/zulip,jrowan/zulip,littledogboy/zulip,kou/zulip,johnnygaddarr/zulip,huangkebo/zulip,samatdav/zulip,stamhe/zulip,arpith/zulip,atomic-labs/zulip,natanovia/zulip,akuseru/zulip,sonali0901/zulip,KJin99/zulip,hackerkid/zulip,peiwei/zulip,alliejones/zulip,MariaFaBella85/zulip,TigorC/zulip,PaulPetring/zulip,hayderimran7/zulip,kou/zulip,PaulPetring/zulip,wweiradio/zulip,ahmadassaf/zulip,susansls/zulip,PhilSk/zulip,zacps/zulip,m1ssou/zulip,andersk/zulip,christi3k/zulip,JanzTam/zulip,amyliu345/zulip,themass/zulip,Drooids/zulip,swinghu/zulip,peguin40/zulip,mdavid/zulip,wavelets/zulip,kaiyuanheshang/zulip,ryanbackman/zulip,wangdeshui/zulip,levixie/zulip,dhcrzf/zulip,grave-w-grave/zulip,codeKonami/zulip,Cheppers/zulip,udxxabp/zulip,zwily/zulip,jeffcao/zulip,itnihao/zulip,dotcool/zulip,arpitpanwar/zulip,eeshangarg/zulip,rht/zulip,shrikrishnaholla/zulip,vabs22/zulip,souravbadami/zulip,ryansnowboarder/zulip,jonesgithub/zulip,yuvipanda/zulip,Galexrt/zulip,reyha/zulip,EasonYi/zulip,punchagan/zulip,mohsenSy/zulip,Diptanshu8/zulip,schatt/zulip,Frouk/zulip,shaunstanislaus/zulip,showell/zulip,verma-varsha/zulip,firstblade/zulip,samatdav/zulip,babbage/zulip,ipernet/zulip,Vallher/zulip,Suninus/zulip,guiquanz/zulip,itnihao/zulip,ApsOps/zulip,proliming/zulip,tbutter/zulip,zorojean/zulip,dawran6/zulip,proliming/zulip,sharmaeklavya2/zulip,krtkmj/zulip,bastianh/zulip,he15his/zulip,johnny9/zulip,kou/zulip,KJin99/zulip,bastianh/zulip,armooo/zulip,gigawhitlocks/zulip,mansilladev/zulip,krtkmj/zulip,joyhchen/zulip,aps-sids/zulip,gigawhitlocks/zulip,dnmfarrell/z
ulip,thomasboyt/zulip,levixie/zulip,babbage/zulip,punchagan/zulip,moria/zulip,jessedhillon/zulip,johnny9/zulip,shrikrishnaholla/zulip,dwrpayne/zulip,he15his/zulip,adnanh/zulip,mahim97/zulip,andersk/zulip,vaidap/zulip,ericzhou2008/zulip,PaulPetring/zulip,adnanh/zulip,aliceriot/zulip,wavelets/zulip,christi3k/zulip,he15his/zulip,noroot/zulip,udxxabp/zulip,avastu/zulip,he15his/zulip,bssrdf/zulip,arpitpanwar/zulip,shaunstanislaus/zulip,blaze225/zulip,ericzhou2008/zulip,kaiyuanheshang/zulip,eastlhu/zulip,shrikrishnaholla/zulip,KJin99/zulip,ApsOps/zulip,souravbadami/zulip,Drooids/zulip,Frouk/zulip,JanzTam/zulip,jackrzhang/zulip,dxq-git/zulip,suxinde2009/zulip,verma-varsha/zulip,developerfm/zulip,niftynei/zulip,zacps/zulip,umkay/zulip,KingxBanana/zulip,hackerkid/zulip,amyliu345/zulip,akuseru/zulip,KingxBanana/zulip,luyifan/zulip,zhaoweigg/zulip,dotcool/zulip,dattatreya303/zulip,zulip/zulip,DazWorrall/zulip,shrikrishnaholla/zulip,atomic-labs/zulip,jainayush975/zulip,deer-hope/zulip,dotcool/zulip,MariaFaBella85/zulip,atomic-labs/zulip,verma-varsha/zulip,themass/zulip,isht3/zulip,schatt/zulip,JanzTam/zulip,zhaoweigg/zulip,esander91/zulip,adnanh/zulip,vakila/zulip,aps-sids/zulip,littledogboy/zulip,sonali0901/zulip,dxq-git/zulip,showell/zulip,deer-hope/zulip,MariaFaBella85/zulip,guiquanz/zulip,nicholasbs/zulip,aps-sids/zulip,sharmaeklavya2/zulip,SmartPeople/zulip,noroot/zulip,hj3938/zulip,niftynei/zulip,rishig/zulip,souravbadami/zulip,nicholasbs/zulip,bowlofstew/zulip,easyfmxu/zulip,tommyip/zulip,timabbott/zulip,joshisa/zulip,tommyip/zulip,praveenaki/zulip,kokoar/zulip,amyliu345/zulip,calvinleenyc/zulip,peguin40/zulip,jerryge/zulip,krtkmj/zulip,zhaoweigg/zulip,udxxabp/zulip,synicalsyntax/zulip,bssrdf/zulip,stamhe/zulip,susansls/zulip,easyfmxu/zulip,eastlhu/zulip,zwily/zulip,timabbott/zulip,Jianchun1/zulip,Qgap/zulip,yocome/zulip,PhilSk/zulip,gkotian/zulip,umkay/zulip,hafeez3000/zulip,hj3938/zulip,guiquanz/zulip,punchagan/zulip,bitemyapp/zulip,MariaFaBella85/zulip,wangdeshui/zuli
p,technicalpickles/zulip,punchagan/zulip,aps-sids/zulip,sharmaeklavya2/zulip,jphilipsen05/zulip,armooo/zulip,levixie/zulip,arpitpanwar/zulip,pradiptad/zulip,karamcnair/zulip,seapasulli/zulip,mahim97/zulip,bitemyapp/zulip,vikas-parashar/zulip,LAndreas/zulip,grave-w-grave/zulip,shaunstanislaus/zulip,wweiradio/zulip,LeeRisk/zulip,Diptanshu8/zulip,hayderimran7/zulip,peiwei/zulip,jerryge/zulip,jimmy54/zulip,ikasumiwt/zulip,noroot/zulip,Suninus/zulip,karamcnair/zulip,gigawhitlocks/zulip,Drooids/zulip,synicalsyntax/zulip,vaidap/zulip,peiwei/zulip,showell/zulip,gigawhitlocks/zulip,Suninus/zulip,Batterfii/zulip,saitodisse/zulip,bowlofstew/zulip,aliceriot/zulip,ipernet/zulip,andersk/zulip,MariaFaBella85/zulip,johnny9/zulip,Gabriel0402/zulip,zulip/zulip,Drooids/zulip,KJin99/zulip,themass/zulip,itnihao/zulip,LAndreas/zulip,thomasboyt/zulip,qq1012803704/zulip,levixie/zulip,calvinleenyc/zulip,ashwinirudrappa/zulip,glovebx/zulip,Batterfii/zulip,sup95/zulip,sharmaeklavya2/zulip,ikasumiwt/zulip,babbage/zulip,guiquanz/zulip,eeshangarg/zulip,shaunstanislaus/zulip,lfranchi/zulip,proliming/zulip,andersk/zulip,praveenaki/zulip,kaiyuanheshang/zulip,saitodisse/zulip,saitodisse/zulip,amyliu345/zulip,hafeez3000/zulip,mdavid/zulip,dnmfarrell/zulip,LeeRisk/zulip,ahmadassaf/zulip,johnnygaddarr/zulip,bitemyapp/zulip,zorojean/zulip,zulip/zulip,punchagan/zulip,themass/zulip,jessedhillon/zulip,xuanhan863/zulip,natanovia/zulip,dxq-git/zulip,esander91/zulip,Frouk/zulip,themass/zulip,reyha/zulip,firstblade/zulip,PhilSk/zulip,tdr130/zulip,jeffcao/zulip,adnanh/zulip,wavelets/zulip,hj3938/zulip,dawran6/zulip,LAndreas/zulip,Batterfii/zulip,rht/zulip,tommyip/zulip,aakash-cr7/zulip,praveenaki/zulip,zachallaun/zulip,mansilladev/zulip,natanovia/zulip,AZtheAsian/zulip,isht3/zulip,vabs22/zulip,hj3938/zulip,aakash-cr7/zulip,timabbott/zulip,deer-hope/zulip,tiansiyuan/zulip,zorojean/zulip,willingc/zulip,tdr130/zulip,j831/zulip,ufosky-server/zulip,zorojean/zulip,tiansiyuan/zulip,verma-varsha/zulip,noroot/zulip,mdav
id/zulip,JPJPJPOPOP/zulip,qq1012803704/zulip,hengqujushi/zulip,jonesgithub/zulip,hafeez3000/zulip,hafeez3000/zulip,hackerkid/zulip,glovebx/zulip,Juanvulcano/zulip,deer-hope/zulip,dawran6/zulip,joyhchen/zulip,babbage/zulip,dnmfarrell/zulip,xuanhan863/zulip,zofuthan/zulip,arpith/zulip,gkotian/zulip,easyfmxu/zulip,atomic-labs/zulip,tommyip/zulip,zorojean/zulip,susansls/zulip,hackerkid/zulip,zachallaun/zulip,tdr130/zulip,christi3k/zulip,easyfmxu/zulip,showell/zulip,xuxiao/zulip,arpitpanwar/zulip,TigorC/zulip,proliming/zulip,dhcrzf/zulip,aakash-cr7/zulip,bluesea/zulip,LAndreas/zulip,isht3/zulip,Gabriel0402/zulip,j831/zulip,mdavid/zulip,MariaFaBella85/zulip,huangkebo/zulip,bluesea/zulip,Cheppers/zulip,jonesgithub/zulip,dwrpayne/zulip,ryansnowboarder/zulip,punchagan/zulip,easyfmxu/zulip,bssrdf/zulip,MayB/zulip,mansilladev/zulip,dawran6/zulip,SmartPeople/zulip,Cheppers/zulip,mohsenSy/zulip,zulip/zulip,paxapy/zulip,natanovia/zulip,hengqujushi/zulip,bluesea/zulip,saitodisse/zulip,seapasulli/zulip,SmartPeople/zulip,yuvipanda/zulip,ryanbackman/zulip,EasonYi/zulip,qq1012803704/zulip,arpitpanwar/zulip,dnmfarrell/zulip,tiansiyuan/zulip,dxq-git/zulip,avastu/zulip,mdavid/zulip,bastianh/zulip,calvinleenyc/zulip,paxapy/zulip,vikas-parashar/zulip,qq1012803704/zulip,voidException/zulip,calvinleenyc/zulip,zofuthan/zulip,guiquanz/zulip,levixie/zulip,mdavid/zulip,jphilipsen05/zulip,jainayush975/zulip,zacps/zulip,sonali0901/zulip,voidException/zulip,glovebx/zulip,DazWorrall/zulip,rishig/zulip,technicalpickles/zulip,bitemyapp/zulip,ikasumiwt/zulip,huangkebo/zulip,moria/zulip,firstblade/zulip,reyha/zulip,tbutter/zulip,MayB/zulip,Batterfii/zulip,saitodisse/zulip,Gabriel0402/zulip,qq1012803704/zulip,lfranchi/zulip,jackrzhang/zulip,JPJPJPOPOP/zulip,krtkmj/zulip,hafeez3000/zulip,vabs22/zulip,dhcrzf/zulip,aliceriot/zulip,proliming/zulip,amallia/zulip,Qgap/zulip,avastu/zulip,hustlzp/zulip,willingc/zulip,bastianh/zulip,nicholasbs/zulip,glovebx/zulip,pradiptad/zulip,luyifan/zulip,xuxiao/zulip,kaiyuan
heshang/zulip,vabs22/zulip,susansls/zulip,proliming/zulip,shubhamdhama/zulip,Qgap/zulip,brockwhittaker/zulip,so0k/zulip,babbage/zulip,kokoar/zulip,jessedhillon/zulip,thomasboyt/zulip,souravbadami/zulip,PaulPetring/zulip,dwrpayne/zulip,esander91/zulip,shrikrishnaholla/zulip,karamcnair/zulip,m1ssou/zulip,developerfm/zulip,alliejones/zulip,paxapy/zulip,Jianchun1/zulip,codeKonami/zulip,moria/zulip,fw1121/zulip,jackrzhang/zulip,lfranchi/zulip,AZtheAsian/zulip,samatdav/zulip,DazWorrall/zulip,arpith/zulip,DazWorrall/zulip,yuvipanda/zulip,Galexrt/zulip,DazWorrall/zulip,tdr130/zulip,bastianh/zulip,umkay/zulip,tbutter/zulip,moria/zulip,ryanbackman/zulip,TigorC/zulip,dattatreya303/zulip,jessedhillon/zulip,kou/zulip,he15his/zulip,jphilipsen05/zulip,schatt/zulip,mahim97/zulip,jrowan/zulip,seapasulli/zulip,zacps/zulip,fw1121/zulip,dnmfarrell/zulip,dhcrzf/zulip,akuseru/zulip,adnanh/zulip,PaulPetring/zulip,fw1121/zulip,mahim97/zulip,jainayush975/zulip,brockwhittaker/zulip,Diptanshu8/zulip,bluesea/zulip,synicalsyntax/zulip,ufosky-server/zulip,thomasboyt/zulip,Gabriel0402/zulip,TigorC/zulip,amanharitsh123/zulip,vikas-parashar/zulip,shubhamdhama/zulip,ipernet/zulip,samatdav/zulip,stamhe/zulip,EasonYi/zulip,cosmicAsymmetry/zulip,aps-sids/zulip,nicholasbs/zulip,ryanbackman/zulip,bowlofstew/zulip,itnihao/zulip,bowlofstew/zulip,KingxBanana/zulip,bitemyapp/zulip,zorojean/zulip,paxapy/zulip,m1ssou/zulip,Vallher/zulip,kokoar/zulip,hustlzp/zulip,hj3938/zulip,Vallher/zulip,andersk/zulip,nicholasbs/zulip,amallia/zulip,rishig/zulip,dawran6/zulip,saitodisse/zulip,dnmfarrell/zulip,ryanbackman/zulip,LAndreas/zulip,luyifan/zulip,itnihao/zulip,armooo/zulip,ufosky-server/zulip,hustlzp/zulip,swinghu/zulip,grave-w-grave/zulip,jimmy54/zulip,mohsenSy/zulip,brockwhittaker/zulip,littledogboy/zulip,amanharitsh123/zulip,dwrpayne/zulip,zofuthan/zulip,MayB/zulip,eastlhu/zulip,bssrdf/zulip,sup95/zulip,ikasumiwt/zulip,vakila/zulip,wdaher/zulip,gkotian/zulip,fw1121/zulip,synicalsyntax/zulip,AZtheAsian/zulip,yuvipa
nda/zulip,mansilladev/zulip,he15his/zulip,brainwane/zulip,sup95/zulip,amanharitsh123/zulip,alliejones/zulip,xuxiao/zulip,gigawhitlocks/zulip,moria/zulip,vakila/zulip,rht/zulip,ipernet/zulip,Drooids/zulip,stamhe/zulip,armooo/zulip,praveenaki/zulip,schatt/zulip,JPJPJPOPOP/zulip,praveenaki/zulip,JanzTam/zulip,blaze225/zulip,jeffcao/zulip,wweiradio/zulip,willingc/zulip,blaze225/zulip,gkotian/zulip,vaidap/zulip,m1ssou/zulip,calvinleenyc/zulip,xuanhan863/zulip,vaidap/zulip,RobotCaleb/zulip,MayB/zulip,peiwei/zulip,Frouk/zulip,Diptanshu8/zulip,verma-varsha/zulip,hengqujushi/zulip,avastu/zulip,wdaher/zulip,johnny9/zulip,sonali0901/zulip,shubhamdhama/zulip,dotcool/zulip,mohsenSy/zulip,itnihao/zulip,kokoar/zulip,technicalpickles/zulip,susansls/zulip,RobotCaleb/zulip,rishig/zulip,zofuthan/zulip,amanharitsh123/zulip,praveenaki/zulip,wdaher/zulip,seapasulli/zulip,JanzTam/zulip,peguin40/zulip,andersk/zulip,jphilipsen05/zulip,SmartPeople/zulip,Jianchun1/zulip,glovebx/zulip,RobotCaleb/zulip,esander91/zulip,eeshangarg/zulip,joyhchen/zulip,rht/zulip,eastlhu/zulip,gkotian/zulip,wavelets/zulip,grave-w-grave/zulip,tbutter/zulip,atomic-labs/zulip,jerryge/zulip,johnnygaddarr/zulip,johnnygaddarr/zulip,eastlhu/zulip,Vallher/zulip,wweiradio/zulip,Qgap/zulip,jeffcao/zulip,mansilladev/zulip,yocome/zulip,zulip/zulip,jerryge/zulip,yocome/zulip,dwrpayne/zulip,littledogboy/zulip,voidException/zulip,amallia/zulip,bowlofstew/zulip,joyhchen/zulip,jessedhillon/zulip,themass/zulip,dhcrzf/zulip,tdr130/zulip,LeeRisk/zulip,dotcool/zulip,LeeRisk/zulip,jessedhillon/zulip,sup95/zulip,mahim97/zulip,firstblade/zulip,cosmicAsymmetry/zulip,shubhamdhama/zulip,tbutter/zulip,niftynei/zulip,wavelets/zulip,vaidap/zulip,bssrdf/zulip,niftynei/zulip,so0k/zulip,EasonYi/zulip,isht3/zulip,ikasumiwt/zulip,ufosky-server/zulip,synicalsyntax/zulip,levixie/zulip,tommyip/zulip,souravbadami/zulip,shaunstanislaus/zulip,Frouk/zulip,vabs22/zulip,jackrzhang/zulip,JPJPJPOPOP/zulip,glovebx/zulip,jainayush975/zulip,firstblade/zulip,JPJPJ
POPOP/zulip,RobotCaleb/zulip,developerfm/zulip,ApsOps/zulip,alliejones/zulip,m1ssou/zulip,sonali0901/zulip,jrowan/zulip,christi3k/zulip,aliceriot/zulip,codeKonami/zulip,atomic-labs/zulip,Vallher/zulip,xuxiao/zulip,Gabriel0402/zulip,vakila/zulip,jonesgithub/zulip,zofuthan/zulip,vaidap/zulip,rishig/zulip,suxinde2009/zulip,willingc/zulip,so0k/zulip,tbutter/zulip,KJin99/zulip,eeshangarg/zulip,MayB/zulip,udxxabp/zulip,udxxabp/zulip,Diptanshu8/zulip,codeKonami/zulip,ryansnowboarder/zulip,zachallaun/zulip,blaze225/zulip,Gabriel0402/zulip,KJin99/zulip,Suninus/zulip,so0k/zulip,hustlzp/zulip,hafeez3000/zulip,dxq-git/zulip,pradiptad/zulip,shrikrishnaholla/zulip,esander91/zulip,EasonYi/zulip,zachallaun/zulip,esander91/zulip,zulip/zulip,zhaoweigg/zulip,voidException/zulip,zhaoweigg/zulip,Cheppers/zulip,zulip/zulip,vakila/zulip,jonesgithub/zulip,Drooids/zulip,johnnygaddarr/zulip,karamcnair/zulip,KingxBanana/zulip,RobotCaleb/zulip,alliejones/zulip,zwily/zulip,wweiradio/zulip,ryansnowboarder/zulip,rht/zulip,ahmadassaf/zulip,rishig/zulip,Jianchun1/zulip,KingxBanana/zulip,j831/zulip,xuanhan863/zulip,dnmfarrell/zulip,arpitpanwar/zulip,zwily/zulip,DazWorrall/zulip,MayB/zulip,yuvipanda/zulip,atomic-labs/zulip,so0k/zulip,schatt/zulip,jackrzhang/zulip,ipernet/zulip,reyha/zulip,paxapy/zulip,amyliu345/zulip,niftynei/zulip,krtkmj/zulip,wangdeshui/zulip,Diptanshu8/zulip,deer-hope/zulip,proliming/zulip,Galexrt/zulip,peguin40/zulip,codeKonami/zulip,bssrdf/zulip,jimmy54/zulip,ufosky-server/zulip,aakash-cr7/zulip,technicalpickles/zulip,zwily/zulip,LeeRisk/zulip,ericzhou2008/zulip,wavelets/zulip,technicalpickles/zulip,brainwane/zulip,swinghu/zulip,hackerkid/zulip,brainwane/zulip,RobotCaleb/zulip,joshisa/zulip,hustlzp/zulip,tdr130/zulip,eeshangarg/zulip,Qgap/zulip,itnihao/zulip,arpith/zulip,tiansiyuan/zulip,willingc/zulip,johnny9/zulip,christi3k/zulip,qq1012803704/zulip,suxinde2009/zulip,JPJPJPOPOP/zulip,hackerkid/zulip,samatdav/zulip,ashwinirudrappa/zulip,JanzTam/zulip,AZtheAsian/zulip,xuxiao/zuli
p,zhaoweigg/zulip,vakila/zulip,dattatreya303/zulip,kokoar/zulip,LAndreas/zulip,armooo/zulip,Gabriel0402/zulip,xuxiao/zulip,krtkmj/zulip,vikas-parashar/zulip,udxxabp/zulip,grave-w-grave/zulip,adnanh/zulip,showell/zulip,codeKonami/zulip,mansilladev/zulip,yuvipanda/zulip,hafeez3000/zulip,themass/zulip,vakila/zulip,Jianchun1/zulip,akuseru/zulip,jimmy54/zulip,amallia/zulip,cosmicAsymmetry/zulip,peiwei/zulip,moria/zulip,bastianh/zulip,sup95/zulip,pradiptad/zulip,grave-w-grave/zulip,praveenaki/zulip,synicalsyntax/zulip,eastlhu/zulip,eastlhu/zulip,joshisa/zulip,ashwinirudrappa/zulip,glovebx/zulip,jerryge/zulip,arpith/zulip,isht3/zulip,rht/zulip,andersk/zulip,joshisa/zulip,jrowan/zulip,bastianh/zulip,reyha/zulip,LAndreas/zulip,gkotian/zulip,timabbott/zulip,tiansiyuan/zulip,niftynei/zulip,dattatreya303/zulip,cosmicAsymmetry/zulip,susansls/zulip,dxq-git/zulip,noroot/zulip,peiwei/zulip,wweiradio/zulip,showell/zulip,hj3938/zulip,technicalpickles/zulip,karamcnair/zulip,j831/zulip,amallia/zulip,yuvipanda/zulip,blaze225/zulip,brainwane/zulip,brainwane/zulip,bluesea/zulip,wdaher/zulip,showell/zulip,sharmaeklavya2/zulip,hengqujushi/zulip,punchagan/zulip,peguin40/zulip,lfranchi/zulip,ryansnowboarder/zulip,lfranchi/zulip,shubhamdhama/zulip,eeshangarg/zulip,levixie/zulip,so0k/zulip,wavelets/zulip,ahmadassaf/zulip,RobotCaleb/zulip,SmartPeople/zulip,noroot/zulip,jeffcao/zulip,karamcnair/zulip,aps-sids/zulip,thomasboyt/zulip,developerfm/zulip,wangdeshui/zulip,guiquanz/zulip,jerryge/zulip,ApsOps/zulip,hackerkid/zulip,zhaoweigg/zulip,wdaher/zulip,lfranchi/zulip,tommyip/zulip,ApsOps/zulip,hustlzp/zulip,gkotian/zulip,hayderimran7/zulip,timabbott/zulip,Juanvulcano/zulip,eeshangarg/zulip,isht3/zulip,kaiyuanheshang/zulip,ashwinirudrappa/zulip,dattatreya303/zulip,hayderimran7/zulip,ahmadassaf/zulip,samatdav/zulip,umkay/zulip,dhcrzf/zulip,jackrzhang/zulip,dwrpayne/zulip,TigorC/zulip,Cheppers/zulip,saitodisse/zulip,bluesea/zulip,brockwhittaker/zulip,developerfm/zulip,jrowan/zulip,Jianchun1/zulip,ufo
sky-server/zulip,huangkebo/zulip,littledogboy/zulip,PhilSk/zulip,Batterfii/zulip,cosmicAsymmetry/zulip,j831/zulip,ipernet/zulip,hustlzp/zulip,calvinleenyc/zulip,joshisa/zulip,xuanhan863/zulip,ryansnowboarder/zulip,ipernet/zulip,TigorC/zulip,luyifan/zulip,jimmy54/zulip,firstblade/zulip,babbage/zulip,voidException/zulip,tdr130/zulip,noroot/zulip,huangkebo/zulip,shubhamdhama/zulip,ericzhou2008/zulip,suxinde2009/zulip,hayderimran7/zulip,willingc/zulip,synicalsyntax/zulip,yocome/zulip,voidException/zulip,zachallaun/zulip,armooo/zulip,avastu/zulip,mohsenSy/zulip,joshisa/zulip,armooo/zulip,ryanbackman/zulip,xuxiao/zulip,suxinde2009/zulip,Juanvulcano/zulip,thomasboyt/zulip,akuseru/zulip,aliceriot/zulip,deer-hope/zulip,verma-varsha/zulip,luyifan/zulip,m1ssou/zulip,Drooids/zulip,swinghu/zulip,ashwinirudrappa/zulip,EasonYi/zulip,ryansnowboarder/zulip,technicalpickles/zulip,ahmadassaf/zulip,joyhchen/zulip,peguin40/zulip,yocome/zulip,ashwinirudrappa/zulip,mohsenSy/zulip,wweiradio/zulip,developerfm/zulip,easyfmxu/zulip,jonesgithub/zulip,voidException/zulip,vikas-parashar/zulip,ApsOps/zulip,amyliu345/zulip,mdavid/zulip,jonesgithub/zulip,amallia/zulip,littledogboy/zulip,Suninus/zulip,dxq-git/zulip,mahim97/zulip,LeeRisk/zulip,moria/zulip,alliejones/zulip,Vallher/zulip,jainayush975/zulip,j831/zulip,kou/zulip,swinghu/zulip,hengqujushi/zulip,udxxabp/zulip,xuanhan863/zulip,easyfmxu/zulip,aliceriot/zulip,zacps/zulip,amanharitsh123/zulip,rht/zulip,peiwei/zulip,Batterfii/zulip,vabs22/zulip,schatt/zulip,brainwane/zulip,ericzhou2008/zulip,christi3k/zulip,zwily/zulip,jackrzhang/zulip,swinghu/zulip,yocome/zulip,alliejones/zulip,sharmaeklavya2/zulip,bluesea/zulip,deer-hope/zulip,zachallaun/zulip,bitemyapp/zulip,xuanhan863/zulip,AZtheAsian/zulip,DazWorrall/zulip,arpitpanwar/zulip,dawran6/zulip,paxapy/zulip,hengqujushi/zulip,shubhamdhama/zulip,kou/zulip,yocome/zulip,dwrpayne/zulip,Galexrt/zulip,huangkebo/zulip,PhilSk/zulip,ufosky-server/zulip,fw1121/zulip,joshisa/zulip,wangdeshui/zulip,vikas-para
shar/zulip,MayB/zulip,jphilipsen05/zulip,firstblade/zulip | zephyr/retention_policy.py | zephyr/retention_policy.py | """
Implements the per-domain data retention policy.
The goal is to have a single place where the policy is defined. This is
complicated by needing to apply this policy both to the database and to log
files. Additionally, we want to use an efficient query for the database,
rather than iterating through messages one by one.
The code in this module does not actually remove anything; it just identifies
which items should be kept or removed.
"""
import sys
import operator
from django.utils import timezone
from django.db.models import Q
from datetime import datetime, timedelta
from zephyr.models import Realm, UserMessage
# Each domain has a maximum age for retained messages.
#
# FIXME: Move this into the database.
max_age = {
    # Messages for this domain older than 31 days are eligible for removal.
    'customer1.invalid': timedelta(days=31),
}
def should_expunge_from_log(msg, now):
    """Should a particular log entry be expunged?

    msg: a log entry dict
    now: current time for purposes of determining log entry age"""
    # This function will be called many times, but we want to compare all
    # entries against a consistent "current time".  So the caller passes
    # that time as a parameter.

    # FIXME: Yet another place where we compute the domain manually.
    # See #260.
    user = msg.get('sender_email')
    if user is None:
        # NOTE(review): presumably a legacy key used by older log entries
        # before 'sender_email' existed -- confirm against old logs.
        user = msg.get('user')
    if user is None:
        # Avoid printing the entire message, but give enough information to find it later.
        print >>sys.stderr, "WARNING: Can't get user for message at", msg['timestamp']
        # If we can't determine the domain, err on the side of keeping.
        return False
    domain = user.split('@', 1)[1]
    if domain not in max_age:
        # Keep forever.
        return False
    # 'timestamp' is seconds since the epoch (local time, per fromtimestamp).
    age = now - datetime.fromtimestamp(msg['timestamp'])
    return age > max_age[domain]
def get_UserMessages_to_expunge():
    """Fetch all UserMessages which should be expunged from the database.

    After deleting these, you may also want to call
    Message.remove_unreachable()."""
    # Unlike retain_in_log, this handles all messages at once, so we
    # use the actual current time.
    now = timezone.now()
    # Build one Q object per domain that has a retention policy: messages
    # in that realm whose publication date is past the domain's max_age.
    queries = [Q(user_profile__realm = realm,
                 message__pub_date__lt = now - max_age[realm.domain])
               for realm in Realm.objects.all()
               if realm.domain in max_age]
    if not queries:
        # No domain has a policy; return an empty queryset rather than
        # reducing over an empty list below.
        return UserMessage.objects.none()
    # Return all objects matching any of the queries in 'queries'.
    return UserMessage.objects.filter(reduce(operator.or_, queries))
| apache-2.0 | Python | |
f4646f863cd72d4f788dfe35efe478dc85707d07 | add tool to generate lang to font table | moyogo/nototools,anthrotype/nototools,googlefonts/nototools,googlei18n/nototools,dougfelt/nototools,googlei18n/nototools,anthrotype/nototools,dougfelt/nototools,googlei18n/nototools,anthrotype/nototools,googlefonts/nototools,dougfelt/nototools,googlefonts/nototools,moyogo/nototools,moyogo/nototools,googlefonts/nototools,googlefonts/nototools | nototools/generate_lang_font_table.py | nototools/generate_lang_font_table.py | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generate a csv with the following columns:
- bcp-47 language code (minimal)
- script (most likely script for the code)
- style name (Serif, Sans, Naskh...)
- ui status (UI, <empty>)
- font name
This will start with a canned list of languages for now. We could
generate a more comprehensive list from our data.
"""
import collections
import os
from os import path
from nototools import cldr_data
from nototools import noto_fonts
# Canned list of BCP-47 language codes to emit rows for (see the module
# docstring); a more comprehensive list could be generated from CLDR data.
LANGS = (
    'af,am,ar,az,bg,bn,bs,ca,cs,da,de,el,en,en-US,es,es-419,et,eu,fa,fi,'
    'fil,fr,gl,gu,hi,hr,hu,hy,id,is,it,iw,ja,ka,kk,km,kn,ko,ky,lo,lt,lv,'
    'mk,ml,mn,mr,ms,my,ne,nl,no,pa,pl,pt-BR,pt-PT,ro,ru,si,sk,sl,sq,sr,'
    'sv,sw,ta,te,th,tl,tr,uk,ur,uz,vi,zh-CN,zh-TW,zu').split(',')
def accept_font(f):
    """Return truthy if *f* is a font we want in the table.

    Keeps regular-weight, upright Noto ttf/otf fonts, skipping Nastaliq
    (unsuitable for maps), Historic scripts (tool limitation), and
    full-coverage CJK fonts (only language-specific subsets are kept).
    """
    if f.family != 'Noto':  # exclude Arimo, Tinos, Cousine
        return False
    if f.style == 'Nastaliq':  # not suitable for maps
        return False
    if f.script == 'HST':  # exclude Historic, tool limitation
        return False
    if f.weight != 'Regular' or f.slope:  # limit members: Regular upright only
        return False
    if f.fmt not in ('ttf', 'otf'):  # only support these formats
        return False
    # CJK fonts are accepted only as 'small' language-specific subsets.
    return not f.is_cjk or f.subset
# All Noto fonts that pass the filter above, grouped into families
# (one entry per family name).
fonts = filter(accept_font, noto_fonts.get_noto_fonts())
families = noto_fonts.get_families(fonts).values()
def write_csv_header(outfile):
    # Column layout must stay in sync with the rows emitted below.
    print >> outfile, 'Code,Script,Style,UI,Font Name'
def write_csv(outfile, lang, script, style, ui, members):
    # Emit one CSV row for the first member; an empty list writes nothing.
    if members:
        print >> outfile, ','.join(
            [lang, script, style, ui,
             noto_fonts.get_font_family_name(members[0].filepath)])
with open('lang_to_font_table.csv', 'w') as outfile:
    write_csv_header(outfile)
    for lang in LANGS:
        # Most likely script for the language, per CLDR likely-subtags data.
        script = cldr_data.get_likely_script(lang)
        found_font = False
        for family in sorted(families, key=lambda f: f.name):
            if script not in noto_fonts.script_key_to_scripts(
                    family.rep_member.script):
                continue
            found_font = True
            # Prefer hinted builds when available.
            members = family.hinted_members or family.unhinted_members
            ui_members = [m for m in members if m.is_UI]
            non_ui_members = [m for m in members if not m.is_UI]
            # accept_font kept only Regular weight, so at most one of each.
            assert len(ui_members) <= 1
            assert len(non_ui_members) <= 1
            write_csv(outfile, lang, script, family.rep_member.style, '',
                      non_ui_members)
            write_csv(outfile, lang, script, family.rep_member.style, 'UI',
                      ui_members)
        if not found_font:
            print '## no font found for lang %s' % lang
| apache-2.0 | Python | |
b1bc68413efcf757dc6430f33820d5ff27e22269 | Enhance testing for macro support | ssarangi/numba,stefanseefeld/numba,IntelLabs/numba,GaZ3ll3/numba,IntelLabs/numba,stuartarchibald/numba,pombredanne/numba,stonebig/numba,numba/numba,gdementen/numba,cpcloud/numba,jriehl/numba,numba/numba,GaZ3ll3/numba,cpcloud/numba,gmarkall/numba,pitrou/numba,seibert/numba,jriehl/numba,pombredanne/numba,gdementen/numba,IntelLabs/numba,stonebig/numba,pombredanne/numba,stuartarchibald/numba,stonebig/numba,pitrou/numba,GaZ3ll3/numba,ssarangi/numba,gmarkall/numba,gmarkall/numba,cpcloud/numba,ssarangi/numba,seibert/numba,sklam/numba,seibert/numba,ssarangi/numba,numba/numba,stonebig/numba,stefanseefeld/numba,jriehl/numba,seibert/numba,jriehl/numba,gdementen/numba,pitrou/numba,gmarkall/numba,GaZ3ll3/numba,gdementen/numba,pitrou/numba,sklam/numba,GaZ3ll3/numba,stefanseefeld/numba,pombredanne/numba,cpcloud/numba,IntelLabs/numba,sklam/numba,stuartarchibald/numba,ssarangi/numba,cpcloud/numba,seibert/numba,pitrou/numba,sklam/numba,stuartarchibald/numba,gmarkall/numba,gdementen/numba,pombredanne/numba,IntelLabs/numba,stuartarchibald/numba,stonebig/numba,numba/numba,stefanseefeld/numba,sklam/numba,numba/numba,jriehl/numba,stefanseefeld/numba | numba/cuda/tests/cudapy/test_macro.py | numba/cuda/tests/cudapy/test_macro.py | from __future__ import print_function, division, absolute_import
import numpy as np
from timeit import default_timer as time
from numba import cuda, float32
from numba.cuda.testing import unittest
# Module-level constants consumed as compile-time shared-array shapes by
# the kernels below.
GLOBAL_CONSTANT = 5
GLOBAL_CONSTANT_2 = 6
GLOBAL_CONSTANT_TUPLE = 5, 6
def udt_global_constants(A):
    # Shape is a module-level int constant -- legal for the macro.
    buf = cuda.shared.array(shape=GLOBAL_CONSTANT, dtype=float32)
    idx = cuda.grid(1)
    A[idx] = buf[idx]


def udt_global_build_tuple(A):
    # Shape is a tuple built from module-level constants.
    buf = cuda.shared.array(shape=(GLOBAL_CONSTANT, GLOBAL_CONSTANT_2),
                            dtype=float32)
    row, col = cuda.grid(2)
    A[row, col] = buf[row, col]


def udt_global_build_list(A):
    # Shape is a list built from module-level constants.
    buf = cuda.shared.array(shape=[GLOBAL_CONSTANT, GLOBAL_CONSTANT_2],
                            dtype=float32)
    row, col = cuda.grid(2)
    A[row, col] = buf[row, col]


def udt_global_constant_tuple(A):
    # Shape is a module-level tuple constant.
    buf = cuda.shared.array(shape=GLOBAL_CONSTANT_TUPLE, dtype=float32)
    row, col = cuda.grid(2)
    A[row, col] = buf[row, col]


def udt_invalid_1(A):
    # Shape depends on a runtime value -- must be rejected at compile time.
    buf = cuda.shared.array(shape=A[0], dtype=float32)
    idx = cuda.grid(1)
    A[idx] = buf[idx]


def udt_invalid_2(A):
    # Shape is only partially constant -- must also be rejected.
    buf = cuda.shared.array(shape=(1, A[0]), dtype=float32)
    row, col = cuda.grid(2)
    A[row, col] = buf[row, col]
class TestMacro(unittest.TestCase):
    """Check the cuda.shared.array 'shape' macro argument.

    Compile-time constants (ints, tuples, lists, module globals) must be
    accepted; shapes that depend on runtime values must raise ValueError.
    """

    def getarg(self):
        # Minimal device-transferable argument for the kernels above.
        return np.array(100, dtype=np.float32)

    def test_global_constants(self):
        udt = cuda.jit((float32[:],))(udt_global_constants)
        udt(self.getarg())

    def test_global_build_tuple(self):
        udt = cuda.jit((float32[:, :],))(udt_global_build_tuple)
        udt(self.getarg())

    def test_global_build_list(self):
        # Bug fix: this previously compiled udt_global_build_tuple
        # (copy-paste error), leaving udt_global_build_list untested.
        udt = cuda.jit((float32[:, :],))(udt_global_build_list)
        udt(self.getarg())

    def test_global_constant_tuple(self):
        udt = cuda.jit((float32[:, :],))(udt_global_constant_tuple)
        udt(self.getarg())

    def test_invalid_1(self):
        # A fully runtime-dependent shape must be rejected.
        with self.assertRaises(ValueError) as raises:
            udt = cuda.jit((float32[:],))(udt_invalid_1)
            udt(self.getarg())
        self.assertIn("Argument 'shape' must be a constant at",
                      str(raises.exception))

    def test_invalid_2(self):
        # A partially runtime-dependent shape must also be rejected.
        with self.assertRaises(ValueError) as raises:
            udt = cuda.jit((float32[:, :],))(udt_invalid_2)
            udt(self.getarg())
        self.assertIn("Argument 'shape' must be a constant at",
                      str(raises.exception))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| bsd-2-clause | Python | |
5ff659676f7c1164aeadba0ce7f7f41824bad5fd | update : minor changes | black-perl/ptop | ptop/plugins/cpu_sensor.py | ptop/plugins/cpu_sensor.py | '''
CPU sensor plugin
Generates the CPU usage stats
'''
from ptop.core import Plugin
import psutil
class CPUSensor(Plugin):
    """Sensor that reports per-core and average CPU utilization.

    Each update() fills currentValue with a 'text' part (core count plus
    one key per core) and a 'graph' part (average usage percentage).
    """

    def __init__(self, **kwargs):
        super(CPUSensor, self).__init__(**kwargs)

    # overriding the update method
    def update(self):
        # Per-core usage percentages from psutil.
        per_core = psutil.cpu_percent(percpu=True)
        core_count = len(per_core)
        # Text part: many (key, value) pairs, one per displayed field.
        text_part = {'number_of_cores': str(core_count)}
        for core_idx, usage in enumerate(per_core, start=1):
            text_part['core{0}'.format(core_idx)] = usage
        self.currentValue['text'] = text_part
        # Graph part: one averaged percentage value.
        self.currentValue['graph'] = {'percentage': sum(per_core) / core_count}
# Module-level sensor instance.
# NOTE(review): presumably picked up by ptop's plugin discovery when this
# module is imported -- confirm against the plugin loader.
cpu_sensor = CPUSensor(name='CPU',sensorType='chart',interval=0.5)
| mit | Python | |
c7557de36799bfbe8318e05e370cee0ad09262e4 | Add connection handler | devicehive/devicehive-python | devicehive/connection_handler.py | devicehive/connection_handler.py | from devicehive.handlers.base_handler import BaseHandler
from devicehive.api import Token
class ConnectionHandler(BaseHandler):
    """Bridges transport-level events to the user-supplied handler.

    Re-authenticates the token on every (re)connect, but forwards
    handle_connected() to the wrapped handler only once, on the first
    successful connection.
    """

    def __init__(self, transport, handler_class, handler_options,
                 authentication):
        BaseHandler.__init__(self, transport)
        self._handle_connected = False
        self._token = Token(transport, authentication)
        self._handler = handler_class(transport, self._token, handler_options)

    def handle_connected(self):
        self._token.authenticate()
        if self._handle_connected:
            return
        self._handler.handle_connected()
        self._handle_connected = True

    def handle_event(self, event):
        # TODO: handle events here.
        pass

    def handle_closed(self):
        # TODO: handle closed here.
        pass
| apache-2.0 | Python | |
c405f4afe48a6dfa0fa592c2b1a2ab6a621377ec | Add sample_crop() | raviqqe/tensorflow-extenteten,raviqqe/tensorflow-extenteten | nn/random.py | nn/random.py | import tensorflow as tf
from .util import static_shape
def sample_crop(xs, n):
return tf.random_crop(xs, [n, *static_shape(xs)[1:]])
| unlicense | Python | |
27a7bf62ee2ef7b42a659261f36890eb922c9747 | Create ex_read_log.py | mariuszha/SPSE | Module_2/Lesson_1/ex_read_log.py | Module_2/Lesson_1/ex_read_log.py | #!/usr/bin/env python
# ex_read_log.py by mariuszha
#
# Purpose:
# Find all the logs in the /var/log/syslog which pertain to CMD
# and print them out selectively
#
# Stream the syslog line by line; iterating the file object avoids loading
# the whole log into memory.
with open("/var/log/syslog") as f:
    for line in f:
        # Cron job command executions are logged with a "CMD" marker.
        if "CMD" in line:
            print line
| mit | Python | |
8953b336dfcb8bd6c69b2af8e960a215a47838f8 | Add reverse a list of characters in place | HKuz/Test_Code | Problems/reverseStringInPlace.py | Problems/reverseStringInPlace.py | #!/Applications/anaconda/envs/Python3/bin
def main():
    """Exercise reverse_string_in_place on a few sample inputs."""
    cases = [None, [''], ['f', 'o', 'o', ' ', 'b', 'a', 'r']]
    for case in cases:
        print('Testing: {}'.format(case))
        print('Result: {}'.format(reverse_string_in_place(case)))
    return 0
def reverse_string_in_place(chars):
    """Reverse a list of characters in place and return it.

    A ``None`` input passes through unchanged.
    """
    if chars is None:
        return None
    lo, hi = 0, len(chars) - 1
    while lo < hi:
        chars[lo], chars[hi] = chars[hi], chars[lo]
        lo += 1
        hi -= 1
    return chars
# Run the demo when executed as a script.
if __name__ == '__main__':
    main()
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.