text stringlengths 0 1.05M | meta dict |
|---|---|
__author__ = 'david'
from typing import List, Iterable
from multiprocessing import Pool
import time
def compute(a: float) -> float:
    """
    Score a single value.

    The intermediate steps collapse algebraically to float(a), but the exact
    operation order is preserved so floating-point behaviour is unchanged.
    """
    halved = ((0.0 + (a * 2 + 1)) - 1) / 2
    return halved ** 2 + 1 + 0.6 ** halved
def f1():
    """
    First approach: a plain for-loop appending each result to a local list.
    (Deliberately kept as an explicit append loop -- this script benchmarks
    the loop against the map/comprehension/Pool variants below.)
    """
    out = []
    for value in range(-1000, 10000000):
        out.append(compute(value))
    return out
def f3():
    """
    Variant that appends into the module-level ``res3`` list instead of
    returning a local one (benchmarks global-list access cost).
    """
    for value in range(-1000, 10000000):
        res3.append(compute(value))
# --- Benchmark driver --------------------------------------------------------
# Each section times one strategy over the same range and prints the label,
# the elapsed seconds, and a checksum (the sum of that strategy's results).

# f1: explicit loop + append.
start_time = time.time()
res: List[float] = f1()
print("f1", time.time() - start_time, sum(res))

# f2: lazy map() -- it must be forced with sum() BEFORE stopping the clock,
# otherwise the timing only measures creation of the iterator.
start_time = time.time()
res2: Iterable[float] = map(compute, range(-1000, 10000000))
total2 = sum(res2)
print("f2", time.time() - start_time, total2)  # BUG FIX: previously printed sum(res)

# f3: append into the module-level res3 list.
start_time = time.time()
res3: List[float] = []
f3()
print("f3", time.time() - start_time, sum(res3))  # BUG FIX: previously printed sum(res)

# f4: list comprehension.
start_time = time.time()
res4: List[float] = [compute(x) for x in range(-1000, 10000000)]
print("f4", time.time() - start_time, sum(res4))  # BUG FIX: previously printed sum(res)

# f5: process pool; the context manager guarantees the workers are cleaned up
# (the original pool was never closed or joined).
start_time = time.time()
with Pool() as _p:
    res5: List[float] = _p.map(compute, range(-1000, 10000000))
print("f5", time.time() - start_time, sum(res5))  # BUG FIX: previously printed sum(res)
| {
"repo_name": "sixhat/swipe",
"path": "python/map_speed.py",
"copies": "1",
"size": "1312",
"license": "bsd-2-clause",
"hash": -4580548918781323300,
"line_mean": 21.6206896552,
"line_max": 103,
"alpha_frac": 0.6089939024,
"autogenerated": false,
"ratio": 2.9351230425055927,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4044116944905593,
"avg_score": null,
"num_lines": null
} |
class dstat_plugin(dstat):
    """
    Read and Write average wait times of block devices.
    Displays the average read and write wait times of block devices
    """
    def __init__(self):
        # Column headers: read-await / write-await, shown as floats ('f').
        self.nick = ('rawait', 'wawait')
        self.type = 'f'
        self.width = 4
        self.scale = 1
        # Matches partition/mapper/md/mmcblk/VxVM names; vars() SKIPS anything
        # this matches so only whole block devices are monitored.
        self.diskfilter = re.compile('^([hsv]d[a-z]+\d+|cciss/c\d+d\d+p\d+|dm-\d+|md\d+|mmcblk\d+p\d0|VxVM\d+)$')
        self.open('/proc/diskstats')
        self.cols = 1
        # Per-device counter snapshot: ios = completed requests,
        # ticks = milliseconds spent servicing them.
        self.struct = dict( rd_ios=0, wr_ios=0, rd_ticks=0, wr_ticks=0 )
    def discover(self, *objlist):
        # Return every device in /proc/diskstats showing any activity,
        # plus any names explicitly requested via objlist.
        ret = []
        for l in self.splitlines():
            if len(l) < 13: continue
            # Skip devices whose 11 statistics fields are all zero.
            if l[3:] == ['0',] * 11: continue
            name = l[2]
            ret.append(name)
        for item in objlist: ret.append(item)
        if not ret:
            raise Exception, "No suitable block devices found to monitor"
        return ret
    def vars(self):
        # Choose which devices to display, honouring an explicit op.disklist.
        ret = []
        if op.disklist:
            varlist = op.disklist
        else:
            varlist = []
            blockdevices = [os.path.basename(filename) for filename in glob.glob('/sys/block/*')]
            # NOTE(review): self.discover (and self.vars below) are iterated
            # without being called -- the dstat core appears to rebind these
            # names to their results; confirm against the dstat framework.
            for name in self.discover:
                if self.diskfilter.match(name): continue
                if name not in blockdevices: continue
                varlist.append(name)
            varlist.sort()
        for name in varlist:
            if name in self.discover:
                ret.append(name)
        return ret
    def name(self):
        return self.vars
    def extract(self):
        # Snapshot current counters into set2, then report the per-interval
        # average wait as delta(ticks) / delta(ios) for reads and writes.
        for l in self.splitlines():
            if len(l) < 13: continue
            if l[5] == '0' and l[9] == '0': continue
            if l[3:] == ['0',] * 11: continue
            name = l[2]
            if name not in self.vars: continue
            self.set2[name] = dict(
                # /proc/diskstats fields: 3=reads completed, 6=ms reading,
                # 7=writes completed, 10=ms writing.
                rd_ios = long(l[3]),
                wr_ios = long(l[7]),
                rd_ticks = long(l[6]),
                wr_ticks = long(l[10]),
            )
        for name in self.vars:
            rd_tput = self.set2[name]['rd_ios'] - self.set1[name]['rd_ios']
            wr_tput = self.set2[name]['wr_ios'] - self.set1[name]['wr_ios']
            # Guard against division by zero when no I/O happened this interval.
            if rd_tput:
                rd_wait = ( self.set2[name]['rd_ticks'] - self.set1[name]['rd_ticks'] ) * 1.0 / rd_tput
            else:
                rd_wait = 0
            if wr_tput:
                wr_wait = ( self.set2[name]['wr_ticks'] - self.set1[name]['wr_ticks'] ) * 1.0 / wr_tput
            else:
                wr_wait = 0
            self.val[name] = ( rd_wait, wr_wait )
        # Roll the snapshot forward at the end of each delay interval.
        if step == op.delay:
            self.set1.update(self.set2)
| {
"repo_name": "SpamapS/dstat-plugins",
"path": "dstat_plugins/plugins/dstat_disk_wait.py",
"copies": "4",
"size": "2773",
"license": "apache-2.0",
"hash": -685894545756549400,
"line_mean": 33.2345679012,
"line_max": 113,
"alpha_frac": 0.4933285251,
"autogenerated": false,
"ratio": 3.436183395291202,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5929511920391203,
"avg_score": null,
"num_lines": null
} |
__author__ = 'david'
import os
import sys
from ifind.seeker.trec_qrel_handler import TrecQrelHandler
def ratio(rels, nonrels):
    """
    Return rels^2 / (rels + nonrels) rounded to two decimal places,
    or 0.0 when both counts are zero. Expects float-compatible numbers.
    """
    denominator = rels + nonrels
    if denominator <= 0.0:
        return 0.0
    return round((rels * rels) / denominator, 2)
def get_perf():
    """
    Write per-user, per-topic judgement accuracy to user_perf.txt.

    For every user profile and each topic, counts how many of the documents
    the user marked relevant agree (rel_correct) and disagree (rel_incorrect)
    with the TREC assessors, then writes one fixed-width line per
    (user, topic) pair.
    """
    OUT_FILE = 'user_perf.txt'
    qrels = TrecQrelHandler('data/TREC2005.qrels.txt')
    topics = [347, 435]
    #users = get_newssearch_users()
    users = UserProfile.objects.all()
    # BUG FIX: use a context manager so the output file is closed even if a
    # qrel lookup or database query raises part-way through.
    with open(OUT_FILE, 'w') as out_f:
        for user in users:
            username = user.user.username
            for t in topics:
                examined_docs = DocumentsExamined.objects.filter(user=user.user, topic_num=t)
                total_docs = len(examined_docs)
                rel_correct = 0
                rel_incorrect = 0
                for doc in examined_docs:
                    # A positive qrel value / judgement means "relevant".
                    assessor_relevant = qrels.get_value(str(t), doc.doc_num) > 0
                    user_relevant = doc.judgement > 0
                    if assessor_relevant and user_relevant:
                        rel_correct += 1
                    if not assessor_relevant and user_relevant:
                        rel_incorrect += 1
                out_f.write("{:<12}{:< 8}{:<8}{:<8}{:<8}{:10.2f}\n".format(
                    username,
                    t,              # topic number
                    total_docs,     # total number of docs marked
                    rel_correct,    # number of documents marked which were correct
                    rel_incorrect,  # number of documents marked incorrectly
                    ratio(float(rel_correct), rel_incorrect)  # correct/incorrect ratio
                ))
def get_newssearch_users():
    """
    Return every UserProfile whose username begins with 'search', skipping
    the explicitly excluded accounts.
    """
    excluded = ['search12', 'search13', 'search26', 'search27', 'search40', 'search41', 'search54', 'search55']
    return [profile for profile in UserProfile.objects.all()
            if profile.user.username.startswith('search')
            and profile.user.username not in excluded]
if __name__ == '__main__':
    # Django settings must be configured before the model imports below work.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'treconomics_project.settings')
    from django.contrib.auth.models import User
    from treconomics.models import UserProfile, DocumentsExamined
    get_perf()
| {
"repo_name": "leifos/treconomics",
"path": "treconomics_project/get_user_perf.py",
"copies": "1",
"size": "2651",
"license": "mit",
"hash": 4223220133843679000,
"line_mean": 28.797752809,
"line_max": 111,
"alpha_frac": 0.5582798944,
"autogenerated": false,
"ratio": 3.7763532763532766,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9747887475174208,
"avg_score": 0.01734913911581375,
"num_lines": 89
} |
__author__ = 'david'
import RPi.GPIO as GPIO
import time
def updateLEDs(grid, x, y):
    """
    Drive one refresh pass of the LED matrix.

    grid: 2D iterable of truthy/falsy cell states (rows -> x channels,
          columns -> y channels).
    x, y: GPIO channel number lists for the matrix rows/columns.
    """
    x_channel_list = x
    y_channel_list = y
    numx = numy = 0
    # NOTE: the loop variables below shadow the x/y parameters, which is why
    # the channel lists were copied into *_channel_list names above.
    for x in grid:
        for y in x:
            if (y):
                # Lit cell: row line HIGH, column line LOW.
                # NOTE(review): polarity (x HIGH / y LOW) is the opposite of
                # updateLEDs2 -- confirm which matches the wiring.
                GPIO.output(x_channel_list[numx], GPIO.HIGH)
                GPIO.output(y_channel_list[numy], GPIO.LOW)
                #time.sleep(0.00005)
            numy += 1
        numy = 0
        # Hold the row visible briefly before blanking it.
        time.sleep(0.001)
        for y in x:
            # Blank the whole row by reversing both lines.
            GPIO.output(y_channel_list[numy], GPIO.HIGH)
            GPIO.output(x_channel_list[numx], GPIO.LOW)
            numy += 1
        numy =0
        numx +=1
def updateLEDs2(grid, x, y, sleep, period, duration):
    """
    Repeatedly refresh the LED matrix for ``duration`` seconds.

    grid:     2D iterable of truthy/falsy cell states.
    x, y:     GPIO channel number lists for the matrix rows/columns.
    sleep:    when truthy, pause ``period`` seconds after lighting each cell.
    period:   per-cell pause length in seconds.
    duration: total number of seconds to keep refreshing.
    """
    x_channel_list = x
    y_channel_list = y
    numy = 0
    # BUG FIX: time_difference was read before ever being assigned (NameError
    # on the first loop test); initialise it, and compare with `<` instead of
    # `!=` since a float elapsed time will never exactly equal `duration`.
    time_difference = 0.0
    start = time.time()
    while time_difference < duration:
        # BUG FIX: numx must restart at 0 for every full-grid refresh --
        # previously it kept growing and indexed past x_channel_list on the
        # second pass.
        numx = 0
        for row in grid:
            for cell in row:
                GPIO.output(x_channel_list[numx], GPIO.LOW)
                if cell:
                    GPIO.output(y_channel_list[numy], GPIO.HIGH)
                    if sleep:
                        # BUG FIX: the original did `sleep(period)`, calling
                        # the boolean flag; a timed pause was intended.
                        time.sleep(period)
                # NOTE(review): this re-asserts HIGH rather than LOW -- it
                # looks like the "off" write; confirm the intended polarity.
                GPIO.output(y_channel_list[numy], GPIO.HIGH)
                numy += 1
            numy = 0
            numx += 1
        end = time.time()
        time_difference = end - start
def flashLEDs(grid, x, y, period, duration=0):
    """
    Flash the LED grid, pausing ``period`` seconds per lit cell.

    Backwards compatible: ``duration`` defaults to 0 (one timing window).
    """
    # BUG FIX: the original called updateLEDs with five arguments (it takes
    # three) and referenced an undefined name TRUE; updateLEDs2 matches the
    # intended "timed refresh with per-cell sleep" behaviour.
    updateLEDs2(grid, x, y, True, period, duration)
def setupLEDs(x, y):
    """
    Configure the x (row) and y (column) channels as outputs using
    physical-board pin numbering.
    """
    GPIO.setmode(GPIO.BOARD)
    for channel_list in (x, y):
        GPIO.setup(channel_list, GPIO.OUT)
"repo_name": "TheRedshift/AudioAffair",
"path": "src/lights.py",
"copies": "1",
"size": "1510",
"license": "mit",
"hash": -1390069222630456000,
"line_mean": 26.9814814815,
"line_max": 63,
"alpha_frac": 0.5132450331,
"autogenerated": false,
"ratio": 3.2683982683982684,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4281643301498268,
"avg_score": null,
"num_lines": null
} |
__author__ = 'David'
class HexCharacterMapping:
    """
    Lookup table mapping characters to their canonical text form.

    In the recovered source nearly every entry maps a character to itself, so
    the table is built from identity ranges plus the few genuinely
    non-identity entries kept from the original listing.

    BUG FIX: the original literal table contained two syntactically invalid
    lines (a triple-quoted key for the apostrophe and an unterminated string
    for the backslash), so the class could not even be parsed; both are plain
    identity entries here.
    """
    # Class-level dict shared by all instances and (idempotently) populated
    # on construction -- mirrors the original design.
    characters = {}

    # Every character that maps to itself.
    # NOTE(review): U+00A0 (no-break space) is indistinguishable from a plain
    # space in the recovered source; both are included to be safe.
    _IDENTITY = (
        ''.join(chr(c) for c in range(0x20, 0x7F))     # printable ASCII (incl. ' and \)
        + ''.join(chr(c) for c in range(0xA0, 0x100))  # Latin-1 supplement
        + 'ŒœŠšŸƒˆ˜'                                   # Latin extended / modifiers
        + 'ΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩ'                   # Greek uppercase
        + 'αβγδεζηθικλμνξοπρςστυφχψω'                  # Greek lowercase
        + 'ϑϒϖ'                                        # Greek symbol variants
        + '–—‘’‚“”„†‡•…‰€™'                            # punctuation / currency
        + '′″‹›‾⁄ℑ℘ℜℵ'                                 # primes, letterlike symbols
        + '←↑→↓↔↵⇐⇑⇒⇓⇔'                                # arrows
        + '∀∂∃∅∇∈∉∋∏∑−∗√∝∞∠∧∨∩∪∫∴∼≅≈≠≡≤≥'              # mathematical operators
        + '⊂⊃⊄⊆⊇⊕⊗⊥⋅⌈⌉⌊⌋〈〉◊♠♣♥♦'                      # sets, ceilings, shapes
    )

    def __init__(self):
        """Populate the shared character table."""
        for ch in self._IDENTITY:
            self.characters[ch] = ch
        # Non-identity entries preserved from the original table:
        self.characters['\xad'] = ''   # soft hyphen is dropped entirely
        self.characters['’;'] = "'"    # stray entity remnant -> ASCII apostrophe
| {
"repo_name": "idcodeoverflow/SocialNetworkAnalyzer",
"path": "PreprocessingLayout/html/HexCharacterMapping.py",
"copies": "1",
"size": "18351",
"license": "mit",
"hash": -441409084347398000,
"line_mean": 38.3978021978,
"line_max": 43,
"alpha_frac": 0.4520807765,
"autogenerated": false,
"ratio": 2.881067180970749,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3833147957470749,
"avg_score": null,
"num_lines": null
} |
__author__ = 'david'
from ifind.common.language_model import LanguageModel
from simiir.text_classifiers.lm_classifier import LMTextClassifier
from simiir.utils.lm_methods import extract_term_dict_from_text
import logging
log = logging.getLogger('lm_classifer.TopicBasedLMTextClassifier')
class TopicBasedLMTextClassifier(LMTextClassifier):
    """
    Extends the LM text classifier, but also considers topic background knowledge (if provided).
    """
    def __init__(self, topic, search_context, stopword_file=None, background_file=None, topic_weighting=1, topic_background_weighting=1, document_weighting=1):
        """
        Stores the three component weights and defers the rest of the setup
        to the superclass.
        """
        # BUG FIX: mutable default arguments ([]) are shared between calls;
        # use None sentinels and create fresh lists per call instead.
        if stopword_file is None:
            stopword_file = []
        if background_file is None:
            background_file = []
        self.topic_weighting = topic_weighting                        # Weighting score for topic text
        self.topic_background_weighting = topic_background_weighting  # Weighting score for background topic text
        self.document_weighting = document_weighting                  # Weighting score for examined snippet text
        super(TopicBasedLMTextClassifier, self).__init__(topic, search_context, stopword_file, background_file)
    def make_topic_language_model(self):
        """
        Combines term counts from the topic and background to produce the language model.
        """
        topic_text = self._make_topic_text()
        # Get term counts from the TREC topic title and description.
        topic_terms = extract_term_dict_from_text(topic_text, self._stopword_file)
        # Get term counts from the topic background.
        background_terms = self._topic.background_terms
        combined_term_counts = {}
        combined_term_counts = self._combine_dictionaries(combined_term_counts, topic_terms, self.topic_weighting)
        combined_term_counts = self._combine_dictionaries(combined_term_counts, background_terms, self.topic_background_weighting)
        # Build the LM from the combined count dictionary.
        language_model = LanguageModel(term_dict=combined_term_counts)
        self.topic_language_model = language_model
        log.debug("Making topic {0}".format(self._topic.id))
    def _update_topic_language_model(self, text_list):
        """
        Updates the language model for the topic, given snippet/document text (text_list) and prior (knowledge) text.
        """
        topic_text = self._make_topic_text()
        document_text = ' '.join(text_list)
        topic_term_counts = extract_term_dict_from_text(topic_text, self._stopword_file)
        background_scores = self._topic.background_terms
        document_term_counts = extract_term_dict_from_text(document_text, self._stopword_file)
        combined_term_counts = {}
        combined_term_counts = self._combine_dictionaries(combined_term_counts, topic_term_counts, self.topic_weighting)
        combined_term_counts = self._combine_dictionaries(combined_term_counts, background_scores, self.topic_background_weighting)
        combined_term_counts = self._combine_dictionaries(combined_term_counts, document_term_counts, self.document_weighting)
        # Build the updated language model.
        new_language_model = LanguageModel(term_dict=combined_term_counts)
        self.topic_language_model = new_language_model
        log.debug("Updating topic {0}".format(self._topic.id))
    def _combine_dictionaries(self, src_dict, from_dict, weight):
        """
        Takes from_dict, and multiplies the values in that dictionary by weight, adding them to src_dict.
        Returns src_dict (which is also mutated in place).
        """
        # BUG FIX: dict.iteritems() does not exist on Python 3; items() is
        # equivalent for iteration on both Python 2 and 3.
        for term, value in from_dict.items():
            weighted_score = value * weight
            if term not in src_dict:
                src_dict[term] = 0.0  # Create a zero value so the addition below works uniformly
            src_dict[term] += weighted_score
        return src_dict
"repo_name": "leifos/simiir",
"path": "simiir/text_classifiers/lm_topic_classifier.py",
"copies": "1",
"size": "3785",
"license": "mit",
"hash": -926621738521203500,
"line_mean": 48.8157894737,
"line_max": 155,
"alpha_frac": 0.6763540291,
"autogenerated": false,
"ratio": 4.078663793103448,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5255017822203448,
"avg_score": null,
"num_lines": null
} |
__author__ = 'david'
from ifind.common.query_generation import SingleQueryGeneration
from ifind.common.language_model import LanguageModel
from ifind.common.query_ranker import QueryRanker
def extract_term_dict_from_text(text, stopword_file):
    """
    Parse ``text`` and count how often each (non-stopword, length>=3) term
    occurs.

    :param text: a string
    :return: a dict of {term: count}
    """
    extractor = SingleQueryGeneration(minlen=3, stopwordfile=stopword_file)
    extractor.extract_queries_from_text(text)
    return extractor.query_count
def read_in_background(vocab_file):
    """
    Helper method to read in a file of "term,count" lines and construct a
    background language model.

    Returns a LanguageModel instance trained on the vocabulary file passed.
    """
    vocab = {}
    # BUG FIX: use a context manager so the file is closed even if a line
    # fails to parse (the original leaked the handle on error).
    with open(vocab_file, 'r') as f:
        for line in f:
            fields = line.split(',')
            vocab[fields[0]] = int(fields[1])
    return LanguageModel(term_dict=vocab)
def rank_terms(terms, **kwargs):
    """
    Ranks a list of potential terms by their discriminatory power.

    The returned list has the same length as the input list.
    Accepts ``topic_language_model`` as an optional keyword argument.
    """
    language_model = kwargs.get('topic_language_model', None)
    ranker = QueryRanker(smoothed_language_model=language_model)
    ranker.calculate_query_list_probabilities(terms)
    return ranker.get_top_queries(len(terms))
return ranker.get_top_queries(len(terms)) | {
"repo_name": "leifos/simiir",
"path": "simiir/utils/lm_methods.py",
"copies": "1",
"size": "1478",
"license": "mit",
"hash": -7500718137630771000,
"line_mean": 33.3953488372,
"line_max": 95,
"alpha_frac": 0.7077131258,
"autogenerated": false,
"ratio": 3.667493796526055,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48752069223260547,
"avg_score": null,
"num_lines": null
} |
__author__ = 'David'
from peewee import *
from datetime import datetime
from loadextantdata import get_data
# SQLite backing store for all Housing records (created in the working dir).
db = SqliteDatabase("Housing.db")
class Housing(Model):
    """
    The base model for the housing database.
    """
    # Identity / location of the complex.
    building_name = CharField(max_length=100)
    address = CharField(max_length=255)
    neighborhood = CharField(max_length=255, default="-")
    # Unit counts: "available_*" columns versus the "num_of_*" totals.
    available_subsidized_units = IntegerField(default=0)
    num_of_subsidized_units = IntegerField(default=0)
    available_affordable_units = IntegerField(default=0)
    num_of_affordable_units = IntegerField(default=0)
    num_of_units = IntegerField(default=0)
    # Management and contact details.
    property_management = CharField(max_length=255)
    contact_info = CharField(max_length=20)
    website = CharField(max_length=255)
    types_of_housing = CharField(max_length=255)
    housing_programs = CharField(max_length=100)
    # Columns carried over from the source spreadsheet.
    # NOTE(review): the meaning of color/number comes from the CSV -- confirm.
    color = CharField(max_length=25)
    number = IntegerField(default=0)
    extra_1 = CharField(max_length=50, default=" ")
    extra_2 = CharField(max_length=50, default=" ")
    # Row creation time; peewee evaluates the callable default per row.
    time = TimeField(default=datetime.now)
    class Meta:
        # Bind every model to the module-level SQLite database.
        database = db
def add_housing_complex(building=None,
                        address=None,
                        neighbor=None,
                        asub=None,
                        sub=None,
                        aaff=None,
                        aff=None,
                        units=None,
                        management=None,
                        contact=None,
                        website=None,
                        types=None,
                        programs=None,
                        extra1=None,
                        extra2=None
                        ):
    """
    Add a housing complex row to the Housing table.

    :return: "Entry Added" on success, "Error" if the insert fails.
    """
    try:
        Housing.create(building_name=building,
                       address=address,
                       neighborhood=neighbor,
                       available_subsidized_units=asub,
                       num_of_subsidized_units=sub,
                       available_affordable_units=aaff,
                       num_of_affordable_units=aff,
                       num_of_units=units,
                       property_management=management,
                       contact_info=contact,
                       website=website,
                       types_of_housing=types,
                       housing_programs=programs,
                       extra_1=extra1,
                       extra_2=extra2
                       )
        return "Entry Added"
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; catch only genuine errors.
        return "Error"
def remove_housing_complex(entry):
    """
    Remove a housing model from the Housing table after asking the user
    to confirm.

    :param entry: the Housing row to delete
    :return:
    """
    # BUG FIX: the original compared the *bound method* str.lower (missing
    # call parentheses) to "y", so the confirmation could never succeed and
    # nothing was ever deleted.
    if input("Are you sure? [yN]: ").lower() == "y":
        Housing.delete_instance(entry)
        print("Entry deleted")
def modify_data(building, address, value, field):
    """
    Set ``field`` to ``value`` on the row identified by BOTH the building's
    name and its address, so the correct record is modified.
    """
    try:
        update_query = Housing.update(**{field: value})
        update_query.where(Housing.building_name == building,
                           Housing.address == address).execute()
    except ValueError:
        return "Error updating entry for {}".format(building)
    return "{}'s record successfully updated".format(building)
def search_entries():
    """
    Prompt for a search string and display the matching entries.
    """
    query = input("search query: ")
    view_entry(query)
def view_entry(search_query=None):
    """
    Print previous entries (building name and subsidized-unit count),
    optionally filtered by a search string against the building name.
    """
    entries = Housing.select().order_by(Housing.building_name)
    if search_query:
        # BUG FIX: the original called Housing.contains(search_query), which
        # does not exist on a peewee Model and raised AttributeError; filter
        # on the building-name field instead.
        entries = entries.where(Housing.building_name.contains(search_query))
    for entry in entries:
        print(entry.building_name, entry.num_of_subsidized_units)
def return_specific(name=None):
    """
    Looks for a specific entry by housing complex name and returns that complex' name and address
    :param name:
    :return:
    """
    entries = Housing.select().order_by(Housing.building_name)
    if name:
        # NOTE(review): the address filter matches any address containing
        # "1" -- presumably a placeholder to narrow duplicates; confirm intent.
        entries = Housing.select().where(Housing.building_name.contains(name),
                                         Housing.address.contains("1"))
    for entry in entries:
        print(entry.building_name, entry.address)
def load_database():
    """
    Initializes the database, loading it from the csv downloaded from google
    along with the loadextantdata module.

    :return:
    """
    data = get_data()
    # NOTE: the tuple order must match loadextantdata.get_data(); `kind` was
    # previously named `type`, which shadowed the builtin.
    for (name, address, units, subu, au, propm, cont, web, kind, hp,
         col, num, ext1, ext2) in data:
        # BUG FIX: `safe=True` removed from this call -- it is an option of
        # create_tables(), not a Housing field, and was silently treated as
        # row data here.
        Housing.create(
            building_name=name,
            address=address,
            num_of_subsidized_units=subu,
            num_of_affordable_units=au,
            num_of_units=units,
            property_management=propm,
            contact_info=cont,
            website=web,
            types_of_housing=kind,
            housing_programs=hp,
            color=col,
            number=num,
            extra_1=ext1,
            extra_2=ext2
        )
if __name__ == "__main__":
    db.connect()
    # safe=True: do not fail if the table already exists.
    db.create_tables([Housing], safe=True)
    # load_database()
| {
"repo_name": "katzwigmore/Portland-Housing-Tracker",
"path": "housingdatabase.py",
"copies": "1",
"size": "5247",
"license": "mit",
"hash": 6413536851545229000,
"line_mean": 29.6842105263,
"line_max": 117,
"alpha_frac": 0.5605107681,
"autogenerated": false,
"ratio": 4.0548686244204015,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5115379392520402,
"avg_score": null,
"num_lines": null
} |
__author__ = 'david'
import abc
from simiir.text_classifiers.base_classifier import BaseTextClassifier
from simiir.utils.data_handlers import get_data_handler
from random import random
import logging
log = logging.getLogger('base_informed_trec_classifier')
class BaseInformedTrecTextClassifier(BaseTextClassifier):
    """
    Relevance classifier backed by a TREC QREL file, accessed through a data
    handler. Subclasses must implement is_relevant().
    """
    def __init__(self, topic, search_context, qrel_file, host=None, port=0):
        """
        Sets up the underlying data handler over the given QREL file,
        optionally served from host:port.
        """
        super(BaseInformedTrecTextClassifier, self).__init__(topic, search_context, stopword_file=[], background_file=[])
        self._filename = qrel_file
        self._host = host
        self._port = port
        self._data_handler = get_data_handler(filename=self._filename,
                                              host=self._host,
                                              port=self._port,
                                              key_prefix='informed')
    def make_topic_language_model(self):
        """
        No topic model is needed -- judgements come straight from the QRELs.
        """
        log.debug("No Topic model required for this TREC Classifier")
    def _get_judgment(self, topic_id, doc_id):
        """
        Returns the stored judgement for (topic_id, doc_id).

        Falls back to pseudo-topic '0' (a non-existent topic that can hold
        pre-rolled relevance values), and finally to 0 (non-relevant).

        topic_id (string): the TREC topic number
        doc_id (string): the TREC document number
        """
        judgement = self._data_handler.get_value(topic_id, doc_id)
        if not judgement:
            judgement = self._data_handler.get_value('0', doc_id)
        if not judgement:
            judgement = 0
        return judgement
    @abc.abstractmethod
    def is_relevant(self, document):
        """
        Subclass hook: return True when the document counts as relevant,
        else False.
        """
        pass
| {
"repo_name": "leifos/simiir",
"path": "simiir/text_classifiers/base_informed_trec_classifier.py",
"copies": "1",
"size": "2215",
"license": "mit",
"hash": 7621437724282774000,
"line_mean": 34.1587301587,
"line_max": 139,
"alpha_frac": 0.6239277652,
"autogenerated": false,
"ratio": 4.132462686567164,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.032998733080823954,
"num_lines": 63
} |
__author__ = "David"
import requests
import json
import sys
from add_playlist import add_playlist
from youtube_playlist import load_config, send_get_request
# Channel id of the uploader being inspected; assigned by find_account_playlists().
uploader_id = str()
# Shared configuration (API key, preference-file path, ...) loaded once at import.
config = load_config()
def usage_message():
    """Show how to invoke the script, then abort with exit status -1."""
    print("USAGE: {} [id/username] uploader-details".format(sys.argv[0]))
    exit(-1)
def process_pages(json_page):
    """Walk every page of a playlists API response.

    json_page: the decoded JSON of the first response page.
    Returns a dict mapping playlist title -> playlist id across all pages.
    """
    playlists = dict()
    while True:
        for playlist in json_page["items"]:
            playlists[playlist["snippet"]["title"]] = playlist["id"]
        if "nextPageToken" not in json_page:
            break
        # Fetch the next page using the continuation token.
        json_page = send_get_request("https://www.googleapis.com/youtube/v3/playlists?part=snippet&channelId={0}&"
                                     "pageToken={1}&fields=items(id%2Csnippet)%2CnextPageToken%2CprevPageToken&"
                                     "key={2}".format(uploader_id, json_page["nextPageToken"], config["api_key"]))
    return playlists
def process_playlists(playlists):
    """Decide which playlists to add (stored preference or interactive prompt),
    add them, then persist the updated preference file.

    playlists: dict mapping playlist title -> playlist id.
    """
    playlists_to_add = list()
    adding_all = False
    with open(config["playlist_prefs_path"], "r") as data:
        playlist_json = json.load(data)
    for (playlist_title, playlist_id) in playlists.items():
        decided = False
        while not decided and not adding_all:
            try:
                # A stored preference wins over prompting the user.
                should_add_bool = playlist_json[playlist_id]
                if should_add_bool:
                    should_add = "y"
                else:
                    should_add = "n"
            except KeyError:  # no preference for this playlist was found
                should_add = input("Do you want to add %s to the collection? [Y/N/[A]dd All]: " % playlist_title)
            if should_add.lower() == "y":
                print("Okay, I'll add %s." % playlist_title)
                playlist_json[playlist_id] = True
                playlists_to_add.append(playlist_id)
                decided = True
            elif should_add.lower() == "n":
                playlist_json[playlist_id] = False
                decided = True
            elif should_add.lower() == "a":
                # NOTE(review): "add all" queues every playlist but never writes
                # playlist_json[...] = True for them, so the choice is not
                # persisted to the preference file -- confirm if intended.
                adding_all = True
                playlists_to_add = playlists.values()
                break
    for playlist in playlists_to_add:
        add_playlist(playlist)
    # Write the (possibly updated) preferences back out.
    with open(config["playlist_prefs_path"], "w") as playlist_prefs:
        json.dump(playlist_json, playlist_prefs, indent=2)
def find_account_playlists(input_var, input_is_username):
    """Resolve a channel (by id or username), fetch its playlists, and hand
    them to process_playlists().

    input_var: a channel id, or a username when input_is_username is True.
    input_is_username: whether input_var needs resolving to a channel id first.
    """
    global uploader_id
    if input_is_username:
        id_response_json = send_get_request("https://www.googleapis.com/youtube/v3/channels?part=snippet"
                                            "&forUsername={0}&fields=items%2Fid&key={1}".format(input_var,
                                                                                               config["api_key"]))
        if len(id_response_json["items"]) == 0:
            # NOTE(review): returns -1 on failure while the success path returns
            # None -- callers in this file ignore the return value.
            print("Error finding playlists for", input_var)
            return(-1)
        uploader_id = id_response_json["items"][0]["id"]
    else:
        uploader_id = input_var
    # First page of the channel's playlists; process_pages() follows the rest.
    playlists_response_json = send_get_request("https://www.googleapis.com/youtube/v3/playlists?part=snippet&"
                                               "channelId={0}&fields=items(id%2Csnippet)%2CnextPageToken%2C"
                                               "prevPageToken&key={1}".format(uploader_id, config["api_key"]))
    playlists = process_pages(playlists_response_json)
    process_playlists(playlists)
if __name__ == "__main__":
    # Expected invocation: <script> id|username <value>
    if len(sys.argv) != 3:
        usage_message()
    if sys.argv[1].lower() == "id":
        find_account_playlists(sys.argv[2], False)
    elif sys.argv[1].lower() == "username":
        find_account_playlists(sys.argv[2], True)
else:
usage_message() | {
"repo_name": "Ratheronfire/YouTube-Playlist-Manager",
"path": "find_account_playlists.py",
"copies": "1",
"size": "3843",
"license": "mit",
"hash": -2401400945610634000,
"line_mean": 32.7192982456,
"line_max": 118,
"alpha_frac": 0.5576372626,
"autogenerated": false,
"ratio": 3.881818181818182,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9930944495114253,
"avg_score": 0.001702189860785727,
"num_lines": 114
} |
__author__ = 'David'
import sys
from PyQt4 import QtGui, QtCore, uic
from pgdb import PGDatabase
form_class = uic.loadUiType("App/mainUI.ui")[0]
class GUI(QtGui.QMainWindow, form_class):
    """Main admin window: one table widget per database table, with
    add/remove/filter buttons wired to PGDatabase operations."""

    def __init__(self, parent=None):
        QtGui.QMainWindow.__init__(self, parent)
        self.setupUi(self)
        self.database = PGDatabase()
        # Flags set on double-click so cellChanged handlers only react to
        # user edits, not to programmatic table rebuilds.
        self.inventoryClicked = 0
        self.catalogClicked = 0
        self.buildAllTableViews()
        self.buildInventoryTableView()
        self.commitChangesButton.clicked.connect(self.database.commitChanges)
        self.commitChangesButton.clicked.connect(self.buildAllTableViews)
        self.rollbackChangesButton.clicked.connect(self.database.rollbackChanges)
        self.rollbackChangesButton.clicked.connect(self.buildAllTableViews)
        # Schools buttons
        self.schoolAddButton.clicked.connect(self.createSchoolAddDialog)
        self.removeSchoolButton.clicked.connect(
            lambda: self.removeTableEntry(self.schoolsTable, "schools")
        )
        # Catalog table buttons
        self.addToCatalogButton.clicked.connect(self.createCatalogAddDialog)
        self.removeFromCatalogButton.clicked.connect(
            lambda: self.removeTableEntry(self.catalogTable, "items"))
        # Inventory table button
        self.addInventoryButton.clicked.connect(self.createInventoryAddDialog)
        self.removeInventoryButton.clicked.connect(
            lambda: self.removeTableEntry(self.inventoryTable, "inventory")
        )
        # Orders table
        self.filterByUserButton.clicked.connect(self.queryByPrompt)
        self.filterByCompletedButton.clicked.connect(self.buildOrdersByCompleted)
        self.restoreOrdersButton.clicked.connect(
            lambda: self.buildTableView(self.orderTable, "orders")
        )
        # User table buttons
        # self.addUsersButton.clicked.connect(self.createUserAddDialog)
        self.removeUserButton.clicked.connect(
            lambda: self.removeTableEntry(self.usersTable, "users")
        )
        # self.modifyInventoryButton.clicked.connect()
        # self.removeInventoryButton.clicked.connect()

    def initUI(self):
        # NOTE(review): never called from __init__; presumably dead code -- confirm.
        self.setGeometry(300, 300, 250, 150)
        self.setWindowTitle('GEA Admin')
        self.show()

    def testFunc(self):
        # Debug helper.
        print("Yes")

    def removeTableEntry(self, tableWidget, dbTableName):
        """Delete the DB rows behind the selected table rows (id taken from
        column 0), then rebuild the view."""
        selectedItems = tableWidget.selectedItems()
        if selectedItems:
            rows = []
            for item in selectedItems:
                rows.append(tableWidget.row(item))
            # A row appears once per selected cell; dedupe before deleting.
            rows = set(rows)
            for row in rows:
                toDelete = tableWidget.item(row, 0)
                toDelete = int(toDelete.text())
                self.database.removeItem(dbTableName, toDelete)
            self.buildTableView(tableWidget, dbTableName)

    def removeCatalogEntry(self):
        # NOTE(review): duplicates removeTableEntry(self.catalogTable, "items").
        selectedItems = self.catalogTable.selectedItems()
        if selectedItems:
            rows = []
            for item in selectedItems:
                rows.append(self.catalogTable.row(item))
            rows = set(rows)
            for row in rows:
                toDelete = self.catalogTable.item(row, 0)
                toDelete = int(toDelete.text())
                self.database.removeItem("items", toDelete)
            self.buildTableView(self.catalogTable, "items")

    def modifyInventoryEntry(self, row, col):
        """cellChanged handler: push an edited inventory cell back to the DB.
        Only acts when the edit followed a user double-click."""
        if self.inventoryClicked:
            self.inventoryClicked = 0
            header = str(self.inventoryTable.horizontalHeaderItem(col).text())
            toModifyID = str(self.inventoryTable.item(row, 0).text())
            newValue = str(self.inventoryTable.item(row, col).text())
            self.database.updateRecord("inventory", toModifyID, header, newValue)
            self.buildInventoryTableView()

    def modifyCatalogEntry(self, row, col):
        """cellChanged handler for the catalog table; mirrors
        modifyInventoryEntry."""
        if self.catalogClicked:
            self.catalogClicked = 0
            header = str(self.catalogTable.horizontalHeaderItem(col).text())
            toModifyID = str(self.catalogTable.item(row, 0).text())
            newValue = str(self.catalogTable.item(row, col).text())
            self.database.updateRecord("items", toModifyID, header, newValue)
            self.buildTableView(self.catalogTable, "items")

    def buildAllTableViews(self):
        """Refresh every table widget from the database."""
        self.buildTableView(self.catalogTable, "items")
        self.buildTableView(self.inventoryTable, "inventory")
        self.buildTableView(self.orderTable, "orders")
        self.buildTableView(self.usersTable, "users")
        self.buildTableView(self.schoolsTable, "schools")

    def buildTableView(self, tableWidget, dbTableName):
        """Clear tableWidget and repopulate it from dbTableName; wire edit
        handlers for the inventory and items tables."""
        tableWidget.clear()
        # Get the headers and set them in the table
        headers = self.database.getColumnHeaders(dbTableName)
        tableWidget.setColumnCount(len(headers))
        headerLabels = QtCore.QStringList()
        for i, header in enumerate(headers):
            headerLabels.append(header[0])
        tableWidget.setHorizontalHeaderLabels(headerLabels)
        # Populate the table
        results = self.database.getAllRows(dbTableName)
        tableWidget.setRowCount(len(results))
        for i, row in enumerate(results):
            for j, item in enumerate(row):
                # self.inventoryTable.setItem(i, j, QtGui.QTableWidgetItem(item))
                tableItem = QtGui.QTableWidgetItem(str(item))
                tableWidget.setItem(i, j, tableItem)
        # NOTE(review): `is` on a string literal relies on CPython interning;
        # should be `==` -- confirm before changing behavior.
        if dbTableName is "inventory":
            def inventoryClicked():
                self.inventoryClicked = 1
            tableWidget.cellDoubleClicked.connect(inventoryClicked)
            tableWidget.cellChanged.connect(self.modifyInventoryEntry)
        if dbTableName is "items":
            def catalogClicked():
                self.catalogClicked = 1
            tableWidget.cellDoubleClicked.connect(catalogClicked)
            tableWidget.cellChanged.connect(self.modifyCatalogEntry)

    def buildInventoryTableView(self):
        """Like buildTableView for inventory, but with an extra 'name' column
        joined in from the items table."""
        # Get the headers and set them in the table
        headers = self.database.getColumnHeaders("inventory")
        self.inventoryTable.setColumnCount(len(headers)+1)
        headerLabels = QtCore.QStringList()
        for i, header in enumerate(headers):
            headerLabels.append(header[0])
        headerLabels.append('name')
        self.inventoryTable.setHorizontalHeaderLabels(headerLabels)
        # Populate the table
        results = self.database.getInventoryWithNames()
        self.inventoryTable.setRowCount(len(results))
        for i, row in enumerate(results):
            for j, item in enumerate(row):
                # self.inventoryTable.setItem(i, j, QtGui.Qself.inventoryTableItem(item))
                tableItem = QtGui.QTableWidgetItem(str(item))
                self.inventoryTable.setItem(i, j, tableItem)
        def inventoryClicked():
            self.inventoryClicked = 1
        def inventoryDoubleClicked(row, col):
            # Debug trace of the clicked cell.
            print row, col
        self.inventoryTable.cellDoubleClicked.connect(inventoryClicked)
        self.inventoryTable.cellChanged.connect(self.modifyInventoryEntry)
        self.inventoryTable.cellDoubleClicked.connect(inventoryDoubleClicked)

    def buildOrdersByUser(self):
        """Prompt for a user id and show only that user's orders.
        NOTE(review): writes results into usersTable, not orderTable -- confirm."""
        text, ok = QtGui.QInputDialog.getText(self, 'Input Dialog',
                                              'User id?:')
        if ok:
            # REPopulate the table
            results = self.database.filterOrdersByUser(int(text))
            self.usersTable.setRowCount(len(results))
            for i, row in enumerate(results):
                for j, item in enumerate(row):
                    # self.inventoryTable.setItem(i, j, QtGui.Qself.inventoryTableItem(item))
                    tableItem = QtGui.QTableWidgetItem(str(item))
                    self.usersTable.setItem(i, j, tableItem)

    def buildOrdersByCompleted(self):
        """Show only orders that are not yet completed."""
        # REPopulate the table
        results = self.database.filterOrdersByCompleted()
        self.displayQueryResponse(self.orderTable, results)

    def testDC(self, row, col):
        # Debug handler for double-click coordinates.
        print row, col

    def createCatalogAddDialog(self):
        """Open the add-item dialog; on accept, insert and refresh."""
        dialog = CatalogAddDialog()
        if dialog.exec_() == QtGui.QDialog.Accepted:
            itemName, itemDesc, itemPrice = dialog.getTextFields()
            self.database.addItemToCatalog(itemName, itemDesc, itemPrice)
            self.buildTableView(self.catalogTable, "items")

    def createInventoryModifyDialog(self):
        # NOTE(review): InventoryModifyDialog is not defined in this file and
        # buildInveTableView looks like a typo for buildTableView -- confirm.
        dialog = InventoryModifyDialog()
        if dialog.exec_() == QtGui.QDialog.Accepted:
            itemName, itemDesc, itemPrice = dialog.getTextFields()
            self.database.addItemToCatalog(itemName, itemDesc, itemPrice)
            self.buildInveTableView(self.catalogTable, "items")

    def createInventoryAddDialog(self):
        """Open the add-inventory dialog; on accept, insert and refresh."""
        dialog = InventoryAddDialog()
        if dialog.exec_() == QtGui.QDialog.Accepted:
            item, quant = dialog.getTextFields()
            self.database.addItemToInventory(item, quant)
            self.buildInventoryTableView()

    def createSchoolAddDialog(self):
        """Open the add-school dialog; on accept, insert and refresh."""
        dialog = SchoolAddDialog()
        if dialog.exec_() == QtGui.QDialog.Accepted:
            name, address = dialog.getTextFields()
            self.database.addSchool(name, address)
            self.buildTableView(self.schoolsTable, "schools")

    def displayQueryResponse(self, tableWidget, response):
        """Replace tableWidget's contents with an arbitrary query result set."""
        if not response: return
        # tableWidget = QtGui.QTableWidget()
        oldRows = tableWidget.rowCount()
        for i in range(oldRows):
            tableWidget.removeRow(i)
        tableWidget.setRowCount(len(response))
        tableWidget.setColumnCount(len(response[0]))
        for i, row in enumerate(response):
            for j, item in enumerate(row):
                tableWidget.setItem(i, j, QtGui.QTableWidgetItem(str(item)))

    def queryByPrompt(self):
        """Prompt for a user id and show that user's orders in the order table."""
        text, ok = QtGui.QInputDialog.getText(self, 'Input Dialog',
                                              'User id?:')
        if ok:
            # REPopulate the table
            results = self.database.filterOrdersByUser(int(text))
            self.displayQueryResponse(self.orderTable, results)
class CatalogAddDialog(QtGui.QDialog, uic.loadUiType("App/catalog_add.ui")[0]):
    """Modal dialog collecting name/description/price for a new catalog item."""

    def __init__(self, parent=None):
        QtGui.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.catalogDialogAccept.clicked.connect(self.handleInput)
        self.show()

    def handleInput(self):
        # Cache field texts on self, then accept only when all are non-empty.
        self.itemName = self.itemNameInput.text()
        self.itemDesc = self.itemDescInput.text()
        self.itemPrice = self.itemPriceInput.text()
        if self.itemName and self.itemDesc and self.itemPrice:
            self.accept()
        else:
            QtGui.QMessageBox.warning(self, 'Error', 'Input all fields (correctly)')

    def validateInput(self):
        # NOTE(review): never called, and returns None when both checks pass -- confirm intent.
        if not str(self.itemName).isdigit(): return False
        if not str(self.itemDesc).isalpha(): return False

    def getTextFields(self):
        """Return (name, description, price) as plain strings."""
        return str(self.itemName), str(self.itemDesc), str(self.itemPrice)
class InventoryAddDialog(QtGui.QDialog, uic.loadUiType("App/inventory_add.ui")[0]):
    """Modal dialog collecting item id and quantity for a new inventory row."""

    def __init__(self, parent=None):
        QtGui.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.catalogDialogAccept.clicked.connect(self.handleInput)
        self.show()

    def handleInput(self):
        # NOTE(review): rebinds the widget attributes to their text, clobbering
        # the widget references -- works only for a single accept attempt.
        self.inventoryItemField = self.inventoryItemField.text()
        self.quantityField = self.quantityField.text()
        if self.inventoryItemField and self.quantityField:
            self.accept()
        else:
            QtGui.QMessageBox.warning(self, 'Error', 'Input all fields (correctly)')

    def getTextFields(self):
        """Return (item id, quantity) as plain strings."""
        return str(self.inventoryItemField), str(self.quantityField)
class UserAddDialog(QtGui.QDialog, uic.loadUiType("App/inventory_add.ui")[0]):
    """Dialog for adding a user.
    NOTE(review): loads inventory_add.ui and duplicates InventoryAddDialog
    verbatim -- presumably an unfinished copy-paste; confirm."""

    def __init__(self, parent=None):
        QtGui.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.catalogDialogAccept.clicked.connect(self.handleInput)
        self.show()

    def handleInput(self):
        # Same widget-clobbering pattern as InventoryAddDialog.
        self.inventoryItemField = self.inventoryItemField.text()
        self.quantityField = self.quantityField.text()
        if self.inventoryItemField and self.quantityField:
            self.accept()
        else:
            QtGui.QMessageBox.warning(self, 'Error', 'Input all fields (correctly)')

    def getTextFields(self):
        return str(self.inventoryItemField), str(self.quantityField)
class SchoolAddDialog(QtGui.QDialog, uic.loadUiType("App/school_add.ui")[0]):
    """Modal dialog collecting name and address for a new school."""

    def __init__(self, parent=None):
        QtGui.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.schoolDialogAccept.clicked.connect(self.handleInput)
        self.show()

    def handleInput(self):
        # Cache field texts, then accept only when both are non-empty.
        self.schoolName = self.schoolNameField.text()
        self.schoolAddress = self.schoolAddressField.text()
        if self.schoolName and self.schoolAddress:
            self.accept()
        else:
            QtGui.QMessageBox.warning(self, 'Error', 'Input all fields (correctly)')

    def getTextFields(self):
        """Return (name, address) as plain strings."""
        return str(self.schoolName), str(self.schoolAddress)
class TableAddDialog(QtGui.QDialog):
    """Generic (apparently unfinished) add dialog parameterised by table name."""

    def __init__(self, tableName, parent=None):
        QtGui.QDialog.__init__(self, parent)
        self.tableName = tableName
        self.show()

    def handleInput(self):
        # NOTE(review): references self.database, which this class never sets -- confirm.
        headers = self.database.getColumnHeaders(self.tableName)
        for i, header in enumerate(headers):
            pass
| {
"repo_name": "lfgcodeLinc/JonsTeam",
"path": "Client/App/gui.py",
"copies": "1",
"size": "13436",
"license": "mit",
"hash": -3881752539859926000,
"line_mean": 39.5921450151,
"line_max": 93,
"alpha_frac": 0.653617148,
"autogenerated": false,
"ratio": 4.1139007960808325,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5267517944080833,
"avg_score": null,
"num_lines": null
} |
__author__ = 'david'
import telnetlib
import socket
from time import sleep
class PioneerAvClientException(Exception):
    """Raised for connection or protocol errors while talking to the AV unit."""
    pass
class VSX528Telnet(object):
    """Telnet client to Pioneer VSX-528 AV."""

    # Input-selector names -> the two-digit codes the AV protocol expects.
    INPUTS = { "CD" : "01",
               "TUNER" : "02",
               "DVD" : "04",
               "TV" : "05",
               "SATCBL" : "06",
               "VIDEO" : "10",
               "DVR/BDR" : "15",
               "IPOD/USB" : "17",
               "BD" : "25",
               "ADAPTER" : "33",
               "NETRADIO" : "38",
               "M.SERVER" : "44",
               "FAVORITE" : "45",
               "GAME" : "49" }

    def __init__(self, ip, port=8102, timeout=10):
        # NOTE(review): `timeout` is accepted but not passed to Telnet() -- confirm.
        try:
            self.tn = telnetlib.Telnet(ip, port)
        except socket.timeout:
            raise PioneerAvClientException("Error connecting to device")

    def __sendcmd__(self, cmd):
        """Sends single command to AV and returns the stripped response."""
        command = cmd + '\r\n'
        self.tn.read_eager()  # Cleanup any pending output.
        self.tn.write(command)
        sleep(0.1)  # Cool-down time (taken from github/PioneerRebel)
        return self.tn.read_eager().replace('\r\n', '');

    def setVolUp(self):
        """Send request to increment volume by 1 unit."""
        self.__sendcmd__("VU")

    def setVolDown(self):
        """Send request to decrease volume by 1 unit."""
        self.__sendcmd__("VD")

    def isOn(self):
        """Returns true if device is on (protocol answers PWR0 when powered)."""
        status = self.__sendcmd__("?P")
        if status == "PWR0":
            return True
        else:
            return False

    def switchOn(self):
        """Turn on device."""
        self.__sendcmd__("PO")
        sleep(5)  # Wait before allowing any other command.

    def switchOff(self):
        """Turn off device."""
        self.__sendcmd__("PF")
        sleep(5)  # Wait before allowing any other command.

    def mute(self):
        """Mute sound."""
        self.__sendcmd__("MO")

    def getVol(self):
        """Returns device volume in device scale 0-80."""
        vol_string = self.__sendcmd__("?V")
        vol_sub = vol_string[3:]
        return ( int(vol_sub) - 1 ) / 2

    def getVolPer(self):
        """Returns device volume in 0-100 scale."""
        vol = self.getVol( )
        vol_dec = float(vol) + 1.25
        return vol_dec

    def setInput(self, input_selector):
        """Send request to change input selector (code from INPUTS)."""
        requested_input = input_selector+"FN"
        self.__sendcmd__(requested_input)

    def getInput(self):
        """Returns current input selector name, reverse-mapped via INPUTS."""
        current_input_raw = self.__sendcmd__("?F")
        invd = { v:k for k,v in self.INPUTS.items() }
        try:
            return invd[current_input_raw[2:4]]
        except KeyError:
            raise PioneerAvClientException("Unknown input found " + current_input_raw)
def close(self):
self.tn.close() | {
"repo_name": "encinas/pioneeravclient",
"path": "pioneeravclient/clients.py",
"copies": "1",
"size": "3024",
"license": "bsd-3-clause",
"hash": 776796296398712000,
"line_mean": 28.0865384615,
"line_max": 86,
"alpha_frac": 0.4990079365,
"autogenerated": false,
"ratio": 3.7333333333333334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47323412698333334,
"avg_score": null,
"num_lines": null
} |
__author__ = 'David'
class FacebookUser:
    """Value object for a single Facebook user record from the Graph API."""

    # Class-level defaults; instance attributes of the same name shadow them.
    facebookUserId = 0
    userName = ''
    name = ''

    @classmethod
    def ini(self, facebookuserid: int, firstname: str, gender: str, lastname: str, link: str, locale: str,
            name: str, username: str):
        # NOTE(review): classmethod whose first parameter is named `self` (it is
        # really the class); these assignments mutate CLASS attributes shared by
        # all instances -- confirm intent.
        self.facebookUserId = facebookuserid
        self.firstName = firstname
        self.gender = gender
        self.lastName = lastname
        self.link = link
        self.locale = locale
        self.name = name
        self.userName = username

    def __init__(self, mp):
        # mp: a dict-like Graph API record; presumably keys per the API schema.
        try:
            self.facebookUserId = mp['id']
            self.firstName = mp['first_name']
            self.gender = mp['gender']
            self.lastName = mp['last_name']
            self.link = mp['link']
            self.locale = mp['locale']
            self.name = mp['name']
            self.userName = mp['username']
        except KeyError:
            # NOTE(review): a partial record leaves earlier attributes set and
            # later ones missing; __repr__ will then raise AttributeError.
            print('This User register won\'t be stored.' + str(mp))

    def __repr__(self):
        return '(' + str(
            self.facebookUserId) + ', ' + self.firstName + ', ' + self.gender + ', ' + self.lastName + ', ' + \
               self.link + ', ' + self.locale + ', ' + self.name + ', ' + self.userName + ')'
| {
"repo_name": "idcodeoverflow/SocialNetworkAnalyzer",
"path": "EntitiesLayout/FacebookUser.py",
"copies": "1",
"size": "1240",
"license": "mit",
"hash": 4908739451044219000,
"line_mean": 29.243902439,
"line_max": 111,
"alpha_frac": 0.5177419355,
"autogenerated": false,
"ratio": 3.8271604938271606,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4844902429327161,
"avg_score": null,
"num_lines": null
} |
__author__ = 'David'
import psycopg2
class PGDatabase(object):
    """Thin wrapper around a psycopg2 connection to the hosted Postgres DB."""

    def __init__(self):
        # Connect to a database
        # NOTE(review): credentials are hard-coded in source; move to config/env.
        url = "postgres://vqesjlxdyoxqvq:HR5OD_Svzd48Nwzu6FN4-VTZd6@ec2-54-243-245-159.compute-1.amazonaws.com:5432/dabosh8r2vtap1"
        self.connection = psycopg2.connect(url)
        # Open a cursor to perform database operations
        self.cursor = self.connection.cursor()

    def __del__(self):
        # Close communications with database
        self.cursor.close()
        self.connection.close()

    def hasRecord(self, table, column, value):
        # NOTE(review): SQL built by string formatting -- injectable if any
        # argument comes from user input; prefer parameterized queries.
        cmd = "SELECT count(*) FROM {0} where {1}={2}".format(table, column, value)
        self.cursor.execute(cmd)
        return self.fetchAllResults()

    def addItemToCatalog(self, name, desc, price):
        """Insert a new catalog item (uncommitted until commitChanges)."""
        self.cursor.execute("INSERT INTO items (name, description, price) VALUES (%s, %s, %s)",
                            (name, desc, price))

    def addItemToInventory(self, item, quantity):
        """Insert a new inventory row referencing an existing item id."""
        self.cursor.execute("INSERT INTO inventory (item_id, quantity) VALUES (%s, %s)",
                            (item, quantity))

    def addSchool(self, name, address):
        """Insert a new school row."""
        self.cursor.execute("INSERT INTO schools (name, address) VALUES (%s, %s)",
                            (name, address))

    def commitChanges(self):
        self.connection.commit()

    def rollbackChanges(self):
        self.connection.rollback()

    def removeItem(self, table, id):
        # NOTE(review): string-formatted SQL (see hasRecord); id is int-cast by
        # callers, which limits but does not remove the risk.
        cmd = "DELETE FROM {0} WHERE id={1};".format(table, id)
        try:
            self.cursor.execute(cmd)
        except Exception, e:
            print e

    def updateRecord(self, table, ID, field, value):
        # self.cursor.execute("SELECT data_type FROM information_schema.columns WHERE table_name = '{0}';".format(table))
        # Quote purely-alphabetic values so Postgres treats them as strings.
        if type(value) is str:
            if value.isalpha():
                value = "'{0}'".format(value)
        cmd = "UPDATE {0} SET {1}={2} WHERE id={3}".format(table, field, value, ID)
        self.cursor.execute(cmd)

    def fetchOneResult(self):
        return self.cursor.fetchone()

    def fetchAllResults(self):
        return self.cursor.fetchall()

    def getColumnHeaders(self, tableName):
        """Return the column names of tableName as a list of 1-tuples."""
        self.cursor.execute("""SELECT column_name
                            FROM information_schema.columns
                            WHERE table_name = (%s);""", [tableName])
        return self.fetchAllResults()

    def getInventoryWithNames(self):
        """Inventory rows joined with the item name from the items table."""
        self.cursor.execute("""
        SELECT inventory.id, inventory.item_id, inventory.quantity, items.name
        FROM inventory JOIN items ON inventory.item_id = items.id""")
        return self.fetchAllResults()

    def filterOrdersByUser(self, user):
        self.cursor.execute("SELECT * FROM orders WHERE user_id = %s", (user,))
        return self.fetchAllResults()

    def filterOrdersBySchool(self, school):
        # NOTE(review): queries the users table, not orders -- confirm naming.
        self.cursor.execute("SELECT * FROM users WHERE school_id = %s", (school,))
        return self.fetchAllResults()

    def filterOrdersByCompleted(self):
        """Orders whose completed flag is false."""
        self.cursor.execute("SELECT * FROM orders WHERE completed = %s", (False,))
        return self.fetchAllResults()

    def getAllRows(self, tableName):
        # NOTE(review): table name interpolated into SQL; callers pass literals only.
        self.cursor.execute("SELECT * FROM {0};".format(tableName))
        return self.fetchAllResults()
| {
"repo_name": "lfgcodeLinc/JonsTeam",
"path": "Client/App/pgdb.py",
"copies": "1",
"size": "3246",
"license": "mit",
"hash": 1154065018798875400,
"line_mean": 34.2826086957,
"line_max": 131,
"alpha_frac": 0.6247689464,
"autogenerated": false,
"ratio": 3.800936768149883,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9857987885407353,
"avg_score": 0.013543565828505787,
"num_lines": 92
} |
__author__ = 'davidnovogrodsky_wrk'
# making a port scanner
__author__ = 'davidnovogrodsky_wrk'
import socket
import time
import threading
from queue import Queue
# the print command is not thread safe
# to prevent collisions use a lock
print_lock = threading.Lock()

# Host whose ports will be scanned (module-wide setting).
target = 'pythonprogramming.net'
# define the port scanner function
def portscan(port):
# making a TCP connection
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# try to make a connection
# if there is a connection, print out port message
try:
con = s.connect((target,port))
with print_lock:
print('port ', port, 'is open')
con.close()
except:
with print_lock:
print('port ', port, 'is closed')
# define the threader that runs the port scan
# define the threader that runs the port scan
def threader():
    """Worker loop: keep pulling port numbers off the shared queue and scan each."""
    while True:
        port = q.get()
        portscan(port)
        q.task_done()
# Shared work queue of port numbers to scan.
q = Queue()

# number of threads
# each thread contains a threader
for x in range(30):
    t = threading.Thread(target=threader)
    # make sure the work dies when thread ends
    t.daemon = True
    t.start()

# each worker is sent against a port
# this defines the number of ports to scan
for worker in range(1,101):
    q.put(worker)

# Block until every queued port has been scanned.
q.join()
| {
"repo_name": "DavidNovo/ExplorationsWithPython",
"path": "threadedPortScanner.py",
"copies": "1",
"size": "1253",
"license": "mit",
"hash": 4935628672741614000,
"line_mean": 22.2037037037,
"line_max": 57,
"alpha_frac": 0.6568236233,
"autogenerated": false,
"ratio": 3.54957507082153,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9586905918378754,
"avg_score": 0.02389855514855515,
"num_lines": 54
} |
__author__ = 'David'
__version__ = 2
import os
import redis
import base64
import cPickle
from ifind.seeker.trec_qrel_handler import TrecQrelHandler
#
# Revised datahandler classes -- considering code refactoring in September 2017.
# Author: David Maxwell
# Date: 2017-09-24
#
def get_data_handler(filename=None, host=None, port=None, key_prefix=None):
    """
    Factory function that returns an instance of a data handler class.

    With only a filename, a FileDataHandler is returned; when a host is also
    given, a RedisDataHandler is built (port and key_prefix then required).
    Raises ValueError on an invalid combination of arguments.
    """
    if filename is None:
        raise ValueError("You need to supply a filename for a data handler to work.")

    if host is None:
        # No cache details supplied -- plain file-backed handler.
        return FileDataHandler(filename=filename)

    if port is None or key_prefix is None:
        raise ValueError("Please supply a host, port and key prefix for the redis handler.")

    return RedisDataHandler(filename=filename, host=host, port=port, key_prefix=key_prefix)
class FileDataHandler(object):
    """
    A simple, file-based data handler.
    Assumes the supplied filename points to a TREC QREL formatted file.
    """

    def __init__(self, filename):
        self._trec_qrels = self._initialise_handler(filename)

    def _initialise_handler(self, filename):
        """
        Build the underlying qrels handler. Override in a subclass to supply
        a different backend; must return a TrecQrelHandler.
        """
        return TrecQrelHandler(filename)

    def get_value(self, topic_id, doc_id):
        """
        Return the stored judgement for the given topic/document pair.
        """
        return self._trec_qrels.get_value_if_exists(topic_id, doc_id)

    def get_value_fallback(self, topic_id, doc_id):
        """
        Return the judgement for topic/document, falling back to the special
        topic '0' when none exists; defaults to 0 (non-relevant).
        """
        judgement = self.get_value(topic_id, doc_id)
        if not judgement:
            # Topic '0' holds pre-rolled, topic-independent judgements.
            judgement = self.get_value('0', doc_id)
        return judgement or 0
class RedisDataHandler(FileDataHandler):
    """
    Extends the FileDataHandler to consider a TrecQrelHandler object stored in
    a Redis cache. If it is found that a TrecQrelHandler object does not exist for
    the given key, a new TrecQrelHandler is instantiated using the filename given.
    This handler is then placed in the Redis cache, ready for the next use.
    """
    def __init__(self, filename, host='localhost', port=6379, key_prefix=None):
        self._trec_qrels = self._initialise_handler(filename=filename, host=host, port=port, key_prefix=key_prefix)

    def _initialise_handler(self, filename, host, port, key_prefix):
        """
        Instantiates the handler if it is not in the cache, or loads from the cache if it is.
        """
        key = os.path.split(filename)[-1]  # Is there a better way to construct a unique key?
        # Perhaps take the hash *from the file contents*.
        # At present, the filename seems sufficient.
        if key_prefix is None:
            raise ValueError("A key prefix (string) must be specified for the RedisDataHandler.")
        key = '{key_prefix}::{hashed_key}'.format(key_prefix=key_prefix, hashed_key=hash(key))
        cache = redis.StrictRedis(host=host, port=port, db=0)
        if cache.get(key):
            dumped = cache.get(key)
            # NOTE(review): unpickling cache contents executes arbitrary code;
            # only safe when the redis instance is fully trusted.
            return cPickle.loads(dumped)
        # If we get here, the TrecQrelsHandler does not exist in the cache; create it, dump it.
        handler = super(RedisDataHandler, self)._initialise_handler(filename)
        dumped = cPickle.dumps(handler)
        cache.set(key, dumped)
return handler | {
"repo_name": "leifos/simiir",
"path": "simiir/utils/data_handlers.py",
"copies": "1",
"size": "4520",
"license": "mit",
"hash": -1644649796903854800,
"line_mean": 36.9915966387,
"line_max": 115,
"alpha_frac": 0.635619469,
"autogenerated": false,
"ratio": 4.24015009380863,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02932437928783765,
"num_lines": 119
} |
__author__ = 'David Oreper'
class PeopleDataKeys:
    """Column-name constants used when reading the people spreadsheet.
    Subclasses override individual names / sort orders per site."""

    def __init__(self):
        pass

    MANAGER = "Manager"
    NAME = "HR Name"
    NICK_NAME = "Nickname"
    LEVEL = "Level"
    TITLE = "Title"
    FUNCTION = "Function"
    PROJECT = "Project"
    #PROJECT = "Cost Center"
    FEATURE_TEAM = "Feature Team"
    TYPE = "Type"
    REQ = "Requisition"
    CONSULTANT = "Consultant"
    CONTRACTOR = "Contractor"
    EXPAT_TYPE = "Expat"
    VENDOR_TYPE = "Vendor"
    INTERN_TYPE = "Intern"
    LOCATION = "Location"
    START_DATE = "Start Date"
    COST_CENTER = "Cost Center"
    # Function names treated as cross-functional (compared lower-case).
    CROSS_FUNCTIONS = ["admin", "admin operations", "devops","inf", "infrastructure", "cross functional", "customer success", "technology",]
    CROSS_FUNCT_TEAM = "cross"
    FLOORS = {}
    TEAM_MODEL = {}
    PRODUCT_SORT_ORDER = []
    FLOOR_SORT_ORDER = []
class PeopleDataKeysBellevue(PeopleDataKeys):
    """Bellevue site: currently no overrides beyond the defaults."""

    def __init__(self):
        PeopleDataKeys.__init__(self)
    # CROSS_FUNCTIONS = ["technology", "admin", "inf", "infrastructure", "cross functional"]
class PeopleDataKeysSantaClara(PeopleDataKeys):
    """Santa Clara site: overrides the per-product team model text."""

    def __init__(self):
        PeopleDataKeys.__init__(self)

    TEAM_MODEL = {
        "UCP" : "1 Tracks @ (1 PO, 1 TA, 4 Dev, 1 QA, 2 Char, 2 Auto)",
        "HID" : "2 Tracks @ (1 PO, 5 Dev, 2 QA, 2 Auto, 1 UX)",
        "HVS" : "Q1:20; Q2:25; Q3:27; Q4:32 -- 1 Tracks @ (1 PO, 5 Dev, 1 QA, 1 Auto)",
        "Evidence Management" : "1 Tracks @ (1 PO, 4 Dev, 1 QA, 1 Auto)",
        "HCmD" : "1 Tracks @ (1 Head Coach, 2 PO, 2 Dev, 1 QA, 1 UX)",
    }
class PeopleDataKeysSIBU(PeopleDataKeys):
    """SIBU site: team-model text plus product sort order.
    NOTE(review): this class is redefined later in the file; that second
    definition shadows this one for any code importing the name afterwards."""

    def __init__(self):
        PeopleDataKeys.__init__(self)

    TEAM_MODEL = {
        "HVS" : "[Forecast: Q1:20; Q2:26; Q3:29; Q4:34] -- 1 Tracks @ (1 PO, 5 Dev, 1 QA, 1 Auto)",
        "HVS EM" : "2 Tracks @ (1 PO, 4 Dev, 1 QA, 1 Char, 1 Auto)",
        "Lumada - System" : "[Forecast: Q1:7; Q2:6; Q3:43; Q4:110]",
        "Lumada - Studio" : "[Forecast: Q1:7; Q2:10; Q3:43; Q4:110]",
        "City Data Exchange" : "[Forecast: Q1:6; Q2:19; Q3:6; Q4:6]",
        "Predictive Maintenance" : "[Forecast: Q1:5; Q2:17; Q3:22; Q4:27]",
        "Optimized Factory" : "[Forecast: Q1:1; Q2:6; Q3:13; Q4:15]",
    }
    # Lower-cased product names in display order.
    PRODUCT_SORT_ORDER = ["hvs", "hvs em", "vmp", "hvp", "smart city technology", "technology", "tactical integration",
                          "tactical integrations", "lumada - system", "sc iiot", "set", "bel iiot", "lumada platform", "pdm", "predictive maintenance",
                          "lumada - studio", "lumada - microservices", "optimized factory", "opf", "city data exchange",
                          "cde", "denver", "lumada - edge", "lumada - ai", "lumada - analytics", "lumada - data science" "lumada - di", "lumada - hci", "hci", "lumada - foundry", "foundry", "lumada - machine intelligence", "lumada", "cross", "lumada cross", "global"]
    # NOTE(review): '"lumada - data science" "lumada - di"' above is implicit
    # string concatenation (a likely missing comma) -- confirm before fixing.
class PeopleDataKeysWaltham(PeopleDataKeysSIBU):
    """Waltham site.
    NOTE(review): subclasses PeopleDataKeysSIBU but calls
    PeopleDataKeys.__init__ directly, skipping the SIBU initialiser -- confirm."""

    def __init__(self):
        PeopleDataKeys.__init__(self)

    FUNCTION = "Function"
    TEAM_MODEL = {
        "Aspen" : "4 Tracks @ (1 PO, 3 Dev, 1 QA, 1 Char, 1 Auto)",
        "HCP-Rhino" : "4 Tracks @ (1 PO, 4 Dev, 2 QA, 1 Char, 2 Auto)",
        "HCP-India" : "1 Track @ (1 PO, 3 Dev, 1 QA, 1 Auto)",
        # "HCP (Rhino)" : "1 Track @ (1 PO, 4 Dev, 2 QA, 2 Char, 2 Auto)",
        "HCP-AW" : "4 Tracks @ (1 PO, 4 Dev, 2 QA, 1 Char, 2 Auto)",
    }
    # names should be lower case here
    PRODUCT_SORT_ORDER = ["aspen", "ensemble", "hcp-rhino", "hcp-india", "hcp-aw", "aw-japan","hpp", "hpp-india", "hdid-uk", "hdid-waltham", "hdid-germany", "hdid-pune", "future funding"]
    FLOOR_SORT_ORDER = ["- ensemble", "- content", "- mobility", "- hpp" ]
class PeopleDataKeysSIBU(PeopleDataKeys):
    """SIBU site (second definition).
    NOTE(review): duplicate of the earlier PeopleDataKeysSIBU with a slightly
    shorter PRODUCT_SORT_ORDER; being defined later, it SHADOWS the first one
    for all subsequent lookups of the name -- confirm which list is intended."""

    def __init__(self):
        PeopleDataKeys.__init__(self)

    TEAM_MODEL = {
        "HVS" : "[Forecast: Q1:20; Q2:26; Q3:29; Q4:34] -- 1 Tracks @ (1 PO, 5 Dev, 1 QA, 1 Auto)",
        "HVS EM" : "2 Tracks @ (1 PO, 4 Dev, 1 QA, 1 Char, 1 Auto)",
        "Lumada - System" : "[Forecast: Q1:7; Q2:6; Q3:43; Q4:110]",
        "Lumada - Studio" : "[Forecast: Q1:7; Q2:10; Q3:43; Q4:110]",
        "City Data Exchange" : "[Forecast: Q1:6; Q2:19; Q3:6; Q4:6]",
        "Predictive Maintenance" : "[Forecast: Q1:5; Q2:17; Q3:22; Q4:27]",
        "Optimized Factory" : "[Forecast: Q1:1; Q2:6; Q3:13; Q4:15]",
    }
    PRODUCT_SORT_ORDER = ["hvs", "hvs em", "vmp", "hvp", "smart city technology", "technology", "tactical integration",
                          "tactical integrations", "lumada - system", "sc iiot", "bel iiot", "lumada platform", "pdm", "predictive maintenance",
                          "lumada - studio", "lumada - microservices", "optimized factory", "opf", "city data exchange",
                          "cde", "denver", "lumada - ai", "lumada - analytics", "lumada - di", "lumada - hci", "hci", "lumada - machine intelligence", "lumada", "cross", "lumada cross", "global"]
class PeopleDataKeysFinance(PeopleDataKeys):
    """Column-name constants matching the finance headcount export."""

    def __init__(self):
        PeopleDataKeys.__init__(self)

    # Column headers as they appear in the finance spreadsheet.
    MANAGER = "Manager Name"
    NAME = "Last Name"
    FIRST_NAME = "First Name"
    LAST_NAME = "Last Name"
    NICK_NAME = "Preferred Name"
    TITLE = "Internal Job Title"
    PROJECT = "Project"
    TYPE = "Person Type"
    REQ = "Requisition Number"
    # Values expected in the "Person Type" column.
    CONSULTANT = "Consultant"
    CONTRACTOR = "Contractor"
    EXPAT_TYPE = "Expat"
    LOCATION = "Location Code"
    START_DATE = "Start Date"
    COST_CENTER = "GL"
| {
"repo_name": "Hitachi-Data-Systems/org-chart-builder",
"path": "orgchart_keys.py",
"copies": "1",
"size": "5505",
"license": "apache-2.0",
"hash": 2294652481058351600,
"line_mean": 43.3951612903,
"line_max": 267,
"alpha_frac": 0.561489555,
"autogenerated": false,
"ratio": 2.8332475553268144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.886631514247388,
"avg_score": 0.005684393570587057,
"num_lines": 124
} |
__author__ = 'David Oreper'
class FilterCriteria:
    """Base class for person filters.

    The base criterion is a match-all: subclasses override ``matches`` with
    a real predicate over a person object.
    """

    def __init__(self,):
        pass

    def matches(self, aPerson):
        """Accept every person; overridden by concrete criteria."""
        return True
class KeyMatchesCriteria(FilterCriteria):
    """Base for filters that compare one person attribute to an expected value.

    The comparison is case-insensitive. A falsy expected value (None, "")
    is normalized to the empty string.
    """

    def __init__(self, expectedValue):
        FilterCriteria.__init__(self)
        # Normalize None to "" so _matches never dereferences None.
        self.expectedValue = expectedValue or ""

    def _matches(self, actualValue):
        """Case-insensitively compare *actualValue* with the expected value.

        Robustness fix: a None actual value no longer raises
        AttributeError -- it compares as the empty string, mirroring the
        normalization already applied to expectedValue in __init__.
        """
        return (actualValue or "").lower() == self.expectedValue.lower()
class ProductCriteria(KeyMatchesCriteria):
    """Filter matching people by product assignment."""

    def matches(self, aPerson):
        # Case-insensitive comparison inherited from KeyMatchesCriteria.
        return self._matches(aPerson.getProduct())
class FunctionalGroupCriteria(KeyMatchesCriteria):
    """Filter matching people by functional group."""

    def matches(self, aPerson):
        # Case-insensitive comparison inherited from KeyMatchesCriteria.
        return self._matches(aPerson.getFunction())
class FeatureTeamCriteria(KeyMatchesCriteria):
    """Filter matching people by feature team."""

    def matches(self, aPerson):
        # Case-insensitive comparison inherited from KeyMatchesCriteria.
        return self._matches(aPerson.getFeatureTeam())
class LocationCriteria(KeyMatchesCriteria):
    """Filter matching people by office location."""

    def matches(self, aPerson):
        # Case-insensitive comparison inherited from KeyMatchesCriteria.
        return self._matches(aPerson.getLocation())
class CostCenterCriteria(KeyMatchesCriteria):
    """Filter matching people by cost center."""

    def matches(self, aPerson):
        # Case-insensitive comparison inherited from KeyMatchesCriteria.
        return self._matches(aPerson.getCostCenter())
class ManagerCriteria(FilterCriteria):
    """Filter matching people who report to a given manager.

    A falsy ``manager`` makes the criterion match people with *no* manager
    recorded at all (see the final return).
    """

    def __init__(self, manager):
        FilterCriteria.__init__(self)
        # `manager` is a person-like object (or None); its several name
        # accessors are tried in turn by matches().
        self.manager = manager

    def matches(self, aPerson):
        personManager = aPerson.getManagerFullName()
        # Do explicit checks to make sure the manager field is populated before evaluating to avoid case
        # where person has no manager set and falsely matches because manager we're checking has one of these
        # fields empty
        if self.manager:
            if self.manager.getFullName() and (personManager == self.manager.getFullName()):
                return True
            if self.manager.getRawName() and (personManager == self.manager.getRawName()):
                return True
            if self.manager.getNormalizedRawName() and (personManager == self.manager.getNormalizedRawName()):
                return True
            if self.manager.getRawNickName():
                # NOTE(review): the guard tests getRawNickName() but compares
                # against getPreferredName() -- confirm this asymmetry is
                # intentional.
                if personManager == self.manager.getPreferredName():
                    return True
            # None of the manager's known name forms matched.
            return False
        # No manager to compare against: match only people with an empty
        # manager field.
        return not personManager
class ManagerEmptyCriteria(FilterCriteria):
    """Filter matching people whose manager field is blank or a 'TBD' placeholder."""

    def __init__(self):
        FilterCriteria.__init__(self)

    def matches(self, aPerson):
        """True when the manager name normalizes to '' or 'tbd'."""
        normalized = aPerson.getManagerFullName().lower().strip()
        return normalized in ("", "tbd")
class IsInternCriteria(FilterCriteria):
    """Filter on a person's intern flag."""

    def __init__(self, isIntern):
        FilterCriteria.__init__(self)
        # Expected value of the person's intern flag.
        self.isIntern = isIntern

    def matches(self, aPerson):
        """True when the person's intern status equals the expected one."""
        return self.isIntern == aPerson.isIntern()
class IsExpatCriteria(FilterCriteria):
    """Filter on a person's expat flag."""

    def __init__(self, isExpat):
        FilterCriteria.__init__(self)
        # Expected value of the person's expat flag.
        self.isExpat = isExpat

    def matches(self, aPerson):
        """True when the person's expat status equals the expected one."""
        return self.isExpat == aPerson.isExpat()
class IsTBHCriteria(FilterCriteria):
    """Filter on a person's to-be-hired flag."""

    def __init__(self, isTBH):
        FilterCriteria.__init__(self)
        # Expected value of the person's TBH flag.
        self.isTBH = isTBH

    def matches(self, aPerson):
        """True when the person's TBH status equals the expected one."""
        return self.isTBH == aPerson.isTBH()
class IsProductManagerCriteria(FilterCriteria):
    """Filter on a person's product-manager flag."""

    def __init__(self, isProductManager):
        FilterCriteria.__init__(self)
        # Expected value of the person's product-manager flag.
        self.isProductManager = isProductManager

    def matches(self, aPerson):
        """True when the person's PM status equals the expected one."""
        return self.isProductManager == aPerson.isProductManager()
class IsCrossFuncCriteria(FilterCriteria):
    """Filter on a person's cross-functional flag."""

    def __init__(self, isCrossFunc):
        FilterCriteria.__init__(self)
        # Expected value of the person's cross-functional flag.
        self.isCrossFunc = isCrossFunc

    def matches(self, aPerson):
        """True when the person's cross-func status equals the expected one."""
        return self.isCrossFunc == aPerson.isCrossFunc()
class IsManagerCriteria(FilterCriteria):
    """Filter on a person's manager flag."""

    def __init__(self, isManager):
        FilterCriteria.__init__(self)
        # Expected value of the person's manager flag.
        self.isManager = isManager

    def matches(self, aPerson):
        """True when the person's manager status equals the expected one."""
        return self.isManager == aPerson.isManager()
| {
"repo_name": "Hitachi-Data-Systems/org-chart-builder",
"path": "people_filter_criteria.py",
"copies": "1",
"size": "3899",
"license": "apache-2.0",
"hash": 1552389800296185300,
"line_mean": 29.7007874016,
"line_max": 110,
"alpha_frac": 0.6614516543,
"autogenerated": false,
"ratio": 3.9029029029029028,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010451972513566006,
"num_lines": 127
} |
import unittest
from sunpy.roi.chaincode import Chaincode
import numpy as np
class CCTests(unittest.TestCase):
    """Unit tests for sunpy.roi.chaincode.Chaincode.

    Modernization fix: the deprecated TestCase aliases ``failUnless`` /
    ``failIf`` (deprecated since Python 3.1, removed in Python 3.12) are
    replaced by ``assertTrue`` / ``assertFalse``. Test logic is unchanged.
    """

    def testEnds(self):
        cc = Chaincode([0, 0], "2460")  # Can I test more than one path? How?
        end = [0, 0]
        self.assertTrue(cc.matchend(end))

    def testEndsFalse(self):
        cc = Chaincode([0, 0], "24460")
        end = [0, 0]
        self.assertFalse(cc.matchend(end))

    def testSecondCoordinate(self):
        cc = Chaincode([0, 0], "0023")
        second = [-2, 0]
        self.assertTrue(cc.matchany(second, 2))

    def testSecondCoordinateFails(self):
        cc = Chaincode([1, 0], "0023")
        second = [-2, 0]
        self.assertFalse(cc.matchany(second, 2))

    def testScaleSecond(self):
        cc = Chaincode([0, 0], "0723", xdelta=0.5, ydelta=0.5)
        second = [-1, 0.5]
        self.assertTrue(cc.matchany(second, 2))

    def testScaleEnd(self):
        cc = Chaincode([1.2, 3], "0723", xdelta=2.629, ydelta=2.629)
        end = [-1.429, 0.371]
        self.assertTrue(cc.matchany(end, -1))

    def testnparray(self):
        # Let's test that the shape of the array matchs the expected
        # To do so we need to use np.array, instead of lists.
        cc = Chaincode([0, 0], "2460")
        shape = (2, 5)
        self.assertTrue(cc.coordinates.shape == shape)

    def testBoundingBox(self):  # needs of np.array... I think
        cc = Chaincode([0, 0], "00033344")
        boundingbox = [[-3, 2], [-3, 0]]  # [[x0,x1],[y0,y1]] (like cc)
        self.assertTrue(np.all(cc.BoundingBox() == np.array(boundingbox)))

    def testBoundingBoxFalse(self):
        # NOTE(review): the double negative only asserts that *some* element
        # matches; kept as-is to preserve the original check.
        cc = Chaincode([0, 0], "002")
        boundingbox = [[-1, 0], [-1, 0]]
        self.assertFalse(np.all(cc.BoundingBox() != np.array(boundingbox)))

    def testSubBoundingBoxX(self):
        cc = Chaincode([0, 0], "44464660012075602223")
        self.assertTrue(cc.subBoundingBox(xedge=[0.1, 2]) == [0, 3])

    def testSubBoundingBoxY(self):
        cc = Chaincode([0, 0], "44464660012075602223")
        self.assertTrue(cc.subBoundingBox(yedge=[-1, 0.5]) == [0, 3])
def main():
    """Run this module's test cases via unittest's CLI entry point."""
    unittest.main()


if __name__ == '__main__':
    main()
| {
"repo_name": "mjm159/sunpy",
"path": "sunpy/tests/roi/chaincode_test.py",
"copies": "1",
"size": "2226",
"license": "bsd-2-clause",
"hash": 2508294367186981400,
"line_mean": 30.8,
"line_max": 76,
"alpha_frac": 0.5736747529,
"autogenerated": false,
"ratio": 3.0831024930747923,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41567772459747926,
"avg_score": null,
"num_lines": null
} |
"""app.controllers.forms
Define all form objects used in the application.
"""
# Import required data from the Flask WTForms extension.
from flask.ext.wtf import Form
from flask.ext.wtf.file import FileField, FileAllowed, FileRequired
from wtforms import StringField, SubmitField
from wtforms.validators import Required
class RegisterForm(Form):
    """Define registration form fields."""

    # Visitor's display name; Required() rejects empty submissions.
    username = StringField('Please enter your name.', validators=[Required()])
    submit = SubmitField('Submit')
class UploadForm(Form):
    """Define text document upload form fields."""

    # Whitelisted upload extensions and the message shown on rejection.
    _FILE_FORMAT = ('txt', 'dat')
    _FILE_FORMAT_ERROR = 'Please upload a .txt or .dat file only.'

    # A file is mandatory and must carry one of the whitelisted extensions.
    file_field = FileField('Select a text file to upload.',
                           validators=[FileRequired(),
                                       FileAllowed(_FILE_FORMAT,
                                                   _FILE_FORMAT_ERROR)])
    upload = SubmitField('Upload')
| {
"repo_name": "davidtimmons/text-scalpel-app",
"path": "app/controllers/forms.py",
"copies": "1",
"size": "1081",
"license": "mit",
"hash": -2448655471722583600,
"line_mean": 30.7941176471,
"line_max": 78,
"alpha_frac": 0.6216466235,
"autogenerated": false,
"ratio": 4.6594827586206895,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007352941176470588,
"num_lines": 34
} |
"""app.models.visitor
Sets up the database models associated with an application visitor.
"""
# Import the Flask SQLAlchemy database object associated with this application.
from ..createapp import db
class User(db.Model):
    """Create a user database model to store account state information."""

    # Create SQL table name label.
    __tablename__ = 'users'

    # Create database schema as global variables.
    id = db.Column(db.Integer, primary_key=True)
    # Usernames are unique and indexed for fast lookup at login.
    username = db.Column(db.String(64), unique=True, index=True)

    # Create a reference to the text results using the table and field name.
    # One-to-many: a user owns any number of TextResults rows.
    text_results_id = db.relationship('TextResults', backref='user', lazy='dynamic')

    def __repr__(self):
        """Return the appropriate string for identifying this class."""
        return '<ID: {0!r}, Username: {1!r}>'.format(self.id, self.username)
class TextResults(db.Model):
    """Create a database model to store the results of the text manipulation."""

    # Create SQL table name label.
    __tablename__ = 'text_results'

    # Create database schema as global variables.
    id = db.Column(db.Integer, primary_key=True)
    # Owning user; rows reference users.id (see User.text_results_id).
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    word_count = db.Column(db.Integer)
    unique_words = db.Column(db.Integer)
    top_words = db.Column(db.PickleType)  ## Serializes Python objects.
    random_text = db.Column(db.String(2048))

    def __repr__(self):
        """Return the appropriate string for identifying this class."""
        return '<ID: {0!r}, Word Count: {1!r}>'.format(self.id, self.word_count)
| {
"repo_name": "davidtimmons/text-scalpel-app",
"path": "app/models/visitor.py",
"copies": "1",
"size": "1657",
"license": "mit",
"hash": 8897061969041905000,
"line_mean": 32.8163265306,
"line_max": 84,
"alpha_frac": 0.6813518407,
"autogenerated": false,
"ratio": 3.8267898383371826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.999450103759542,
"avg_score": 0.002728128288352378,
"num_lines": 49
} |
"""app.views.routes
Registers the application site map on the Flask blueprint object and manages app
view functionality presented to the user.
"""
# Standard library.
import os

# Third-party.
from flask import (current_app, flash, redirect, render_template, session,
                   url_for)
from werkzeug import secure_filename

# Application-local.
from ..createapp import db
from ..createblueprint import bp_main
from ..controllers.forms import RegisterForm, UploadForm
from ..controllers.textscalpel import TextScalpel
from ..models.visitor import User, TextResults
@bp_main.route('/', methods=['GET', 'POST'])
def index():
    """Application root page: registers/logs in a visitor by username."""
    # Check whether this page was generated based on a form submission.
    form = RegisterForm()
    if form.validate_on_submit():
        # Get username entered into the form field then add to a Flask session.
        # Flask session variables are implemented with cookies.
        current_username = form.username.data
        session['name'] = current_username

        # Check against the username field in the User database model.
        user = User.query.filter_by(username=current_username).first()

        # Log in or add the user to the database.
        if user is None:
            # Associate a User object with this visitor and set the username.
            user = User(username=current_username)

            # Add the user to the database.
            # NOTE(review): no explicit db.session.commit() here -- confirm
            # the app commits elsewhere (e.g. teardown hook).
            db.session.add(user)

        # Redirect to the index page after form submission to prevent a browser
        # message about resubmitting the form data. The Flask function url_for()
        # matches the page by looking at the function name, and the dot prefix
        # is a relative reference to the Flask blueprint object.
        return redirect(url_for('.index'))

    # Render the page and pass user variables into the Jinja2 template.
    # The 'name' argument defaults to False if not found in the dictionary.
    return render_template('index.html',
                           form=form,
                           name=session.get('name', False))
@bp_main.route('/dashboard', methods=['GET', 'POST'])
def dashboard():
    """Features a file upload form and displays text statistics."""
    # Get the active username and text analysis results.
    # Session values double as a per-visitor cache of the last analysis.
    current_username = session.get('name')
    analyzed = session.get('analyzed', False)
    word_count = session.get('word_count', 0)
    unique_words = session.get('unique_words', 0)
    top_words = session.get('top_words', [('None', 0)])
    random_text = session.get('random_text', '')

    # Check whether this page was generated based on a form submission.
    form = UploadForm()
    if form.validate_on_submit():
        # Change the file name a secure name that works with the OS.
        filename = secure_filename(form.file_field.data.filename)

        # Save file to the server, analyze text, then remove file.
        # NOTE(review): the upload path is relative to the process working
        # directory -- confirm '../uploads/' exists at runtime.
        path = os.path.abspath(os.path.join('../uploads/', filename))
        form.file_field.data.save(path)
        ts = TextScalpel(path)

        # Get the user ID.
        current_username = session.get('name')
        user = User.query.filter_by(username=current_username).first()
        # -1 marks an anonymous (unregistered) visitor.
        user_id = user.id if user else -1

        # Add TextScalpel results to the database if no errors, else alert user.
        if not ts.error:
            text_results = TextResults(user_id=user_id,
                                       word_count=ts.word_count,
                                       unique_words=ts.unique_words,
                                       top_words=ts.top_words,
                                       random_text=ts.random_text)
            db.session.add(text_results)

            # Add analysis results to the session.
            session['analyzed'] = True
            session['word_count'] = ts.word_count
            session['unique_words'] = ts.unique_words
            session['top_words'] = ts.top_words
            session['random_text'] = ts.random_text
        else:
            session['analyzed'] = False
            # flash() queues the error for display on the next render; it
            # must be imported from flask at module scope.
            flash(ts.error)

        # Remove text file from the server and refresh page.
        os.remove(path)
        return redirect(url_for('.dashboard'))

    # Display page contents.
    return render_template('dashboard.html',
                           form=form,
                           name=current_username,
                           analyzed=analyzed,
                           word_count=word_count,
                           unique_words=unique_words,
                           top_words=top_words,
                           random_text=random_text)
@bp_main.route('/logout')
def logout():
    """Logout feature that wipes session state and redirects home."""
    # Set all session values to false.
    # NOTE(review): values are overwritten with False rather than removed;
    # session.clear() would drop the keys entirely -- confirm which is wanted
    # (the templates only truth-test these values, so both behave the same
    # for rendering).
    for key in session.keys():
        session[key] = False

    # Redirect to the homepage.
    return redirect(url_for('.index'))
| {
"repo_name": "davidtimmons/text-scalpel-app",
"path": "app/views/routes.py",
"copies": "1",
"size": "4911",
"license": "mit",
"hash": 989646301621477400,
"line_mean": 35.9248120301,
"line_max": 80,
"alpha_frac": 0.6120952963,
"autogenerated": false,
"ratio": 4.428313796212804,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5540409092512805,
"avg_score": null,
"num_lines": null
} |
__author__ = 'daweim0'
import os
import datasets
import datasets.imdb
import cPickle
import numpy as np
import cv2
from fcn.config import cfg
class lov_synthetic(datasets.imdb):
    """imdb subclass for the synthetic LOV (YCB-object) stereo dataset.

    Python 2 code (print statements, xrange, cPickle). Each entry of the
    image-set file holds a space-separated pair of frame ids (left/right
    view); per-frame files live as <id>_color.png / <id>_depth.png /
    <id>_pose.txt under the dataset directory.
    """

    def __init__(self, image_set, lov_path = None):
        datasets.imdb.__init__(self, 'lov_synthetic_' + image_set)
        self._image_set = image_set
        # Default to <ROOT>/data/lov_synthetic/<image_set> unless an explicit
        # path is supplied.
        self._lov_synthetic_path = self._get_default_path() + "/" + image_set if lov_path is None \
            else lov_path
        self._data_path = os.path.join(self._lov_synthetic_path)
        # Background class plus the 21 YCB object models.
        self._classes = ('__background__', '002_master_chef_can', '003_cracker_box', '004_sugar_box', '005_tomato_soup_can', '006_mustard_bottle', \
                         '007_tuna_fish_can', '008_pudding_box', '009_gelatin_box', '010_potted_meat_can', '011_banana', '019_pitcher_base', \
                         '021_bleach_cleanser', '024_bowl', '025_mug', '035_power_drill', '036_wood_block', '037_scissors', '040_large_marker', \
                         '051_large_clamp', '052_extra_large_clamp', '061_foam_brick')

        # standard balcklist: objects excluded from the roidb -- image-set
        # entries whose first path component is one of these are skipped
        # (see gt_roidb).
        self._blacklist = {'007_tuna_fish_can', '011_banana', '019_pitcher_base', '024_bowl', '025_mug',
                           '036_wood_block', '037_scissors', '051_large_clamp', '052_extra_large_clamp',
                           '061_foam_brick'}

        # One RGB color per class, used to encode/decode label images.
        self._class_colors = [(255, 255, 255), (255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255), \
                              (128, 0, 0), (0, 128, 0), (0, 0, 128), (128, 128, 0), (128, 0, 128), (0, 128, 128), \
                              (64, 0, 0), (0, 64, 0), (0, 0, 64), (64, 64, 0), (64, 0, 64), (0, 64, 64),
                              (192, 0, 0), (0, 192, 0), (0, 0, 192)]

        # Uniform weights by default; compute_class_weights() can rebalance.
        self._class_weights = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
        self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
        self._image_ext = '.png'
        self._image_index = self._load_image_set_index()
        self._roidb_handler = self.gt_roidb
        # NOTE(review): this wraps the directory listing in an extra list
        # (a one-element list of lists) -- confirm os.listdir(...) alone
        # was meant.
        self.background_imgs = [os.listdir("data/backgrounds/")]

        assert os.path.exists(self._lov_synthetic_path), \
                'lov path does not exist: {}'.format(self._lov_synthetic_path)
        assert os.path.exists(self._data_path), \
                'Data path does not exist: {}'.format(self._data_path)

    # image
    def image_path_at(self, i):
        """
        Return the absolute path to image i in the image sequence.
        """
        return self.image_path_from_index(self.image_index[i])

    def image_path_from_index(self, index):
        """
        Construct an image path from the image's "index" identifier.
        """
        image_path = os.path.join(self._data_path, index + '_color' + self._image_ext)
        assert os.path.exists(image_path), \
                'Path does not exist: {}'.format(image_path)
        return image_path

    # depth
    def depth_path_at(self, i):
        """
        Return the absolute path to depth i in the image sequence.
        """
        return self.depth_path_from_index(self.image_index[i])

    def depth_path_from_index(self, index):
        """
        Construct an depth path from the image's "index" identifier.
        """
        depth_path = os.path.join(self._data_path, index + '_depth' + self._image_ext)
        assert os.path.exists(depth_path), \
                'Path does not exist: {}'.format(depth_path)
        return depth_path

    # camera pose
    def pose_path_at(self, i):
        """
        Return the absolute path to metadata i in the image sequence.
        """
        return self.pose_path_from_index(self.image_index[i])

    def pose_path_from_index(self, index):
        """
        Construct an metadata path from the image's "index" identifier.
        """
        metadata_path = os.path.join(self._data_path, index + '_pose.txt')
        assert os.path.exists(metadata_path), \
                'Path does not exist: {}'.format(metadata_path)
        return metadata_path

    def _load_image_set_index(self):
        """
        Load the indexes listed in this dataset's image set file.

        Each line of the file is split on spaces, so the returned value is a
        list of [left_id, right_id] pairs (see _load_lov_annotation).
        """
        # cfg.SET_VARIANT, when non-empty, selects a "-<variant>" suffixed
        # list file.
        if cfg.SET_VARIANT == "":
            image_set_file = os.path.join(self._lov_synthetic_path, self._image_set + cfg.TRAIN.IMAGE_LIST_NAME +
                                          cfg.SET_VARIANT + '.txt')
        else:
            image_set_file = os.path.join(self._lov_synthetic_path, self._image_set + cfg.TRAIN.IMAGE_LIST_NAME +
                                          "-" + cfg.SET_VARIANT + '.txt')
        assert os.path.exists(image_set_file), \
                'Path does not exist: {}'.format(image_set_file)

        with open(image_set_file) as f:
            image_index = [x.rstrip('\n').split(" ") for x in f]
        return image_index

    def _get_default_path(self):
        """
        Return the default path where the lov_synthetic data is expected
        to be installed (<ROOT>/data/lov_synthetic).
        """
        return os.path.join(datasets.ROOT_DIR, 'data', 'lov_synthetic')

    def _load_object_extents(self):
        """Load per-class 3D object extents; row 0 (background) stays zero."""
        extent_file = os.path.join(self._lov_synthetic_path, 'extents.txt')
        assert os.path.exists(extent_file), \
                'Path does not exist: {}'.format(extent_file)

        extents = np.zeros((self.num_classes, 3), dtype=np.float32)
        extents[1:, :] = np.loadtxt(extent_file)

        return extents

    def compute_class_weights(self):
        """Rebalance _class_weights by inverse pixel frequency (capped at 10)."""
        print 'computing class weights'
        num_classes = self.num_classes
        count = np.zeros((num_classes,), dtype=np.int64)
        for index in self.image_index:
            # label path
            # NOTE(review): label_path_from_index is not defined in this
            # class -- presumably inherited from datasets.imdb; confirm.
            label_path = self.label_path_from_index(index)
            im = cv2.imread(label_path, cv2.IMREAD_UNCHANGED)
            for i in xrange(num_classes):
                I = np.where(im == i)
                count[i] += len(I[0])

        for i in xrange(num_classes):
            # Weight = background pixel count / class pixel count, capped.
            self._class_weights[i] = min(float(count[0]) / float(count[i]), 10.0)
            print self._classes[i], self._class_weights[i]

    def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.

        This function loads/saves from/to a cache file to speed up future calls.
        """
        # The cache is commented out because it isn't updated when the dataset
        # is updated, which made stale entries persist across regenerations.
        # cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
        # if os.path.exists(cache_file):
        #     with open(cache_file, 'rb') as fid:
        #         roidb = cPickle.load(fid)
        #     print '{} gt roidb loaded from {}'.format(self.name, cache_file)
        #     # print 'class weights: ', roidb[0]['class_weights']
        #     return roidb

        # self.compute_class_weights()

        # Skip entries whose video id (first path component) is blacklisted.
        gt_roidb = [self._load_lov_annotation(index) for index in self.image_index if index[0].split("/")[0] not in self._blacklist]

        # with open(cache_file, 'wb') as fid:
        #     cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
        # print 'wrote gt roidb to {}'.format(cache_file)

        return gt_roidb

    def _load_lov_annotation(self, index):
        """
        Load class name and meta data.

        `index` is a [left_id, right_id] pair; the returned dict collects the
        color/depth/pose file paths for both views plus the video id.
        """
        # image path
        image_path_l = self.image_path_from_index(index[0])
        image_path_r = self.image_path_from_index(index[1])

        # depth path
        depth_path_l = self.depth_path_from_index(index[0])
        depth_path_r = self.depth_path_from_index(index[1])

        # metadata path
        pose_path_l = self.pose_path_from_index(index[0])
        pose_path_r = self.pose_path_from_index(index[1])

        # parse image name
        video_id = index[0].split("/")[0]

        return {'image': image_path_l,
                'image_right': image_path_r,
                'depth': depth_path_l,
                'depth_right': depth_path_r,
                'pose': pose_path_l,
                'pose_right': pose_path_r,
                'video_id': video_id,
                'flipped': False}

    def _process_label_image(self, label_image):
        """
        change label image to label index
        """
        class_colors = self._class_colors
        width = label_image.shape[1]
        height = label_image.shape[0]
        label_index = np.zeros((height, width), dtype=np.float32)

        # label image is in BGR order
        # Pack each pixel's color into a single integer for fast matching.
        index = label_image[:,:,2] + 256*label_image[:,:,1] + 256*256*label_image[:,:,0]
        for i in xrange(len(class_colors)):
            color = class_colors[i]
            ind = color[0] + 256*color[1] + 256*256*color[2]
            I = np.where(index == ind)
            label_index[I] = i

        return label_index

    def labels_to_image(self, im, labels):
        """Render a label-index map back into an RGB image via _class_colors."""
        class_colors = self._class_colors
        height = labels.shape[0]
        width = labels.shape[1]
        image_r = np.zeros((height, width), dtype=np.float32)
        image_g = np.zeros((height, width), dtype=np.float32)
        image_b = np.zeros((height, width), dtype=np.float32)

        for i in xrange(len(class_colors)):
            color = class_colors[i]
            I = np.where(labels == i)
            image_r[I] = color[0]
            image_g[I] = color[1]
            image_b[I] = color[2]

        image = np.stack((image_r, image_g, image_b), axis=-1)

        return image.astype(np.uint8)

    def evaluate_segmentations(self, segmentations, output_dir):
        """Score predicted label maps against ground truth; write reports."""
        print 'evaluating segmentations'
        # compute histogram
        n_cl = self.num_classes
        hist = np.zeros((n_cl, n_cl))

        # make image dir
        image_dir = os.path.join(output_dir, 'images')
        if not os.path.exists(image_dir):
            os.makedirs(image_dir)

        # make matlab result dir
        import scipy.io
        mat_dir = os.path.join(output_dir, 'mat')
        if not os.path.exists(mat_dir):
            os.makedirs(mat_dir)

        # for each image
        for im_ind, index in enumerate(self.image_index):
            # read ground truth labels
            # NOTE(review): label_path_from_index is not defined in this
            # class -- presumably inherited; confirm.
            im = cv2.imread(self.label_path_from_index(index), cv2.IMREAD_UNCHANGED)
            gt_labels = im.astype(np.float32)

            # predicated labels
            sg_labels = segmentations[im_ind]['labels']

            # fast_hist accumulates the confusion matrix (inherited helper).
            hist += self.fast_hist(gt_labels.flatten(), sg_labels.flatten(), n_cl)

            '''
            # label image
            rgba = cv2.imread(self.image_path_from_index(index), cv2.IMREAD_UNCHANGED)
            image = rgba[:,:,:3]
            alpha = rgba[:,:,3]
            I = np.where(alpha == 0)
            image[I[0], I[1], :] = 255
            label_image = self.labels_to_image(image, sg_labels)

            # save image
            filename = os.path.join(image_dir, '%04d.png' % im_ind)
            print filename
            cv2.imwrite(filename, label_image)
            '''

            '''
            # save matlab result
            labels = {'labels': sg_labels}
            filename = os.path.join(mat_dir, '%04d.mat' % im_ind)
            print filename
            scipy.io.savemat(filename, labels)
            #'''

        # overall accuracy
        acc = np.diag(hist).sum() / hist.sum()
        print 'overall accuracy', acc

        # per-class accuracy
        acc = np.diag(hist) / hist.sum(1)
        print 'mean accuracy', np.nanmean(acc)

        # per-class IU
        print 'per-class IU'
        iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
        for i in range(n_cl):
            print '{} {}'.format(self._classes[i], iu[i])
        print 'mean IU', np.nanmean(iu)

        freq = hist.sum(1) / hist.sum()
        print 'fwavacc', (freq[freq > 0] * iu[freq > 0]).sum()

        # Per-class IoU, one value per line.
        filename = os.path.join(output_dir, 'segmentation.txt')
        with open(filename, 'wt') as f:
            for i in range(n_cl):
                f.write('{:f}\n'.format(iu[i]))

        # Raw confusion matrix, space-separated rows.
        filename = os.path.join(output_dir, 'confusion_matrix.txt')
        with open(filename, 'wt') as f:
            for i in range(n_cl):
                for j in range(n_cl):
                    f.write('{:f} '.format(hist[i, j]))
                f.write('\n')
if __name__ == '__main__':
    # Interactive smoke test: build the roidb and drop into IPython.
    # NOTE(review): this instantiates datasets.lov('train'), not
    # lov_synthetic -- confirm which dataset class was meant here.
    d = datasets.lov('train')
    res = d.roidb
    from IPython import embed; embed()
import os

# Build an image-index file listing every frame found in the sub-directories
# of the current working directory.
# NOTE(review): this runs at import time (it sits below the __main__ guard
# of the dataset module) and appears to emit each folder's frames twice via
# two alternative strategies -- confirm both passes are wanted.
#
# Fixes over the original: the output handle is managed with a context
# manager (the original shadowed the builtin `file` and never closed it),
# integer division (//) replaces `/` so range() receives an int on
# Python 3 as well, and the blanket `except: pass` is narrowed to OSError
# (raised when `folder` is a plain file, not a directory) so real bugs are
# no longer silently swallowed.
with open('train.txt', 'w+') as index_file:
    # Pass 1: assume each directory holds consecutively numbered triples
    # (color/depth/pose), so frame count = file count // 3.
    for folder in os.listdir('.'):
        try:
            n_files = len(os.listdir(folder)) // 3
            for i in range(n_files):
                index_file.write(folder + "/" + str(i).zfill(6) + "\n")
        except OSError:
            # Not a directory -- skip.
            pass

    # Pass 2: list frames by their actual *_color files instead of assuming
    # consecutive numbering.
    for folder in os.listdir('.'):
        try:
            for cur_file in os.listdir(folder):
                if cur_file.count('color') != 0:
                    index_file.write(folder + "/" + cur_file.split("_")[0] + "\n")
        except OSError:
            # Not a directory -- skip.
            pass
| {
"repo_name": "daweim0/Just-some-image-features",
"path": "lib/datasets/lov_synthetic.py",
"copies": "1",
"size": "12835",
"license": "mit",
"hash": 1894668566564073700,
"line_mean": 35.776504298,
"line_max": 148,
"alpha_frac": 0.5382937281,
"autogenerated": false,
"ratio": 3.4364123159303883,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9451455139370473,
"avg_score": 0.00465018093198314,
"num_lines": 349
} |
__author__ = 'Dawei'

import math

# Project Euler 14 state: longest Collatz chain seen so far and its start.
# `computed` memoizes chain lengths (number -> number of terms down to and
# including 1); the chain of 1 has length 1.
longest = 1
longestStart = 1
computed = {1: 1}


def getCount(i):
    """Return the Collatz chain length of ``i`` (terms counted inclusively).

    Even values are collapsed in one move: the number is divided by the
    largest power of two that divides it and the 3n+1 rule is applied to the
    resulting odd value, with all elementary steps added to the running
    count. Results are memoized in the module-level ``computed`` dict.

    Bug fix: the original returned ``computed[number]`` as soon as a
    memoized value was hit mid-chain, silently discarding the steps already
    accumulated in ``count``; the memoized tail length is now *added* to the
    running count. Integer arithmetic (``//``, ``**``) replaces float
    division and ``math.pow`` so chain values and counts stay exact ints.
    """
    number = i
    count = 0
    while True:
        if number in computed:
            # Reuse the memoized tail instead of recomputing it.
            count += computed[number]
            break
        if number % 2 == 0:
            lognumber = math.log2(number)
            intlognumber = int(lognumber)
            if lognumber == intlognumber:
                # Exact power of two: it halves straight down to 1.
                count += intlognumber + 1
                break
            else:
                # Largest power of two dividing `number`.
                divide = 2 ** intlognumber
                while number % divide != 0:
                    divide //= 2
                # Collapse all halvings plus the single 3n+1 step.
                number = (number // divide) * 3 + 1
                count += int(math.log2(divide)) + 1
        else:
            number = number * 3 + 1
            count += 1
    computed[i] = count
    return count
# Search starting numbers 500000..999999 for the longest Collatz chain.
# NOTE(review): starting at 500000 assumes the winner is in the upper half
# of "under one million" -- confirm.
for i in range(500000, 1000000):
    # Heuristic skip: when i = 3k+1 with k odd, i is reached from the odd
    # predecessor k = (i-1)//3 whose chain is one longer, so i cannot be the
    # unique maximum. NOTE(review): this presumes the predecessor is itself
    # inside the searched range -- confirm.
    if (i-1)%3 == 0 and (i-1)%6!=0:
        continue
    lognumber = math.log2(i)
    intlognumber = int(lognumber)
    if lognumber == intlognumber:
        # Power of two: chain is just repeated halving, log2(i)+1 terms.
        count = lognumber+1
    else:
        count = getCount(i)
    if count > longest:
        longest = count
        longestStart = i
print(longest)
print(longestStart)
| {
"repo_name": "fresky/ProjectEulerSolution",
"path": "014.py",
"copies": "1",
"size": "1148",
"license": "mit",
"hash": -5321454442302309000,
"line_mean": 21.0769230769,
"line_max": 52,
"alpha_frac": 0.5069686411,
"autogenerated": false,
"ratio": 3.6913183279742765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4698286969074277,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Dawei'

# Project Euler 13: first ten digits of the sum of the one hundred 50-digit
# numbers below (one number per line of the triple-quoted block).
bignumber = '''
37107287533902102798797998220837590246510135740250
46376937677490009712648124896970078050417018260538
74324986199524741059474233309513058123726617309629
91942213363574161572522430563301811072406154908250
23067588207539346171171980310421047513778063246676
89261670696623633820136378418383684178734361726757
28112879812849979408065481931592621691275889832738
44274228917432520321923589422876796487670272189318
47451445736001306439091167216856844588711603153276
70386486105843025439939619828917593665686757934951
62176457141856560629502157223196586755079324193331
64906352462741904929101432445813822663347944758178
92575867718337217661963751590579239728245598838407
58203565325359399008402633568948830189458628227828
80181199384826282014278194139940567587151170094390
35398664372827112653829987240784473053190104293586
86515506006295864861532075273371959191420517255829
71693888707715466499115593487603532921714970056938
54370070576826684624621495650076471787294438377604
53282654108756828443191190634694037855217779295145
36123272525000296071075082563815656710885258350721
45876576172410976447339110607218265236877223636045
17423706905851860660448207621209813287860733969412
81142660418086830619328460811191061556940512689692
51934325451728388641918047049293215058642563049483
62467221648435076201727918039944693004732956340691
15732444386908125794514089057706229429197107928209
55037687525678773091862540744969844508330393682126
18336384825330154686196124348767681297534375946515
80386287592878490201521685554828717201219257766954
78182833757993103614740356856449095527097864797581
16726320100436897842553539920931837441497806860984
48403098129077791799088218795327364475675590848030
87086987551392711854517078544161852424320693150332
59959406895756536782107074926966537676326235447210
69793950679652694742597709739166693763042633987085
41052684708299085211399427365734116182760315001271
65378607361501080857009149939512557028198746004375
35829035317434717326932123578154982629742552737307
94953759765105305946966067683156574377167401875275
88902802571733229619176668713819931811048770190271
25267680276078003013678680992525463401061632866526
36270218540497705585629946580636237993140746255962
24074486908231174977792365466257246923322810917141
91430288197103288597806669760892938638285025333403
34413065578016127815921815005561868836468420090470
23053081172816430487623791969842487255036638784583
11487696932154902810424020138335124462181441773470
63783299490636259666498587618221225225512486764533
67720186971698544312419572409913959008952310058822
95548255300263520781532296796249481641953868218774
76085327132285723110424803456124867697064507995236
37774242535411291684276865538926205024910326572967
23701913275725675285653248258265463092207058596522
29798860272258331913126375147341994889534765745501
18495701454879288984856827726077713721403798879715
38298203783031473527721580348144513491373226651381
34829543829199918180278916522431027392251122869539
40957953066405232632538044100059654939159879593635
29746152185502371307642255121183693803580388584903
41698116222072977186158236678424689157993532961922
62467957194401269043877107275048102390895523597457
23189706772547915061505504953922979530901129967519
86188088225875314529584099251203829009407770775672
11306739708304724483816533873502340845647058077308
82959174767140363198008187129011875491310547126581
97623331044818386269515456334926366572897563400500
42846280183517070527831839425882145521227251250327
55121603546981200581762165212827652751691296897789
32238195734329339946437501907836945765883352399886
75506164965184775180738168837861091527357929701337
62177842752192623401942399639168044983993173312731
32924185707147349566916674687634660915035914677504
99518671430235219628894890102423325116913619626622
73267460800591547471830798392868535206946944540724
76841822524674417161514036427982273348055556214818
97142617910342598647204516893989422179826088076852
87783646182799346313767754307809363333018982642090
10848802521674670883215120185883543223812876952786
71329612474782464538636993009049310363619763878039
62184073572399794223406235393808339651327408011116
66627891981488087797941876876144230030984490851411
60661826293682836764744779239180335110989069790714
85786944089552990653640447425576083659976645795096
66024396409905389607120198219976047599490197230297
64913982680032973156037120041377903785566085089252
16730939319872750275468906903707539413042652315011
94809377245048795150954100921645863754710598436791
78639167021187492431995700641917969777599028300699
15368713711936614952811305876380278410754449733078
40789923115535562561142322423255033685442488917353
44889911501440648020369068063960672322193204149535
41503128880339536053299340368006977710650566631954
81234880673210146739058568557934581403627822703280
82616570773948327592232845941706525094512325230608
22918802058777319719839450180888072429661980811197
77158542502016545090413245809786882778948721859617
72107838435069186155435662884062257473692284509516
20849603980134001723930671666823555245252804609722
53503534226472524250874054075591789781264330331690
'''

# Python ints are arbitrary precision, so the numbers can simply be summed
# directly instead of adding them digit-by-digit with manual carries as the
# original did; the printed output (first ten digits of the total) is
# unchanged.
lines = bignumber.split()
total = sum(int(line) for line in lines)
print(str(total)[:10])
# print(len(bignumber.split("\r\n"))) | {
"repo_name": "fresky/ProjectEulerSolution",
"path": "013.py",
"copies": "1",
"size": "5581",
"license": "mit",
"hash": -1193688646553723100,
"line_mean": 40.3481481481,
"line_max": 50,
"alpha_frac": 0.9485755241,
"autogenerated": false,
"ratio": 2.3234804329725227,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37720559570725226,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Dawei'
words = {1:'one',2:'two',3:'three',4:'four',5:'five',6:'six',7:'seven',8:'eight',9:'nine',10:'ten'
,11:'eleven',12:'twelve', 13:'thirteen', 14:'fourteen', 15:'fifteen'
, 16:'sixteen',17:'seventeen',18:'eighteen',19:'nineteen', 20:'twenty'}
twowords = {2:'twenty'
, 3:'thirty', 4:'forty', 5:'fifty',6:'sixty',7:'seventy',8:'eighty',9:'ninety'}
threewords = {100:'hundred'}
fourwords = {1000:'thousand'}
def getonewordcount(n):
    """Letter count of the single-word name for n (1-20); 0 when n == 0."""
    return len(words[n]) if n != 0 else 0
def gettwowordcount(n):
    """Letter count for 21-99: tens word plus the (possibly empty) unit word."""
    tens, units = divmod(n, 10)
    return len(twowords[tens]) + getonewordcount(units)
def getthreewordcount(n):
    """Letter count for 100-999, including the British 'and' joiner."""
    hundreds, remainder = divmod(n, 100)
    count = getonewordcount(hundreds) + len('hundred')
    if remainder != 0:
        count += len('and')
    # Remainders up to twenty have a dedicated single word.
    if remainder <= 20:
        return count + getonewordcount(remainder)
    return count + gettwowordcount(remainder)
def getwordcount(n):
    """Dispatch by magnitude: one-word, tens form, or hundreds form."""
    if n <= 20:
        return getonewordcount(n)
    if n < 100:
        return gettwowordcount(n)
    return getthreewordcount(n)
# Project Euler 17: total letters used writing 1..1000 in words.
result = sum(getwordcount(i) for i in range(1, 1000)) + len('onethousand')
print(result)
# Sanity check: letter total for 1..5 (printed by the following statement).
result = sum(getwordcount(i) for i in range(1, 6))
print(result) | {
"repo_name": "fresky/ProjectEulerSolution",
"path": "017.py",
"copies": "1",
"size": "1208",
"license": "mit",
"hash": -306416977183477060,
"line_mean": 23.6734693878,
"line_max": 98,
"alpha_frac": 0.6100993377,
"autogenerated": false,
"ratio": 2.6666666666666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8637423082476825,
"avg_score": 0.02786858437796826,
"num_lines": 49
} |
__author__ = 'dbaker'
import hashlib
import optparse
import os
from shutil import copyfile
def hashfile(filePath):
    """Return the SHA-1 hex digest of the file at *filePath* (read in binary)."""
    digest = hashlib.sha1()
    with open(filePath, 'rb') as handle:
        digest.update(handle.read())
    return digest.hexdigest()
# CLI tool: mirror files from --origin into --destination, copying any file
# that is missing or whose SHA-1 digest differs. (Python 2; the "\\" path
# separators assume a Windows filesystem.)
parser = optparse.OptionParser()
parser.add_option('-o', '--origin',
                  action="store", dest="origin",
                  help="the origin of the copy items")
parser.add_option('-d', '--destination',
                  action="store", dest="destination",
                  help="the destination of the copy items")
options, args = parser.parse_args()
for root, directories, files in os.walk(options.origin):
    for file in files:
        originFilePath = root + "\\" + file
        # Rebuild the path relative to origin under the destination root.
        destinationFilePath = options.destination + root.split(options.origin)[-1] + "\\" + file
        if not os.path.exists(os.path.dirname(destinationFilePath)):
            os.makedirs(os.path.dirname(destinationFilePath))
        try:
            if os.path.isfile(originFilePath) and not os.path.isfile(destinationFilePath):
                print "File does not exist. Copying " + file + " to " + destinationFilePath
                copyfile(originFilePath, destinationFilePath)
            elif hashfile(originFilePath) != hashfile(destinationFilePath):
                print "Files differ. Copying " + file + " to " + destinationFilePath
                copyfile(originFilePath, destinationFilePath)
        except:
            # NOTE(review): bare except silently skips unreadable/locked files;
            # deliberate best-effort, but it also hides real errors.
            continue
| {
"repo_name": "daniebker/PyNewcopy",
"path": "CopyNewFiles.py",
"copies": "1",
"size": "1539",
"license": "mit",
"hash": 828405050522693200,
"line_mean": 30.4081632653,
"line_max": 96,
"alpha_frac": 0.604288499,
"autogenerated": false,
"ratio": 4.335211267605634,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5439499766605633,
"avg_score": null,
"num_lines": null
} |
__author__ = 'dborysenko'
from pysphere import VIServer
from models import Vcenter
import ssl
def connect_vcenter(vcenter):
    """Open a pysphere connection to the given vCenter record.

    :param vcenter: object exposing host_name, user_name, user_password
    :return: a connected VIServer, or None if connect() raises AttributeError

    Bug fix: the old implementation had an unreachable ``else`` clause on the
    try block (the ``try`` body always returns first) that referenced the
    undefined name ``_create_unverified_https_context`` and would have raised
    NameError had it ever run. It has been removed.
    """
    server = VIServer()
    try:
        server.connect(vcenter.host_name, vcenter.user_name, vcenter.user_password)
        return server
    except AttributeError:
        # Matches the original best-effort behaviour: swallow AttributeError
        # (e.g. missing ssl context hooks) and report no connection.
        return None
def get_templates(vcenter):
    """Return the names of all template VMs registered on *vcenter*."""
    connection = connect_vcenter(vcenter)
    registered = connection.get_registered_vms(advanced_filters={'config.template': True})
    # Each entry looks like "[datastore] name/name.vmx"; keep the bare name.
    names = [entry.split()[1].split('/')[0] for entry in registered]
    connection.disconnect()
    return names
def get_vms(vcenter):
    """Return the names of all non-template VMs registered on *vcenter*."""
    connection = connect_vcenter(vcenter)
    registered = connection.get_registered_vms(advanced_filters={'config.template': False})
    # Same path format as get_templates: extract the VM name component.
    names = [entry.split()[1].split('/')[0] for entry in registered]
    connection.disconnect()
    return names
def get_vcenters():
    """Return (primary key, host name) pairs for every stored Vcenter record."""
    return [(record._get_pk_val(), record.host_name)
            for record in Vcenter.objects.all()]
| {
"repo_name": "borisensx/ansiblePower",
"path": "vmware/vmvc.py",
"copies": "1",
"size": "1353",
"license": "mit",
"hash": 1349740994675121700,
"line_mean": 28.4130434783,
"line_max": 85,
"alpha_frac": 0.6614929786,
"autogenerated": false,
"ratio": 3.5793650793650795,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47408580579650794,
"avg_score": null,
"num_lines": null
} |
__author__ = 'dcard'
from optparse import OptionParser
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..preprocessing import label_reader
def main():
    """CLI entry point: positional args are the dataset name and the csv path."""
    option_parser = OptionParser(usage="%prog dataset filename.csv")
    (_options, positional) = option_parser.parse_args()
    find_most_errors(positional[0], positional[1])
def find_most_errors(dataset, filename):
    # Compare per-item predicted labels with gold labels and report the ten
    # items with the most false positives and the ten with the most false
    # negatives, plus a histogram of each distribution. (Python 2 prints.)
    #
    # :param dataset: dataset name understood by label_reader
    # :param filename: csv of predictions (items as rows, labels as columns)
    predicted = pd.read_csv(filename, index_col=0)
    df_labels = label_reader.get_dataset_labels(dataset)
    false_positives = []
    false_negatives = []
    for i in predicted.index:
        # Positive differences in each direction count the disagreements.
        false_positives.append(np.sum(np.maximum(predicted.loc[i, :] - df_labels.loc[i, :], 0)))
        false_negatives.append(np.sum(np.maximum(df_labels.loc[i, :] - predicted.loc[i, :], 0)))
    print "false positives"
    order = np.argsort(false_positives)
    for i in range(1, 11):
        # argsort is ascending, so index from the end for the largest counts.
        print false_positives[order[-i]], predicted.index[order[-i]]
    plt.hist(false_positives)
    plt.show()
    print "false negatives"
    order = np.argsort(false_negatives)
    for i in range(1, 11):
        print false_negatives[order[-i]], predicted.index[order[-i]]
    plt.hist(false_negatives)
    plt.show()
if __name__ == '__main__':
main()
| {
"repo_name": "dallascard/guac",
"path": "core/evaluation/find_most_errors.py",
"copies": "1",
"size": "1235",
"license": "apache-2.0",
"hash": -6092429443715663000,
"line_mean": 24.2040816327,
"line_max": 96,
"alpha_frac": 0.6510121457,
"autogenerated": false,
"ratio": 3.3651226158038146,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45161347615038144,
"avg_score": null,
"num_lines": null
} |
__author__ = 'dcl9'
from render import render_template
import argparse
import yaml
def generate_preferences_track_dict(metadata):
    """Build the trackDb entry dict for a binding-site *preferences* track."""
    protein_a = metadata['proteins'][0]
    protein_b = metadata['proteins'][1]
    serial_a = metadata['serial_numbers'][0]
    serial_b = metadata['serial_numbers'][1]
    long_label = ('Binding site preferences of {}(Red) vs. {}(Blue); '
                  'iMADS models {} and {}').format(protein_a, protein_b,
                                                   serial_a, serial_b)
    return {
        'track_name': metadata['track_name'],
        'bigbed_url': metadata['track_filename'],
        'short_label': '{} vs. {}'.format(protein_a, protein_b),
        'long_label': long_label,
        'type': 'bigBed 9 .',
        'spectrum': 'off',
        'itemRgb': 'on',
    }
def generate_predictions_track_dict(metadata):
    """Build the trackDb entry dict for a predicted-binding-sites track."""
    long_label = 'Predicted {} binding sites (site width = {}); iMADS model {}'.format(
        metadata['protein'], metadata['width'], metadata['serial_number'])
    return {
        'track_name': metadata['track_name'],
        'bigbed_url': metadata['track_filename'],
        'short_label': metadata['protein'],
        'long_label': long_label,
        'type': 'bigBed 5 .',
        'spectrum': 'on',
        'itemRgb': 'off',
    }
def render_tracks(assembly, mode, metadata_file):
    """Render a trackDb from YAML metadata, keeping only *assembly* entries.

    :param assembly: genome assembly name to filter on (e.g. 'hg19')
    :param mode: 'preferences' selects the preferences formatter; anything
                 else selects the predictions formatter
    :param metadata_file: open file object containing a YAML list of entries

    Fix: use yaml.safe_load instead of the deprecated/unsafe bare yaml.load
    (which can construct arbitrary Python objects from the input); the
    metadata is plain YAML, so behaviour is unchanged for valid files.
    """
    obj = yaml.safe_load(metadata_file)
    if mode == 'preferences':
        generate_track_dict = generate_preferences_track_dict
    else:
        generate_track_dict = generate_predictions_track_dict
    # Keep only the entries that belong to the requested assembly.
    tracks = [generate_track_dict(x) for x in obj if x['assembly'] == assembly]
    render_template({'tracks': tracks}, 'trackDb')
def main():
    """Parse command-line options and render the trackDb file."""
    arg_parser = argparse.ArgumentParser(description='Render trackDb.txt')
    arg_parser.add_argument('--assembly')
    arg_parser.add_argument('--mode')
    arg_parser.add_argument('metadata_file', type=argparse.FileType('r'))
    parsed = arg_parser.parse_args()
    render_tracks(parsed.assembly, parsed.mode, parsed.metadata_file)
if __name__ == '__main__':
main()
| {
"repo_name": "Duke-GCB/TrackHubGenerator",
"path": "python/render/render_tracks.py",
"copies": "1",
"size": "1902",
"license": "mit",
"hash": -1480492012125097700,
"line_mean": 35.5769230769,
"line_max": 158,
"alpha_frac": 0.6366982124,
"autogenerated": false,
"ratio": 3.414721723518851,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4551419935918851,
"avg_score": null,
"num_lines": null
} |
__author__ = 'dc'
import uuid
import random
import tornado.web
import sae.kvdb
# Shared SAE key-value store: one move log (list of states) per game uid.
kv = sae.kvdb.KVClient()
# Legal move directions and the lifecycle states a game can report.
op_set = ('up', 'down', 'left', 'right')
status_set = ('start', 'running', 'finish', 'failed')
# Static HTML help page served at "/" describing the judge's HTTP API.
main_info = '''<html>
<head>
<title>2048judge</title>
</head>
<body>
<p>welcome to 2048 ai judge</p>
<p>first use</p>
<samp>GET /judge/</samp>
<p>to get table and uid. then use</p>
<samp>GET /judge/?uid=XXXXXXXX&op=left</samp>
<p>to do op in the table, it will return your op, the new table and the status.</p>
<p>op should in ('up', 'down', 'left', 'right')</p>
<p>the return status will in ('start', 'running','finish')</p>
<p>when you receive 'finish', it means you got 2048.</p>
<p>use</p>
<samp>GET /log/?uid=XXXXXXXX</samp>
<p>to get full log</p>
</body>
</html>
'''
class MainHandler(tornado.web.RequestHandler):
    # Serves the static usage/help page (main_info) at the site root.
    def get(self):
        self.write(main_info)
class JudgeHandler(tornado.web.RequestHandler):
    # Game endpoint: GET without parameters starts a new game and returns its
    # uid plus initial board; GET with uid and op applies one move and returns
    # the resulting state.
    def get(self):
        op = self.get_argument('op', None)
        # NOTE(review): str(None) is "None" (truthy), so the branch below is
        # effectively gated on op alone — confirm whether that is intended.
        uid = str(self.get_argument('uid', None))
        if uid and op:
            # add op
            # NOTE(review): assert is stripped under -O; raising would be
            # safer for request validation.
            assert op in op_set, 'op error'
            log = kv.get(uid)
            assert log, 'uid error'
            assert log[-1]['status'] != 'finish', 'game finished!'
            table = log[-1]['table']
            # One 2048 move: slide, merge, slide again, then spawn a tile.
            push_line(table, op)
            sum_line(table, op)
            push_line(table, op)
            add_number(table)
            status = check_status(table)
            log.append({'table': table, 'op': op, 'status': status})
            kv.set(uid, log)
            self.write(log[-1])
        else:
            # first round
            uid = uuid.uuid1().hex
            kv.set(uid, [{'table': gen_table(), 'op': None, 'status': 'start'}])
            self.write({'uid': uid, 'log': kv.get(uid)})
class LogHandler(tornado.web.RequestHandler):
    # Returns the full move log for the game identified by ?uid=...
    def get(self):
        # NOTE(review): str(None) is "None" (truthy), so this branch always
        # runs; a missing uid fails the assert below instead.
        uid = str(self.get_argument('uid', None))
        if uid:
            log = kv.get(uid)
            assert log, 'uid error'
            self.write({'uid': uid, 'log': log})
def gen_table():
    """Return a fresh 4x4 board (list of lists) seeded with two '2' tiles."""
    board = [[0] * 4 for _ in range(4)]
    add_number(board, 2)
    return board
def op_table(table, op):
    # NOTE(review): appears to be an unfinished stub — only 'up' is matched
    # (as a no-op) and a tile is always added. The request handlers call
    # push_line/sum_line/add_number directly instead of this function.
    assert op in op_set
    if op == 'up':
        pass
    add_number(table)
    return table
def push_line(table, op):
    """Slide all non-zero tiles of the 4x4 *table* toward the *op* edge.

    No merging happens here (sum_line does that); tiles are just compacted
    against the destination edge, in place.

    Refactor: the original had four nearly identical copy-pasted branches;
    this version extracts each line once, compacts it, and writes it back.
    """
    vertical = op in ('up', 'down')
    for i in range(4):
        # Pull the i-th column (vertical moves) or row (horizontal moves).
        if vertical:
            line = [table[j][i] for j in range(4)]
        else:
            line = [table[i][j] for j in range(4)]
        tiles = [v for v in line if v != 0]
        padding = [0] * (4 - len(tiles))
        # 'up'/'left' pack tiles at the start; 'down'/'right' at the end.
        if op in ('up', 'left'):
            line = tiles + padding
        else:
            line = padding + tiles
        for j in range(4):
            if vertical:
                table[j][i] = line[j]
            else:
                table[i][j] = line[j]
def sum_line(table, op):
    """Merge equal adjacent tiles toward the *op* edge of the 4x4 grid.

    Single in-place pass starting from the destination edge: a merged pair
    doubles the leading tile and zeroes the trailing one (push_line compacts
    the zeros afterwards). Unknown directions leave the board untouched.
    """
    if op not in ('up', 'down', 'left', 'right'):
        return
    vertical = op in ('up', 'down')
    # Moving toward the high-index edge is the same scan on a reversed line.
    toward_end = op in ('down', 'right')
    for i in range(4):
        if vertical:
            line = [table[j][i] for j in range(4)]
        else:
            line = list(table[i])
        if toward_end:
            line.reverse()
        for j in range(3):
            if line[j] == line[j + 1]:
                line[j] *= 2
                line[j + 1] = 0
        if toward_end:
            line.reverse()
        for j in range(4):
            if vertical:
                table[j][i] = line[j]
            else:
                table[i][j] = line[j]
def check_status(table):
    """Classify the board state.

    Returns 'finish' when any tile reached 2048, 'running' when a move is
    still possible (an empty cell, or an equal adjacent pair in any row or
    column), and 'failed' otherwise.
    """
    cells = [table[i][j] for i in range(4) for j in range(4)]
    if 2048 in cells:
        return 'finish'
    if 0 in cells:
        return 'running'
    # Board is full: look for any mergeable neighbour pair.
    for i in range(4):
        for j in range(3):
            if table[i][j] == table[i][j + 1]:
                return 'running'
            if table[j][i] == table[j + 1][i]:
                return 'running'
    return 'failed'
def add_number(table, num=1):
    """Place *num* new '2' tiles on randomly chosen empty cells of *table*.

    Silently does nothing when fewer than *num* cells are free.
    """
    empty_cells = [(i, j)
                   for i in range(4)
                   for j in range(4)
                   if table[i][j] == 0]
    if len(empty_cells) < num:
        return
    for i, j in random.sample(empty_cells, num):
        # Always spawn a 2 (the random 2-or-4 variant is commented out
        # upstream in the original game).
        table[i][j] = 2
def pretty_print(table):
    # Debug helper: dump the 4x4 board to stdout, one row per line
    # (Python 2 print statement syntax).
    for i in range(4):
        for j in range(4):
            print table[i][j],
        print
| {
"repo_name": "dccrazyboy/2048judge",
"path": "handler.py",
"copies": "1",
"size": "5231",
"license": "mit",
"hash": 9170223502177002000,
"line_mean": 26.5315789474,
"line_max": 83,
"alpha_frac": 0.4826992927,
"autogenerated": false,
"ratio": 3.2350030921459494,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42177023848459494,
"avg_score": null,
"num_lines": null
} |
__author__ = 'dcowden'
"""
Tests for CadQuery Selectors
These tests do not construct any solids, they test only selectors that query
an existing solid
"""
import math
import unittest,sys
import os.path
#my modules
from tests import BaseTest,makeUnitCube,makeUnitSquareWire
from cadquery import *
from cadquery import selectors
class TestCQSelectors(BaseTest):
    """Unit tests for CadQuery selectors (vertices/edges/faces filtering,
    string selector syntax, and selector combinators) against simple solids
    built by the test helpers makeUnitCube/makeUnitSquareWire."""

    def testWorkplaneCenter(self):
        "Test Moving workplane center"
        s = Workplane(Plane.XY())
        #current point and world point should be equal
        self.assertTupleAlmostEquals((0.0,0.0,0.0),s.plane.origin.toTuple(),3)
        #move origin and confirm center moves
        s.center(-2.0,-2.0)
        #current point should be 0,0, but
        self.assertTupleAlmostEquals((-2.0,-2.0,0.0),s.plane.origin.toTuple(),3)

    def testVertices(self):
        t = makeUnitSquareWire() # square box
        c = CQ(t)
        self.assertEqual(4,c.vertices().size() )
        self.assertEqual(4,c.edges().size() )
        self.assertEqual(0,c.vertices().edges().size() ) #no edges on any vertices
        self.assertEqual(4,c.edges().vertices().size() ) #but selecting all edges still yields all vertices
        self.assertEqual(1,c.wires().size()) #just one wire
        self.assertEqual(0,c.faces().size())
        self.assertEqual(0,c.vertices().faces().size()) #odd combinations all work but yield no results
        self.assertEqual(0,c.edges().faces().size())
        self.assertEqual(0,c.edges().vertices().faces().size())

    def testEnd(self):
        c = CQ(makeUnitSquareWire())
        self.assertEqual(4,c.vertices().size() ) #4 because there are 4 vertices
        self.assertEqual(1,c.vertices().end().size() ) #1 because we started with 1 wire

    def testAll(self):
        "all returns a list of CQ objects, so that you can iterate over them individually"
        c = CQ(makeUnitCube())
        self.assertEqual(6,c.faces().size())
        self.assertEqual(6,len(c.faces().all()))
        self.assertEqual(4,c.faces().all()[0].vertices().size() )

    def testFirst(self):
        c = CQ( makeUnitCube())
        self.assertEqual(type(c.vertices().first().val()),Vertex)
        self.assertEqual(type(c.vertices().first().first().first().val()),Vertex)

    def testCompounds(self):
        # a plain wire contains no compounds, shells, or solids
        c = CQ(makeUnitSquareWire())
        self.assertEqual(0,c.compounds().size() )
        self.assertEqual(0,c.shells().size() )
        self.assertEqual(0,c.solids().size() )

    def testSolid(self):
        c = CQ(makeUnitCube())
        #make sure all the counts are right for a cube
        self.assertEqual(1,c.solids().size() )
        self.assertEqual(6,c.faces().size() )
        self.assertEqual(12,c.edges().size())
        self.assertEqual(8,c.vertices().size() )
        self.assertEqual(0,c.compounds().size())
        #now any particular face should result in 4 edges and four vertices
        self.assertEqual(4,c.faces().first().edges().size() )
        self.assertEqual(1,c.faces().first().size() )
        self.assertEqual(4,c.faces().first().vertices().size() )
        self.assertEqual(4,c.faces().last().edges().size() )

    def testFaceTypesFilter(self):
        "Filters by face type"
        c = CQ(makeUnitCube())
        self.assertEqual(c.faces().size(), c.faces('%PLANE').size())
        self.assertEqual(c.faces().size(), c.faces('%plane').size())
        self.assertEqual(0, c.faces('%sphere').size())
        self.assertEqual(0, c.faces('%cone').size())
        self.assertEqual(0, c.faces('%SPHERE').size())

    def testPerpendicularDirFilter(self):
        c = CQ(makeUnitCube())
        self.assertEqual(8,c.edges("#Z").size() ) #8 edges are perp. to z
        self.assertEqual(4, c.faces("#Z").size()) #4 faces are perp to z too!

    def testFaceDirFilter(self):
        c = CQ(makeUnitCube())
        #a cube has one face in each direction
        self.assertEqual(1, c.faces("+Z").size())
        self.assertEqual(1, c.faces("-Z").size())
        self.assertEqual(1, c.faces("+X").size())
        self.assertEqual(1, c.faces("X").size()) #should be same as +X
        self.assertEqual(1, c.faces("-X").size())
        self.assertEqual(1, c.faces("+Y").size())
        self.assertEqual(1, c.faces("-Y").size())
        self.assertEqual(0, c.faces("XY").size())

    def testParallelPlaneFaceFilter(self):
        c = CQ(makeUnitCube())
        #faces parallel to Z axis
        self.assertEqual(2, c.faces("|Z").size())
        #TODO: provide short names for ParallelDirSelector
        self.assertEqual(2, c.faces(selectors.ParallelDirSelector(Vector((0,0,1)))).size()) #same thing as above
        self.assertEqual(2, c.faces(selectors.ParallelDirSelector(Vector((0,0,-1)))).size()) #same thing as above
        #just for fun, vertices on faces parallel to z
        self.assertEqual(8, c.faces("|Z").vertices().size())

    def testParallelEdgeFilter(self):
        c = CQ(makeUnitCube())
        self.assertEqual(4, c.edges("|Z").size())
        self.assertEqual(4, c.edges("|X").size())
        self.assertEqual(4, c.edges("|Y").size())

    def testMaxDistance(self):
        c = CQ(makeUnitCube())
        #should select the topmost face
        self.assertEqual(1, c.faces(">Z").size())
        self.assertEqual(4, c.faces(">Z").vertices().size())
        #vertices should all be at z=1, if this is the top face
        self.assertEqual(4, len(c.faces(">Z").vertices().vals() ))
        for v in c.faces(">Z").vertices().vals():
            self.assertAlmostEqual(1.0,v.Z,3)
        # test the case of multiple objects at the same distance
        el = c.edges("<Z").vals()
        self.assertEqual(4, len(el))

    def testMinDistance(self):
        c = CQ(makeUnitCube())
        #should select the topmost face
        self.assertEqual(1, c.faces("<Z").size())
        self.assertEqual(4, c.faces("<Z").vertices().size())
        #vertices should all be at z=1, if this is the top face
        self.assertEqual(4, len(c.faces("<Z").vertices().vals() ))
        for v in c.faces("<Z").vertices().vals():
            self.assertAlmostEqual(0.0,v.Z,3)
        # test the case of multiple objects at the same distance
        el = c.edges("<Z").vals()
        self.assertEqual(4, len(el))

    def testNthDistance(self):
        # two boxes centred at x=-2 and x=+2; faces indexed along a direction
        c = Workplane('XY').pushPoints([(-2,0),(2,0)]).box(1,1,1)
        #2nd face
        val = c.faces(selectors.DirectionNthSelector(Vector(1,0,0),1)).val()
        self.assertAlmostEqual(val.Center().x,-1.5)
        #2nd face with inversed selection vector
        val = c.faces(selectors.DirectionNthSelector(Vector(-1,0,0),1)).val()
        self.assertAlmostEqual(val.Center().x,1.5)
        #2nd last face
        val = c.faces(selectors.DirectionNthSelector(Vector(1,0,0),-2)).val()
        self.assertAlmostEqual(val.Center().x,1.5)
        #Last face
        val = c.faces(selectors.DirectionNthSelector(Vector(1,0,0),-1)).val()
        self.assertAlmostEqual(val.Center().x,2.5)
        #check if the selected face if normal to the specified Vector
        self.assertAlmostEqual(val.normalAt().cross(Vector(1,0,0)).Length,0.0)
        #repeat the test using string based selector
        #2nd face
        val = c.faces('>(1,0,0)[1]').val()
        self.assertAlmostEqual(val.Center().x,-1.5)
        #2nd face with inversed selection vector
        val = c.faces('>(-1,0,0)[1]').val()
        self.assertAlmostEqual(val.Center().x,1.5)
        #2nd last face
        val = c.faces('>X[-2]').val()
        self.assertAlmostEqual(val.Center().x,1.5)
        #Last face
        val = c.faces('>X[-1]').val()
        self.assertAlmostEqual(val.Center().x,2.5)
        #check if the selected face if normal to the specified Vector
        self.assertAlmostEqual(val.normalAt().cross(Vector(1,0,0)).Length,0.0)

    def testNearestTo(self):
        c = CQ(makeUnitCube())
        #nearest vertex to origin is (0,0,0)
        t = (0.1,0.1,0.1)
        v = c.vertices(selectors.NearestToPointSelector(t)).vals()[0]
        self.assertTupleAlmostEquals((0.0,0.0,0.0),(v.X,v.Y,v.Z),3)
        t = (0.1,0.1,0.2)
        #nearest edge is the vertical side edge, 0,0,0 -> 0,0,1
        e = c.edges(selectors.NearestToPointSelector(t)).vals()[0]
        v = c.edges(selectors.NearestToPointSelector(t)).vertices().vals()
        self.assertEqual(2,len(v))
        #nearest solid is myself
        s = c.solids(selectors.NearestToPointSelector(t)).vals()
        self.assertEqual(1,len(s))

    def testBox(self):
        c = CQ(makeUnitCube())
        # test vertice selection
        test_data_vertices = [
            # box point0, box point1, selected vertice
            ((0.9, 0.9, 0.9), (1.1, 1.1, 1.1), (1.0, 1.0, 1.0)),
            ((-0.1, 0.9, 0.9), (0.9, 1.1, 1.1), (0.0, 1.0, 1.0)),
            ((-0.1, -0.1, 0.9), (0.1, 0.1, 1.1), (0.0, 0.0, 1.0)),
            ((-0.1, -0.1, -0.1), (0.1, 0.1, 0.1), (0.0, 0.0, 0.0)),
            ((0.9, -0.1, -0.1), (1.1, 0.1, 0.1), (1.0, 0.0, 0.0)),
            ((0.9, 0.9, -0.1), (1.1, 1.1, 0.1), (1.0, 1.0, 0.0)),
            ((-0.1, 0.9, -0.1), (0.1, 1.1, 0.1), (0.0, 1.0, 0.0)),
            ((0.9, -0.1, 0.9), (1.1, 0.1, 1.1), (1.0, 0.0, 1.0))
        ]
        for d in test_data_vertices:
            vl = c.vertices(selectors.BoxSelector(d[0], d[1])).vals()
            self.assertEqual(1, len(vl))
            v = vl[0]
            self.assertTupleAlmostEquals(d[2], (v.X, v.Y, v.Z), 3)
            # this time box points are swapped
            vl = c.vertices(selectors.BoxSelector(d[1], d[0])).vals()
            self.assertEqual(1, len(vl))
            v = vl[0]
            self.assertTupleAlmostEquals(d[2], (v.X, v.Y, v.Z), 3)
        # test multiple vertices selection
        vl = c.vertices(selectors.BoxSelector((-0.1, -0.1, 0.9),(0.1, 1.1, 1.1))).vals()
        self.assertEqual(2, len(vl))
        vl = c.vertices(selectors.BoxSelector((-0.1, -0.1, -0.1),(0.1, 1.1, 1.1))).vals()
        self.assertEqual(4, len(vl))
        # test edge selection
        test_data_edges = [
            # box point0, box point1, edge center
            ((0.4, -0.1, -0.1), (0.6, 0.1, 0.1), (0.5, 0.0, 0.0)),
            ((-0.1, -0.1, 0.4), (0.1, 0.1, 0.6), (0.0, 0.0, 0.5)),
            ((0.9, 0.9, 0.4), (1.1, 1.1, 0.6), (1.0, 1.0, 0.5)),
            ((0.4, 0.9, 0.9), (0.6, 1.1, 1.1,), (0.5, 1.0, 1.0))
        ]
        for d in test_data_edges:
            el = c.edges(selectors.BoxSelector(d[0], d[1])).vals()
            self.assertEqual(1, len(el))
            ec = el[0].Center()
            self.assertTupleAlmostEquals(d[2], (ec.x, ec.y, ec.z), 3)
            # test again by swapping box points
            el = c.edges(selectors.BoxSelector(d[1], d[0])).vals()
            self.assertEqual(1, len(el))
            ec = el[0].Center()
            self.assertTupleAlmostEquals(d[2], (ec.x, ec.y, ec.z), 3)
        # test multiple edge selection
        el = c.edges(selectors.BoxSelector((-0.1, -0.1, -0.1), (0.6, 0.1, 0.6))).vals()
        self.assertEqual(2, len(el))
        el = c.edges(selectors.BoxSelector((-0.1, -0.1, -0.1), (1.1, 0.1, 0.6))).vals()
        self.assertEqual(3, len(el))
        # test face selection
        test_data_faces = [
            # box point0, box point1, face center
            ((0.4, -0.1, 0.4), (0.6, 0.1, 0.6), (0.5, 0.0, 0.5)),
            ((0.9, 0.4, 0.4), (1.1, 0.6, 0.6), (1.0, 0.5, 0.5)),
            ((0.4, 0.4, 0.9), (0.6, 0.6, 1.1), (0.5, 0.5, 1.0)),
            ((0.4, 0.4, -0.1), (0.6, 0.6, 0.1), (0.5, 0.5, 0.0))
        ]
        for d in test_data_faces:
            fl = c.faces(selectors.BoxSelector(d[0], d[1])).vals()
            self.assertEqual(1, len(fl))
            fc = fl[0].Center()
            self.assertTupleAlmostEquals(d[2], (fc.x, fc.y, fc.z), 3)
            # test again by swapping box points
            fl = c.faces(selectors.BoxSelector(d[1], d[0])).vals()
            self.assertEqual(1, len(fl))
            fc = fl[0].Center()
            self.assertTupleAlmostEquals(d[2], (fc.x, fc.y, fc.z), 3)
        # test multiple face selection
        fl = c.faces(selectors.BoxSelector((0.4, 0.4, 0.4), (0.6, 1.1, 1.1))).vals()
        self.assertEqual(2, len(fl))
        fl = c.faces(selectors.BoxSelector((0.4, 0.4, 0.4), (1.1, 1.1, 1.1))).vals()
        self.assertEqual(3, len(fl))
        # test boundingbox option
        el = c.edges(selectors.BoxSelector((-0.1, -0.1, -0.1), (1.1, 0.1, 0.6), True)).vals()
        self.assertEqual(1, len(el))
        fl = c.faces(selectors.BoxSelector((0.4, 0.4, 0.4), (1.1, 1.1, 1.1), True)).vals()
        self.assertEqual(0, len(fl))
        fl = c.faces(selectors.BoxSelector((-0.1, 0.4, -0.1), (1.1, 1.1, 1.1), True)).vals()
        self.assertEqual(1, len(fl))

    def testAndSelector(self):
        c = CQ(makeUnitCube())
        S = selectors.StringSyntaxSelector
        BS = selectors.BoxSelector
        el = c.edges(selectors.AndSelector(S('|X'), BS((-2,-2,0.1), (2,2,2)))).vals()
        self.assertEqual(2, len(el))
        # test 'and' (intersection) operator
        el = c.edges(S('|X') & BS((-2,-2,0.1), (2,2,2))).vals()
        self.assertEqual(2, len(el))
        # test using extended string syntax
        v = c.vertices(">X and >Y").vals()
        self.assertEqual(2, len(v))

    def testSumSelector(self):
        c = CQ(makeUnitCube())
        S = selectors.StringSyntaxSelector
        fl = c.faces(selectors.SumSelector(S(">Z"), S("<Z"))).vals()
        self.assertEqual(2, len(fl))
        el = c.edges(selectors.SumSelector(S("|X"), S("|Y"))).vals()
        self.assertEqual(8, len(el))
        # test the sum operator
        fl = c.faces(S(">Z") + S("<Z")).vals()
        self.assertEqual(2, len(fl))
        el = c.edges(S("|X") + S("|Y")).vals()
        self.assertEqual(8, len(el))
        # test using extended string syntax
        fl = c.faces(">Z or <Z").vals()
        self.assertEqual(2, len(fl))
        el = c.edges("|X or |Y").vals()
        self.assertEqual(8, len(el))

    def testSubtractSelector(self):
        c = CQ(makeUnitCube())
        S = selectors.StringSyntaxSelector
        fl = c.faces(selectors.SubtractSelector(S("#Z"), S(">X"))).vals()
        self.assertEqual(3, len(fl))
        # test the subtract operator
        fl = c.faces(S("#Z") - S(">X")).vals()
        self.assertEqual(3, len(fl))
        # test using extended string syntax
        fl = c.faces("#Z exc >X").vals()
        self.assertEqual(3, len(fl))

    def testInverseSelector(self):
        c = CQ(makeUnitCube())
        S = selectors.StringSyntaxSelector
        fl = c.faces(selectors.InverseSelector(S('>Z'))).vals()
        self.assertEqual(5, len(fl))
        el = c.faces('>Z').edges(selectors.InverseSelector(S('>X'))).vals()
        self.assertEqual(3, len(el))
        # test invert operator
        fl = c.faces(-S('>Z')).vals()
        self.assertEqual(5, len(fl))
        el = c.faces('>Z').edges(-S('>X')).vals()
        self.assertEqual(3, len(el))
        # test using extended string syntax
        fl = c.faces('not >Z').vals()
        self.assertEqual(5, len(fl))
        el = c.faces('>Z').edges('not >X').vals()
        self.assertEqual(3, len(el))

    def testComplexStringSelector(self):
        c = CQ(makeUnitCube())
        v = c.vertices('(>X and >Y) or (<X and <Y)').vals()
        self.assertEqual(4, len(v))

    def testFaceCount(self):
        c = CQ(makeUnitCube())
        self.assertEqual( 6, c.faces().size() )
        self.assertEqual( 2, c.faces("|Z").size() )

    def testVertexFilter(self):
        "test selecting vertices on a face"
        c = CQ(makeUnitCube())
        #TODO: filters work ok, but they are in global coordinates which sux. it would be nice
        #if they were available in coordinates local to the selected face
        v2 = c.faces("+Z").vertices("<XY")
        self.assertEqual(1,v2.size() ) #another way
        #make sure the vertex is the right one
        self.assertTupleAlmostEquals((0.0,0.0,1.0),v2.val().toTuple() ,3)

    def testGrammar(self):
        """
        Test if reasonable string selector expressions parse without an error
        """
        gram = selectors._expression_grammar
        # A sampler of every selector form the grammar should accept.
        expressions = ['+X ',
                       '-Y',
                       '|(1,0,0)',
                       '#(1.,1.4114,-0.532)',
                       '%Plane',
                       '>XZ',
                       '<Z[-2]',
                       '>(1,4,55.)[20]',
                       '|XY',
                       '<YZ[0]',
                       'front',
                       'back',
                       'left',
                       'right',
                       'top',
                       'bottom',
                       'not |(1,1,0) and >(0,0,1) or XY except >(1,1,1)[-1]',
                       '(not |(1,1,0) and >(0,0,1)) exc XY and (Z or X)']
        for e in expressions: gram.parseString(e,parseAll=True)
| {
"repo_name": "huskier/cadquery",
"path": "tests/TestCQSelectors.py",
"copies": "1",
"size": "17196",
"license": "apache-2.0",
"hash": 9137331437236900000,
"line_mean": 36.5480349345,
"line_max": 113,
"alpha_frac": 0.5377413352,
"autogenerated": false,
"ratio": 3.2052190121155637,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9169057032424293,
"avg_score": 0.014780662978254287,
"num_lines": 458
} |
__author__ = 'ddeconti'
# Goes through fda approved drug list from Joe
# Scrapes wikipedia for the drug smiles
# Gets unique smiles string from set
# prints to screen for analysis outside script
import random
import re
import sys
import time
import wikipedia
from bs4 import BeautifulSoup
def scrape_for_smiles(name, max_wait=2):
    '''
    Scrapes wikipedia given drug name for smiles string
    :param name: name of drug as string
    :param max_wait: optional default wait to not overload search requests
    :return: smiles string, or None when the lookup fails at any stage
    '''
    # Randomised politeness delay before hitting the Wikipedia API.
    time.sleep(max_wait*random.random())
    try:
        search_results = wikipedia.search(name)
    except:  # NOTE(review): bare except keeps the scrape best-effort
        return None
    if len(search_results) < 1:
        return None
    try:
        wiki_page = wikipedia.page(search_results[0])
    except:  # NOTE(review): bare except keeps the scrape best-effort
        return None
    wiki_html = wiki_page.html()
    soup = BeautifulSoup(wiki_html, "html.parser")
    # The SMILES sits in a chembox div identified by this exact inline style.
    smiles_tag = soup.find("div",
                           {"style":
                            "word-wrap:break-word; text-indent:-1.5em"})
    if not smiles_tag:
        return None
    smiles_tag = str(smiles_tag)
    # Slice between the end of the style attribute and the closing </div>.
    first = re.search("-1.5em", smiles_tag).start()
    last = re.search("/div", smiles_tag).start()
    smiles = smiles_tag[first+8:last-1]
    return smiles
def parse_csv(filename):
    """Parse the FDA drug csv and print unique SMILES for each 'nda' entry.

    For every nda-type row, scrape Wikipedia for the drug's SMILES (falling
    back to the alternate name) and write "name<TAB>smiles" to stdout for
    each SMILES not seen before.

    :param filename: path of the comma-delimited csv (header row skipped)

    Fixes: the file is now closed even if scraping raises (with-block), and
    ``smiles != None`` became the idiomatic ``smiles is not None``.
    """
    try:
        handle = open(filename, 'rU')
    except IOError as e:
        sys.stderr.write("IOError: " + str(e) +
                         "\nError in parse_csv()\n")
        sys.exit()
    smiles_set = set([])
    with handle:  # guarantees close() on any exit path
        handle.readline()  # skip the header row
        for line in handle:
            arow = line.strip('\n').split(',')
            app_type = arow[-2]
            if app_type != "nda":
                continue
            drug_name = arow[0]
            alt_name = arow[1]
            if re.search("nda", drug_name):
                # Trim the trailing " (nda...)"-style suffix from the name.
                end = re.search("nda", drug_name).start() - 2
                drug_name = drug_name[:end]
            smiles = scrape_for_smiles(drug_name)
            if not smiles:
                smiles = scrape_for_smiles(alt_name)
            if smiles is not None and smiles not in smiles_set:
                smiles_set.add(smiles)
                sys.stdout.write(drug_name + "\t" + smiles + '\n')
                sys.stdout.flush()
def print_smiles(smiles_set):
    '''
    Prints set of smiles string to stdout, one per line
    :param smiles_set: set of unique fda approved smiles strings
    :return:
    '''
    sys.stdout.writelines(smiles + '\n' for smiles in smiles_set)
    sys.stdout.flush()
def main(sa):
    '''
    Entry point: sa[0] is the path of the FDA drug csv to parse.
    :param sa: argv tail
    :return:
    '''
    parse_csv(sa[0])
#print_smiles(smiles_set)
if __name__ == "__main__":
main(sys.argv[1:])
| {
"repo_name": "dkdeconti/PAINS-train",
"path": "training_methods/file_manipulation/fda_approved_smiles_wikipedia_scrape.py",
"copies": "1",
"size": "2870",
"license": "mit",
"hash": -5231856349739147000,
"line_mean": 26.0754716981,
"line_max": 78,
"alpha_frac": 0.5843205575,
"autogenerated": false,
"ratio": 3.4330143540669855,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45173349115669853,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ddeconti'
import base64
import numpy
import sys
import pickle
from io import BytesIO
from flask import Flask, render_template, request, make_response, send_file
from rdkit import DataStructs
from rdkit.Chem import AllChem, MolFromSmiles, Draw
#from app import app
app = Flask(__name__)
@app.route('/')
@app.route('/index')
def index():
    # Landing page doubles as the SMILES input form.
    return render_template("input.html")
@app.route('/about')
def pains_train_about():
    # Static "about" page.
    return render_template("about.html")
@app.route('/contact')
def pains_train_contact():
    # Static "contact" page.
    return render_template("contact.html")
@app.route('/input')
def smile_input():
    # Explicit route for the SMILES input form (same template as the root).
    return render_template("input.html")
@app.route('/output')
def smile_output():
    """Classify the submitted SMILES string and render the result page.

    Parses the molecule with RDKit, builds a Morgan fingerprint, scores it
    with the pickled random forest, and renders a base64 PNG depiction with
    the predicted promiscuity probability.

    Fixes: bare ``except:`` clauses (which also swallow SystemExit and
    KeyboardInterrupt) narrowed to ``except Exception``, and the model file
    handle is now closed via a with-block.
    """
    smile_str = request.args.get("smile_str")
    smile_str = str(smile_str)
    # NOTE(review): the model is re-loaded from disk on every request;
    # consider loading it once at import time.
    with open("static/rf_n300.p", "rb") as model_file:
        rf = pickle.load(model_file)
    try:
        mol = MolFromSmiles(smile_str)
    except Exception:
        return render_template("error.html",
                               error_str="Error in MolFromSmiles")
    sys.stdout.write(str(mol) + "\n")
    try:
        fp = AllChem.GetMorganFingerprintAsBitVect(mol, 2)
    except Exception:
        return render_template("error.html",
                               error_str="Error in AllChem.GetMorgan" +
                                         "FingerprintAsBitVect()")
    sys.stdout.write(str(fp) + '\n')
    try:
        arr = numpy.zeros((1,))
        DataStructs.ConvertToNumpyArray(fp, arr)
        fpm = arr
    except Exception:
        return render_template("error.html",
                               error_str="Error in DataStructs." +
                                         "ConvertToNumpyArray()")
    # Draw the molecule to an in-memory PNG and base64-encode it for the page.
    figfile = BytesIO()
    Draw.MolToFile(mol, figfile, imageType="png")
    figfile.seek(0)
    figdata_png = figfile.getvalue()
    figdata_png = base64.b64encode(figdata_png)
    p = rf.predict(fpm)
    prob = rf.predict_proba(fpm)[0][1]
    percent = int(prob * 100)
    if p == 1:
        outstr = "promiscuous molecule"
    else:
        outstr = "non-promiscuous molecule"
    sys.stdout.write(smile_str + "\n")
    return render_template("output.html", smile_str=smile_str,
                           binary_str=outstr, img_obj=figdata_png,
                           predict_prob=percent)
@app.route('/error')
def error_output():
    # Generic error page.
    return render_template("error.html")
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000, debug=True)
| {
"repo_name": "dkdeconti/PAINS-train",
"path": "web_app_frontend/app/views.py",
"copies": "1",
"size": "2396",
"license": "mit",
"hash": 322642044279495600,
"line_mean": 26.5402298851,
"line_max": 75,
"alpha_frac": 0.5976627713,
"autogenerated": false,
"ratio": 3.422857142857143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9489114485918881,
"avg_score": 0.006281085647652364,
"num_lines": 87
} |
__author__ = 'ddeconti'
import FileHandler
import math
import numpy
import pickle
import sys
from bokeh.plotting import figure, output_file, show, HBox
from rdkit.Chem import AllChem, MolFromSmiles
def plot_scatter(x, y, title, x_label, y_label, color="red"):
    """Build a bokeh scatter plot.

    :param x: x coordinates
    :param y: y coordinates
    :param title: plot title
    :param x_label: x-axis label
    :param y_label: y-axis label
    :param color: marker fill colour (default "red" preserves the old
        behaviour)
    :return: the bokeh figure

    Bug fix: ``title`` and ``color`` were previously accepted but
    ignored — the title was dropped and the fill colour hard-coded.
    """
    plot = figure(title=title,
                  x_axis_label=x_label,
                  y_axis_label=y_label)
    plot.scatter(x, y, fill_color=color)
    return plot
def build_prediction_result_array(rf, drug_list):
    '''
    Annotate each drug with the classifier's PAINS probability.

    :param rf: fitted classifier exposing ``predict_proba``
    :param drug_list: list of drug records with get_SMILES() and
        set_rf_prediction_prob()
    :return: the same drug_list; drugs whose SMILES cannot be parsed or
        fingerprinted are skipped (left unannotated)

    Cleanup: removed the unused ``prediction_probs`` local and fixed the
    stale docstring (it described a ``filename`` parameter that never
    existed).
    '''
    for drug in drug_list:
        try:
            mol = MolFromSmiles(drug.get_SMILES())
            fp = AllChem.GetMorganFingerprintAsBitVect(mol, 2)
        except Exception:
            # Best-effort: skip unparseable molecules, as before.  Note
            # MolFromSmiles returns None on failure; the fingerprint call
            # then raises TypeError, which is caught here.
            continue
        p = rf.predict_proba(fp)[0][1]
        drug.set_rf_prediction_prob(p)
    return drug_list
def polyfit(x, y, degree):
    """Least-squares polynomial fit of y on x, ignoring None pairs.

    :param x: sequence of x values (entries may be None)
    :param y: sequence of y values (entries may be None)
    :param degree: degree of the fitted polynomial
    :return: dict with 'polynomial' (coefficients, highest power first),
        'correlation' (Pearson r) and 'determination' (r ** 2)
    """
    # Drop any pair where either coordinate is missing.
    pairs = [(a, b) for a, b in zip(x, y)
             if a is not None and b is not None]
    x = [a for a, _ in pairs]
    y = [b for _, b in pairs]
    results = {}
    coeffs = numpy.polyfit(x, y, degree)
    results['polynomial'] = coeffs.tolist()
    correlation = numpy.corrcoef(x, y)[0, 1]
    results['correlation'] = correlation
    results['determination'] = correlation ** 2
    return results
def main(sa):
    """Regress drug side-effect counts on predicted PAINS probability.

    :param sa: argv[1:] — [0] pickled random-forest classifier,
        [1] scraped drug/side-effect table (see FileHandler.WikiScrapedDB)
    Writes the two scatter panels to lregress.html and prints the
    linear-fit statistics.
    """
    rf_pickle_name = sa[0]
    chem_dict_filename = sa[1]
    try:
        rf = pickle.load(open(rf_pickle_name, "rb"))
    except IOError as e:
        sys.stderr.write("IOError: " + str(e) +
                         "\nError in main()\n")
        sys.exit()
    drug_list = FileHandler.WikiScrapedDB(chem_dict_filename).get_drug_list()
    drug_list = build_prediction_result_array(rf, drug_list)
    p1 = plot_scatter([drug.get_rf_prediction_prob() for drug in drug_list],
                      [drug.get_side_effect_count() for drug in drug_list],
                      "Chemical promiscuity vs. # side effects",
                      "Predicted probability of PAINS classification",
                      "Number of side effects")
    p2 = plot_scatter([drug.get_rf_prediction_prob() for drug in drug_list],
                      [drug.get_gene_effect_count() for drug in drug_list],
                      "Chemical promiscuity vs. # gene expression changes",
                      "Predicted probability of PAINS classification",
                      "Number of identified gene expression effects")
    # Degree-1 (linear) fit of side-effect count vs. predicted probability.
    results = polyfit([drug.get_rf_prediction_prob() for drug in drug_list],
                      [drug.get_side_effect_count() for drug in drug_list],
                      1)
    print results
    output_file("lregress.html")
    show(HBox(p1, p2))
if __name__ == "__main__":
main(sys.argv[1:]) | {
"repo_name": "dkdeconti/PAINS-train",
"path": "training_methods/clustering/lregress_predicted_PAINS.py",
"copies": "1",
"size": "2788",
"license": "mit",
"hash": -3652999253744932400,
"line_mean": 31.0574712644,
"line_max": 77,
"alpha_frac": 0.581061693,
"autogenerated": false,
"ratio": 3.4083129584352077,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4489374651435208,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ddeconti'
import FileHandler
import matplotlib.pyplot as plt
import numpy
import sys
import scipy.cluster.hierarchy as hcluster
from bokeh.plotting import figure, output_file, show, VBox, HBox
from rdkit import DataStructs
from sklearn.decomposition.pca import PCA
def pca_plot(fp_list, clusters):
    """Scatter fingerprints in PCA space, coloured by cluster label.

    :param fp_list: RDKit bit-vector fingerprints
    :param clusters: per-fingerprint cluster labels (same length)
    :return: an HBox with PC1/PC2 and PC2/PC3 panels

    Bug fix: the original scattered *all* points inside the per-cluster
    loop, so every pass overplotted the whole data set and only the last
    cluster's colour survived; each cluster's members are now plotted
    separately.  Colour indices also wrap instead of overrunning the
    palette when there are more clusters than colours, and the debug
    prints were removed.
    """
    np_fps = []
    for fp in fp_list:
        arr = numpy.zeros((1,))
        DataStructs.ConvertToNumpyArray(fp, arr)
        np_fps.append(arr)
    pca = PCA(n_components=3)
    pca.fit(np_fps)
    np_fps_r = pca.transform(np_fps)
    p1 = figure(x_axis_label="PC1",
                y_axis_label="PC2",
                title="PCA clustering of PAINS")
    p2 = figure(x_axis_label="PC2",
                y_axis_label="PC3",
                title="PCA clustering of PAINS")
    color_vector = ["blue", "red", "green", "orange", "pink", "cyan", "magenta",
                    "brown", "purple"]
    labels = numpy.asarray(clusters)
    for clust_num in set(clusters):
        members = np_fps_r[labels == clust_num]
        color = color_vector[clust_num % len(color_vector)]
        p1.scatter(members[:, 0], members[:, 1], color=color)
        p2.scatter(members[:, 1], members[:, 2], color=color)
    return HBox(p1, p2)
def clust(fp_list):
    """Hierarchical clustering of fingerprints.

    :param fp_list: RDKit bit-vector fingerprints
    :return: flat cluster labels from scipy's fclusterdata
    """
    np_fps = []
    for fp in fp_list:
        arr = numpy.zeros((1,))
        DataStructs.ConvertToNumpyArray(fp, arr)
        np_fps.append(arr)
    # Distance cut-off; presumably chosen empirically for this data set
    # — TODO confirm.
    thresh = 6.5
    clusters = hcluster.fclusterdata(np_fps, thresh, criterion="distance")
    return clusters
def main(sa):
    """Cluster PAINS fingerprints and plot them in PCA space.

    :param sa: argv[1:] — [0] SLN file of PAINS patterns
    Writes the panels to PCA_w_hclust.html.
    """
    pains_filename = sa[0]
    fp_list = FileHandler.SlnFile(pains_filename).get_fingerprint_list()
    clusters = clust(fp_list)
    p = pca_plot(fp_list, clusters)
    output_file("PCA_w_hclust.html")
    show(p)
if __name__ == "__main__":
main(sys.argv[1:]) | {
"repo_name": "dkdeconti/PAINS-train",
"path": "training_methods/clustering/hclust_PAINS.py",
"copies": "1",
"size": "1977",
"license": "mit",
"hash": 6109634984811112000,
"line_mean": 28.5223880597,
"line_max": 80,
"alpha_frac": 0.5897824987,
"autogenerated": false,
"ratio": 3.2094155844155843,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4299198083115584,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ddeconti'
import FileHandler
import numpy
import pickle
import random
import sys
from rdkit.Chem import AllChem, SDMolSupplier
from rdkit import Chem, DataStructs
from sklearn.cross_validation import train_test_split, StratifiedKFold
from sklearn.ensemble import RandomForestClassifier
def optimize_rf(target, control):
    """Sweep the random-forest tree count and print 5-fold CV metrics.

    :param target: positive-class (PAINS) fingerprints
    :param control: negative-class fingerprints
    Prints one tab-separated row per tree count: mean specificity,
    sensitivity, FDR, accuracy, F1 and precision over the folds.
    """
    out_list = ["num_trees", "specificity", "sensitivity", "fdr", "acc", "f1",
                "precision"]
    print '\t'.join(out_list)
    data_list = target + control
    # Label convention for the fold split: 0 marks target, 1 marks control.
    label_list = [0]*len(target) + [1]*len(control)
    for i in [1, 2, 3, 4, 5, 10, 20, 30, 40, 50, 100, 200, 300, 400, 500,
              1000]:
        stat_list = []
        kfold = StratifiedKFold(label_list, n_folds=5)
        for train, test in kfold:
            rf = train_rf([data_list[j] for j in train if label_list[j] == 0],
                          [data_list[j] for j in train if label_list[j] == 1],
                          i,
                          1986)
            stat_list.append(test_rf([data_list[j] for j in test
                                      if label_list[j] == 0],
                                     [data_list[j] for j in test
                                      if label_list[j] == 1],
                                     rf))
        # Average the per-fold metric vectors element-wise.
        stat_list = numpy.mean(stat_list, axis=0)
        print '\t'.join(str(s) for s in [i] + list(stat_list))
def train_rf(target, control, n_est, rand_state):
    """Fit a random forest on target (label 1) vs control (label 0).

    :param target: RDKit bit-vector fingerprints of the positive class
    :param control: RDKit bit-vector fingerprints of the negative class
    :param n_est: number of trees in the forest
    :param rand_state: random seed for reproducibility
    :return: the fitted RandomForestClassifier
    """
    features = []
    for bitvect in target + control:
        dense = numpy.zeros((1,))
        DataStructs.ConvertToNumpyArray(bitvect, dense)
        features.append(dense)
    labels = [1] * len(target) + [0] * len(control)
    model = RandomForestClassifier(n_estimators=n_est,
                                   random_state=rand_state)
    model.fit(features, labels)
    return model
def test_rf(target, control, rf):
    """Evaluate a fitted random forest on labelled fingerprints.

    :param target: positive-class fingerprints (expected prediction 1)
    :param control: negative-class fingerprints (expected prediction 0)
    :param rf: fitted RandomForestClassifier
    :return: [specificity, sensitivity, fdr, acc, f1, precision]

    NOTE(review): raises ZeroDivisionError when the forest predicts no
    positives at all (tp + fp == 0) — confirm this cannot happen for the
    intended inputs.
    """
    p = len(target)
    n = len(control)
    fp = 0
    fn = 0
    tp = 0
    tn = 0
    for test in target:
        # Densify the RDKit bit vector before predicting.
        arr = numpy.zeros((1,))
        DataStructs.ConvertToNumpyArray(test, arr)
        test = arr
        if rf.predict(test) == 1:
            tp += 1
        else:
            fn += 1
    for test in control:
        arr = numpy.zeros((1,))
        DataStructs.ConvertToNumpyArray(test, arr)
        test = arr
        if rf.predict(test) == 1:
            fp += 1
        else:
            tn += 1
    specificity = tn/float(tn+fp)
    sensitivity = tp/float(tp+fn)
    fdr = fp/float(tp+fp)
    acc = (tp+tn)/float(p+n)
    f1 = (2*tp)/float(2*tp+fp+fn)
    precision = (tp)/float(tp+fp)
    out_list = [specificity, sensitivity, fdr, acc, f1, precision]
    return out_list
def randomly_pick_from_sdf(sdf_filename, max_N=4000):
    """Sample up to max_N molecules from an SDF file and fingerprint them.

    :param sdf_filename: path to an SDF structure file
    :param max_N: number of molecules to sample (without replacement)
    :return: list of Morgan (radius 2) bit-vector fingerprints;
        molecules that fail to fingerprint are skipped
    """
    sdf_struct = SDMolSupplier(sdf_filename)
    print len(sdf_struct)
    sdf_struct = random.sample(sdf_struct, max_N)
    try:
        mol_list = [m for m in sdf_struct]
    except:
        sys.stderr.write("Error parsing SDMolSupplier object\n" +
                         "Error in randomly_pick_from_sdf()\n")
        sys.exit()
    fp_list = []
    for m in mol_list:
        try:
            fp_list.append(AllChem.GetMorganFingerprintAsBitVect(m, 2))
        except:
            continue
    return filter(lambda x: x != None, fp_list)
def main(sa):
    """Train and evaluate the PAINS random-forest classifier.

    :param sa: argv[1:] — [0] SLN file of PAINS patterns, [1] SDF file
        of control molecules

    Sweeps the tree count with optimize_rf(), trains a 300-tree forest,
    pickles it to rf_n300.p and prints held-out test statistics.

    Bug fix: ``pickle.dum`` (an AttributeError at runtime) is now
    ``pickle.dump``; stale commented-out code removed.
    """
    sln_filename = sa[0]
    sdf_filename = sa[1]
    sln_fps = FileHandler.SlnFile(sln_filename).get_fingerprint_list()
    sdf_fps = randomly_pick_from_sdf(sdf_filename, 400)
    pain_train, pain_test = train_test_split(sln_fps,
                                             test_size=0.2,
                                             random_state=24)
    control_train, control_test = train_test_split(sdf_fps,
                                                   test_size=0.2,
                                                   random_state=24)
    optimize_rf(pain_train, control_train)
    rf = train_rf(pain_train, control_train, n_est=300, rand_state=1986)
    pickle.dump(rf, open("rf_n300.p", "wb"))
    test_rf(pain_test, control_test, rf)
if __name__ == "__main__":
main(sys.argv[1:]) | {
"repo_name": "dkdeconti/PAINS-train",
"path": "training_methods/classifier/random_forest_analysis.py",
"copies": "1",
"size": "4363",
"license": "mit",
"hash": 4448526765426525700,
"line_mean": 33.3622047244,
"line_max": 78,
"alpha_frac": 0.5340362136,
"autogenerated": false,
"ratio": 3.2878673700075356,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9307153353723709,
"avg_score": 0.0029500459767650834,
"num_lines": 127
} |
__author__ = 'ddeconti'
import FileHandler
import numpy
import random
import pickle
import sys
from rdkit import DataStructs
from rdkit.Chem import AllChem, SDMolSupplier
from sklearn.cross_validation import train_test_split
from sklearn.neighbors import KNeighborsClassifier
def optimize_knn(target_train, target_test, control_train, control_test):
    """Print test-set metrics for a range of candidate k-NN settings.

    One tab-separated row per candidate value, each evaluated with
    test_knn() on the held-out split.  (The header reuses the
    "num_trees" label from the random-forest script.)
    """
    out_list = ["num_trees", "specificity", "sensitivity", "fdr", "acc", "f1",
                "precision"]
    print '\t'.join(out_list)
    for i in [1, 2, 3, 4, 5, 10, 20, 30, 40, 50, 100, 200, 300, 400, 500,
              1000, 2000, 3000, 4000, 5000]:
        knn = train_knn(target_train, control_train, i)
        stat_list = test_knn(target_test, control_test, knn)
        print '\t'.join(str(j) for j in [i] + stat_list)
def train_knn(target, control, n_neigh):
    """Fit a k-NN classifier on target (label 1) vs control (label 0).

    :param target: RDKit bit-vector fingerprints of the positive class
    :param control: RDKit bit-vector fingerprints of the negative class
    :param n_neigh: number of neighbours (k)
    :return: the fitted KNeighborsClassifier

    Bug fix: ``n_neigh`` was accepted but never passed to the
    classifier, so optimize_knn() was sweeping a parameter that had no
    effect; it is now forwarded as ``n_neighbors``.  (The sklearn
    default is 5, so main()'s train_knn(..., 5) call is unaffected.)
    """
    knn = KNeighborsClassifier(n_neighbors=n_neigh)
    np_fps = []
    for fp in target + control:
        arr = numpy.zeros((1,))
        DataStructs.ConvertToNumpyArray(fp, arr)
        np_fps.append(arr)
    ys_fit = [1] * len(target) + [0] * len(control)
    knn.fit(np_fps, ys_fit)
    return knn
def test_knn(target, control, knn):
    """Evaluate a fitted k-NN classifier on labelled fingerprints.

    :param target: positive-class fingerprints (expected prediction 1)
    :param control: negative-class fingerprints (expected prediction 0)
    :param knn: fitted KNeighborsClassifier
    :return: [specificity, sensitivity, fdr, acc, f1, precision]
    :raises ZeroDivisionError: if the classifier predicts no positives
        at all (tp + fp == 0)

    Cleanup: removed the unused ``ys_fit`` local.
    """
    p = len(target)
    n = len(control)
    fp = 0
    fn = 0
    tp = 0
    tn = 0
    for test in target:
        # Densify the RDKit bit vector before predicting.
        arr = numpy.zeros((1,))
        DataStructs.ConvertToNumpyArray(test, arr)
        if knn.predict(arr) == 1:
            tp += 1
        else:
            fn += 1
    for test in control:
        arr = numpy.zeros((1,))
        DataStructs.ConvertToNumpyArray(test, arr)
        if knn.predict(arr) == 1:
            fp += 1
        else:
            tn += 1
    specificity = tn / float(tn + fp)
    sensitivity = tp / float(tp + fn)
    fdr = fp / float(tp + fp)
    acc = (tp + tn) / float(p + n)
    f1 = (2 * tp) / float(2 * tp + fp + fn)
    precision = tp / float(tp + fp)
    return [specificity, sensitivity, fdr, acc, f1, precision]
def randomly_pick_from_sdf(sdf_filename, max_N=4000):
    """Sample up to max_N molecules from an SDF file and fingerprint them.

    :param sdf_filename: path to an SDF structure file
    :param max_N: number of molecules to sample (without replacement)
    :return: list of Morgan (radius 2) bit-vector fingerprints;
        molecules that fail to fingerprint are skipped
    """
    sdf_struct = SDMolSupplier(sdf_filename)
    print len(sdf_struct)
    sdf_struct = random.sample(sdf_struct, max_N)
    try:
        mol_list = [m for m in sdf_struct]
    except:
        sys.stderr.write("Error parsing SDMolSupplier object\n" +
                         "Error in randomly_pick_from_sdf()\n")
        sys.exit()
    fp_list = []
    for m in mol_list:
        try:
            fp_list.append(AllChem.GetMorganFingerprintAsBitVect(m, 2))
        except:
            continue
    return filter(lambda x: x != None, fp_list)
def main(sa):
    """Train a k-NN PAINS classifier and evaluate it on a third set.

    :param sa: argv[1:] — [0] SLN PAINS file, [1] SDF control file,
        [2] SDF test file
    """
    sln_filename = sa[0]
    sdf_filename = sa[1]
    test_filename = sa[2]
    sln_fp = FileHandler.SlnFile(sln_filename).get_fingerprint_list()
    sdf_fp = randomly_pick_from_sdf(sdf_filename, max_N=4000)
    test_fp = FileHandler.SdfFile(test_filename).get_fingerprint_list()
    pain_train, pain_test = train_test_split(sln_fp,
                                             test_size=0.2,
                                             random_state=24)
    control_train, control_test = train_test_split(sdf_fp,
                                                   test_size=0.2,
                                                   random_state=24)
    #optimize_knn(pain_train, pain_test, control_train, control_test)
    knn = train_knn(pain_train, control_train, 5)
    # Evaluate on the full PAINS set vs. the independent SDF test set.
    stat_list = test_knn(sln_fp, test_fp, knn)
    out_list = ["num_trees", "specificity", "sensitivity", "fdr", "acc", "f1",
                "precision"]
    print '\t'.join(out_list)
    print '\t'.join(str(j) for j in stat_list)
if __name__ == "__main__":
main(sys.argv[1:]) | {
"repo_name": "dkdeconti/PAINS-train",
"path": "training_methods/classifier/knn_analysis.py",
"copies": "1",
"size": "3662",
"license": "mit",
"hash": -7822300753026449000,
"line_mean": 31.4159292035,
"line_max": 78,
"alpha_frac": 0.5674494812,
"autogenerated": false,
"ratio": 3.1487532244196044,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42162027056196044,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ddeconti'
import FileHandler
import numpy
import random
import sys
from bokeh.plotting import figure, output_file, show, VBox, HBox
from rdkit import DataStructs
from rdkit.Chem import AllChem, SDMolSupplier
from sklearn.decomposition.pca import PCA
from sklearn.cross_validation import train_test_split
def pca(target, control, title, name_one, name_two):
    """Project two fingerprint sets into a shared PCA space and plot them.

    :param target: first fingerprint set (plotted blue)
    :param control: second fingerprint set (plotted red)
    :param title: title for both panels
    :param name_one: legend label for the target set
    :param name_two: legend label for the control set
    :return: an HBox with PC1/PC2 and PC2/PC3 scatter panels

    Cleanup: removed the unused ``ys_fit`` and ``names`` locals.
    """
    np_fps = []
    for fp in target + control:
        arr = numpy.zeros((1,))
        DataStructs.ConvertToNumpyArray(fp, arr)
        np_fps.append(arr)
    pca = PCA(n_components=3)
    pca.fit(np_fps)
    np_fps_r = pca.transform(np_fps)
    k = len(target)  # first k projected rows belong to the target set
    p1 = figure(x_axis_label="PC1",
                y_axis_label="PC2",
                title=title)
    p1.scatter(np_fps_r[:k, 0], np_fps_r[:k, 1],
               color="blue", legend=name_one)
    p1.scatter(np_fps_r[k:, 0], np_fps_r[k:, 1],
               color="red", legend=name_two)
    p2 = figure(x_axis_label="PC2",
                y_axis_label="PC3",
                title=title)
    p2.scatter(np_fps_r[:k, 1], np_fps_r[:k, 2],
               color="blue", legend=name_one)
    p2.scatter(np_fps_r[k:, 1], np_fps_r[k:, 2],
               color="red", legend=name_two)
    return HBox(p1, p2)
def pca_no_labels(target, title="PCA clustering of PAINS", color="blue"):
    """Project one fingerprint set onto its first three principal
    components and scatter PC1/PC2 and PC2/PC3 side by side.

    :param target: list of RDKit bit-vector fingerprints
    :param title: title for both panels
    :param color: marker colour
    :return: an HBox of the two bokeh figures
    """
    dense = []
    for bitvect in target:
        row = numpy.zeros((1,))
        DataStructs.ConvertToNumpyArray(bitvect, row)
        dense.append(row)
    projector = PCA(n_components=3)
    projector.fit(dense)
    projected = projector.transform(dense)
    panels = []
    for xi, yi in ((0, 1), (1, 2)):
        panel = figure(x_axis_label="PC%d" % (xi + 1),
                       y_axis_label="PC%d" % (yi + 1),
                       title=title)
        panel.scatter(projected[:, xi], projected[:, yi], color=color)
        panels.append(panel)
    return HBox(panels[0], panels[1])
def randomly_pick_from_sdf(sdf_filename, max_N):
    """Sample max_N molecules from an SDF file and fingerprint them.

    :param sdf_filename: path to an SDF structure file
    :param max_N: number of molecules to sample (without replacement)
    :return: list of Morgan (radius 2) bit-vector fingerprints;
        molecules that fail to fingerprint are skipped
    """
    sdf_struct = SDMolSupplier(sdf_filename)
    print len(sdf_struct)
    sdf_struct = random.sample(sdf_struct, max_N)
    try:
        mol_list = [m for m in sdf_struct]
    except:
        sys.stderr.write("Error parsing SDMolSupplier object\n" +
                         "Error in randomly_pick_from_sdf()\n")
        sys.exit()
    fp_list = []
    for m in mol_list:
        try:
            fp_list.append(AllChem.GetMorganFingerprintAsBitVect(m, 2))
        except:
            continue
    return filter(lambda x: x != None, fp_list)
def main(sa):
    """Render PCA comparison plots for several chemical sets.

    :param sa: argv[1:] — [0] SLN PAINS file, [1] SDF ChEMBL file,
        [2] SDF "set six" file, [3] SMILES (Stitch) file
    All panels are written to pca_plots.html.
    """
    sln_filename = sa[0]
    sdf_filename = sa[1]
    setsix_filename = sa[2]
    smiles_filename = sa[3]
    sln_fp = FileHandler.SlnFile(sln_filename).get_fingerprint_list()
    sdf_fp = randomly_pick_from_sdf(sdf_filename, 400)
    setsix_fp = FileHandler.SdfFile(setsix_filename).get_fingerprint_list()
    smile_fp = FileHandler.SmilesFile(smiles_filename).get_fingerprint_list()
    print "PCA for PAINS vs. Chembl"
    pvc = pca(sln_fp, sdf_fp, "PAINS vs. ChEMBL",
              "PAINS", "ChEMBL")
    print "PCA for PAINS vs. Set six"
    pvb = pca(sln_fp, setsix_fp, "PAINS vs. ChEMBL set 5",
              "PAINS", "ChEMBL.5")
    print "PCA for PAINS vs. STitch"
    pva = pca(sln_fp, smile_fp, "PAINS vs. Stitch",
              "PAINS", "Stitch")
    print "PCA within PAINS"
    pvp = pca_no_labels(sln_fp)
    bvb = pca_no_labels(setsix_fp, title="PCA clustering of ChEMBL set 5",
                        color="red")
    output_file("pca_plots.html")
    p = VBox(pvc, pvb, pva, pvp, bvb)
    show(p)
if __name__ == "__main__":
main(sys.argv[1:]) | {
"repo_name": "dkdeconti/PAINS-train",
"path": "training_methods/clustering/pca_plots_on_fp.py",
"copies": "1",
"size": "3717",
"license": "mit",
"hash": -8352905608510267000,
"line_mean": 33.7476635514,
"line_max": 77,
"alpha_frac": 0.5881086898,
"autogenerated": false,
"ratio": 2.9337016574585637,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9009535965825304,
"avg_score": 0.0024548762866519877,
"num_lines": 107
} |
__author__ = 'ddeconti'
import FileHandler
import pickle
import sys
from bokeh.palettes import Blues9
from bokeh.plotting import output_file, figure, show, VBox, HBox
from bokeh.charts import Histogram, HeatMap
from rdkit import DataStructs
def similarity_compare(fp):
    """Pairwise Tanimoto similarity matrix for a list of fingerprints.

    :param fp: list of RDKit fingerprint objects
    :return: len(fp) x len(fp) matrix of similarities; the diagonal is 1

    Bug fix: the matrix was built as ``[[1]*n]*n``, which repeats ONE
    row object n times, so every write to row i clobbered all rows and
    the result held only the last row's values.  Each row is now an
    independent list.
    """
    n = len(fp)
    tanimoto_matrix = [[1] * n for _ in range(n)]
    for i in range(n):
        for j in range(n):
            if i == j:
                continue
            tanimoto_matrix[i][j] = DataStructs.FingerprintSimilarity(fp[i],
                                                                      fp[j])
    return tanimoto_matrix
def get_similarities_list(m):
    """Flatten the strict upper triangle of a square similarity matrix.

    :param m: square matrix (list of lists) of pairwise similarities
    :return: list of m[i][j] for all i < j, row by row

    Cleanup: the redundant ``i >= len(m) - 1: continue`` guard and the
    manual append loop are replaced with a single comprehension.
    """
    return [m[i][j]
            for i in range(len(m) - 1)
            for j in range(i + 1, len(m))]
def plot_histogram(pains, control):
    """Overlaid 20-bin histograms of PAINS vs control similarity values.

    :param pains: similarity values among PAINS fingerprints
    :param control: similarity values among control fingerprints
    :return: a bokeh Histogram chart (caller is expected to show it)

    Cleanup: the function's docstring slot previously held a block of
    dead pandas code; replaced with real documentation.
    """
    df = {"PAINs": pains, "Control": control}
    output_file("histograms.html")
    hist = Histogram(df, bins=20, legend=True)
    return hist
def plot_heatmap(all):
    """Blue-scale bokeh HeatMap chart of a similarity matrix."""
    p = HeatMap(all, palette=Blues9)
    return p
def _load_or_build(pickle_name, build):
    """Return the object cached in ``pickle_name``; on any failure to
    load, build it with ``build()`` and cache it for the next run."""
    try:
        return pickle.load(open(pickle_name, "rb"))
    except Exception:
        obj = build()
        pickle.dump(obj, open(pickle_name, "wb"))
        return obj
def main(sa):
    """Compare Tanimoto self-similarity of PAINS against a control set.

    :param sa: argv[1:] — [0] SLN file of PAINS patterns, [1] SDF file
        of control molecules
    Every expensive intermediate is cached to a pickle in the working
    directory (via _load_or_build) so reruns are fast.

    Bug fix: the final layout was built but discarded and ``show()`` was
    called with no argument; the VBox is now passed to show() so the
    plots are actually rendered.
    """
    sln_filename = sa[0]  # PAINs
    sdf_filename = sa[1]  # Control set
    sln_file = _load_or_build("sln_file.p",
                              lambda: FileHandler.SlnFile(sln_filename))
    sdf_file = _load_or_build("sdf_file.p",
                              lambda: FileHandler.SdfFile(sdf_filename))
    pains_fp = _load_or_build("pains_fp.p", sln_file.get_fingerprint_list)
    control_fp = _load_or_build("control_fp.p",
                                sdf_file.get_fingerprint_list)
    sys.stdout.write("Tanimoto similarity of PAINs.\n")
    sys.stdout.flush()
    pains_tanimoto = _load_or_build(
        "pains_tanimoto.p", lambda: similarity_compare(pains_fp))
    sys.stdout.write("Tanimoto similarity of Control.\n")
    sys.stdout.flush()
    control_tanimoto = _load_or_build(
        "control_tanimoto.p", lambda: similarity_compare(control_fp))
    sys.stdout.write("Tanimoto similarity of both.\n")
    sys.stdout.flush()
    all_tanimoto = _load_or_build(
        "all_tanimoto.p", lambda: similarity_compare(pains_fp + control_fp))
    sys.stdout.write("Plotting histograms.\n")
    sys.stdout.flush()
    hist = plot_histogram(get_similarities_list(pains_tanimoto),
                          get_similarities_list(control_tanimoto))
    sys.stdout.write("Plotting heatmap\n")
    sys.stdout.flush()
    heatmap = plot_heatmap(all_tanimoto)
    output_file("Pains_vs_Control_plots.html")
    show(VBox(hist, heatmap))
if __name__ == "__main__":
main(sys.argv[1:]) | {
"repo_name": "dkdeconti/PAINS-train",
"path": "training_methods/classifier/basic_stats_plotting.py",
"copies": "1",
"size": "3480",
"license": "mit",
"hash": -36656988482466296,
"line_mean": 31.2314814815,
"line_max": 72,
"alpha_frac": 0.6060344828,
"autogenerated": false,
"ratio": 3.146473779385172,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4252508262185172,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ddeconti'
import FileHandler
import random
import sys
from bokeh.charts import output_file, Histogram, show
from bokeh.models import Range1d
from rdkit.Chem.Fingerprints import FingerprintMols
from rdkit.Chem import AllChem, DataStructs, SDMolSupplier, Fingerprints
def randomly_pick_from_sdf(sdf_filename, max_N=4000):
    """Sample up to max_N molecules from an SDF file.

    :param sdf_filename: path to an SDF structure file
    :param max_N: number of molecules to sample (without replacement)
    :return: list of RDKit Mol objects with None entries removed
    """
    sdf_struct = SDMolSupplier(sdf_filename)
    print len(sdf_struct)
    sdf_struct = random.sample(sdf_struct, max_N)
    try:
        mol_list = [m for m in sdf_struct]
    except:
        sys.stderr.write("Error parsing SDMolSupplier object\n" +
                         "Error in randomly_pick_from_sdf()\n")
        sys.exit()
    return filter(lambda x: x != None, mol_list)
def get_same_similarity(fps):
    """All pairwise Tanimoto similarities within one fingerprint list.

    :param fps: list of RDKit fingerprints
    :return: similarities for every unordered pair (i < j)
    """
    same_sims = []
    for i in xrange(len(fps)):
        for j in xrange(i+1, len(fps)):
            sim = DataStructs.FingerprintSimilarity(fps[i], fps[j])
            same_sims.append(sim)
    return same_sims
def get_diff_sims(pains, rest):
    """Cross-set Tanimoto similarities for every pains x rest pair.

    :param pains: first fingerprint list
    :param rest: second fingerprint list
    :return: list of len(pains) * len(rest) similarities
    """
    diff_sims = []
    for i in xrange(len(pains)):
        for j in xrange(len(rest)):
            sim = DataStructs.FingerprintSimilarity(pains[i], rest[j])
            diff_sims.append(sim)
    return diff_sims
def mol_to_fp(mol_list):
    """Topological RDKit fingerprints for each molecule, skipping any
    molecule that cannot be fingerprinted.

    :param mol_list: list of RDKit Mol objects
    :return: filtered list of fingerprints (None entries removed)
    """
    fingerprints = []
    for molecule in mol_list:
        try:
            fingerprints.append(FingerprintMols.FingerprintMol(molecule))
        except:
            continue
    return filter(lambda entry: entry != None, fingerprints)
def plot_histogram(same, diff):
    """Overlay density histograms of within-PAINS vs PAINS-vs-ChEMBL
    similarities and show them (writes histogram_pains_self_diff.html).
    """
    df = {"PAINS vs. PAINS": same, "PAINS vs. ChEMBL": diff}
    output_file("histogram_pains_self_diff.html")
    hist = Histogram(df, bins=20, density=True, legend=True)
    # Tanimoto similarity is bounded to [0, 1]; clamp the x axis.
    hist.x_range = Range1d(0, 1)
    #hist.legend.orientation = "top_right"
    show(hist)
def main(sa):
    """Compare PAINS self-similarity against PAINS-vs-control similarity.

    :param sa: argv[1:] — [0] SLN PAINS file, [1] SDF control file
    """
    sln_filename = sa[0]
    sdf_filename = sa[1]
    sln_mols = FileHandler.SlnFile(sln_filename).get_mol_list()
    sdf_mols = randomly_pick_from_sdf(sdf_filename, max_N=400)
    print len(sln_mols), len(sdf_mols)
    sln_fps = mol_to_fp(sln_mols)
    sdf_fps = mol_to_fp(sdf_mols)
    same_sims = get_same_similarity(sln_fps)
    diff_sims = get_diff_sims(sln_fps, sdf_fps)
    plot_histogram(same_sims, diff_sims)
if __name__ == "__main__":
main(sys.argv[1:]) | {
"repo_name": "dkdeconti/PAINS-train",
"path": "training_methods/classifier/histogram.py",
"copies": "1",
"size": "2269",
"license": "mit",
"hash": 6381512365689395000,
"line_mean": 27.7341772152,
"line_max": 72,
"alpha_frac": 0.6315557514,
"autogenerated": false,
"ratio": 2.950585175552666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9066029553589126,
"avg_score": 0.0032222746727079516,
"num_lines": 79
} |
__author__ = 'ddeconti'
import FileObjects
import random
import re
import sys
from rdkit.Chem import AllChem, MolFromSmiles
from rdkit.Chem.rdSLNParse import MolFromSLN
from rdkit.Chem import SDMolSupplier
class WikiScrapedDB():
    '''
    Custom wrapper around drug name - side effect counts - gene count - SMILES
    Produce a dictionary of the values
    '''
    def __init__(self, filename):
        '''
        :param filename: path to a tab-separated file with a header row
            and columns: drug name, side-effect count, gene count, SMILES
        :return: None
        Creates a container (self.drug_list) of DrugEffects records.
        '''
        try:
            handle = open(filename, 'rU')
        except IOError as e:
            sys.stderr.write("IOError: " + str(e) +
                             "\nError in WikiScrapedDB.__init__()\n")
            sys.exit()
        self.drug_list = []
        handle.readline()  # skip the header line
        for line in handle:
            line = line.strip('\n').split('\t')
            drug_name = line[0]
            try:
                se_count = int(line[1])
                gene_count = int(line[2])
            except ValueError as e:
                sys.stderr.write("ValueError: " + str(e) +
                                 "\nError in WikiSrapedDB.__init__()\n")
                sys.exit()
            smiles = line[3]
            self.drug_list.append(FileObjects.DrugEffects(drug_name, se_count,
                                                          gene_count, smiles))
        handle.close()
    def get_drug_list(self):
        # List of FileObjects.DrugEffects parsed from the input file.
        return self.drug_list
class SdfFile():
    '''
    Custom wrapper around RDKit's Struct file input methods.
    Focus is to produce a list of RDKit.Mol object.
    Acts as a container for the RDKit.Mol object list.
    '''
    def __init__(self, filename):
        '''
        :param filename: path to an SDF structure file
        :return: None
        Creates container for reference for RDKit.Mol object list:
        self.mol_list
        '''
        self.filename = filename
        try:
            sdl_struct = SDMolSupplier(self.filename)
        except IOError as e:
            sys.stderr.write("IOError: " + str(e) +
                             "\nError in SdfFile.__init__()\n")
            sys.stderr.flush()
            sys.exit()
        try:
            self.mol_list = [m for m in sdl_struct]
        except:
            sys.stderr.write("Error parsing SDMolSupplier object\n" +
                             "Error in SdfFile.__init__()\n")
            sys.exit()
        self.fingerprint_list = None  # computed lazily
    def mol_to_fp(self):
        # Morgan (radius 2) bit-vector fingerprints; molecules that fail
        # to fingerprint are skipped.
        fp_list = []
        for mol in self.get_mol_list():
            try:
                fp_list.append(AllChem.GetMorganFingerprintAsBitVect(mol, 2))
                #fp_list.append(FingerprintMols.FingerprintMol(mol))
            except:
                continue
        #return filter(lambda x: x != None, fp_list)
        return fp_list
    # Setters
    def set_fingerprint_list(self, fp_list):
        self.fingerprint_list = fp_list
    # Getters
    def randomly_pick_from_sdf(self, max_N=4000):
        # Re-reads the file, samples max_N molecules and fingerprints
        # them; independent of self.mol_list and the cached fingerprints.
        sdf_struct = SDMolSupplier(self.filename)
        print len(sdf_struct)
        sdf_struct = random.sample(sdf_struct, max_N)
        try:
            mol_list = [m for m in sdf_struct]
        except:
            sys.stderr.write("Error parsing SDMolSupplier object\n" +
                             "Error in randomly_pick_from_sdf()\n")
            sys.exit()
        fp_list = []
        for m in mol_list:
            try:
                fp_list.append(AllChem.GetMorganFingerprintAsBitVect(m, 2))
            except:
                continue
        return filter(lambda x: x != None, fp_list)
    def get_fingerprint_list(self):
        # Lazily computes and caches the fingerprint list.
        if not self.fingerprint_list:
            self.set_fingerprint_list(self.mol_to_fp())
        return self.fingerprint_list
    def get_mol_list(self):
        return self.mol_list
class SlnFile():
    '''
    Custom wrapper around RDKit's SLN parsers.
    Focus is to produce a list of RDKit.Mol objects.
    Acts as container for the RDKit.Mol object list.
    '''
    def __init__(self, filename):
        self.filename = filename
        self.sln_list = self.parse_sln_file()
        self.mol_list = None  # computed lazily
        self.fingerprint_list = None  # computed lazily
    def parse_sln_file(self):
        '''
        Specifically parses sybyl line notation (SLN file) from the
        JA Holloway 2010 PAINs paper: each SLN string appears on the
        line immediately following a line containing ".txt".
        :return: list(str) of SLN strings
        '''
        try:
            handle = open(self.filename, 'rU')
        except IOError as e:
            sys.stderr.write("IOError: " + str(e) +
                             "\nError in SlnFile.parse_sln_file()\n")
            sys.stderr.flush()
            sys.exit()
        sln_list = []
        next_line = False
        for line in handle:
            line = line.strip('\n')
            if re.search(".txt", line):
                next_line = True
                continue
            if next_line:
                sln_list.append(line)
                next_line = False
        handle.close()
        return sln_list
    def sln_to_mol(self):
        '''
        Converts the stored SLN strings to RDKit Mol objects.
        :return: rdkit mol class in list (unparseable SLNs are skipped)
        '''
        mol_list = []
        for sln in self.sln_list:
            try:
                mol = MolFromSLN(sln)
            except ValueError:
                # ToDo Error tracking output at some point
                continue
            mol_list.append(mol)
        return filter(lambda x: x != None, mol_list)
    def mol_to_fp(self):
        # Morgan (radius 2) bit-vector fingerprints for all molecules.
        if not self.mol_list:
            self.set_mol_list(self.sln_to_mol())
        fp_list = map(lambda x:
                      AllChem.GetMorganFingerprintAsBitVect(x, 2),
                      self.mol_list)
        #fp_list = map(lambda x: FingerprintMols.FingerprintMol(x),
        #              self.mol_list)
        return filter(lambda x: x != None, fp_list)
    def mol_to_plain_fp(self):
        # Unfolded (non-bit-vector) Morgan fingerprints, radius 2.
        if not self.mol_list:
            self.set_mol_list(self.sln_to_mol())
        fp_list = map(lambda x:
                      AllChem.GetMorganFingerprint(x, 2), self.mol_list)
        return filter(lambda x: x != None, fp_list)
    # Setters
    def set_mol_list(self, mol_list):
        self.mol_list = mol_list
    def set_fp_list(self, fp_list):
        self.fingerprint_list = fp_list
    # Getters
    def get_mol_list(self):
        if not self.mol_list:
            self.set_mol_list(self.sln_to_mol())
        return self.mol_list
    def get_fingerprint_list(self):
        # Lazily computes and caches the fingerprint list.
        if not self.fingerprint_list:
            self.set_fp_list(self.mol_to_fp())
        return self.fingerprint_list
    def get_plain_fingerprint_list(self):
        # Not cached: recomputed on every call.
        return self.mol_to_plain_fp()
class SmilesFile():
    '''
    Reads SMILES strings (column 4 of a tab-separated file) and lazily
    converts them to RDKit molecules and Morgan fingerprints.
    '''
    def __init__(self, filename):
        self.filename = filename
        smile_list = []
        try:
            handle = open(self.filename, 'rU')
        except IOError as e:
            sys.stderr.write("IOError: " + str(e) +
                             "\nError in FileHandler.__init__()\n")
            sys.exit()
        for line in handle:
            line = line.strip('\n').split('\t')
            smile_str = line[3]
            smile_list.append(smile_str)
        handle.close()
        self.smile_list = smile_list
        self.mol_list = None  # computed lazily
        self.fp_list = None  # computed lazily
    def smile_to_mol(self):
        # Parses each SMILES; returns None instead of [] when nothing
        # parses, so the lazy getters treat it as "not yet computed".
        mol_list = []
        for smile in self.smile_list:
            try:
                mol = MolFromSmiles(smile)
            except:
                continue
            mol_list.append(mol)
        if len(mol_list) == 0:
            mol_list = None
        return mol_list
    def mol_to_fp(self):
        # Morgan (radius 2) bit-vector fingerprints; failures skipped.
        fp_list = []
        if not self.mol_list:
            self.set_mol_list(self.smile_to_mol())
        for mol in self.mol_list:
            try:
                fp = AllChem.GetMorganFingerprintAsBitVect(mol, 2)
                fp_list.append(fp)
            except:
                continue
        return fp_list
    # Setters
    def set_mol_list(self, mol_list):
        self.mol_list = mol_list
    def set_fp_list(self, fp_list):
        self.fp_list = fp_list
    # Getters
    def get_smile_list(self):
        return self.smile_list
    def get_mol_list(self):
        if not self.mol_list:
            self.set_mol_list(self.smile_to_mol())
        return self.mol_list
    def get_fingerprint_list(self):
        if not self.fp_list:
            self.set_fp_list(self.mol_to_fp())
        return self.fp_list
| {
"repo_name": "dkdeconti/PAINS-train",
"path": "training_methods/classifier/FileHandler.py",
"copies": "2",
"size": "8509",
"license": "mit",
"hash": -5555985953902499000,
"line_mean": 27.9421768707,
"line_max": 78,
"alpha_frac": 0.5233282407,
"autogenerated": false,
"ratio": 3.7435107787065554,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5266839019406555,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ddeconti'
import FileObjects
import re
import sys
from rdkit.Chem import AllChem, MolFromSmiles
from rdkit.Chem.rdSLNParse import MolFromSLN
from rdkit.Chem import SDMolSupplier
class WikiScrapedDB():
    '''
    Custom wrapper around drug name - side effect counts - gene count - SMILES
    Produce a dictionary of the values
    '''
    def __init__(self, filename):
        '''
        :param filename: path to a tab-separated file with a header row
            and columns: drug name, side-effect count, gene count, SMILES
        :return: None
        Creates a container (self.drug_list) of DrugEffects records.
        '''
        try:
            handle = open(filename, 'rU')
        except IOError as e:
            sys.stderr.write("IOError: " + str(e) +
                             "\nError in WikiScrapedDB.__init__()\n")
            sys.exit()
        self.drug_list = []
        handle.readline()  # skip the header line
        for line in handle:
            line = line.strip('\n').split('\t')
            drug_name = line[0]
            try:
                se_count = int(line[1])
                gene_count = int(line[2])
            except ValueError as e:
                sys.stderr.write("ValueError: " + str(e) +
                                 "\nError in WikiSrapedDB.__init__()\n")
                sys.exit()
            smiles = line[3]
            self.drug_list.append(FileObjects.DrugEffects(drug_name, se_count,
                                                          gene_count, smiles))
        handle.close()
    def get_drug_list(self):
        # List of FileObjects.DrugEffects parsed from the input file.
        return self.drug_list
class SdfFile():
    '''
    Custom wrapper around RDKit's Struct file input methods.
    Focus is to produce a list of RDKit.Mol object.
    Acts as a container for the RDKit.Mol object list.
    '''
    def __init__(self, filename):
        '''
        :param filename: path to an SDF structure file
        :return: None
        Creates container for reference for RDKit.Mol object list:
        self.mol_list
        '''
        self.filename = filename
        try:
            sdl_struct = SDMolSupplier(self.filename)
        except IOError as e:
            sys.stderr.write("IOError: " + str(e) +
                             "\nError in SdfFile.__init__()\n")
            sys.stderr.flush()
            sys.exit()
        try:
            self.mol_list = [m for m in sdl_struct]
        except:
            sys.stderr.write("Error parsing SDMolSupplier object\n" +
                             "Error in SdfFile.__init__()\n")
            sys.exit()
        self.fingerprint_list = None  # computed lazily
    def mol_to_fp(self):
        # Morgan (radius 2) bit-vector fingerprints; molecules that fail
        # to fingerprint are skipped.
        fp_list = []
        for mol in self.get_mol_list():
            try:
                fp_list.append(AllChem.GetMorganFingerprintAsBitVect(mol, 2))
                #fp_list.append(FingerprintMols.FingerprintMol(mol))
            except:
                continue
        #return filter(lambda x: x != None, fp_list)
        return fp_list
    # Setters
    def set_fingerprint_list(self, fp_list):
        self.fingerprint_list = fp_list
    # Getters
    def get_fingerprint_list(self):
        # Lazily computes and caches the fingerprint list.
        if not self.fingerprint_list:
            self.set_fingerprint_list(self.mol_to_fp())
        return self.fingerprint_list
    def get_mol_list(self):
        return self.mol_list
class SlnFile():
    '''
    Custom wrapper around RDKit's SLN parsers.
    Focus is to produce a list of RDKit.Mol objects.
    Acts as container for the RDKit.Mol object list.
    '''
    def __init__(self, filename):
        self.filename = filename
        self.sln_list = self.parse_sln_file()
        self.mol_list = None  # computed lazily
        self.fingerprint_list = None  # computed lazily
    def parse_sln_file(self):
        '''
        Specifically parses sybyl line notation (SLN file) from the
        JA Holloway 2010 PAINs paper: each SLN string appears on the
        line immediately following a line containing ".txt".
        :return: list(str) of SLN strings
        '''
        try:
            handle = open(self.filename, 'rU')
        except IOError as e:
            sys.stderr.write("IOError: " + str(e) +
                             "\nError in SlnFile.parse_sln_file()\n")
            sys.stderr.flush()
            sys.exit()
        sln_list = []
        next_line = False
        for line in handle:
            line = line.strip('\n')
            if re.search(".txt", line):
                next_line = True
                continue
            if next_line:
                sln_list.append(line)
                next_line = False
        handle.close()
        return sln_list
    def sln_to_mol(self):
        '''
        Converts the stored SLN strings to RDKit Mol objects.
        :return: rdkit mol class in list (unparseable SLNs are skipped)
        '''
        mol_list = []
        for sln in self.sln_list:
            try:
                mol = MolFromSLN(sln)
            except ValueError:
                # ToDo Error tracking output at some point
                continue
            mol_list.append(mol)
        return filter(lambda x: x != None, mol_list)
    def mol_to_fp(self):
        # Morgan (radius 2) bit-vector fingerprints for all molecules.
        if not self.mol_list:
            self.set_mol_list(self.sln_to_mol())
        fp_list = map(lambda x:
                      AllChem.GetMorganFingerprintAsBitVect(x, 2),
                      self.mol_list)
        #fp_list = map(lambda x: FingerprintMols.FingerprintMol(x),
        #              self.mol_list)
        return filter(lambda x: x != None, fp_list)
    def mol_to_plain_fp(self):
        # Unfolded (non-bit-vector) Morgan fingerprints, radius 2.
        if not self.mol_list:
            self.set_mol_list(self.sln_to_mol())
        fp_list = map(lambda x:
                      AllChem.GetMorganFingerprint(x, 2), self.mol_list)
        return filter(lambda x: x != None, fp_list)
    # Setters
    def set_mol_list(self, mol_list):
        self.mol_list = mol_list
    def set_fp_list(self, fp_list):
        self.fingerprint_list = fp_list
    # Getters
    def get_mol_list(self):
        if not self.mol_list:
            self.set_mol_list(self.sln_to_mol())
        return self.mol_list
    def get_fingerprint_list(self):
        # Lazily computes and caches the fingerprint list.
        if not self.fingerprint_list:
            self.set_fp_list(self.mol_to_fp())
        return self.fingerprint_list
    def get_plain_fingerprint_list(self):
        # Not cached: recomputed on every call.
        return self.mol_to_plain_fp()
class SmilesFile():
    '''
    Container for SMILES strings read from a file (one per line), with
    lazy conversion to RDKit molecules and Morgan fingerprints.
    '''

    def __init__(self, filename):
        # The file is read eagerly; molecule/fingerprint conversion is
        # deferred until requested via the getters.
        self.filename = filename
        smile_list = []
        try:
            # 'rU' mode was removed in Python 3.11; 'r' is equivalent here.
            handle = open(self.filename, 'r')
        except IOError as e:
            sys.stderr.write("IOError: " + str(e) +
                             "\nError in FileHandler.__init__()\n")
            sys.exit()
        for line in handle:
            line = line.strip('\n')
            smile_list.append(line)
        handle.close()
        self.smile_list = smile_list
        self.mol_list = None
        self.fp_list = None

    def smile_to_mol(self):
        '''Convert stored SMILES to RDKit mols; returns None if none parse.'''
        mol_list = []
        for smile in self.smile_list:
            try:
                mol = MolFromSmiles(smile)
            except Exception:
                # Narrowed from a bare except: only swallow real parser
                # errors, not KeyboardInterrupt/SystemExit.
                continue
            mol_list.append(mol)
        if len(mol_list) == 0:
            mol_list = None
        return mol_list

    def mol_to_fp(self):
        '''Morgan (radius 2) bit-vector fingerprints; failed mols skipped.'''
        fp_list = []
        if not self.mol_list:
            self.set_mol_list(self.smile_to_mol())
        for mol in self.mol_list:
            try:
                fp = AllChem.GetMorganFingerprintAsBitVect(mol, 2)
                fp_list.append(fp)
            except Exception:
                # e.g. mol is None because the SMILES failed to parse.
                continue
        return fp_list

    # Setters
    def set_mol_list(self, mol_list):
        self.mol_list = mol_list

    def set_fp_list(self, fp_list):
        self.fp_list = fp_list

    # Getters
    def get_smile_list(self):
        return self.smile_list

    def get_mol_list(self):
        if not self.mol_list:
            self.set_mol_list(self.smile_to_mol())
        return self.mol_list

    def get_fingerprint_list(self):
        if not self.fp_list:
            self.set_fp_list(self.mol_to_fp())
        return self.fp_list
| {
"repo_name": "dkdeconti/PAINS-train",
"path": "training_methods/clustering/FileHandler.py",
"copies": "1",
"size": "7819",
"license": "mit",
"hash": 8171442052276227000,
"line_mean": 27.4327272727,
"line_max": 78,
"alpha_frac": 0.5223174319,
"autogenerated": false,
"ratio": 3.7537205952952473,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47760380271952474,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ddeconti'
import numpy
import pickle
from rdkit.Chem import AllChem, MolFromSmiles
from rdkit import DataStructs
import sys
def parse_smiles(rf, fda_filename):
    """
    Stream a tab-separated SMILES file through a trained classifier and
    print (to stdout) the names of entries predicted positive; a summary
    "total<TAB>hits" line goes to stderr.

    :param rf: trained classifier exposing predict()
    :param fda_filename: TSV whose second column is a SMILES string
    """
    try:
        # 'rU' mode was removed in Python 3.11; 'r' is equivalent here.
        handle = open(fda_filename, 'r')
    except IOError:
        sys.stderr.write("IOError\n")
        sys.exit()
    line_list = []
    total = 0
    for line in handle:
        arow = line.strip('\n').split('\t')
        smiles = arow[1]
        try:
            mol = MolFromSmiles(smiles)
            fp = AllChem.GetMorganFingerprintAsBitVect(mol, 2)
            # ConvertToNumpyArray resizes/fills the target in place.
            arr = numpy.zeros((1,))
            DataStructs.ConvertToNumpyArray(fp, arr)
            fpm = arr
        except Exception:
            # Narrowed from a bare except; unparseable rows are skipped.
            continue
        total += 1
        if rf.predict(fpm)[0] == 1:
            # Dead code removed: an unused fpm_list and an unused
            # predict_proba call were dropped.
            line_list.append(line.strip('\n').split()[1])
    handle.close()
    sys.stderr.write(str(total) + "\t" + str(len(line_list)) + "\n")
    for line in line_list:
        sys.stdout.write(line + "\n")
def main(sa):
    """
    Load a pickled classifier and filter the given SMILES file.

    :param sa: argv[1:], expects [classifier_pickle, smiles_tsv]
    """
    rf_filename = sa[0]
    fda_filename = sa[1]
    # The pickle file is now closed deterministically (previously the
    # open file handle leaked).
    with open(rf_filename, 'rb') as handle:
        rf = pickle.load(handle)
    parse_smiles(rf, fda_filename)


if __name__ == "__main__":
    main(sys.argv[1:])
| {
"repo_name": "dkdeconti/PAINS-train",
"path": "training_methods/file_manipulation/filter_fda_for_PAINS.py",
"copies": "1",
"size": "1239",
"license": "mit",
"hash": 6310865192790266000,
"line_mean": 24.8125,
"line_max": 68,
"alpha_frac": 0.5528652139,
"autogenerated": false,
"ratio": 3.1446700507614214,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4197535264661421,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ddeconti'
import pickle
import FileHandler
import sys
import numpy
import random
from bokeh.plotting import figure, output_file, show, VBox, HBox
from rdkit import DataStructs
from rdkit.Chem import AllChem, SDMolSupplier
def randomly_pick_from_sdf(sdf_filename, max_N=4000):
    """
    Randomly sample molecules from an SDF file.

    :param sdf_filename: path to an SDF structure file
    :param max_N: number of molecules to draw (default 4000, matching
        the previously hard-coded value; now consistent with the
        heatmap.py variant of this helper)
    :return: list of rdkit Mol objects with unparseable entries removed
    """
    sdf_struct = SDMolSupplier(sdf_filename)
    # Debug 'print len(sdf_struct)' removed.
    sdf_struct = random.sample(sdf_struct, max_N)
    try:
        mol_list = [m for m in sdf_struct]
    except Exception:
        # Narrowed from a bare except.
        sys.stderr.write("Error parsing SDMolSupplier object\n" +
                         "Error in randomly_pick_from_sdf()\n")
        sys.exit()
    return [m for m in mol_list if m is not None]
def fill_array(fps):
    """Convert RDKit explicit bit vectors into 1-D numpy arrays."""
    converted = []
    for fingerprint in fps:
        # ConvertToNumpyArray fills (and resizes) the target in place.
        buf = numpy.zeros((1,))
        DataStructs.ConvertToNumpyArray(fingerprint, buf)
        converted.append(buf)
    return converted
def fill_points(target, control, rf):
    """
    Sweep the classifier's probability threshold from 0 to 100 in steps
    of 5 and collect ROC coordinates.

    :param target: positive-class fingerprint vectors
    :param control: negative-class fingerprint vectors
    :param rf: trained classifier exposing predict_proba
    :return: (x, y) lists -- false positive rate and sensitivity per step
    """
    x = []
    y = []
    for i in range(0, 101, 5):
        # Progress indicator: one dot per threshold step.
        sys.stdout.write('.')
        sys.stdout.flush()
        fp = 0
        fn = 0
        tp = 0
        tn = 0
        # Dead code removed: p/n (len of target/control) were computed
        # but never used.
        for test in target:
            if rf.predict_proba(test)[0][1] * 100 > i:
                tp += 1
            else:
                fn += 1
        for test in control:
            if rf.predict_proba(test)[0][1] * 100 > i:
                fp += 1
            else:
                tn += 1
        # NOTE(review): raises ZeroDivisionError if target or control is
        # empty; the caller always passes non-empty samples.
        specificity = tn / float(tn + fp)
        fpr = 1 - specificity
        sensitivity = tp / float(tp + fn)
        x.append(fpr)
        y.append(sensitivity)
    sys.stdout.write('\n')
    return x, y
def main(sa):
    """Build an ROC curve for a pickled classifier and plot it with bokeh."""
    classifier_path = sa[0]
    pains_path = sa[1]
    control_path = sa[2]
    rf = pickle.load(open(classifier_path, 'rb'))
    positives = fill_array(
        FileHandler.SlnFile(pains_path).get_fingerprint_list())
    negatives = fill_array(randomly_pick_from_sdf(control_path))
    x, y = fill_points(random.sample(positives, 40),
                       random.sample(negatives, 400),
                       rf)
    output_file("rf_roc.html")
    plot = figure(x_axis_label="False Positive Rate",
                  y_axis_label="True Positive Rate")
    plot.line(x, y)
    show(plot)


if __name__ == "__main__":
    main(sys.argv[1:])
| {
"repo_name": "dkdeconti/PAINS-train",
"path": "training_methods/classifier/rf_roc.py",
"copies": "1",
"size": "2448",
"license": "mit",
"hash": 2386385120189677000,
"line_mean": 25.3225806452,
"line_max": 71,
"alpha_frac": 0.5477941176,
"autogenerated": false,
"ratio": 3.3534246575342466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9390978119732301,
"avg_score": 0.002048131080389145,
"num_lines": 93
} |
__author__ = 'ddeconti'
import sys
def parse_chemicals(filename):
    """
    Parse a tab-delimited chemicals file into a dict.

    Each line carries the chemical id in the first column and the
    SMILES string in the last column.

    :param filename: path to the tab-delimited chemicals file
    :return: dict mapping chem_id -> {"smiles": str, "num_inter": 0}
    """
    chem_dict = {}
    try:
        # 'rU' mode was removed in Python 3.11; 'r' reads identically here.
        handle = open(filename, 'r')
    except IOError as e:
        sys.stderr.write("IOError: " + str(e) +
                         "\nError in parse_chemicals()\n")
        sys.exit()
    for line in handle:
        line = line.strip('\n').split('\t')
        chem_id = line[0]
        smile = line[-1]
        chem_dict[chem_id] = {"smiles": smile, "num_inter": 0}
    handle.close()
    return chem_dict
def parse_interactions(filename, chem_dict):
    """
    Count interactions per chemical.

    Each line of the file starts with a chemical id; every occurrence of
    a known id increments its "num_inter" counter in place.

    :param filename: tab-delimited interactions file
    :param chem_dict: dict from parse_chemicals(); mutated and returned
    :return: the same chem_dict with updated counts
    """
    try:
        # 'rU' mode was removed in Python 3.11; 'r' reads identically here.
        handle = open(filename, 'r')
    except IOError as e:
        sys.stderr.write("IOError: " + str(e) +
                         "\nError in parse_interactions()\n")
        sys.exit()
    for line in handle:
        line = line.strip('\n').split('\t')
        chem_id = line[0]
        if chem_id in chem_dict:
            chem_dict[chem_id]["num_inter"] += 1
    handle.close()
    return chem_dict
def print_chem_interactions(chem_dict):
    """Write one tab-separated line per chemical: id, SMILES, count."""
    for chem_id, info in chem_dict.items():
        row = [str(chem_id), str(info["smiles"]), str(info["num_inter"])]
        sys.stdout.write('\t'.join(row) + '\n')
def main(sa):
    """Join chemical definitions with interaction counts and print them."""
    chem_dict = parse_chemicals(sa[0])
    chem_dict = parse_interactions(sa[1], chem_dict)
    print_chem_interactions(chem_dict)
if __name__ == "__main__":
main(sys.argv[1:]) | {
"repo_name": "dkdeconti/PAINS-train",
"path": "training_methods/classifier/build_promiscuity_index_from_stitch.py",
"copies": "1",
"size": "1600",
"license": "mit",
"hash": -8201000231034797000,
"line_mean": 27.5892857143,
"line_max": 62,
"alpha_frac": 0.54375,
"autogenerated": false,
"ratio": 3.2388663967611335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42826163967611336,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ddeconti'
'''
Compresses total drugs by number of interactions.
'''
import re
import sys
def print_drug(name, num):
    """
    Write one tab-separated "name<TAB>count" line to stdout.

    :param name: drug identifier
    :param num: interaction count (stringified for output)
    """
    # Debug statement 'print "Test", name, num' removed; it polluted the
    # machine-readable output stream.
    outstr = '\t'.join([name, str(num)])
    sys.stdout.write(outstr + '\n')
def parse_chem(filename):
    """
    Walk a tab-delimited file grouped by drug id (column 0), skipping the
    header row, and emit -- via print_drug -- one line per drug whose
    4th column matches "D1".

    :param filename: path to the tab-delimited input file
    """
    try:
        # 'rU' mode was removed in Python 3.11; 'r' reads identically here.
        handle = open(filename, 'r')
    except IOError as e:
        sys.stderr.write("IOError: " + str(e) +
                         "\nError in parse_chem()\n")
        sys.exit()
    current_drug = None
    current_num = 0
    handle.readline()  # skip header row
    for line in handle:
        line = line.strip('\n').split('\t')
        if not current_drug:
            # Start tracking only when a drug's row is flagged "D1".
            if re.search("D1", line[3]):
                current_drug = line[0]
                current_num += 1
                continue
            else:
                continue
        if current_drug != line[0]:
            # Drug id changed: flush the previous drug's count.
            # (Debug prints removed from both branches below.)
            print_drug(current_drug, current_num)
            if re.search("D1", line[3]):
                current_drug = line[0]
                current_num = 1
            else:
                current_drug = None
                current_num = 0
        # NOTE(review): rows repeating the current drug id do NOT
        # increment current_num -- confirm this is intended.
    if current_drug:
        print_drug(current_drug, current_num)
    handle.close()
def main(sa):
    """Entry point: compress the chemical file named on the command line."""
    parse_chem(sa[0])
if __name__ == "__main__":
main(sys.argv[1:]) | {
"repo_name": "dkdeconti/PAINS-train",
"path": "training_methods/file_manipulation/compress_dsig_file.py",
"copies": "1",
"size": "1404",
"license": "mit",
"hash": 579136021452841900,
"line_mean": 23.224137931,
"line_max": 53,
"alpha_frac": 0.5092592593,
"autogenerated": false,
"ratio": 3.5816326530612246,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9586973417063419,
"avg_score": 0.0007836990595611285,
"num_lines": 58
} |
__author__ = 'ddeconti'
import FileHandler
import numpy
import sys
from bokeh.plotting import figure, output_file, show, VBox, HBox
from rdkit import DataStructs
from sklearn.cluster import DBSCAN
from sklearn.decomposition.pca import PCA
def train_pca(pains_fps, num_components=3):
    '''
    Dimensional reduction of fps bit vectors to principal components
    :param pains_fps: RDKit bit-vector fingerprints
    :param num_components: number of principal components to keep
    :return: pca reduced fingerprints bit vectors
    '''
    numeric_fps = []
    for fingerprint in pains_fps:
        # ConvertToNumpyArray fills (and resizes) the target in place.
        buf = numpy.zeros((1,))
        DataStructs.ConvertToNumpyArray(fingerprint, buf)
        numeric_fps.append(buf)
    reducer = PCA(n_components=num_components)
    reducer.fit(numeric_fps)
    return reducer.transform(numeric_fps)
def train_dbscan(pains_fps):
db = DBSCAN(eps=1, min_samples=10).fit(pains_fps)
print db.labels_
pass
def main(sa):
    """Cluster PAINS fingerprints with DBSCAN on a 2-component PCA."""
    fingerprints = FileHandler.SlnFile(sa[0]).get_fingerprint_list()
    reduced = train_pca(fingerprints, num_components=2)
    train_dbscan(reduced)
    # The figure is created but never populated or shown (as in the
    # original implementation).
    figure(x_axis_label="PC1",
           y_axis_label="PC2")


if __name__ == "__main__":
    main(sys.argv[1:])
| {
"repo_name": "dkdeconti/PAINS-train",
"path": "training_methods/clustering/dbscan_PAINS_pca.py",
"copies": "1",
"size": "1174",
"license": "mit",
"hash": -727271860407655800,
"line_mean": 23.9787234043,
"line_max": 74,
"alpha_frac": 0.6669505963,
"autogenerated": false,
"ratio": 3.1559139784946235,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4322864574794623,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ddeconti'
import FileHandler
import numpy
import sys
from bokeh.plotting import figure, output_file, show, VBox, HBox
from rdkit import DataStructs
from sklearn.cluster import KMeans
from sklearn.decomposition.pca import PCA
from sklearn.metrics import silhouette_score
def train_pca(pains_fps, num_components=3):
    '''
    Dimensional reduction of fps bit vectors to principal components
    :param pains_fps: RDKit bit-vector fingerprints
    :param num_components: how many principal components to retain
    :return: pca reduced fingerprints bit vectors
    '''
    def as_array(fp):
        # ConvertToNumpyArray fills (and resizes) the target in place.
        out = numpy.zeros((1,))
        DataStructs.ConvertToNumpyArray(fp, out)
        return out

    vectors = [as_array(fp) for fp in pains_fps]
    pca = PCA(n_components=num_components)
    pca.fit(vectors)
    return pca.transform(vectors)
def train_kmeans(pc_array, n_clusters):
    """Fit k-means on the first two principal components."""
    model = KMeans(n_clusters=n_clusters)
    model.fit(pc_array[:, 0:2])
    return model
def plot_k_means(k_means, pc_array):
    """Scatter-plot the first two PCs, one color per k-means cluster."""
    clusters = {}
    for idx, label in enumerate(k_means.labels_):
        entry = clusters.setdefault(label, {"PC1": [], "PC2": []})
        entry["PC1"].append(pc_array[idx][0])
        entry["PC2"].append(pc_array[idx][1])
    output_file("pca_kmeans_cluster.html")
    plot = figure(x_axis_label="PC1",
                  y_axis_label="PC2")
    palette = ["blue", "red", "green", "purple", "orange", "cyan",
               "magenta", "yellow", "black"]
    for label in sorted(clusters.keys()):
        plot.scatter(clusters[label]["PC1"], clusters[label]["PC2"],
                     color=palette[label])
    return plot
def recurr_plots(reduced_pains_fps, current_iter, max_iter=9):
    """Recursively stack k-means plots for k = current_iter .. max_iter."""
    model = train_kmeans(reduced_pains_fps, current_iter)
    plot = plot_k_means(model, reduced_pains_fps)
    if current_iter == max_iter:
        return plot
    return VBox(plot, recurr_plots(reduced_pains_fps, current_iter + 1))
def kmeans_sil_analysis(reduced_pains_fps):
s = []
for n_clusters in range(2, 21):
kmeans = train_kmeans(reduced_pains_fps, n_clusters)
labels = kmeans.labels_
centroids = kmeans.cluster_centers_
s.append(silhouette_score(reduced_pains_fps[:,:],
labels,
metric="euclidean"))
print s
p = figure(title="Silhouette Scoring of PCA K-means clusters",
x_axis_label="Number of k clusters",
y_axis_label="Silhouette score")
p.line(range(2,21), s)
return p
def plot_intersect(d):
    """Scatter every point in d, colored by its integer cluster key."""
    palette = ["blue", "red", "green", "purple", "orange", "cyan",
               "magenta", "yellow", "black"]
    plot = figure(x_axis_label="PC1",
                  y_axis_label="PC2")
    for key in sorted(d.keys()):
        for point in d[key]:
            plot.scatter(point[0], point[1], color=palette[key])
    output_file("PCA_with_fda_drugs.html")
    show(plot)
def main(sa):
    """
    Cluster PAINS + FDA fingerprints together with PCA + k-means (k=8),
    then tally which cluster each FDA drug lands in.

    :param sa: argv[1:], expects [pains_sln_file, fda_smiles_file]
    """
    pains_filename = sa[0]
    fda_filename = sa[1]
    #sdf_filename = sa[1]
    pains_fps = FileHandler.SlnFile(pains_filename).get_fingerprint_list()
    fda_fps = FileHandler.SmilesFile(fda_filename).get_fingerprint_list()
    #sdf_fps = FileHandler.SdfFile(sdf_filename).get_fingerprint_list()
    #reduced_pains_fps = train_pca(pains_fps+sdf_fps)
    # Both sets are reduced together so they share one PCA basis.
    reduced_pains_fps = train_pca(pains_fps+fda_fps)
    #kmeans_predict = train_kmeans(reduced_pains_fps)
    #plot_k_means(kmeans_predict, reduced_pains_fps)
    #plots = recurr_plots(reduced_pains_fps, 1)
    #sil_plot = kmeans_sil_analysis(reduced_pains_fps)
    #show(VBox(plots, sil_plot))
    k = train_kmeans(reduced_pains_fps, 8)
    # Rows after the PAINS block belong to the FDA set.
    reduced_fda_fps = reduced_pains_fps[len(pains_fps):]
    fda_dict = {}
    # Count FDA drugs per predicted cluster (first two PCs only).
    for x in reduced_fda_fps[:,0:2]:
        key = k.predict(x)[0]
        if key in fda_dict:
            fda_dict[key] += 1
        else:
            fda_dict[key] = 1
    print fda_dict
    color_dict = {}
    # Group the PAINS points by their predicted cluster.
    for fps in reduced_pains_fps[:len(pains_fps)]:
        f = list(fps[0:2])
        clust = k.predict(f)[0]
        if clust in color_dict:
            color_dict[clust].append(f)
        else:
            color_dict[clust] = [f]
    # Cluster key 8 is reserved for the FDA points (k-means labels are 0-7).
    color_dict[8] = list([list(x) for x in reduced_fda_fps[:,0:2]])
    #plot_intersect(color_dict)


if __name__ == "__main__":
    main(sys.argv[1:])
| {
"repo_name": "dkdeconti/PAINS-train",
"path": "training_methods/clustering/kmeans_clustering_of_pca_reduction.py",
"copies": "1",
"size": "4434",
"license": "mit",
"hash": -641481312068564,
"line_mean": 30.6714285714,
"line_max": 74,
"alpha_frac": 0.5906630582,
"autogenerated": false,
"ratio": 3.0558235699517575,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9125134279191489,
"avg_score": 0.0042704697920537245,
"num_lines": 140
} |
__author__ = 'ddeconti'
import FileHandler
import pandas
import random
import sys
from bokeh.palettes import Blues9
from bokeh.charts import HeatMap, output_file, show
from rdkit.Chem.Fingerprints import FingerprintMols
from rdkit.Chem import AllChem, DataStructs, SDMolSupplier, Fingerprints
def randomly_pick_from_sdf(sdf_filename, max_N=4000):
    """
    Randomly sample up to max_N molecules from an SDF file.

    :param sdf_filename: path to an SDF structure file
    :param max_N: number of molecules to draw
    :return: list of rdkit Mol objects with unparseable entries removed
    """
    sdf_struct = SDMolSupplier(sdf_filename)
    # Debug 'print len(sdf_struct)' removed.
    sdf_struct = random.sample(sdf_struct, max_N)
    try:
        mol_list = [m for m in sdf_struct]
    except Exception:
        # Narrowed from a bare except.
        sys.stderr.write("Error parsing SDMolSupplier object\n" +
                         "Error in randomly_pick_from_sdf()\n")
        sys.exit()
    return [m for m in mol_list if m is not None]
def mol_to_fp(mol_list):
    """
    Compute topological fingerprints for a list of molecules.

    :param mol_list: rdkit Mol objects
    :return: list of fingerprints; molecules that fail are skipped
    """
    fp_list = []
    for mol in mol_list:
        try:
            fp_list.append(FingerprintMols.FingerprintMol(mol))
        except Exception:
            # Narrowed from a bare except: only swallow fingerprinting
            # errors, not KeyboardInterrupt/SystemExit.
            continue
    return [fp for fp in fp_list if fp is not None]
def make_sim_matrix(fps):
    """Pairwise fingerprint-similarity matrix (list of row lists)."""
    return [[DataStructs.FingerprintSimilarity(row_fp, col_fp)
             for col_fp in fps]
            for row_fp in fps]
def plot_heatmap(m):
    """Render the similarity matrix as a matplotlib heatmap PNG."""
    import matplotlib.pyplot as plt
    import numpy as np
    matrix = np.array([np.array(row) for row in m])
    fig, ax = plt.subplots()
    ax.pcolor(matrix)
    plt.savefig("heatmap.png")
def b_heatmap(df):
    """Render the similarity DataFrame as an interactive bokeh heatmap."""
    output_file("heatmap.html")
    show(HeatMap(df, palette=Blues9))
def matrix_to_pd(m):
    """
    Convert a square similarity matrix (list of rows) into a DataFrame
    whose column i holds row i of the matrix, indexed 0..n-1.

    :param m: list of equal-length numeric lists
    :return: pandas.DataFrame
    """
    # enumerate replaces the index-based xrange loop: identical result,
    # and portable across Python 2 and 3.
    data = {}
    idx = []
    for i, row in enumerate(m):
        idx.append(i)
        data[i] = row
    return pandas.DataFrame(data, index=idx)
def main(sa):
    """Heatmap of pairwise similarities: PAINS (SLN) plus sampled SDF mols."""
    pains_mols = FileHandler.SlnFile(sa[0]).get_mol_list()
    sampled_mols = randomly_pick_from_sdf(sa[1], max_N=400)
    fps = mol_to_fp(pains_mols) + mol_to_fp(sampled_mols)
    plot_heatmap(make_sim_matrix(fps))
    # Alternative interactive output:
    # b_heatmap(matrix_to_pd(make_sim_matrix(fps)))


if __name__ == "__main__":
    main(sys.argv[1:])
| {
"repo_name": "dkdeconti/PAINS-train",
"path": "training_methods/classifier/heatmap.py",
"copies": "1",
"size": "2166",
"license": "mit",
"hash": -7639049572899019000,
"line_mean": 23.6136363636,
"line_max": 74,
"alpha_frac": 0.6108033241,
"autogenerated": false,
"ratio": 2.911290322580645,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4022093646680645,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ddeconti'
import random
import re
import sys
import time
import wikipedia
from bs4 import BeautifulSoup
'''
Scraping Wikipedia for SMILES for drugbank csv from Aqeel.
Deprecated...
'''
def scrape_for_smiles(name):
    '''
    Scrapes wikipedia for smiles string of given drug name
    :param name: name of drug
    :return: smiles string, or None when the page has no SMILES box
    '''
    max_wait = 7
    # Random delay (up to max_wait seconds) to avoid hammering Wikipedia.
    time.sleep(max_wait * random.random())
    search_results = wikipedia.search(name)
    wiki_page = wikipedia.page(search_results[0])
    wiki_html = wiki_page.html()
    soup = BeautifulSoup(wiki_html, "html.parser")
    smiles_tag = soup.find("div",
                           {"style":
                            "word-wrap:break-word; text-indent:-1.5em"})
    if not smiles_tag:
        return None
    smiles_tag = str(smiles_tag)
    # Guard the slice markers: previously a page missing either marker
    # crashed on None.start(); callers already treat None as "not found".
    first = re.search("-1.5em", smiles_tag)
    last = re.search("/div", smiles_tag)
    if not first or not last:
        return None
    smiles = smiles_tag[first.start() + 8:last.start() - 1]
    return smiles
def get_smiles(drug_dict):
    '''
    Puts smiles value from wikipedia to "smiles" key from drug dict
    :param drug_dict: dictionary of drugs with keys for smiles and counts
    :return: updated drug_dict with smiles key and value
    '''
    for name in drug_dict:
        result = scrape_for_smiles(name)
        if result:
            drug_dict[name]["smiles"] = result
    return drug_dict
def print_dict(drug_dict):
    '''
    Prints drug dict keys and values to tab delimited stdout
    :param drug_dict: drug name -> {se_count, gene_count[, smiles]}
    :return: None
    '''
    header = ["drug_name", "se_count", "gene_count", "smiles"]
    sys.stdout.write('\t'.join(header) + "\n")
    for name in drug_dict:
        info = drug_dict[name]
        # Drugs whose scrape found no SMILES are skipped entirely.
        if "smiles" not in info:
            continue
        row = [name, str(info["se_count"]), str(info["gene_count"]),
               str(info["smiles"])]
        sys.stdout.write('\t'.join(row) + '\n')
def parse_csv(filename):
    '''
    Parse Aqeel csv summary of database with drug names, # of side effects
    and gene expression interactions
    :param filename: name of csv file (header row is skipped)
    :return: dict mapping drug name -> {"se_count": int, "gene_count": int}
    '''
    drug_dict = {}
    try:
        # 'rU' mode was removed in Python 3.11; 'r' reads identically here.
        handle = open(filename, 'r')
    except IOError as e:
        sys.stderr.write("IOError: " + str(e) +
                         "\nError in parse_csv()\n")
        sys.exit()
    handle.readline()  # skip header row
    for line in handle:
        line = line.strip('\n').split(',')
        drug_name = line[1]
        try:
            gene_count = int(line[2])
            se_count = int(line[3])
        except ValueError as e:
            sys.stderr.write("ValueError: " + str(e) +
                             "\nError in parse_csv()\n")
            sys.exit()
        drug_dict[drug_name] = {"se_count": se_count, "gene_count": gene_count}
    handle.close()
    return drug_dict
def main(sa):
    """Entry point: annotate the CSV named in argv with scraped SMILES."""
    drugs = parse_csv(sa[0])
    drugs = get_smiles(drugs)
    print_dict(drugs)


if __name__ == "__main__":
    main(sys.argv[1:])
| {
"repo_name": "dkdeconti/PAINS-train",
"path": "training_methods/file_manipulation/scrape_wikipedia_for_smiles.py",
"copies": "1",
"size": "3272",
"license": "mit",
"hash": 1241002158242353700,
"line_mean": 28.2142857143,
"line_max": 79,
"alpha_frac": 0.5861858191,
"autogenerated": false,
"ratio": 3.255721393034826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4341907212134826,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ddurando'
"""
POX component - arpnat
The aim of this component is to address the problem of
ARP poisoning in SDN networks
"""
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.packet.ethernet import ethernet, ETHER_BROADCAST
from pox.lib.revent import *
from pox.lib.packet.arp import arp
from pox.lib.addresses import IPAddr, EthAddr
from collections import defaultdict
from pox.openflow.discovery import Discovery
from pox.lib.util import dpid_to_str, str_to_dpid
from pox.lib.recoco import Timer
import time
import threading
import collections
log = core.getLogger()
_flood_delay = 0
class DictTTL:
    """
    Dict whose entries expire after a fixed timeout (seconds).

    add() refuses to overwrite an existing key with a different value --
    used below to detect conflicting ARP replies for the same IP pair.
    """

    def __init__(self, timeout):
        self.lock = threading.Lock()
        self.timeout = timeout
        self.container = {}

    def add(self, key, value):
        """
        Insert key -> value with a TTL.

        :return: True if inserted, or if the key already maps to the
            same value; False if it maps to a different value.
        """
        # Race fix: the membership check now happens under the lock.
        # Previously the check-then-act was unsynchronized, so two
        # racing adds of the same key could both insert and schedule
        # two expiry timers (the second pop raised KeyError).
        with self.lock:
            if key in self.container:
                # Same value: accept silently; different value: conflict.
                return self.container[key] == value
            self.container[key] = value
        threading.Timer(self.timeout, self.expire_func, args=(key,)).start()
        return True

    def __len__(self):
        with self.lock:
            return len(self.container)

    def expire_func(self, remove_item):
        """Timer callback: drop the entry when its TTL elapses."""
        with self.lock:
            # pop with default: tolerate an already-removed entry.
            self.container.pop(remove_item, None)

    def __contains__(self, val):
        with self.lock:
            return val in self.container

    def __getitem__(self, val):
        # Returns False (not KeyError) for missing keys, matching the
        # truthiness checks at the call sites.
        with self.lock:
            if val in self.container:
                return self.container[val]
            return False
class ArpNat(object):
    """
    POX component that proxies ARP through a dummy IP/MAC pair so hosts
    never exchange ARP replies directly; the controller re-originates
    requests/replies and can detect conflicting (poisoned) replies via
    the arpttl cache.
    """

    def __init__(self):
        # self._expire_timer = Timer(5, _handle_expiration, recurring=True)
        # self.ip = IPAddr(input("Enter dummy IP Address: "))
        # self.mac = EthAddr(input("Enter dummy MAC Address: "))
        # Dummy identity used when the controller re-originates ARP.
        self.ip = IPAddr("10.0.0.100")
        self.mac = EthAddr("00:11:22:33:44:55")
        # All-zero source MAC used for corrective replies.
        self.safe = EthAddr("00:00:00:00:00:00")
        core.addListeners(self, priority=1)
        # NOTE(review): hold_down_expired appears unused in this class.
        self.hold_down_expired = _flood_delay == 0

    def _handle_GoingUpEvent(self, event):
        # Subscribe to OpenFlow events once the core is up.
        core.openflow.addListeners(self)
        log.debug("up...")

    def _handle_ConnectionUp(self, event):
        # Install base ARP rules on each newly connected switch:
        # fm1: flood requests that originate from the dummy identity.
        fm1 = of.ofp_flow_mod()
        fm1.priority -= 0x1000
        fm1.match.dl_type = ethernet.ARP_TYPE
        fm1.match.nw_src = self.ip
        fm1.match.dl_src = self.mac
        fm1.match.nw_proto = arp.REQUEST
        fm1.actions.append(of.ofp_action_output(port=of.OFPP_FLOOD))
        event.connection.send(fm1)
        # fm2: replies addressed to the dummy identity go to the controller.
        fm2 = of.ofp_flow_mod()
        fm2.priority -= 0x1000
        fm2.match.dl_type = ethernet.ARP_TYPE
        fm2.match.nw_dst = self.ip
        fm2.match.dl_dst = self.mac
        fm2.match.nw_proto = arp.REPLY
        fm2.actions.append(of.ofp_action_output(port=of.OFPP_CONTROLLER))
        event.connection.send(fm2)
        # fm3: all other ARP requests go to the controller.
        fm3 = of.ofp_flow_mod()
        fm3.priority -= 0x1000
        fm3.match.dl_type = ethernet.ARP_TYPE
        fm3.match.nw_proto = arp.REQUEST
        fm3.actions.append(of.ofp_action_output(port=of.OFPP_CONTROLLER))
        event.connection.send(fm3)
        # fm4: any other ARP reply is dropped (OFPP_NONE).
        fm4 = of.ofp_flow_mod()
        fm4.priority -= 0x1000
        fm4.match.dl_type = ethernet.ARP_TYPE
        fm4.match.nw_proto = arp.REPLY
        fm4.actions.append(of.ofp_action_output(port=of.OFPP_NONE))
        event.connection.send(fm4)

    def _handle_PacketIn(self, event):
        """Proxy ARP requests/replies arriving at the controller."""
        dpid = event.dpid
        inport = event.port
        packet = event.parsed
        if not packet.parsed:
            log.warning("%s: ignoring unparsed packet", dpid_to_str(dpid))
            return

        # NOTE(review): drop() is defined but never called below.
        def drop(duration=None):
            """
            Drops this packet and optionally installs a flow to continue
            dropping similar ones for a while
            """
            if duration is not None:
                if not isinstance(duration, tuple):
                    duration = (duration, duration)
                msg = of.ofp_flow_mod()
                msg.match = of.ofp_match.from_packet(packet)
                msg.idle_timeout = duration[0]
                msg.hard_timeout = duration[1]
                msg.buffer_id = event.ofp.buffer_id
                event.connection.send(msg)
            elif event.ofp.buffer_id is not None:
                msg = of.ofp_packet_out()
                msg.buffer_id = event.ofp.buffer_id
                msg.in_port = event.port
                event.connection.send(msg)

        a = packet.find('arp')
        if not a:
            return
        log.debug("%s ARP %s %s => %s", dpid_to_str(dpid),
                  {arp.REQUEST: "request", arp.REPLY: "reply"}.get(a.opcode,'op:%i' % (a.opcode,)), str(a.protosrc),
                  str(a.protodst))
        if a.opcode == arp.REQUEST:
            # Ignore our own re-originated requests (matched by fm1 anyway).
            if packet.payload.hwsrc != self.mac and packet.payload.protosrc != self.ip:
                # Remember who asked for this IP so the eventual reply
                # can be routed back: [hwsrc, protosrc, dpid, inport].
                if packet.payload.protodst in arpNat:
                    arpNat[packet.payload.protodst].append(
                        [packet.payload.hwsrc, packet.payload.protosrc, dpid, inport])
                else:
                    arpNat[packet.payload.protodst] = [[packet.payload.hwsrc, packet.payload.protosrc, dpid, inport]]
                # Re-originate the request from the dummy identity.
                r = arp()
                r.hwtype = r.HW_TYPE_ETHERNET
                r.prototype = r.PROTO_TYPE_IP
                r.hwlen = 6
                r.protolen = r.protolen
                r.opcode = r.REQUEST
                r.hwdst = ETHER_BROADCAST
                r.protodst = packet.payload.protodst
                r.protosrc = self.ip
                r.hwsrc = self.mac
                e = ethernet(type=ethernet.ARP_TYPE, src=self.mac, dst=ETHER_BROADCAST)
                e.payload = r
                msg = of.ofp_packet_out()
                msg.data = e.pack()
                msg.actions.append(of.ofp_action_output(port=of.OFPP_FLOOD))
                # msg.in_port = inport
                event.connection.send(msg)
                return EventHalt
            else:
                return
        elif a.opcode == arp.REPLY:
            # A reply for our dummy identity with a pending requester:
            # rewrite it and deliver to the original asker.
            if (packet.payload.protosrc in arpNat and arpNat[packet.payload.protosrc]) and (packet.payload.protodst == self.ip) and (packet.payload.hwdst == self.mac):
                # flag = False
                # count = 0
                # for e in arpNat[packet.payload.protosrc]:
                #     if e[2] == dpid:
                #         flag = True
                #         i = count
                #     count += 1
                # if flag:
                r = arp()
                r.hwtype = r.HW_TYPE_ETHERNET
                r.prototype = r.PROTO_TYPE_IP
                r.hwlen = 6
                r.protolen = r.protolen
                r.opcode = r.REPLY
                # print arpNat[packet.payload.protosrc]
                r.hwdst, r.protodst, outpid, outport = arpNat[packet.payload.protosrc].pop()
                r.hwsrc = packet.payload.hwsrc
                r.protosrc = packet.payload.protosrc
                e = ethernet(type=ethernet.ARP_TYPE, src=self.mac, dst=r.hwdst)
                e.set_payload(r)
                log.debug("ARPing for %s on behalf of %s" % (r.protodst, r.protosrc))
                msg = of.ofp_packet_out(data=e.pack(), action = of.ofp_action_output(port = outport))
                # msg.data = e.pack()
                # msg.actions.append(of.ofp_action_output(port=outport))
                # msg.in_port = inport
                core.openflow.sendToDPID(outpid,msg)
                # event.connection.send(msg)
                # print "outport: "+str(outport) +" dpid:"+ str(dpid)+" outpid:"+str(outpid)
                # Cache the answer for TTL seconds so later conflicting
                # replies for the same (src, dst) pair can be detected.
                arpttl.add((packet.payload.protosrc, packet.payload.protodst),[packet.payload.hwsrc,outport,outpid, r.protodst, r.hwdst])
                return EventHalt
            else:
                if (packet.payload.protosrc, packet.payload.protodst) in arpttl:
                    if arpttl[(packet.payload.protosrc, packet.payload.protodst)][0] == packet.payload.hwsrc:
                        print "multiple replies, but OK"
                        return EventHalt
                    else:
                        # if dpid == arpttl[(packet.payload.protosrc, packet.payload.protodst)][2]:
                        # Conflicting MAC for a cached reply: send a
                        # corrective reply from the all-zero "safe" MAC.
                        print "multiple replies for the same IP with different mac addresses"
                        r = arp()
                        r.hwtype = r.HW_TYPE_ETHERNET
                        r.prototype = r.PROTO_TYPE_IP
                        r.hwlen = 6
                        r.protolen = r.protolen
                        r.opcode = r.REPLY
                        r.hwdst = arpttl[(packet.payload.protosrc, packet.payload.protodst)][4]
                        r.protodst = arpttl[(packet.payload.protosrc, packet.payload.protodst)][3]
                        outport = arpttl[(packet.payload.protosrc, packet.payload.protodst)][1]
                        r.hwsrc = self.safe
                        r.protosrc = packet.payload.protosrc
                        e = ethernet(type=ethernet.ARP_TYPE, src=self.safe, dst=r.hwdst)
                        e.set_payload(r)
                        log.debug("ARPing for %s on behalf of %s" % (r.protodst, r.protosrc))
                        msg = of.ofp_packet_out(data=e.pack(), action = of.ofp_action_output(port = outport))
                        # msg = of.ofp_packet_out()
                        # msg.data = e.pack()
                        # msg.actions.append(of.ofp_action_output(port=outport))
                        # msg.in_port = inport
                        # NOTE(review): blocking sleep stalls the whole
                        # POX event loop for 1 second -- confirm intended.
                        time.sleep(1)
                        core.openflow.sendToDPID(arpttl[(packet.payload.protosrc, packet.payload.protodst)][2],msg)
                        # event.connection.send(msg)
                        return EventHalt
                else:
                    # Reply with no pending request and no cache entry.
                    print "Dropping gratuitous reply"
                    return EventHalt
def launch():
    """POX entry point: register the ArpNat component on core."""
    log.info("arpNat component running")
    core.registerNew(ArpNat)
# Shared state for all ArpNat handlers:
# arpNat maps queried IP -> list of [hwsrc, protosrc, dpid, inport]
# entries for requesters awaiting a proxied reply.
arpNat = {}
# arpttl caches (src IP, dst IP) -> [hwsrc, outport, outpid, protodst,
# hwdst] for 5 seconds so conflicting later replies can be detected.
arpttl = DictTTL(timeout = 5)
| {
"repo_name": "ddurando/pox.carp",
"path": "ext/mr_arpnat.py",
"copies": "2",
"size": "10608",
"license": "apache-2.0",
"hash": -5639258446158064000,
"line_mean": 38.4349442379,
"line_max": 167,
"alpha_frac": 0.5266779789,
"autogenerated": false,
"ratio": 3.7750889679715303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00431295579193758,
"num_lines": 269
} |
__author__ = 'ddustin'
import certifi
import json
from threading import Thread, Condition
from urllib2 import Request, urlopen, URLError
from datetime import datetime, timedelta
class BtcPrice(Thread):
    """
    A class for loading and caching the current Bitcoin exchange price.
    There only needs to be one instance of the class running, use BtcPrice.instance() to access it
    """

    @staticmethod
    def instance():
        # Returns the most recently constructed BtcPrice (set in __init__).
        return BtcPrice.__instance

    def __init__(self):
        Thread.__init__(self, name="BtcPrice Thread")
        self.prices = {}            # currency code -> BTC exchange rate
        self.condition = Condition()  # guards prices and the run() sleep
        self.keepRunning = True
        # Price sources, tried in order until one succeeds.
        self.loadPriorities = ["loadbitcoinaverage", "loadbitpay", "loadblockchain", "loadbitcoincharts"]
        BtcPrice.__instance = self

    def closethread(self):
        # Wake run() so it can observe keepRunning == False and exit.
        self.condition.acquire()
        self.keepRunning = False
        self.condition.notify()
        self.condition.release()

    def get(self, currency, refresh_rates=True):
        """
        :param currency: an upper case 3 letter currency code
        :return: a floating point number representing the exchange rate from BTC => currency
        """
        if refresh_rates:
            self.loadPrices()
        self.condition.acquire()
        try:
            last = self.prices[currency]
        except Exception:
            # Unknown currency (or load failure): report 0 rather than raise.
            last = 0
        finally:
            self.condition.release()
        return last

    def run(self):
        minuteInterval = 15
        while self.keepRunning:
            self.condition.acquire()
            self.loadPrices()
            now = datetime.now()
            # Sleep until roughly the next 15-minute wall-clock boundary
            # (condition.wait also wakes early if closethread notifies).
            sleepTime = timedelta(minutes=minuteInterval - now.minute % minuteInterval).total_seconds() - now.second
            self.condition.wait(sleepTime)
            self.condition.release()
        BtcPrice.__instance = None

    def loadPrices(self):
        # Try each source in priority order; stop at the first success.
        success = False
        for priority in self.loadPriorities:
            try:
                getattr(self, priority)()
                success = True
                break
            except URLError as e:
                print "Error loading " + priority + " url " + str(e)
            except (ValueError, KeyError, TypeError) as e:
                print "Error reading " + priority + " data" + str(e)
        if not success:  # pragma: no cover
            print "BtcPrice unable to load Bitcoin exchange price"

    @staticmethod
    def dictForUrl(url):
        # Fetch url (5s timeout, certifi CA bundle) and parse JSON body.
        request = Request(url)
        result = urlopen(request, cafile=certifi.where(), timeout=5).read()
        return json.loads(result)

    def loadbitcoinaverage(self):
        for currency, info in self.dictForUrl('https://api.bitcoinaverage.com/ticker/global/all').iteritems():
            if currency != "timestamp":
                self.prices[currency] = info["last"]

    def loadbitpay(self):
        for currency in self.dictForUrl('https://bitpay.com/api/rates'):
            self.prices[currency["code"]] = currency["rate"]

    def loadblockchain(self):
        for currency, info in self.dictForUrl('https://blockchain.info/ticker').iteritems():
            self.prices[currency] = info["last"]

    def loadbitcoincharts(self):
        for currency, info in self.dictForUrl('https://api.bitcoincharts.com/v1/weighted_prices.json').iteritems():
            if currency != "timestamp":
                self.prices[currency] = info["24h"]
| {
"repo_name": "cpacia/OpenBazaar-Server",
"path": "market/btcprice.py",
"copies": "2",
"size": "3402",
"license": "mit",
"hash": -2331751910465916000,
"line_mean": 31.4,
"line_max": 116,
"alpha_frac": 0.6058201058,
"autogenerated": false,
"ratio": 4.412451361867705,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006401624373374956,
"num_lines": 105
} |
__author__ = 'ddustin'
import json
from threading import Thread, Condition
from urllib2 import Request, urlopen, URLError
from datetime import datetime, timedelta
class BtcPrice(Thread):
    """
    A class for loading and caching the current Bitcoin exchange price.
    There only needs to be one instance of the class running, use BtcPrice.instance() to access it
    """

    @staticmethod
    def instance():
        # Returns the most recently constructed BtcPrice (set in __init__).
        return BtcPrice.__instance

    def __init__(self):
        Thread.__init__(self, name="BtcPrice Thread")
        self.prices = {}            # currency code -> BTC exchange rate
        self.condition = Condition()  # guards prices and the run() sleep
        self.keepRunning = True
        # Price sources, tried in order until one succeeds.
        self.loadPriorities = ["loadbitcoinaverage", "loadbitpay", "loadblockchain", "loadbitcoincharts"]
        BtcPrice.__instance = self

    def closethread(self):
        # Wake run() so it can observe keepRunning == False and exit.
        self.condition.acquire()
        self.keepRunning = False
        self.condition.notify()
        self.condition.release()

    def get(self, currency, refresh_rates=True):
        """
        :param currency: an upper case 3 letter currency code
        :return: a floating point number representing the exchange rate from BTC => currency
        """
        if refresh_rates:
            self.loadPrices()
        self.condition.acquire()
        try:
            last = self.prices[currency]
        except Exception:
            # Unknown currency (or load failure): report 0 rather than raise.
            last = 0
        finally:
            self.condition.release()
        return last

    def run(self):
        minuteInterval = 15
        while self.keepRunning:
            self.condition.acquire()
            self.loadPrices()
            now = datetime.now()
            # Sleep until roughly the next 15-minute wall-clock boundary
            # (condition.wait also wakes early if closethread notifies).
            sleepTime = timedelta(minutes=minuteInterval - now.minute % minuteInterval).total_seconds() - now.second
            self.condition.wait(sleepTime)
            self.condition.release()
        BtcPrice.__instance = None

    def loadPrices(self):
        # Try each source in priority order; stop at the first success.
        success = False
        for priority in self.loadPriorities:
            try:
                getattr(self, priority)()
                success = True
                break
            except URLError as e:
                print "Error loading " + priority + " url " + str(e)
            except (ValueError, KeyError, TypeError) as e:
                print "Error reading " + priority + " data" + str(e)
        if not success:  # pragma: no cover
            print "BtcPrice unable to load Bitcoin exchange price"

    @staticmethod
    def dictForUrl(url):
        # Fetch url (5 second timeout) and parse the JSON body.
        # NOTE(review): unlike the certifi variant of this class, this
        # copy passes no CA bundle to urlopen.
        request = Request(url)
        result = urlopen(request, timeout=5).read()
        return json.loads(result)

    def loadbitcoinaverage(self):
        for currency, info in self.dictForUrl('https://api.bitcoinaverage.com/ticker/global/all').iteritems():
            if currency != "timestamp":
                self.prices[currency] = info["last"]

    def loadbitpay(self):
        for currency in self.dictForUrl('https://bitpay.com/api/rates'):
            self.prices[currency["code"]] = currency["rate"]

    def loadblockchain(self):
        for currency, info in self.dictForUrl('https://blockchain.info/ticker').iteritems():
            self.prices[currency] = info["last"]

    def loadbitcoincharts(self):
        for currency, info in self.dictForUrl('https://api.bitcoincharts.com/v1/weighted_prices.json').iteritems():
            if currency != "timestamp":
                self.prices[currency] = info["24h"]
| {
"repo_name": "tomgalloway/OpenBazaar-Server",
"path": "market/btcprice.py",
"copies": "2",
"size": "3363",
"license": "mit",
"hash": -2898122283335376400,
"line_mean": 31.3365384615,
"line_max": 116,
"alpha_frac": 0.6036277134,
"autogenerated": false,
"ratio": 4.430830039525691,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6034457752925692,
"avg_score": null,
"num_lines": null
} |
__author__ = 'ddustin'
import time
from twisted.trial import unittest
from market.btcprice import BtcPrice
class MarketProtocolTest(unittest.TestCase):
    """Smoke tests for BtcPrice: each price loader should yield a
    non-negative USD exchange rate.

    The four per-loader tests previously repeated the same nine lines;
    the shared start/fetch/shutdown cycle now lives in _check_loader.
    """

    def _check_loader(self, priorities):
        """Run a BtcPrice thread restricted to *priorities*, return the USD
        rate read from it, and shut the thread down cleanly."""
        btcPrice = BtcPrice()
        btcPrice.loadPriorities = priorities
        btcPrice.start()
        time.sleep(0.01)  # give the worker a moment to fetch prices
        rate = btcPrice.get("USD")
        btcPrice.closethread()
        btcPrice.join()
        return rate

    def test_BtcPrice(self):
        # Default loader order; the rate must come via the singleton accessor.
        btcPrice = BtcPrice()
        btcPrice.start()
        time.sleep(0.01)
        rate = BtcPrice.instance().get("USD")
        self.assertGreater(rate, 0)
        btcPrice.closethread()
        btcPrice.join()

    def test_BtcPrice_loadbitcoinaverage(self):
        self.assertGreaterEqual(self._check_loader(["loadbitcoinaverage"]), 0)

    def test_BtcPrice_loadbitpay(self):
        self.assertGreaterEqual(self._check_loader(["loadbitpay"]), 0)

    def test_BtcPrice_loadblockchain(self):
        self.assertGreaterEqual(self._check_loader(["loadblockchain"]), 0)

    def test_BtcPrice_loadbitcoincharts(self):
        self.assertGreaterEqual(self._check_loader(["loadbitcoincharts"]), 0)
| {
"repo_name": "saltduck/OpenBazaar-Server",
"path": "market/tests/test_btcprice.py",
"copies": "6",
"size": "1644",
"license": "mit",
"hash": 1016538267848418700,
"line_mean": 27.8421052632,
"line_max": 56,
"alpha_frac": 0.6161800487,
"autogenerated": false,
"ratio": 3.4465408805031448,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7062720929203145,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Deathnerd'
import os
class Base():
    """Base Config shared by every environment.

    Secrets and connection details come from CODENINJA_* environment
    variables; a missing variable fails fast with a KeyError at import time.
    """
    # General App
    ENV = os.environ['CODENINJA_SERVER_ENV']
    SECRET_KEY = os.environ['CODENINJA_SECRET_KEY']
    APP_DIR = os.path.abspath(os.path.dirname(__file__))
    PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
    # SQLAlchemy
    SQLALCHEMY_DATABASE_URI = "mysql://{}:{}@{}/{}?charset=utf8".format(os.environ['CODENINJA_DATABASE_USER'],
                                                                       os.environ['CODENINJA_DATABASE_PASS'],
                                                                       os.environ['CODENINJA_DATABASE_HOST'],
                                                                       os.environ['CODENINJA_DATABASE_NAME'])
    # Bcrypt
    BCRYPT_LOG_ROUNDS = 13
    # Debug Toolbar
    DEBUG_TB_ENABLED = False
    DEBUG_TB_INTERCEPT_REDIRECTS = False
    # Flask-Assets
    ASSETS_DEBUG = False
    # WTForms
    WTF_CSRF_ENABLED = True
    WTF_CSRF_SECRET_KEY = os.environ['CODENINJA_WTF_CSRF_SECRET_KEY']
    RECAPTCHA_PUBLIC_KEY = os.environ['CODENINJA_RECAPTCHA_PUBLIC_KEY']
    RECAPTCHA_PRIVATE_KEY = os.environ['CODENINJA_RECAPTCHA_PRIVATE_KEY']
class Production(Base):
    """Production Config: all debugging switched off."""
    DEBUG = False
    DEBUG_TB_ENABLED = False
class Development(Base):
    """Development Config: debug mode, full toolbar, unbundled assets."""
    # General App
    DEBUG = True
    # SQLAlchemy
    # Debug Toolbar
    DEBUG_TB_ENABLED = True
    DEBUG_TB_PROFILER_ENABLED = True
    DEBUG_TB_INTERCEPT_REDIRECTS = True
    DEBUG_TB_TEMPLATE_EDITOR_ENABLED = True
    # Flask-Assets
    ASSETS_DEBUG = True
    # WTF-Forms
class Staging(Base):
    """Staging Config: testing mode with fast (insecure) bcrypt rounds."""
    # General App
    TESTING = True
    DEBUG = True
    # Bcrypt
    BCRYPT_LOG_ROUNDS = 1  # speed over security; never use outside staging
# WTForms | {
"repo_name": "Deathnerd/iamacodeninja",
"path": "codeninja/settings.py",
"copies": "1",
"size": "1527",
"license": "mit",
"hash": -1301975894223493400,
"line_mean": 20.8285714286,
"line_max": 107,
"alpha_frac": 0.6810740013,
"autogenerated": false,
"ratio": 2.7464028776978417,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39274768789978415,
"avg_score": null,
"num_lines": null
} |
__author__ = 'deathowl'
from datetime import datetime, timedelta
from notification.tasks import send_notifications
from openduty.escalation_helper import get_escalation_for_service
from django.utils import timezone
from notification.models import ScheduledNotification
from django.conf import settings
class NotificationHelper(object):
    """Plans ScheduledNotifications for incidents and enqueues them with celery."""

    @staticmethod
    def notify_incident(incident):
        # Build the escalation plan for the incident's service, persist each
        # planned notification, and schedule its celery delivery at send_at.
        notifications = NotificationHelper.generate_notifications_for_incident(incident)
        for notification in notifications:
            notification.save()
            send_notifications.apply_async((notification.id,) ,eta=notification.send_at)

    @staticmethod
    def notify_user_about_incident(incident, user, delay=None, preparedmsg = None):
        # Like notify_incident, but targets a single user; `delay` overrides
        # the per-method spacing and `preparedmsg` replaces the message body.
        notifications = NotificationHelper.generate_notifications_for_user(incident, user, delay, preparedmsg)
        for notification in notifications:
            notification.save()
            send_notifications.apply_async((notification.id,) ,eta=notification.send_at)

    @staticmethod
    def generate_notifications_for_incident(incident):
        """Build (unsaved) ScheduledNotifications along the escalation chain.

        Officer N's window starts N * escalate_after minutes from now; inside
        a window the officer's methods (ordered by position) fire every
        `retry` minutes until the next officer's window would begin.
        """
        now = timezone.make_aware(datetime.now(), timezone.get_current_timezone())
        duty_officers = get_escalation_for_service(incident.service_key)
        current_time = now
        notifications = []
        for officer_index, duty_officer in enumerate(duty_officers):
            escalation_time = incident.service_key.escalate_after * (officer_index + 1)
            # Point at which escalation moves on to the next officer.
            escalate_at = current_time + timedelta(minutes=escalation_time)
            methods = duty_officer.notification_methods.order_by('position').all()
            method_index = 0
            for method in methods:
                notification_time = incident.service_key.retry * method_index + incident.service_key.escalate_after * officer_index
                notify_at = current_time + timedelta(minutes=notification_time)
                if notify_at < escalate_at:
                    notification = ScheduledNotification()
                    notification.incident = incident
                    notification.user_to_notify = duty_officer
                    notification.notifier = method.method
                    notification.send_at = notify_at
                    uri = settings.BASE_URL + "/incidents/details/" + str(incident.id)
                    notification.message = "A Service is experiencing a problem: " + incident.incident_key + " " + incident.description + ". Handle at: " + uri + " Details: " + incident.details
                    notifications.append(notification)
                    print "Notify %s at %s with method: %s" % (duty_officer.username, notify_at, notification.notifier)
                else:
                    # Remaining methods would fire after escalation; skip them.
                    break
                method_index += 1
        # todo: error handling
        return notifications

    @staticmethod
    def generate_notifications_for_user(incident, user, delay=None, preparedmsg = None):
        """Build (unsaved) ScheduledNotifications for one user.

        Without `delay`, method i fires after escalate_after + i * retry
        minutes; with `delay`, method i fires after i * delay minutes.
        """
        now = timezone.make_aware(datetime.now(), timezone.get_current_timezone())
        current_time = now
        notifications = []
        methods = user.notification_methods.order_by('position').all()
        method_index = 0
        for method in methods:
            if delay is None:
                notification_time = incident.service_key.retry * method_index + incident.service_key.escalate_after
            else:
                notification_time = method_index * delay
            notify_at = current_time + timedelta(minutes=notification_time)
            notification = ScheduledNotification()
            notification.incident = incident
            notification.user_to_notify = user
            notification.notifier = method.method
            notification.send_at = notify_at
            if preparedmsg is None:
                uri = settings.BASE_URL + "/incidents/details/" + str(incident.id)
                notification.message = "A Service is experiencing a problem: " + incident.incident_key + " " + incident.description + ". Handle at: " + uri
            else:
                notification.message = preparedmsg
            notifications.append(notification)
            print "Notify %s at %s with method: %s" % (user.username, notify_at, notification.notifier)
            method_index += 1
        # todo: error handling
        return notifications
| {
"repo_name": "ustream/openduty",
"path": "notification/helper.py",
"copies": "1",
"size": "4337",
"license": "mit",
"hash": 340565767379853950,
"line_mean": 43.7113402062,
"line_max": 193,
"alpha_frac": 0.63707632,
"autogenerated": false,
"ratio": 4.4757481940144475,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0033958672363644028,
"num_lines": 97
} |
__author__ = 'deathowl'
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from .models import Incident, SchedulePolicy, SchedulePolicyRule
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """REST representation of a Django auth User."""
    class Meta:
        model = User
        fields = ('url', 'username', 'email', 'groups')
class GroupSerializer(serializers.HyperlinkedModelSerializer):
    """REST representation of an auth Group (name only)."""
    class Meta:
        model = Group
        fields = ('name',)
class IncidentSerializer(serializers.ModelSerializer):
    """Serializer for the Incident model's API payload."""
    class Meta:
        model = Incident
        fields = ('incident_key', 'service_key', 'event_type', 'description', 'details')
class SchedulePolicySerializer(serializers.HyperlinkedModelSerializer):
    """A schedule policy plus its (read-only) escalation rules."""
    rules = serializers.RelatedField(many=True, read_only=True)

    class Meta:
        model = SchedulePolicy
        fields = ('name', 'repeat_times', 'rules')
class SchedulePolicyRuleSerializer(serializers.HyperlinkedModelSerializer):
    """One escalation step of a schedule policy."""
    # NOTE(review): this `rules` field is not listed in Meta.fields and looks
    # copy-pasted from SchedulePolicySerializer - confirm it is needed.
    rules = serializers.RelatedField(many=True, read_only=True)

    class Meta:
        model = SchedulePolicyRule
        fields = ('schedule_policy', 'position', 'user_id', 'schedule', 'escalate_after')
class NoneSerializer(serializers.Serializer):
    """Empty serializer for endpoints that return plain status strings."""
    class Meta:
        fields = ()
class OpsWeeklySerializer(serializers.Serializer):
    """Flat event record: timestamp, message text, and incident key."""
    occurred_at = serializers.DateTimeField()
    output = serializers.CharField(max_length=2000)
    # NOTE(review): "incindent_key" is a typo for "incident_key", but the
    # field name is part of the public API payload - renaming would break
    # consumers, so it is only flagged here.
    incindent_key = serializers.CharField(max_length=200)
class OnCallSerializer(serializers.Serializer):
    """One on-call shift: who, their email, and the shift window."""
    person = serializers.CharField()
    email = serializers.EmailField()
    start = serializers.DateTimeField()
    end = serializers.DateTimeField()
| {
"repo_name": "ustream/openduty",
"path": "openduty/serializers.py",
"copies": "1",
"size": "1671",
"license": "mit",
"hash": 6162830010275109000,
"line_mean": 27.8103448276,
"line_max": 89,
"alpha_frac": 0.7175344105,
"autogenerated": false,
"ratio": 4.34025974025974,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.017985868956907312,
"num_lines": 58
} |
__author__ = 'deathowl'
from django.http import HttpResponseRedirect
from django.template.response import TemplateResponse
from django.contrib.auth.decorators import login_required
from .models import Calendar, User, SchedulePolicy, SchedulePolicyRule
from django.http import Http404
from django.views.decorators.http import require_http_methods
from django.db import IntegrityError
from django.core.urlresolvers import reverse
from django.contrib import messages
@login_required()
def list(request):
    """Render all schedule policies."""
    context = {'policies': SchedulePolicy.objects.all()}
    return TemplateResponse(request, 'escalation/list.html', context)
@login_required()
def delete(request, id):
    """Delete the schedule policy with the given id, then return to the list.

    Responds 404 when no such policy exists.
    """
    try:
        policy = SchedulePolicy.objects.get(id=id)
        policy.delete()
        return HttpResponseRedirect('/policies/')
    except SchedulePolicy.DoesNotExist:
        # Was `except Calendar.DoesNotExist`, which never matches what
        # SchedulePolicy.objects.get() raises - a missing id escaped as an
        # unhandled exception (500) instead of a 404.
        raise Http404
@login_required()
def new(request):
    """Render the empty policy editor form."""
    # QuerySet.all() never raises DoesNotExist (an empty queryset is simply
    # empty), so the previous try/except wrappers here were dead code.
    users = User.objects.all()
    calendars = Calendar.objects.all()
    return TemplateResponse(request, 'escalation/edit.html', {'calendars': calendars, 'users': users})
@login_required()
def edit(request, id):
    """Render the policy editor pre-filled with policy *id* (404 if absent)."""
    try:
        policy = SchedulePolicy.objects.get(id=id)
    except SchedulePolicy.DoesNotExist:
        # Was `except Calendar.DoesNotExist`; SchedulePolicy.objects.get()
        # raises SchedulePolicy.DoesNotExist, so a bad id previously 500'd.
        raise Http404
    # .filter()/.all() never raise DoesNotExist, so the old try/except
    # wrappers around these queries were dead code.
    elements = SchedulePolicyRule.objects.filter(schedule_policy=policy).order_by('position')
    calendars = Calendar.objects.all()
    users = User.objects.all()
    return TemplateResponse(request, 'escalation/edit.html', {'item': policy, 'elements': elements,
                                                              'calendars': calendars, 'users': users})
@login_required()
@require_http_methods(["POST"])
def save(request):
    """Create or update a SchedulePolicy and rebuild its rule list.

    Expects POST fields: id, name, repeat, plus escalate_to[] entries of the
    form "user|<id>" or "calendar|<id>" in escalation order.
    """
    try:
        policy = SchedulePolicy.objects.get(id=request.POST['id'])
    except SchedulePolicy.DoesNotExist:
        # Unknown id: create a fresh policy instead.
        policy = SchedulePolicy()
    policy.name = request.POST['name']
    policy.repeat_times = request.POST['repeat']
    try:
        policy.save()
    except IntegrityError:
        # Duplicate name: bounce back to the form that was being edited.
        messages.error(request, 'Schedule already exists')
        if int(request.POST['id']) > 0:
            return HttpResponseRedirect(reverse('openduty.escalation.edit', None, [str(request.POST['id'])]))
        else:
            return HttpResponseRedirect(reverse('openduty.escalation.new'))
    elements = request.POST.getlist('escalate_to[]')
    # Rules are not merged: drop the old set and recreate from the form.
    try:
        SchedulePolicyRule.objects.filter(schedule_policy=policy).delete()
    except SchedulePolicyRule.DoesNotExist:
        pass # Nothing to clear
    for idx,item in enumerate(elements):
        rule = SchedulePolicyRule()
        rule.schedule_policy = policy
        parts = item.split("|")
        rule.escalate_after = 0 # HACK!
        rule.position = idx + 1
        if parts[0] == "user":
            rule.user_id = User.objects.get(id=parts[1])
            rule.schedule = None
        if parts[0] == "calendar":
            rule.schedule = Calendar.objects.get(id=parts[1])
            rule.user_id = None
        try:
            rule.save()
        except IntegrityError:
            return HttpResponseRedirect(reverse('openduty.escalation.edit', None, [str(request.POST['id'])]))
    return HttpResponseRedirect('/policies/')
| {
"repo_name": "ustream/openduty",
"path": "openduty/escalation.py",
"copies": "1",
"size": "3593",
"license": "mit",
"hash": -5912935396754181000,
"line_mean": 32.8962264151,
"line_max": 109,
"alpha_frac": 0.6462566101,
"autogenerated": false,
"ratio": 4.302994011976048,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5449250622076047,
"avg_score": null,
"num_lines": null
} |
__author__ = 'deathowl'
from django.template.response import TemplateResponse
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.contrib import messages
from django.conf import settings
from .models import EventLog, Service
@login_required()
def list(request):
    """Show the global event log, newest first, hiding notification noise."""
    services = Service.objects.filter()
    events = EventLog.objects.exclude(
        action__in=('notified', 'notification_failed')).order_by('-occurred_at')
    paginator = Paginator(events, settings.PAGINATION_DEFAULT_PAGINATION)
    requested_page = request.GET.get('page')
    try:
        page_obj = paginator.page(requested_page)
    except PageNotAnInteger:
        page_obj = paginator.page(1)
    except EmptyPage:
        page_obj = paginator.page(paginator.num_pages)
    return TemplateResponse(request, 'eventlog/list.html',
                            {'services': services, 'events': page_obj})
@login_required()
def get(request, id):
    """Show the event log filtered to one service (empty log if id unknown)."""
    services = Service.objects.all()
    page = request.GET.get('page')
    events = []
    actualService = None
    try:
        actualService = Service.objects.get(id = id)
        events = EventLog.objects.filter(service_key = actualService).order_by('-occurred_at')
    except Service.DoesNotExist:
        messages.error(request, "No such service!")
    p = Paginator(events, settings.PAGINATION_DEFAULT_PAGINATION)
    try:
        paginated = p.page(page)
    except PageNotAnInteger:
        # Non-numeric ?page= falls back to the first page.
        paginated = p.page(1)
    except EmptyPage:
        # Out-of-range ?page= clamps to the last page.
        paginated = p.page(p.num_pages)
    return TemplateResponse(request, 'eventlog/list.html', {'services': services, 'events' : paginated,
'actual' : actualService}) | {
"repo_name": "ustream/openduty",
"path": "openduty/event_log.py",
"copies": "1",
"size": "1734",
"license": "mit",
"hash": -5969423230256968000,
"line_mean": 34.4081632653,
"line_max": 103,
"alpha_frac": 0.6770472895,
"autogenerated": false,
"ratio": 3.827814569536424,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5004861859036424,
"avg_score": null,
"num_lines": null
} |
__author__ = 'deathowl'
from .models import User, SchedulePolicyRule, Service
from datetime import datetime, timedelta
from django.utils import timezone
from schedule.periods import Day
from datetime import timedelta
def get_current_events_users(calendar):
    """Return the User objects named by *calendar* events covering right now.

    Event titles hold comma-separated usernames.
    """
    now = timezone.make_aware(datetime.now(), timezone.get_current_timezone())
    today = Day(calendar.events.all(), now)
    on_call = []
    for occurrence in today.get_occurrences():
        if not (occurrence.start <= now <= occurrence.end):
            continue
        for name in occurrence.event.title.split(','):
            on_call.append(User.objects.get(username=name.strip()))
    return on_call
def get_events_users_inbetween(calendar, since, until):
    """Collect who is on call on *calendar* between *since* and *until*.

    Returns dicts with person/email/start/end, where start is from the first
    occurrence seen for a username and end from the last.
    """
    delta = until - since
    result = {}
    # Walk the range day by day and sample each day's occurrences.
    for i in range(delta.days + 1):
        that_day = since + timedelta(days=i)
        that_day = timezone.make_aware(that_day, timezone.get_current_timezone())
        day = Day(calendar.events.all(), that_day)
        for o in day.get_occurrences():
            if o.start <= that_day <= o.end:
                usernames = o.event.title.split(',')
                for username in usernames:
                    # NOTE(review): the dict key is the *unstripped* username
                    # while "person" stores the stripped one - confirm event
                    # titles never contain spaces around the commas.
                    if username not in result.keys():
                        user_instance = User.objects.get(username=username.strip())
                        result[username] = {"start": o.start, "person": username.strip(), "end": o.end,
                                            "email": user_instance.email}
                    else:
                        # Seen before: just extend the coverage window.
                        result[username]["end"] = o.end
    return result.values()
def get_escalation_for_service(service):
    """Return the users to notify for *service*, in escalation-rule order.

    Empty when notifications are disabled.  Each rule contributes either the
    users currently on call on its calendar, or its fixed user, or both.
    """
    result = []
    if service.notifications_disabled:
        return result
    rules = SchedulePolicyRule.getRulesForService(service)
    for item in rules:
        if item.schedule:
            result += get_current_events_users(item.schedule)
        if item.user_id:
            result.append(item.user_id)
    #TODO: This isnt de-deuped, is that right?
    return result
def services_where_user_is_on_call(user):
    """Services whose policy names *user* directly or via a calendar event title."""
    # NOTE(review): imported locally, presumably to dodge an import cycle -
    # confirm before hoisting to the top of the module.
    from django.db.models import Q
    services = Service.objects.filter(
        Q(policy__rules__user_id=user) | Q(policy__rules__schedule__event__title__icontains=user)
    )
    return services
| {
"repo_name": "ustream/openduty",
"path": "openduty/escalation_helper.py",
"copies": "1",
"size": "2229",
"license": "mit",
"hash": 3584086175892779000,
"line_mean": 37.4310344828,
"line_max": 103,
"alpha_frac": 0.6083445491,
"autogenerated": false,
"ratio": 3.9803571428571427,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0022734933192304586,
"num_lines": 58
} |
__author__ = 'deathowl'
from openduty import escalation_helper
from urllib import quote
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.template.response import TemplateResponse
from django.contrib.auth.decorators import login_required
from django.utils.datetime_safe import datetime
from django.utils import timezone
from schedule.models import Calendar
from schedule.utils import coerce_date_dict
from schedule.periods import weekday_names
from django.http import Http404
from django.views.decorators.http import require_http_methods
from django.db import IntegrityError
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.shortcuts import get_object_or_404
import pytz
@login_required()
def list(request):
    """Render every on-call calendar."""
    return TemplateResponse(request, 'schedule/list.html',
                            {'schedules': Calendar.objects.all()})
@login_required()
def delete(request, calendar_slug):
    """Delete the calendar identified by *calendar_slug* (404 if absent)."""
    # get_object_or_404 raises Http404 itself - it never lets
    # Calendar.DoesNotExist escape - so the old try/except was unreachable.
    sched = get_object_or_404(Calendar, slug=calendar_slug)
    sched.delete()
    return HttpResponseRedirect('/schedules/')
@login_required()
def new(request):
    """Render the empty calendar editor."""
    # Nothing here queries Calendar, so the old try/except for
    # Calendar.DoesNotExist around this call could never fire.
    return TemplateResponse(request, 'schedule/edit.html', {})
@login_required()
def details(request, calendar_slug, periods=None):
    """Render the detail view of one calendar.

    `periods` is a list of schedule period classes (Day/Week/Month/Year);
    each is instantiated over the calendar's events for the requested date.
    """
    try:
        sched = get_object_or_404(Calendar, slug=calendar_slug)
        # Allow ?year=&month=&day= style overrides of "today".
        date = coerce_date_dict(request.GET)
        if date:
            try:
                date = datetime(**date)
            except ValueError:
                raise Http404
        else:
            date = timezone.now()
        event_list = sched.event_set.all()
        currently_oncall_users = escalation_helper.get_current_events_users(sched)
        if len(currently_oncall_users) >= 2:
            oncall1 = "%s, Phone Number:%s" % (currently_oncall_users[0].username,
                                               currently_oncall_users[0].profile.phone_number)
            oncall2 = "%s, Phone Number:%s" % (currently_oncall_users[1].username,
                                               currently_oncall_users[1].profile.phone_number)
        else:
            # NOTE(review): a single on-call user is also shown as "Nobody".
            oncall1 = "Nobody"
            oncall2 = "Nobody"
        # Prefer the timezone the user picked in their session.
        if 'django_timezone' in request.session:
            local_timezone = pytz.timezone(request.session['django_timezone'])
        else:
            local_timezone = timezone.get_default_timezone()
        period_objects = {}
        for period in periods:
            # Year takes one fewer positional argument than the other periods.
            if period.__name__.lower() == 'year':
                period_objects[period.__name__.lower()] = period(event_list, date, None, local_timezone)
            else:
                period_objects[period.__name__.lower()] = period(event_list, date, None, None, local_timezone)
        return render_to_response('schedule/detail.html',
                                  {
                                      'date': date,
                                      'periods': period_objects,
                                      'calendar': sched,
                                      'weekday_names': weekday_names,
                                      'currently_oncall_1' : oncall1,
                                      'currently_oncall_2' : oncall2,
                                      'local_timezone': local_timezone,
                                      'current_date': timezone.now(),
                                      'here':quote(request.get_full_path()),
                                  },context_instance=RequestContext(request),
                                  )
    except Calendar.DoesNotExist:
        raise Http404
@login_required()
def edit(request, calendar_slug):
    """Render the calendar editor for an existing calendar (404 if absent)."""
    # get_object_or_404 raises Http404 directly; the previous
    # `except Calendar.DoesNotExist` handler was unreachable dead code.
    sched = get_object_or_404(Calendar, slug=calendar_slug)
    return TemplateResponse(request, 'schedule/edit.html', {'item': sched, 'edit': True})
@login_required()
@require_http_methods(["POST"])
def save(request):
    """Create or update a Calendar from POSTed name/slug fields."""
    try:
        sched = Calendar.objects.get(slug=request.POST['slug'])
    except Calendar.DoesNotExist:
        # Unknown slug: create a new calendar instead.
        sched = Calendar()
    sched.name = request.POST['name']
    sched.slug = request.POST['slug']
    try:
        sched.save()
        return HttpResponseRedirect('/schedules/');
    except IntegrityError:
        # Duplicate calendar: send the user back to the relevant form.
        messages.error(request, 'Schedule already exists')
        if request.POST['slug']:
            return HttpResponseRedirect(reverse('openduty.schedules.edit', None, [request.POST['slug']]))
        else:
            return HttpResponseRedirect(reverse('openduty.schedules.new'))
| {
"repo_name": "ustream/openduty",
"path": "openduty/schedules.py",
"copies": "1",
"size": "4463",
"license": "mit",
"hash": 8142238887594357000,
"line_mean": 36.1916666667,
"line_max": 110,
"alpha_frac": 0.6381357831,
"autogenerated": false,
"ratio": 4.214353163361662,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5352488946461662,
"avg_score": null,
"num_lines": null
} |
__author__ = 'deathowl'
from time import sleep, time
import datetime
from openduty.serializers import NoneSerializer
from openduty.models import Incident
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets
from .celery import add
from random import randint
class HealthCheckViewSet(viewsets.ModelViewSet):
    """Liveness probe: answers 200/"OK" iff the database can be queried."""
    queryset = Incident.objects.all()
    serializer_class = NoneSerializer

    def list(self, request):
        """Run one trivial query; any failure maps to a 500/"FAILED"."""
        try:
            # The query itself is the check; its result is irrelevant (the
            # old code bound it to an unused local variable).
            Incident.objects.first()
        except Exception:
            # Deliberately broad: any DB-layer error means "unhealthy".
            return Response("FAILED", status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        return Response("OK", status=status.HTTP_200_OK)
class CeleryHealthCheckViewSet(viewsets.ModelViewSet):
    """Health probe for the celery workers: round-trips an `add` task."""
    queryset = Incident.objects.all()
    serializer_class = NoneSerializer

    def list(self, request):
        try:
            # Submit add(timestamp, random) and poll for up to ~10 seconds
            # for a worker to hand back the expected sum.
            timestamp = int(time())
            random = randint(0, 100000)
            result = add.apply_async(args=[timestamp, random])
            now = datetime.datetime.now()
            while (now + datetime.timedelta(seconds=10)) > datetime.datetime.now():
                if result.result == timestamp + random:
                    return Response("OK", status=status.HTTP_200_OK)
                sleep(0.5)
        except IOError:
            # Broker unreachable; fall through to FAILED.
            pass
        return Response("FAILED", status=status.HTTP_500_INTERNAL_SERVER_ERROR)
| {
"repo_name": "ustream/openduty",
"path": "openduty/healthcheck.py",
"copies": "1",
"size": "1429",
"license": "mit",
"hash": 4214993582234910000,
"line_mean": 32.2325581395,
"line_max": 83,
"alpha_frac": 0.6655003499,
"autogenerated": false,
"ratio": 4.317220543806647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5482720893706646,
"avg_score": null,
"num_lines": null
} |
__author__ = 'deathowl'
import logging
from sleekxmpp import ClientXMPP
from sleekxmpp.xmlstream import resolver, cert
import ssl
class SendClient(ClientXMPP):
def verify_gtalk_cert(self, raw_cert):
    # Google Talk serves a certificate for talk.google.com rather than the
    # user's own domain, so plain hostname verification fails.  Accept the
    # cert only when the domain's XMPP SRV records point at google.com AND
    # the cert validates for talk.google.com; otherwise log an error.
    hosts = resolver.get_SRV(self.boundjid.server, 5222,
                             self.dns_service,
                             resolver=resolver.default_resolver())
    it_is_google = False
    for host, _ in hosts:
        if host.lower().find('google.com') > -1:
            it_is_google = True
    if it_is_google:
        try:
            if cert.verify('talk.google.com', ssl.PEM_cert_to_DER_cert(raw_cert)):
                logging.info('google cert found for %s',
                             self.boundjid.server)
                return
        except cert.CertificateError:
            pass
    logging.error("invalid cert received for %s",
                  self.boundjid.server)
def __init__(self, jid, password, recipient, msg):
    # One-shot client: once the session starts it sends `msg` to
    # `recipient` and disconnects (see start()).
    super(SendClient, self).__init__(jid, password)
    self.recipient = recipient
    self.msg = msg
    self.add_event_handler('session_start', self.start)
    self.add_event_handler("ssl_invalid_cert", self.ssl_invalid_cert)
def start(self, event):
    # session_start handler: announce presence, deliver the one message,
    # then disconnect (waiting for the send queue to drain).
    self.send_presence()
    self.get_roster()
    self.send_message(mto=self.recipient, mbody=self.msg, mtype='chat')
    self.disconnect(wait=True)
def ssl_invalid_cert(self, raw_cert):
self.verify_gtalk_cert(raw_cert) | {
"repo_name": "ustream/openduty",
"path": "notification/notifier/xmppclient.py",
"copies": "1",
"size": "1555",
"license": "mit",
"hash": 77150815012363540,
"line_mean": 32.1063829787,
"line_max": 86,
"alpha_frac": 0.5665594855,
"autogenerated": false,
"ratio": 3.926767676767677,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9983928640488378,
"avg_score": 0.0018797043558598952,
"num_lines": 47
} |
__author__ = 'deathowl'
import uuid
import hmac
from hashlib import sha1
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from django.contrib.auth.models import User
from uuidfield import UUIDField
from django.core.exceptions import ValidationError
from schedule.models import Calendar
from django.contrib.auth import models as auth_models
from django.db.models import signals
from django.conf import settings
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
@python_2_unicode_compatible
class Token(models.Model):
    """
    The default authorization token model.
    """
    # 40-char hex key; generated on first save when absent.
    key = models.CharField(max_length=40, primary_key=True)
    created = models.DateTimeField(auto_now_add=True)

    def save(self, *args, **kwargs):
        # Lazily assign a key so callers may construct Token() bare.
        if not self.key:
            self.key = self.generate_key()
        return super(Token, self).save(*args, **kwargs)

    def generate_key(self):
        # HMAC-SHA1 keyed with a random UUID -> 40 hex characters.
        unique = uuid.uuid4()
        return hmac.new(unique.bytes, digestmod=sha1).hexdigest()

    def __unicode__(self):
        return self.key

    def __str__(self):
        return self.key
@python_2_unicode_compatible
class SchedulePolicy(models.Model):
    """
    Schedule policy: an ordered set of escalation rules (see
    SchedulePolicyRule.schedule_policy related_name='rules').
    """
    name = models.CharField(max_length=80, unique=True)
    repeat_times = models.IntegerField()

    class Meta:
        verbose_name = _('schedule_policy')
        verbose_name_plural = _('schedule_policies')

    def __str__(self):
        return self.name

    def natural_key(self):
        # Django expects natural keys to be tuples; the previous
        # `(self.name)` was just the bare string - parentheses without a
        # trailing comma do not create a tuple.
        return (self.name,)
@python_2_unicode_compatible
class Service(models.Model):
    """
    A monitored service that incidents are reported against.
    (The previous docstring was copy-pasted from Incident.)
    """
    name = models.CharField(max_length=80, unique=True)
    id = UUIDField(primary_key=True, auto=True)
    # Minutes between notification retries; see NotificationHelper.
    retry = models.IntegerField(blank=True, null=True)
    policy = models.ForeignKey(SchedulePolicy, blank=True, null=True)
    # Minutes before escalating to the next officer in the policy.
    escalate_after = models.IntegerField(blank=True, null=True)
    notifications_disabled = models.BooleanField(default=False)

    class Meta:
        verbose_name = _('service')
        verbose_name_plural = _('service')

    def __str__(self):
        return self.name

    def natural_key(self):
        # `(self.id)` was a bare value, not a tuple; Django requires
        # natural keys to be tuples.
        return (self.id,)
@python_2_unicode_compatible
class EventLog(models.Model):
    """
    Event Log: one row per auditable action on an incident or service.
    """
    ACTIONS = (('acknowledge', 'acknowledge'),
               ('resolve', 'resolve'),
               ('silence_service', 'silence service'),
               ('unsilence_service', 'unsilence service'),
               ('silence_incident', 'silence incident'),
               ('unsilence_incident', 'unsilence incident'),
               ('forward', 'forward'),
               ('log', 'log'),
               ('notified', 'notified'),
               ('notification_failed', 'notification failed'),
               ('trigger', 'trigger'))

    @property
    def color(self):
        # CSS/table-row class used when rendering the event log.
        colort_dict = {'acknowledge': 'warning',
                       'resolve': 'success',
                       'silence_service': 'active',
                       'unsilence_service': 'active',
                       'silence_incident': 'active',
                       'unsilence_incident': 'active',
                       'forward': 'info',
                       'trigger': 'trigger',
                       'notified': 'success',
                       'notification_failed': 'danger',
                       'log': ''}
        return colort_dict[self.action]

    user = models.ForeignKey(User, blank=True, default=None, null=True, related_name='users')
    incident_key = models.ForeignKey('Incident', blank=True, null=True)
    # max_length was the *string* "100"; CharField.max_length must be an int.
    action = models.CharField(choices=ACTIONS, default='log', max_length=100)
    service_key = models.ForeignKey(Service)
    data = models.TextField()
    occurred_at = models.DateTimeField()

    class Meta:
        verbose_name = _('eventlog')
        verbose_name_plural = _('eventlog')

    def __str__(self):
        return self.data

    def natural_key(self):
        return (self.service_key, self.id)
@python_2_unicode_compatible
class Incident(models.Model):
    # Valid event_type values (enforced by clean()).
    TRIGGER = "trigger"
    RESOLVE = "resolve"
    ACKNOWLEDGE = "acknowledge"
    """
    Incidents are representations of a malfunction in the system.
    """
    service_key = models.ForeignKey(Service)
    incident_key = models.CharField(max_length=200)
    event_type = models.CharField(max_length=15)
    description = models.CharField(max_length=200)
    details = models.TextField()
    occurred_at = models.DateTimeField()

    @property
    def color(self):
        # CSS/table-row class for rendering.  event_type is one of
        # trigger/acknowledge/resolve (see clean()), all present in the map.
        colort_dict = {'acknowledge': 'warning',
                       'resolve': 'success',
                       'silence_service': 'active',
                       'silence_incident': 'active',
                       'forward': 'info',
                       'trigger': 'trigger',
                       'log': ''}
        return colort_dict[self.event_type]

    class Meta:
        verbose_name = _('incidents')
        verbose_name_plural = _('incidents')
        # One incident per (service, incident key) pair.
        unique_together = (("service_key", "incident_key"),)

    def __str__(self):
        return self.incident_key

    def natural_key(self):
        return (self.service_key, self.incident_key)

    def clean(self):
        if self.event_type not in ['trigger', 'acknowledge', 'resolve']:
            raise ValidationError("'%s' is an invalid event type, valid values are 'trigger', 'acknowledge' and 'resolve'" % self.event_type)
@python_2_unicode_compatible
class ServiceTokens(models.Model):
    """
    Service tokens: associates an API Token with a Service.
    """
    name = models.CharField(max_length=80)
    service_id = models.ForeignKey(Service)
    token_id = models.ForeignKey(Token)

    class Meta:
        verbose_name = _('service_tokens')
        verbose_name_plural = _('service_tokens')

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class SchedulePolicyRule(models.Model):
    """
    Schedule rule: one step of a policy, pointing at a user and/or a calendar.
    """
    schedule_policy = models.ForeignKey(SchedulePolicy, related_name='rules')
    # 1-based ordering of the rule within its policy.
    position = models.IntegerField()
    # Either a fixed user or a calendar of on-call shifts (both nullable).
    user_id = models.ForeignKey(User, blank=True, null=True)
    schedule = models.ForeignKey(Calendar, blank=True, null=True)
    escalate_after = models.IntegerField()

    class Meta:
        verbose_name = _('schedule_policy_rule')
        verbose_name_plural = _('schedule_policy_rules')

    def __str__(self):
        return str(self.id)

    @classmethod
    def getRulesForService(cls, service):
        # All rules belonging to the service's policy.
        return cls.objects.filter(schedule_policy=service.policy)
class UserProfile(models.Model):
    # Per-user notification settings (phone/Pushover/Slack/Prowl/Rocket);
    # created automatically for each new User by the post_save hook below.
    user = models.OneToOneField('auth.User', related_name='profile')
    phone_number = models.CharField(max_length=50)
    pushover_user_key = models.CharField(max_length=50)
    pushover_app_key = models.CharField(max_length=50)
    slack_room_name = models.CharField(max_length=50)
    prowl_api_key = models.CharField(max_length=50, blank=True)
    prowl_application = models.CharField(max_length=256, blank=True)
    prowl_url = models.CharField(max_length=512, blank=True)
    rocket_webhook_url = models.CharField(max_length=512, blank=True)
class ServiceSilenced(models.Model):
    # Mutes alerting for an entire service until the given time.
    service = models.ForeignKey(Service)
    silenced = models.BooleanField(default=False)
    silenced_until = models.DateTimeField()
class IncidentSilenced(models.Model):
    # Mutes alerting for a single incident until the given time.
    incident = models.ForeignKey(Incident)
    silenced = models.BooleanField(default=False)
    silenced_until = models.DateTimeField()
def create_user_profile(sender, instance, created, **kwargs):
    """post_save hook: give every freshly created User an empty profile."""
    if not created:
        return
    UserProfile.objects.create(user=instance)
# Auto-create a UserProfile whenever a User is saved for the first time.
signals.post_save.connect(create_user_profile, sender=User)
# Prevent Django's post-syncdb superuser prompt; this app manages users itself.
signals.post_syncdb.disconnect(
    sender=auth_models,
    dispatch_uid='django.contrib.auth.management.create_superuser')
| {
"repo_name": "ustream/openduty",
"path": "openduty/models.py",
"copies": "1",
"size": "7875",
"license": "mit",
"hash": 8096037636218187000,
"line_mean": 31.012195122,
"line_max": 141,
"alpha_frac": 0.6322539683,
"autogenerated": false,
"ratio": 3.9493480441323974,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5081602012432398,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Debanjan Mahata'
from twython import TwythonStreamer
from TwitterAuthentication import keyList
from time import sleep
from random import randint
import sys,codecs
# from pymongo import MongoClient
# #connecting to MongoDB database
# mongoObj = MongoClient()
# #setting the MongoDB database
# db = mongoObj[]
# #setting the collection in the database for storing the Tweets
# collection = db[]
# Re-wrap stdout with a latin-1 StreamWriter (last element of the codec
# lookup tuple) so printing tweet text with non-ASCII characters does not
# raise UnicodeEncodeError on this Python 2 script.
sys.stdout = codecs.lookup('iso8859-1')[-1](sys.stdout)
class MyStreamer(TwythonStreamer):
    """Streaming handler that prints the text of every English tweet."""

    def on_success(self, data):
        # Only handle tweets Twitter has tagged as English.
        if data["lang"] == "en":
            print data["text"]
        # Want to disconnect after the first result?
        # self.disconnect()

    def on_error(self, status_code, data):
        # Back off for a random 1-60 s; the reconnect loop below then
        # re-creates the stream with a different randomly chosen API key.
        sleep(randint(1,60))
# Initial one-off stream: pick a random API key and track the #recipe tag.
keys = keyList[randint(0,10)]
stream = MyStreamer(keys["APP_KEY"],keys["APP_SECRET"],keys["OAUTH_TOKEN"],keys["OAUTH_TOKEN_SECRET"])
stream.statuses.filter(track="#recipe")
## Requires Authentication as of Twitter API v1.1
# Reconnect forever: on any streaming error, rotate to another random API
# key and resume tracking (note this loop tracks a different hashtag than
# the one-off stream above).
while True:
    try:
        keys = keyList[randint(0,10)]
        stream = MyStreamer(keys["APP_KEY"],keys["APP_SECRET"],keys["OAUTH_TOKEN"],keys["OAUTH_TOKEN_SECRET"])
        stream.statuses.filter(track='#MissUSA')
    except:
        continue
| {
"repo_name": "dxmahata/TwitterSentimentAnalysis",
"path": "TwitterDataCollect/StreamingTweetCollection.py",
"copies": "1",
"size": "1271",
"license": "mit",
"hash": -5928706288796709000,
"line_mean": 22.537037037,
"line_max": 110,
"alpha_frac": 0.6624704957,
"autogenerated": false,
"ratio": 3.684057971014493,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9706825556534602,
"avg_score": 0.02794058203597824,
"num_lines": 54
} |
__author__ = 'Debanjan Mahata'
import time
from twython import Twython, TwythonError
from pymongo import MongoClient
import TwitterAuthentication as t_auth
def collect_tweets(path_name):
emotion_sentiment_mapping = {"joy":"positive","sadness":"negative","anger":"negative","fear":"negative","disgust":"negative"}
try:
#connecting to MongoDB database
mongoObj = MongoClient()
#setting the MongoDB database
db = mongoObj["TwitterSentimentAnalysis"]
#setting the collection in the database for storing the Tweets
collection = db["emotion_labeled_tweets"]
except:
print "Could not connect to the MongoDb Database, recheck the connection and the database"
try:
fp = open(path_name)
except IOError:
print "Please provide the right path to the file named labeledTweetSentimentCorpus.csv"
request_count = 0
key_count = 0
auth_key = t_auth.keyList[key_count%11]
for entry in fp:
tweet_id = entry.rstrip().split(":")[0]
try:
tweet_sentiment = emotion_sentiment_mapping[entry.rstrip().split("::")[1].strip()]
except:
tweet_sentiment = ""
twitter = Twython(auth_key["APP_KEY"],auth_key["APP_SECRET"],auth_key["OAUTH_TOKEN"],auth_key["OAUTH_TOKEN_SECRET"])
if request_count == 1499:
request_count = 0
key_count += 1
auth_key = t_auth.keyList[key_count%11]
time.sleep(60)
try:
twitter_status = twitter.show_status(id = tweet_id)
twitter_status["sentiment_label"] = tweet_sentiment
language = twitter_status["lang"]
if language == "en" and tweet_sentiment:
collection.insert(twitter_status)
else:
pass
except TwythonError:
pass
request_count += 1
if __name__ == "__main__":
    # Collect the emotion-labeled corpus and store it in MongoDB.
    collect_tweets("../CorpusAndLexicons/labeledTweetEmotionCorpus.txt")
| {
"repo_name": "dxmahata/TwitterSentimentAnalysis",
"path": "TwitterDataCollect/collectEmotionLabeledTweets.py",
"copies": "1",
"size": "2078",
"license": "mit",
"hash": 9055417963577953000,
"line_mean": 27.8611111111,
"line_max": 129,
"alpha_frac": 0.6198267565,
"autogenerated": false,
"ratio": 3.898686679174484,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5018513435674484,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Debanjan Mahata'
import time
from twython import Twython, TwythonError
from pymongo import MongoClient
import TwitterAuthentication as t_auth
def collect_tweets(path_name):
try:
#connecting to MongoDB database
mongoObj = MongoClient()
#setting the MongoDB database
db = mongoObj["TwitterSentimentAnalysis"]
#setting the collection in the database for storing the Tweets
collection = db["sentiment_labeled_tweets"]
except:
print "Could not connect to the MongoDb Database, recheck the connection and the database"
try:
fp = open(path_name)
except IOError:
print "Please provide the right path to the file named labeledTweetSentimentCorpus.csv"
request_count = 0
key_count = 0
auth_key = t_auth.keyList[key_count%11]
for entry in fp:
tweet_id = entry.rstrip().split(",")[2].replace('"',"")
tweet_sentiment = entry.rstrip().split(",")[1].replace('"',"")
twitter = Twython(auth_key["APP_KEY"],auth_key["APP_SECRET"],auth_key["OAUTH_TOKEN"],auth_key["OAUTH_TOKEN_SECRET"])
if request_count == 1499:
request_count = 0
key_count += 1
auth_key = t_auth.keyList[key_count%11]
time.sleep(60)
request_count += 1
try:
twitter_status = twitter.show_status(id = tweet_id)
twitter_status["sentiment_label"] = tweet_sentiment
collection.insert(twitter_status)
except TwythonError:
pass
if __name__ == "__main__":
    # Collect the sentiment-labeled corpus and store it in MongoDB.
    collect_tweets("../CorpusAndLexicons/labeledTweetSentimentCorpus.csv")
| {
"repo_name": "dxmahata/TwitterSentimentAnalysis",
"path": "TwitterDataCollect/collectSentimentLabeledTweets.py",
"copies": "1",
"size": "1741",
"license": "mit",
"hash": 6342942335825776000,
"line_mean": 27.5409836066,
"line_max": 124,
"alpha_frac": 0.6341183228,
"autogenerated": false,
"ratio": 3.868888888888889,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5003007211688889,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Deedasmi'
import re
# User defined variables. And pi.
# Values are stored as strings; a "-name" twin holds the negated value.
udv = {'pi': '3.1415926535'}
# User defined functions. And test function
# Maps name -> (tuple of formal parameter names, body expression string).
udf = {'test': (('x', 'y'), "x+2/y")}
# Set of operators
OPERATORS = {"+", "-", "*", "/", "^", "%"}
# Set of all allowed symbols
ALLOWED_SYMBOLS = {"+", "-", "*", "/", "^", "%", "(", ")", "."}
# Result of the most recent calculation; consulted when a new equation
# starts with an operator (e.g. "+2" continues from the last answer).
saved = None
def get_udf(equation):
    """
    Expand a user-defined-function call into its defining expression.
    :param equation: call string such as "test(2,5)"
    :return: the function body with each formal parameter textually
             replaced by its argument, ready for equation_to_list
    """
    parts = equation.split("(")
    name = parts[0]
    call_args = parts[1][:-1].split(",")  # drop the trailing ')'
    formals, body = udf[name]
    if len(call_args) != len(formals):
        raise SyntaxWarning("{} variables defined, but {} supplied."
                            .format(str(len(formals)), str(len(call_args))))
    for formal, actual in zip(formals, call_args):
        body = body.replace(formal, actual)
    return body
def handle_user_defined_input(equation, function=False):
    """
    Handles all user defined variables and functions: "name = value" or
    "name(a,b) = expression".
    :param equation: the raw "lhs = rhs" input string
    :param function: unused; kept for interface compatibility
    :return: a confirmation string on success
    :raises SyntaxWarning: for malformed declarations
    :raises UserWarning: for invalid names or values
    """
    equation = equation.replace(" ", "")
    if equation.count("=") > 1:
        raise SyntaxWarning("Unknown operation (too many '=')")
    equation = equation.split("=")
    key = equation[0]
    value = equation[1]
    # Working with User Define Function
    # TODO replace with function variable
    if "(" in key:
        if key.count("(") != 1 or key.count(")") != 1:
            raise SyntaxWarning("Non matching parenthesis in function declaration"
                                " or too many parenthesis")
        func_vars = key[key.index("(") + 1:key.index(")")].split(",")
        for var in func_vars:
            if not var.isalpha():
                raise UserWarning("All variables must be alpha")
        key = key[0:key.index("(")]
        if not key.isalpha():
            raise UserWarning("Function name must be alpha")
        # Delete UDVs with the same name so lookups resolve to the function
        if key in udv:
            del udv[key]
            del udv["-" + key]
        udf[key] = func_vars, value
        return '{} added with function {}'.format(key, value)
    else:
        if key.isalpha():
            try:
                float(value)
            except ValueError:  # bug fix: was a bare except hiding real errors
                raise UserWarning("Value must be a valid number")
        else:
            raise UserWarning("Key must be alpha")
        # Delete UDFs with the same name so lookups resolve to the variable
        if key in udf:
            del udf[key]
        udv[key] = value
        udv["-" + key] = float(value) * -1  # allows negative UDV
        return '{} added with value {}'.format(key, value)
def equation_to_list(equation):
    """
    Splits the equation into a token list (number strings, names, and
    single-character symbols), then validates and normalises it.
    :param equation: The equation to split
    :return: Returns a validated list
    :raises UserWarning: for any symbol outside ALLOWED_SYMBOLS
    """
    # Replace alternate characters
    equation = equation.replace("\\", "/")
    equation = equation.replace("[", "(")
    equation = equation.replace("{", "(")
    equation = equation.replace("]", ")")
    equation = equation.replace("}", ")")
    # split eq into numbers and special characters
    equation_list = ['']
    # iterate for each char in eq
    # Bug fix: the comparisons below used 'is'/'is not', which test object
    # identity and only happened to work through CPython's interning of
    # one-character strings (and emit SyntaxWarning on 3.8+); use ==/!=.
    for character in equation:
        # if char is a digit, either append it onto the last item (a number)
        # or append it onto the list. Do the same check with alpha
        if character.isdigit() or character == ".":
            if not equation_list[-1].isnumeric() and equation_list[-1] != ".":
                equation_list.append(character)
            else:
                equation_list[-1] += character
        elif character.isalpha():
            if not equation_list[-1].isalpha():
                equation_list.append(character)
            else:
                equation_list[-1] += character
        # if char is special character, just add to list
        else:
            if character not in ALLOWED_SYMBOLS:
                raise UserWarning("Unknown symbol '{}'".format(character))
            equation_list.append(character)
    # remove the '' seed token used to make the [-1] checks safe
    del equation_list[0]
    return validate_and_format_list(equation_list)
def validate_and_format_list(equation):
    """
    Validates the list as a valid mathematical function.
    Additionally normalises it: substitutes user-defined variables,
    converts number tokens to floats, folds unary minus, inserts implied
    multiplication, and prepends the saved result when the equation
    starts with an operator.
    :param equation: token list produced by equation_to_list
    :return: A list that should work.
    """
    i = 0
    neg = False
    if equation.count("(") != equation.count(")"):
        raise SyntaxWarning("Non-matching parenthesis. {} '(' found and {} ')' found"
                            .format(str(equation.count("(")), str(equation.count(")"))))
    # Loop through list
    # TODO save to new equation so we aren't modifying the duration of loop during loop. I know I'm bad
    # Bug fix: this loop condition was "i is not len(equation)", comparing
    # ints by identity; past CPython's small-int cache (> 256) it would
    # never terminate normally.  String tokens below were also compared
    # with 'is', which relies on interning; use ==/!= throughout.
    while i != len(equation):
        # Replace alpha strings with their value in UDV
        if equation[i].isalpha():
            if equation[i] in udv:
                equation[i] = udv[equation[i]]
            else:
                raise UserWarning("{} not found in the variable database".format(equation[i]))
        # Replace numeric values with the float of their value
        if equation[i].isnumeric() or "." in equation[i]:
            if isinstance(equation[i-1], float):
                raise SyntaxWarning("Missing operator between values")
            if equation[i].count(".") > 1:
                raise SyntaxWarning("Unknown value. Too many '.'")
            if equation[i] == ".":
                raise SyntaxWarning("Unknown value. '.' without number.")
            equation[i] = float(equation[i])
            if neg:
                equation[i] *= -1
                del equation[i - 1]  # Remove negative sign from list
                i -= 1
                neg = False
        else:
            # Symbol Features
            # Handle equations starting with operators (continue from saved result)
            if i == 0:
                if equation[i] in OPERATORS:
                    if saved:
                        equation.insert(0, saved)
                        i += 1
                    else:
                        raise UserWarning("No previous value saved.")
                i += 1
                continue
            # Turn 2 * -(2 + 2) into 2 * (0-(2+2))
            if neg and equation[i] == "(":
                right = find_matching_parenthesis(i, equation) + 2
                equation.insert(i-1, "(")
                equation.insert(i, 0)
                equation.insert(right, ")")
                i += 2
                neg = False
            # Handle implied multiplication, e.g. 2(3+4)
            if equation[i] == "(" and isinstance(equation[i - 1], float):
                equation.insert(i, "*")
                i += 1
            # Symbol bug handling
            # Handle empty parenthesis
            if equation[i] == ")" and equation[i - 1] == "(":
                raise SyntaxWarning("Empty parenthesis")
            # Handle operators being next to each other
            if equation[i-1] in ALLOWED_SYMBOLS:
                if equation[i] == "-" and equation[i-1] != ")":
                    neg = True  # treat as unary minus on the next value
                else:
                    if equation[i] != "(" and equation[i - 1] != ")":
                        raise SyntaxWarning("Missing value between operators")
        i += 1
    if equation[-1] in OPERATORS:
        raise SyntaxWarning("Equation may not end with an operator")
    return equation
def do_math(equation):
    """
    Recursively solve equation.
    :param equation: validated token list (floats and operator strings)
    :return: Float - the solved number; also stored in the module-level
             'saved' so a later equation can continue from it
    """
    # First collapse parenthesised sub-equations, innermost via recursion.
    while "(" in equation:
        # Please
        # Pops out from first ( to matching ) and recursively solves the sub equation
        left_paren = equation.index("(")
        right_paren = find_matching_parenthesis(left_paren, equation)
        sub_equation = []
        for i in range(left_paren, right_paren + 1):  # Second value of range non-inclusive
            sub_equation.append(equation.pop(left_paren))
        del sub_equation[0]  # removed left parenthesis from sub_equation
        del sub_equation[len(sub_equation) - 1]  # and removed the right
        equation.insert(left_paren, do_math(sub_equation))  # recursively calls to handle nested parenthesis
    # Then reduce operators in PEMDAS order ("Please Excuse My Dear Aunt Sally").
    while len(equation) > 1:
        i = 0
        # Excuse (exponents first)
        if "^" in equation:
            i = equation.index("^")
        # My Dear (leftmost of * / %)
        elif "*" in equation or "/" in equation or "%" in equation:
            i = min(mdm_what_to_min(equation))
        # Aunt Sally (leftmost of + -)
        elif "+" in equation and "-" in equation:
            i = min(equation.index("+"), equation.index("-"))
        elif "+" in equation:
            i = equation.index("+")
        elif "-" in equation:
            i = equation.index("-")
        # Math time: replace "a op b" in place with its result
        i -= 1  # makes popping simple
        number1 = equation.pop(i)
        operator = equation.pop(i)
        number2 = equation.pop(i)
        equation.insert(i, do_simple_math(number1, number2, operator))
    global saved
    saved = equation[0]
    return saved
def mdm_what_to_min(equation):
    """
    Collect the first index of each of '*', '/' and '%' that appears.
    :param equation: the token list to scan
    :return: list of indexes suitable for min()
    """
    return [equation.index(op) for op in ("*", "/", "%") if op in equation]
def find_matching_parenthesis(left, equation):
    """
    Locate the ')' that matches the '(' at index *left*.
    Tracks nesting depth: each '(' increments, each ')' decrements; the
    index where the depth returns to zero is the match.
    :param left: index of the opening parenthesis
    :param equation: token list to search
    :return: int index of the matching ')'
    :raises SyntaxWarning: if no match exists (callers pre-validate, so
        this is defensive)
    """
    depth = 0
    index = left
    while index < len(equation):
        token = equation[index]
        if token == "(":
            depth += 1
        elif token == ")":
            depth -= 1
            if depth == 0:
                return index
        index += 1
    raise SyntaxWarning("No matching parenthesis found")
def do_simple_math(number1, number2, operator):
    """
    Does simple math between two numbers and an operator.
    :param number1: The first (left-hand) number
    :param number2: The second (right-hand) number
    :param operator: one of "+", "-", "*", "/", "^", "%" (string)
    :return: the result; 0 for an unrecognised operator
    """
    # Bug fix: operators were compared with 'is', which tests object
    # identity; it only worked via CPython's caching of one-character
    # strings and emits SyntaxWarning on 3.8+.  '==' is correct.
    ans = 0
    if operator == "*":
        ans = number1 * number2
    elif operator == "/":
        ans = number1 / number2
    elif operator == "+":
        ans = number1 + number2
    elif operator == "-":
        ans = number1 - number2
    elif operator == "^":
        ans = number1 ** number2
    elif operator == "%":
        ans = number1 % number2
    return ans
def calculate(*args):
    """
    Wrapper function.
    Takes a single string and runs it through equation_to_list and
    do_math, or takes 3 arguments for do_simple_math.
    Relies on other functions to do error handling.
    :param args: either (equation_string,) or (number1, number2, operator)
    :return: Answer
    """
    if isinstance(args[0], str) and len(args) == 1:
        # remove white space
        equation = args[0].replace(" ", "")
        # Check if function
        if equation[0].isalpha():  # Check this first because re.match is slow
            # TODO support inline function calls (i.e: 2 + test(2, 5) / 5)
            # Bug fix: the class was "[a-zA-z]"; the A-z range also matches
            # the characters between 'Z' and 'a' ([ \ ] ^ _ `).  Use A-Z.
            if re.match(r"(^[a-zA-Z]*)\(([\d\,]*)\)$", equation):
                equation = get_udf(equation)
        equation = equation_to_list(equation)
        return do_math(equation)
    if len(args) == 3:
        return do_simple_math(args[0], args[1], args[2])
    raise TypeError("Function handles single strings, or group of 3 arguments (n1, n2, o)")
if __name__ == "__main__":  # for speed testing
    # Smoke/perf loop: mixed expressions, user-defined variables ('Matt'),
    # user-defined functions ('func', 'test') and the 3-argument form.
    for r in range(1000):
        calculate("4 * 10 / 15 + ( 6 - 2 * 3 )")
        calculate("2 +5/1 + (2* 5)/2")
        calculate("7^2+16 / (4^4)")
        calculate("(1+2)+(2+3)")
        calculate("1+ ( ( ( 3 + 2) * 2) - 4)")
        calculate("pi*2")
        calculate("(2*.5)^3")
        handle_user_defined_input("Matt = 20")
        calculate("Matt * pi")
        calculate(5, 2, "-")
        handle_user_defined_input("func(x, y) = x * y / 2")
        calculate("func(2,4)")
        calculate("test ( 4, 2)")
| {
"repo_name": "Deedasmi/PyCalc",
"path": "pycalc/calc.py",
"copies": "1",
"size": "12456",
"license": "mit",
"hash": 3044726596956056600,
"line_mean": 35.4210526316,
"line_max": 116,
"alpha_frac": 0.5539499037,
"autogenerated": false,
"ratio": 4.09064039408867,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5144590297788669,
"avg_score": null,
"num_lines": null
} |
__author__ = 'deevarvar'
import random
import unittest
import sys
import os
from subprocess import call
print os.getcwd()
# Resolve paths relative to this test file: utlib holds shared test
# helpers (ut_util) and make_path is the directory with the Makefile.
current_path = os.path.dirname(os.path.realpath(__file__))
lib_path = current_path + "/../../utlib"
make_path = current_path + "/../../"
print 'lib path is ' + lib_path + ', make path is ' + make_path
sys.path.append(lib_path)
import ut_util
from subprocess import call
#TODO:
#1. add function to process random string
#2. compare two files' content
class TestCopyFile(unittest.TestCase):
    """Exercises the 'copy' binary built by the project Makefile."""

    def setUp(self):
        print 'setup steps:'
        print 'change dir'
        # NOTE(review): subprocess 'cd' changes the child's cwd only and is
        # a no-op for this process; os.chdir(make_path) was likely intended.
        call(['cd', make_path])
        print 'compile binary to make file '
        call(['make'])

    #TODO: test file name limits http://serverfault.com/questions/9546/filename-length-limits-on-linux
    #test file content
    #empty file
    def test_empty_file(self):
        # NOTE(review): despite the name, this writes 1025 random chars into
        # the source file before copying, and the generated src_*/dst_*
        # files are never cleaned up afterwards.
        print 'generate src and dst files:'
        src_file_name = "src_" + ut_util.string_generator(8)
        dst_file_name = "dst_" + ut_util.string_generator(8)
        ut_util.touchFile(src_file_name)
        ut_util.touchFile(dst_file_name)
        print 'src file is '+ src_file_name + ", dst_file_name is " + dst_file_name
        src_content = ut_util.string_generator(1025)
        with open(src_file_name, 'w') as src:
            src.write(src_content)
        # NOTE(review): same no-op 'cd' issue as in setUp.
        call(['cd', make_path + '/ut/'])
        call(['./copy', src_file_name, dst_file_name])
        print 'do the compare of file content'
        with open(src_file_name, 'r') as src:
            with open(dst_file_name, 'r') as dst:
                self.assertEqual(src.read(), dst.read())

    #100 chars
    #1024 chars
    #more than 1024 chars, 2000 chars
if __name__ == '__main__':
unittest.main() | {
"repo_name": "deevarvar/myLab",
"path": "book/tlpi_zhiye/ch3/ut/ch3_ut.py",
"copies": "1",
"size": "1747",
"license": "mit",
"hash": 303608291389019300,
"line_mean": 25.8923076923,
"line_max": 102,
"alpha_frac": 0.6182026331,
"autogenerated": false,
"ratio": 3.425490196078431,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4543692829178431,
"avg_score": null,
"num_lines": null
} |
__author__ = 'deevarvar'
"""
some idea from baidu's interview
1. this file is used to generate the some file template
three column
column1 column2 column3
chars chars(or empty) digits
try to use shell or python to finish
"""
import random
import string
def gen_seperator():
    """Return a random run of 2-11 spaces used to separate columns."""
    width = random.randint(1, 10)
    # The original seeded the string with one space, so the result is
    # always width + 1 characters long.
    return ' ' * (width + 1)
def gen_column_one():
    """Return a random alphabetic string of length 1-10 (first column)."""
    length = random.randint(1, 10)
    letters = [random.choice(string.ascii_letters) for _ in range(length)]
    return ''.join(letters)
def gen_column_two():
    """Return the second column: a single space roughly half the time,
    otherwise a random alphabetic string of length 1-10."""
    coin = random.randint(0, 100)
    if coin >= 50:
        return ' '
    length = random.randint(1, 10)
    return ''.join(random.choice(string.ascii_letters) for _ in range(length))
def gen_column_three():
    """Return a random digit string of length 1-10 (third column)."""
    length = random.randint(1, 10)
    digits = [random.choice(string.digits) for _ in range(length)]
    return ''.join(digits)
# Emit 10 sample rows of "col1 <spaces> col2 <spaces> col3".
# (xrange makes this file Python-2-only.)
with open('./baidu_interview/log_temp.txt', 'w') as log:
    for i in xrange(0, 10):
        log.write(gen_column_one()+gen_seperator()+gen_column_two()+gen_seperator()+gen_column_three()+'\n')
"repo_name": "deevarvar/myLab",
"path": "interview/gen_file.py",
"copies": "1",
"size": "1140",
"license": "mit",
"hash": 8005731846400504000,
"line_mean": 23.2765957447,
"line_max": 108,
"alpha_frac": 0.6271929825,
"autogenerated": false,
"ratio": 3.229461756373938,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9345312849872475,
"avg_score": 0.002268377800292694,
"num_lines": 47
} |
__author__ = 'degorenko'
import rst2pdf
import docutils
def generate_rst(json_data):
    """Render the dependency-comparison report as reStructuredText.

    Writes 'report.rst' in the current directory: a timestamped title,
    the upstream/gerrit parameters, then one requirements table per
    project comparing MOS versions against upstream versions.

    :param json_data: dict with 'upstream_url', 'upstream_branch',
        'gerrit_url', 'gerrit_branch' and a 'projects' list where each
        project maps its name to {'mos_deps': {...}, 'upstream_deps': {...}}
    """
    f = open("report.rst", "w")
    # NOTE(review): email.Utils is the Python 2 spelling (email.utils in 3);
    # project.keys()[0] below is likewise Python-2-only.
    from email.Utils import formatdate
    cur_time = formatdate(timeval=None, localtime=True)
    write_headers(f, '{0} {1}{2}'.format("Dependency checker ", cur_time, "\n"), True)
    write_parameters(f, ':{0}: {1}\n'.format("Upstream URL", json_data["upstream_url"]))
    write_parameters(f, ':{0}: {1}\n'.format("Upstream branch", json_data["upstream_branch"]))
    write_parameters(f, ':{0}: {1}\n'.format("MOS gerrit URL", json_data["gerrit_url"]))
    write_parameters(f, ':{0}: {1}\n'.format("MOS gerrit branch", json_data["gerrit_branch"]))
    projects = json_data["projects"]
    for project in projects:
        project_name = project.keys()[0]
        mos_deps = project[project_name]['mos_deps']
        upstream_deps = project[project_name]['upstream_deps']
        project_reqs = {}
        # Pair each package's MOS and upstream versions for the table.
        for key in mos_deps.keys():
            project_reqs[key] = [mos_deps[key], upstream_deps[key]]
        write_table(f, project_name, project_reqs)
    f.close()
    # Optional PDF/HTML conversion, currently disabled:
    #from subprocess import call
    #if json_data["output_format"] == "pdf":
    #    call(["rst2pdf", "report.rst", "-o", "report.pdf"])
    #else:
    #    call(["rst2html", "report.rst", "report.html"])
def write_headers(f, header, main=False):
    """Write *header* followed by an RST underline sized to the text:
    '=' for the main document title, '-' for section headers."""
    underline_char = '=' if main else '-'
    underline = get_sequence(underline_char, len(header) - 1)
    f.write(header)
    f.write('{0}\n'.format(underline))
def write_parameters(f, parameter):
    """Write one pre-formatted line/field to the report stream."""
    f.write(parameter)
def get_sequence(separator, count):
    """Return *separator* repeated *count* times.

    Replaces a join-over-xrange loop: ``xrange`` is Python-2-only and
    string multiplication is the equivalent, portable form.
    """
    return separator * count
def get_word_length(dictionary):
    """Compute column widths for the requirements table.

    :param dictionary: {package_name: [mos_version, upstream_version]}
    :return: [name_width, mos_width, upstream_width], each at least as
             wide as its header ('Package name', 'MOS', 'Upstream')
    """
    # Default length of table columns -> 'Package name', 'MOS', 'Upstream'
    name_w, mos_w, up_w = 12, 3, 8
    for name, versions in dictionary.items():
        name_w = max(name_w, len(name))
        mos_w = max(mos_w, len(versions[0]))
        up_w = max(up_w, len(versions[1]))
    return [name_w, mos_w, up_w]
def write_table(f, project, requirements):
    """Write one RST grid table titled *project*.

    Columns are 'Package name', 'MOS', 'Upstream'; widths come from
    get_word_length so every cell fits.

    :param requirements: {package: [mos_version, upstream_version]}
    """
    word_length = get_word_length(requirements)
    def align(word, num):
        # Centre *word* within column *num*, padding with spaces.
        # NOTE(review): n / 2 is integer division on Python 2 only; on
        # Python 3 it would pass a float to get_sequence.
        n = word_length[num] - len(word)
        if n > 0:
            m = n / 2
            return '{0}{1}{2}'.format(get_sequence(' ', m),
                                      word,
                                      get_sequence(' ', n - m))
        else:
            return word
    write_headers(f, '\n{0}\n'.format(project))
    write_parameters(f, '+{0}+{1}+{2}+\n'.format(get_sequence('-', word_length[0]),
                                                 get_sequence('-', word_length[1]),
                                                 get_sequence('-', word_length[2])))
    write_parameters(f, '|{0}|{1}|{2}|\n'.format(align("Package name", 0),
                                                 align("MOS", 1),
                                                 align("Upstream", 2)))
    write_parameters(f, '+{0}+{1}+{2}+\n'.format(get_sequence('=', word_length[0]),
                                                 get_sequence('=', word_length[1]),
                                                 get_sequence('=', word_length[2])))
    # One row plus separator per package.
    for key in requirements.keys():
        write_parameters(f, '|{0}|{1}|{2}|\n'.format(align(key, 0),
                                                     align(requirements[key][0], 1),
                                                     align(requirements[key][1], 2)))
        write_parameters(f, '+{0}+{1}+{2}+\n'.format(get_sequence('-', word_length[0]),
                                                     get_sequence('-', word_length[1]),
                                                     get_sequence('-', word_length[2])))
"repo_name": "degorenko/dep_checker",
"path": "dep_checker/reporter/report.py",
"copies": "1",
"size": "3857",
"license": "apache-2.0",
"hash": -8420278302341125000,
"line_mean": 37.58,
"line_max": 94,
"alpha_frac": 0.5014259787,
"autogenerated": false,
"ratio": 3.708653846153846,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4710079824853846,
"avg_score": null,
"num_lines": null
} |
__author__ = 'dejawa'
from docker import Client
import dockermanager.configure as conf
import psutil
import json
class MinionController:
    """Manages per-user "lesser" Docker containers on this minion host."""

    # Shared Docker API client, created once at class-definition (import) time.
    conn = Client(base_url=conf.host+':'+conf.port)
    #docker mongo db image create/start/stop/replset/sharding
    def getAllContainers(self):
        """Return every container on the host, running or stopped."""
        return self.conn.containers(all=True)
    def getUserContainers(self):
        """Return containers for the current user (label filtering is TODO)."""
        #return self.conn.containers(all="true",filters=[{"label":"${userid}"}])
        return self.conn.containers(all="true")
    def isContainerRunning(self , Id):
        """True if Docker reports the container's state as Running."""
        status = self.conn.inspect_container(Id)['State']
        if status['Running'] :
            return True
        return False
    # if status['']
    def createLesser(self, info):
        """Create (but do not start) a lesser container.

        Overrides fields of the conf.configObj template with any truthy
        values in *info*.  NOTE(review): obj aliases the shared template,
        so these overrides persist across calls; info['...'] also raises
        KeyError when a key is absent (info.get may have been intended).
        :return: the new container's id
        """
        obj = conf.configObj
        # mongo/lesser port maps are currently unused (see comments below).
        mongo={}
        lesser={}
        if info['lesserId'] :
            obj['labels']['lesserId'] = info['lesserId']
        if info['command'] :
            obj['command'] = info['command']
        if info['env'] :
            obj['env'] = info['env']
        if info['name'] :
            obj['name'] = info['name']
        if info['entrypoint'] :
            obj['entrypoint'] = info['entrypoint']
        if info['working_dir'] :
            obj['working_dir'] = info['working_dir']
        # if info['mongo'] :
        # mongo = {info['mongo']:('0.0.0.0',conf.mongoPort)}
        # if info['lesser'] :
        # lesser = {info['lesser']:('0.0.0.0',conf.lesserPort)}
        container = self.conn.create_container(
            image=obj['image'],
            command=obj['command'],
            ports=[conf.mongoPort, conf.clientPort],
            environment=obj['env'],
            volumes=obj['volumes'],
            volumes_from=obj['volumes_from'],
            name=obj['name'],
            entrypoint=obj['entrypoint'],
            working_dir=obj['working_dir'],
            labels=obj['labels']
        )
        return container['Id']
    def startLesser(self, Id):
        """Start the container with all ports published, then return a
        conf.Info() carrying the host ports mapped to the container's
        mongo and client ports."""
        self.conn.start ( Id ,publish_all_ports=True)
        portInfo = self.conn.inspect_container(Id)['NetworkSettings']['Ports']
        ports = portInfo.keys()
        info = conf.Info()
        for k,v in portInfo.items():
            # Port keys look like "27017/tcp"; strip the protocol suffix.
            if '/' in k:
                k = k.split('/')[0]
            if k == conf.mongoPort:
                info.mongoPort = v[0]['HostPort']
            if k == conf.clientPort:
                info.clientPort = v[0]['HostPort']
        info.Id = Id
        # print ("K is "+ k + " V is " + v[0]['HostPort'] )
        return info
    #user lesser minion create
    # image, command=None, hostname=None, user=None,
    # detach=False, stdin_open=False, tty=False,
    # mem_limit=None, ports=None, environment=None,
    # dns=None, volumes=None, volumes_from=None,
    # network_disabled=False, name=None, entrypoint=None,
    # cpu_shares=None, working_dir=None, domainname=None,
    # memswap_limit=None, cpuset=None, host_config=None,
    # mac_address=None, labels=None, volume_driver=None
    def upLesser(self, info):
        """Create and start a lesser container in one step."""
        containerId = self.createLesser(info)
        return self.startLesser(containerId)
    def stopLesser(self, Id):
        """Stop (but keep) the container."""
        return self.conn.stop( Id )
    #user lesser minion delete
    def delLesser(self, Id):
        """Remove the container together with its volumes (v=True)."""
        return self.conn.remove_container( Id , v=True )
    def downLesser(self, Id):
        """Stop then remove the container."""
        self.stopLesser(Id)
        return self.delLesser(Id)
    def statusLesser(self, containerid):
        """Return the Docker stats feed for the container."""
        ret = self.conn.stats(containerid)
        return ret
    def allClearLesser(self):
        """Stop and remove EVERY container on the host (destructive)."""
        for i in (self.conn.containers(all=True)) :
            print ('Delete Container')
            print ( i['Names'],i['Id'] )
            #for stat in lesser.statusLesser(i['Id']):
            #    print (stat)
            self.downLesser(i['Id'])
    def cliConfigure(self, obj):
        # Stub: CLI-driven configuration is not implemented yet.
        return
    def diskInfo(self):
        """Disk usage of conf.path, with 'threshhold' set True when free
        space falls to or below conf.diskLimit."""
        disk={"total":'', "used":'', "threshhold":False}
        disk['total'] = psutil.disk_usage(conf.path).total
        disk['used'] = psutil.disk_usage(conf.path).used
        if (disk['total'] - disk['used']) <= conf.diskLimit:
            disk["threshhold"]=True
        return disk
    def memInfo(self):
        """Virtual-memory usage, with 'threshhold' set True when free
        memory falls to or below conf.memLimit."""
        mem={"total":'', "used":'', "threshhold":False}
        mem['total'] = psutil.virtual_memory().total
        mem['used'] = psutil.virtual_memory().used
        if (mem['total'] - mem['used']) <= conf.memLimit:
            mem["threshhold"]=True
        return mem
| {
"repo_name": "hoonkim/Lesser",
"path": "dockermanager/controller.py",
"copies": "1",
"size": "4602",
"license": "mit",
"hash": 3732284722979614700,
"line_mean": 29.4768211921,
"line_max": 80,
"alpha_frac": 0.5395480226,
"autogenerated": false,
"ratio": 3.8222591362126246,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48618071588126244,
"avg_score": null,
"num_lines": null
} |
__author__ = 'deksan'
import logging
import urllib
import feedparser
from flexget import plugin, validator
from flexget.entry import Entry
from flexget.event import event
from flexget.plugins.api_tvrage import lookup_series
log = logging.getLogger('newznab')
class Newznab(object):
    """
    Newznab search plugin
    Provide a url or your website + apikey and a category

    Config example::

        newznab:
          url: "http://website/api?apikey=xxxxxxxxxxxxxxxxxxxxxxxxxx&t=movie&extended=1"
          website: https://website
          apikey: xxxxxxxxxxxxxxxxxxxxxxxxxx
          category: movie

    Category is any of: movie, tvsearch, music, book
    """

    # Config schema validated by Flexget before the plugin runs.
    schema = {
        'type': 'object',
        'properties': {
            'category': {'type': 'string', 'enum': ['movie', 'tvsearch', 'tv', 'music', 'book']},
            'url': {'type': 'string', 'format': 'url'},
            'website': {'type': 'string', 'format': 'url'},
            'apikey': {'type': 'string'}
        },
        'required': ['category'],
        'additionalProperties': False
    }
    def build_config(self, config):
        """Normalise config: map 'tv' to 'tvsearch' and derive 'url'
        from website + apikey when no explicit url was given."""
        if config['category'] == 'tv':
            config['category'] = 'tvsearch'
        log.debug(config['category'])
        if 'url' not in config:
            if 'apikey' in config and 'website' in config:
                params = {
                    't': config['category'],
                    'apikey': config['apikey'],
                    'extended': 1
                }
                config['url'] = config['website']+'/api?'+urllib.urlencode(params)
        return config
    def fill_entries_for_url(self, url, config):
        """Fetch the newznab RSS feed at *url* and convert each item to
        a Flexget Entry (copying all RSS fields, with 'url' = 'link')."""
        entries = []
        rss = feedparser.parse(url)
        status = rss.get('status', False)
        if status != 200 and status != 301: # in case of redirection...
            log.error('Search result not 200 (OK), received %s' % status)
            # NOTE(review): a bare 'raise' outside an except block raises
            # RuntimeError('No active exception to re-raise'); a specific
            # exception (e.g. plugin.PluginError) was probably intended.
            raise
        if not len(rss.entries):
            log.info('No results returned')
        for rss_entry in rss.entries:
            new_entry = Entry()
            for key in rss_entry.keys():
                new_entry[key] = rss_entry[key]
            new_entry['url'] = new_entry['link']
            entries.append(new_entry)
        return entries
    def search(self, task, entry, config=None):
        """Dispatch to the category-specific search; music/book are not
        implemented yet and return no entries."""
        config = self.build_config(config)
        if config['category'] == 'movie':
            return self.do_search_movie(entry, config)
        elif config['category'] == 'tvsearch':
            return self.do_search_tvsearch(entry, config)
        else:
            entries = []
            log.warning("Not done yet...")
            return entries
    def do_search_tvsearch(self, arg_entry, config=None):
        """Search TV episodes by TVRage show id plus the entry's season
        and episode numbers."""
        log.info('Searching for %s' % (arg_entry['title']))
        # normally this should be used with emit_series who has provided season and episodenumber
        if 'series_name' not in arg_entry or 'series_season' not in arg_entry or 'series_episode' not in arg_entry:
            return []
        serie_info = lookup_series(arg_entry['series_name'])
        if not serie_info:
            return []
        url = (config['url'] + '&rid=%s&season=%s&ep=%s' %
               (serie_info.showid, arg_entry['series_season'], arg_entry['series_episode']))
        return self.fill_entries_for_url(url, config)
    def do_search_movie(self, arg_entry, config=None):
        """Search movies by the entry's IMDB id (stripped of the 'tt' prefix)."""
        entries = []
        log.info('Searching for %s (imdbid:%s)' % (arg_entry['title'], arg_entry['imdb_id']))
        # normally this should be used with emit_movie_queue who has imdbid (i guess)
        if 'imdb_id' not in arg_entry:
            return entries
        imdb_id = arg_entry['imdb_id'].replace('tt', '')
        url = config['url'] + '&imdbid=' + imdb_id
        return self.fill_entries_for_url(url, config)
@event('plugin.register')
def register_plugin():
    # Register under the 'search' group so tasks can use newznab as a
    # search provider (plugin API v2).
    plugin.register(Newznab, 'newznab', api_ver=2, groups=['search'])
| {
"repo_name": "v17al/Flexget",
"path": "flexget/plugins/search_newznab.py",
"copies": "1",
"size": "3959",
"license": "mit",
"hash": 6076613967725724000,
"line_mean": 33.4260869565,
"line_max": 115,
"alpha_frac": 0.5657994443,
"autogenerated": false,
"ratio": 3.9043392504930967,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.996642861245532,
"avg_score": 0.0007420164675553936,
"num_lines": 115
} |
__author__ = 'delandtj'
from JumpScale import j
import os
import os.path
import subprocess
import sys
import time
command_name = sys.argv[0]  # tag used for (currently disabled) syslog output
# Absolute paths of the external tools this module shells out to.
vsctl = "/usr/bin/ovs-vsctl"
ofctl = "/usr/bin/ovs-ofctl"
ip = "/sbin/ip"
ethtool = "/sbin/ethtool"
PHYSMTU = 2000  # MTU intended for physical backend interfaces
# TODO : errorhandling
def send_to_syslog(msg):
    """Logging hook for this module; currently a deliberate no-op
    (the syslog/print wiring is disabled)."""
    pass
def doexec(args):
    """Execute a subprocess, then return its return code, stdout and stderr"""
    # NOTE(review): wait() before reading the PIPEs can deadlock when the
    # child produces more output than the pipe buffer holds; callers rely on
    # receiving open file objects, so this is documented, not changed.
    send_to_syslog(args)
    proc = subprocess.Popen(args, stdin=None, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, close_fds=True, bufsize=-1)
    rc = proc.wait()
    # rc = proc.communicate()
    stdout = proc.stdout
    stderr = proc.stderr
    return rc, stdout, stderr
def dobigexec(args):
    """Execute a subprocess and return its (stdout, stderr) byte strings.

    Unlike doexec() this uses communicate(), so the full output is read into
    memory (safe for large output) and no return code / file objects are
    returned — only the (stdout, stderr) tuple.
    """
    send_to_syslog(args)
    proc = subprocess.Popen(args, stdin=None, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, close_fds=True, bufsize=-1)
    rc = proc.communicate()
    return rc
def get_all_namespaces():
    """Return the names of all network namespaces known to `ip netns`."""
    rc, out, err = doexec(('%s netns ls' % ip).split())
    return [entry.strip() for entry in out.readlines()]
def get_all_ifaces():
    """
    Map interface name -> MAC address for every NIC under /sys/class/net.
    @rtype : dict
    """
    sysfs = '/sys/class/net'
    result = {}
    for name in os.listdir(sysfs):
        addr_file = os.path.join(sysfs, name, "address")
        if not os.path.exists(addr_file):
            continue
        with open(addr_file) as fh:
            result[name] = fh.readline().strip()
    return result
def get_all_bridges():
    """Return the names of all Open vSwitch bridges on this host."""
    rc, out, err = doexec(('%s list-br' % vsctl).split())
    return [entry.strip() for entry in out.readlines()]
def ip_link_set(device, args):
    """Run `ip l set <device> <args>`; errors are ignored (fire and forget)."""
    doexec(("ip l set " + device + " " + args).split())
def limit_interface_rate(limit, interface, burst):
    """
    Apply OVS ingress policing (rate limiting) to an interface.

    :param limit: ingress_policing_rate value (kbps)
    :param interface: OVS interface name
    :param burst: ingress_policing_burst value (kb)
    :raises j.exceptions.RuntimeError: when either ovs-vsctl call fails
    """
    # BUG FIX: the command strings were never %-interpolated, so ovs-vsctl was
    # invoked with literal '%s' placeholders; also fixed the j.exception ->
    # j.exceptions typo (consistent with the rest of this module).
    cmd = "%s set interface %s ingress_policing_rate=%s" % (vsctl, interface, limit)
    r, s, e = doexec(cmd.split())
    if r:
        raise j.exceptions.RuntimeError(
            "Problem with setting rate on interface: %s , problem was : %s " % (interface, e))
    cmd = "%s set interface %s ingress_policing_burst=%s" % (vsctl, interface, burst)
    r, s, e = doexec(cmd.split())
    if r:
        raise j.exceptions.RuntimeError(
            "Problem with setting burst on interface: %s , problem was : %s " % (interface, e))
def createBridge(name):
    """Create an OVS bridge (idempotent via --may-exist); the special
    bridge named "public" additionally gets spanning tree enabled."""
    cmd = '%s --may-exist add-br %s' % (vsctl, name)
    r, s, e = doexec(cmd.split())
    if r:
        raise j.exceptions.RuntimeError("Problem with creation of bridge %s, err was: %s" % (name, e))
    if name == "public":
        cmd = '%s set Bridge %s stp_enable=true' % (vsctl, name)
        r, s, e = doexec(cmd.split())
        if r:
            raise j.exceptions.RuntimeError("Problem setting STP on bridge %s, err was: %s" % (name, e))
def destroyBridge(name):
    """Delete an OVS bridge; a missing bridge is not an error (--if-exists)."""
    rc, out, err = doexec(('%s --if-exists del-br %s' % (vsctl, name)).split())
    if rc:
        raise j.exceptions.RuntimeError("Problem with destruction of bridge %s, err was: %s" % (name, err))
def listBridgePorts(name):
    """Return the raw `ovs-vsctl list-ports` output for bridge *name*.

    :raises j.exceptions.RuntimeError: when ovs-vsctl fails
    """
    cmd = '%s list-ports %s' % (vsctl, name)
    r, s, e = doexec(cmd.split())
    if r:
        # BUG FIX: was `j.exception.RuntimeError` (AttributeError at raise
        # time); `j.exceptions` is what the rest of this module uses.
        raise j.exceptions.RuntimeError("Problem with listing of bridge %s's ports , err was: %s " % (name, e))
    return s.read()
def VlanPatch(parentbridge, vlanbridge, vlanid):
    """Link *vlanbridge* to *parentbridge* through a pair of patch ports,
    tagging the parent-side port with *vlanid*."""
    parentpatchport = '%s-%s' % (vlanbridge, str(vlanid))
    bridgepatchport = '%s-%s' % (parentbridge, str(vlanid))
    # Parent side: tagged patch port peering with the vlan-bridge side.
    cmd = '%s add-port %s %s tag=%s -- set Interface %s type=patch options:peer=%s' % (
        vsctl, parentbridge, parentpatchport, vlanid, parentpatchport, bridgepatchport)
    r, s, e = doexec(cmd.split())
    if r:
        raise j.exceptions.RuntimeError("Add extra vlan pair failed %s" % (e.readlines()))
    # Vlan-bridge side: untagged peer of the port above.
    cmd = '%s add-port %s %s -- set Interface %s type=patch options:peer=%s' % (
        vsctl, vlanbridge, bridgepatchport, bridgepatchport, parentpatchport)
    r, s, e = doexec(cmd.split())
    if r:
        raise j.exceptions.RuntimeError("Add extra vlan pair failed %s" % (e.readlines()))
def addVlanPatch(parbr, vlbr, id, mtu=None):
    """Idempotent variant of VlanPatch: creates the vlan bridge and the
    patch-port pair only when they do not already exist; optionally sets
    the vlan bridge's MTU."""
    def bridge_exists(br):
        # `ovs-vsctl br-exists` exits 0 when the bridge is present.
        brexist = "{0} br-exists {1}".format(vsctl, br)
        r, s, e = doexec(brexist.split())
        return r == 0

    def port_exists(br, port):
        # Substring match on the list-ports output.
        listprts = "{0} list-ports {1}".format(vsctl, br)
        r, s, e = doexec(listprts.split())
        return port in s.read()

    parport = "{}-{!s}".format(vlbr, id)
    brport = "{}-{!s}".format(parbr, id)
    if not bridge_exists(vlbr):
        brcreate = "{0} add-br {1}".format(vsctl, vlbr)
        r, s, e = doexec(brcreate.split())
    if not port_exists(vlbr, brport):
        addport = "{0} add-port {1} {3} -- set Interface {3} type=patch options:peer={2}".format(
            vsctl, vlbr, parport, brport)
        r, s, e = doexec(addport.split())
    if not port_exists(parbr, parport):
        # Parent side carries the vlan tag.
        c = "{4} add-port {0} {2} tag={3!s} -- set Interface {2} type=patch options:peer={1}".format(
            parbr, brport, parport, id, vsctl)
        r, s, e = doexec(c.split())
    if mtu:
        ip_link_set(vlbr, 'mtu {0}'.format(mtu))
def createNameSpace(name):
    """Create a named network namespace unless it already exists."""
    if name in get_all_namespaces():
        send_to_syslog('Namespace %s already exists, not creating' % name)
    else:
        doexec(('%s netns add %s' % (ip, name)).split())
def destroyNameSpace(name):
    """Delete a named network namespace when it is present."""
    if name in get_all_namespaces():
        doexec(('%s netns delete %s' % (ip, name)).split())
    else:
        send_to_syslog('Namespace %s doesn\'t exist, nothing done ' % name)
def createVethPair(left, right):
    """Create a veth pair and bring both ends up; IPv6 is disabled on the
    left end only (the right end may live in a namespace later)."""
    cmd = '%s link add %s type veth peer name %s' % (ip, left, right)
    allifaces = get_all_ifaces()
    if left in allifaces or right in allifaces:
        # one of them already exists
        # NOTE(review): this only logs — the creation below is still attempted.
        send_to_syslog("Problem with creation of vet pair %s, %s :one of them exists" % (left, right))
    r, s, e = doexec(cmd.split())
    # wait for it to come up
    time.sleep(.2)
    ip_link_set(left, 'up')
    ip_link_set(right, 'up')  # when sent into namespace, it'll be down again
    disable_ipv6(left)  # not right, as it can be used in a namespace
def destroyVethPair(left):
    """Delete a veth pair via its left-side name (the peer goes with it)."""
    rc, out, err = doexec(('%s link del %s ' % (ip, left)).split())
    if rc:
        raise j.exceptions.RuntimeError("Problem with destruction of Veth pair %s, err was: %s" % (left, err))
def createVXlan(vxname, vxid, multicast, vxbackend):
    """
    Always brought up too
    Created with no protocol, and upped (no ipv4, no ipv6)
    Fixed standard : 239.0.x.x, id
    # 0000-fe99 for customer vxlans, ff00-ffff for environments
    MTU of VXLAN = 1500

    :param vxname: name of the vxlan link to create
    :param vxid: vxlan network identifier
    :param multicast: multicast group address
    :param vxbackend: underlying device carrying the tunnel
    """
    cmd = 'ip link add %s type vxlan id %s group %s ttl 60 dev %s' % (vxname, vxid, multicast, vxbackend)
    r, s, e = doexec(cmd.split())
    if r:
        # BUG FIX: failure used to be reported only AFTER trying to configure
        # the (nonexistent) link; bail out before touching it.
        send_to_syslog("Problem with creation of vxlan %s, err was: %s" % (vxname, e.readlines()))
        return
    disable_ipv6(vxname)
    setMTU(vxname, 1500)
    ip_link_set(vxname, 'up')
def destroyVXlan(name):
    """Delete a vxlan link; on failure the message is logged and the whole
    process exits."""
    cmd = '%s link del %s ' % (ip, name)
    r, s, e = doexec(cmd.split())
    if r:
        send_to_syslog("Problem with destruction of Veth pair %s, err was: %s" % (name, e.readlines()))
        # NOTE(review): exit(1) in a library function kills the caller's
        # process — consider raising instead; kept as-is to preserve behavior.
        exit(1)
def addIPv4(interface, ipobj, namespace=None):
    """Add an IPv4 address to *interface*, optionally inside *namespace*.

    :param ipobj: netaddr.IPNetwork-like object (provides .ip and .prefixlen)
    :return: (returncode, stderr) of the `ip addr add` invocation
    """
    netmask = ipobj.prefixlen
    ipv4addr = ipobj.ip
    # if ip existst on interface, we assume all ok
    if namespace is not None:
        cmd = '%s netns exec %s ip addr add %s/%s dev %s' % (ip, namespace, ipv4addr, netmask, interface)
    else:
        cmd = '%s addr add %s/%s dev %s' % (ip, ipv4addr, netmask, interface)
    r, s, e = doexec(cmd.split())
    if r:
        send_to_syslog('Could not add IP %s to interface %s ' % (ipv4addr, interface))
    return r, e
def addIPv6(interface, ipobj, namespace=None):
    """Add an IPv6 address to *interface*, optionally inside *namespace*.

    :param ipobj: netaddr.IPNetwork-like object (provides .ip and .prefixlen)
    :return: (returncode, stderr) of the `ip addr add` invocation
    """
    netmask = ipobj.prefixlen
    ipv6addr = ipobj.ip
    # if ip existst on interface, we assume all ok
    # BUG FIX: referenced the undefined global `allnamespaces` (NameError);
    # resolve the namespace list at call time instead.
    if namespace is not None and namespace in get_all_namespaces():
        cmd = '%s netns exec %s ip addr add %s/%s dev %s' % (ip, namespace, ipv6addr, netmask, interface)
    else:
        cmd = '%s addr add %s/%s dev %s' % (ip, ipv6addr, netmask, interface)
    r, s, e = doexec(cmd.split())
    if r:
        send_to_syslog('Could not add IP %s to interface %s ' % (ipv6addr, interface))
    return r, e
def connectIfToBridge(bridge, interfaces):
    """Attach each interface in *interfaces* to *bridge*, removing any stale
    port entry first so the add cannot collide."""
    for interface in interfaces:
        cmd = '%s --if-exists del-port %s %s' % (vsctl, bridge, interface)
        r, s, e = doexec(cmd.split())
        cmd = '%s --may-exist add-port %s %s' % (vsctl, bridge, interface)
        r, s, e = doexec(cmd.split())
        if r:
            raise j.exceptions.RuntimeError('Error adding port %s to bridge %s' % (interface, bridge))
def removeIfFromBridge(bridge, interfaces):
    """Detach each interface in *interfaces* from *bridge* (missing ports are
    ignored thanks to --if-exists).

    :raises j.exceptions.RuntimeError: when ovs-vsctl fails
    """
    for interface in interfaces:
        cmd = '%s --if-exists del-port %s %s' % (vsctl, bridge, interface)
        r, s, e = doexec(cmd.split())
        if r:
            # BUG FIX: the message wrongly said "adding ... to" — this
            # function removes ports.
            raise j.exceptions.RuntimeError('Error removing port %s from bridge %s' % (interface, bridge))
def connectIfToNameSpace(nsname, interface):
    """Move *interface* into the network namespace *nsname*."""
    rc, out, err = doexec(('%s link set %s netns %s' % (ip, interface, nsname)).split())
    if rc:
        raise j.exceptions.RuntimeError("Error moving %s to namespace %s" % (interface, nsname))
def disable_ipv6(interface):
    """Turn off IPv6 on *interface* via sysctl; no-op for unknown interfaces."""
    if interface not in get_all_ifaces():
        return
    doexec(('sysctl -w net.ipv6.conf.%s.disable_ipv6=1' % interface).split())
def setMTU(interface, mtu):
    """Set the MTU of *interface*; raises when the `ip` call fails."""
    rc, out, err = doexec(('ip link set %s mtu %s' % (interface, mtu)).split())
    if rc:
        raise j.exceptions.RuntimeError('Could not set %s to MTU %s' % (interface, mtu))
def addBond(bridge, bondname, iflist, lacp="active", lacp_time="fast", mode="balance-tcp", trunks=None):
    # bond_mode=balance-tcp lacp=active bond_fake_iface=false
    # other_config:lacp-time=fast bond_updelay=2000 bond_downdelay=400
    """
    Add a bond to a bridge
    :param bridge: BridgeName (string)
    :param bondname: Bondname (string)
    :param iflist: list or tuple (a separator-delimited string also works)
    :param lacp: "active" or "passive"
    :param lacp_time: mode "fast" or "slow"
    :param mode: balance-tcp, balance-slb, active-passive
    :param trunks: allowed VLANS (list or tuple, or delimited string)
    """
    import re  # local import: `re` is not among this module's top-level imports
    # BUG FIX: accept both a delimited string and a real sequence of names
    # (re.split on a list raised TypeError).
    intf = re.split(r'\W+', iflist) if isinstance(iflist, str) else list(iflist)
    # BUG FIX: the command never included the ovs-vsctl executable, so doexec
    # tried to run "add-bond" itself.
    buildup = "%s add-bond %s %s " % (vsctl, bridge, bondname) + " ".join(set(intf)) + " lacp=%s " % lacp
    buildup = buildup + " -- set Port %s bond_mode=%s bond_fake_iface=false " % (bondname, mode)
    buildup = buildup + "other_config:lacp-time=%s bond_updelay=2000 bond_downdelay=400 " % lacp_time
    if trunks is not None:
        # BUG FIX: `tr` was only defined when trunks was a string, so passing
        # a list/tuple raised NameError here.
        tr = re.split(r'\W+', trunks) if isinstance(trunks, str) else trunks
        buildup = buildup + "trunks=" + ",".join(str(t) for t in set(tr))
    # no use to autoconf ipv6, as this won't work anyway
    # BUG FIX: iterate the parsed interface names, not the characters of the
    # raw `iflist` string.
    for nic in intf:
        disable_ipv6(nic)
    r, s, e = doexec(buildup.split())
    if r:
        # BUG FIX: `if e:` was always true (e is a pipe object); test the
        # return code instead.
        raise j.exceptions.RuntimeError("Could not create bond %s for bridge %s" % (bondname, bridge))
| {
"repo_name": "Jumpscale/jumpscale_core8",
"path": "lib/JumpScale/sal/openvswitch/VXNet/utils.py",
"copies": "1",
"size": "11295",
"license": "apache-2.0",
"hash": -8441063652891443000,
"line_mean": 33.8611111111,
"line_max": 112,
"alpha_frac": 0.6052235502,
"autogenerated": false,
"ratio": 3.127076411960133,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9226270148149168,
"avg_score": 0.0012059628021929994,
"num_lines": 324
} |
__author__ = 'delandtj'
from vxlan import *
from netaddr import *
def rebuildVXLan():
    # Stub: loads the current layout but does not (yet) rebuild anything.
    nl = NetLayout()
    layout = nl.load()
if __name__ == "__main__":
print('Getting Config')
a = NetLayout()
layout = a.load()
layout = a.nicdetail
pprint_dict(layout)
ip_layout = add_ips_to(layout)
# full_layout = add_bridgeconfig_to(ip_layout)
pprint_dict(ip_layout)
print 'vx1, ff31'
vx1 = VXNet(0xff33,backend='p2p1')
vx1.innamespace = True
vx1.ipv4 = IPNetwork('192.168.1.254/24')
vx1.apply()
# vxbackend implied (check for it)
if not 'vxbackend' in get_all_ifaces():
print 'Error: vxbackend doensn\'t exist'
backend = Bridge('vxbackend')
backend.create()
backend.connect('dummy0')
disable_ipv6('dummy0')
print '\n'
print 'vx2, ff32'
vx2 = VXNet('ff32')
vx2.innamespace = False
vx2.inbridge = True
vx2.router = True
vx2.ipv4 = IPNetwork('192.168.1.254/24')
vx2.apply()
print'\n'
print 'vxlan, 3456'
vxlan = VXNet(3456)
vxlan.ipv4 = IPNetwork('10.101.111.9/24')
vxlan.ipv6 = IPNetwork('2a02:578:f33:a01::1/64')
vxlan.backend = 'p2p1'
vxlan.innamespace = False
vxlan.inbridge = False
vxlan.apply()
vx1.destroy()
vx2.destroy()
vxlan.destroy()
| {
"repo_name": "Jumpscale/jumpscale6_core",
"path": "lib/JumpScale/lib/ovsnetconfig/VXNet/tests.py",
"copies": "1",
"size": "1336",
"license": "bsd-2-clause",
"hash": 9030069150539202000,
"line_mean": 22.4385964912,
"line_max": 57,
"alpha_frac": 0.5950598802,
"autogenerated": false,
"ratio": 2.9170305676855897,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8989403335099564,
"avg_score": 0.004537422557205236,
"num_lines": 57
} |
__author__ = 'delandtj'
from vxlan import *
from netaddr import *
def rebuildVXLan():
    """Stub: loads the current layout but does not (yet) rebuild anything."""
    current = NetLayout()
    layout = current.load()
if __name__ == "__main__":
print('Getting Config')
a = NetLayout()
layout = a.load()
layout = a.nicdetail
pprint_dict(layout)
ip_layout = add_ips_to(layout)
# full_layout = add_bridgeconfig_to(ip_layout)
pprint_dict(ip_layout)
print('vx1, ff31')
vx1 = VXNet(0xff33, backend='p2p1')
vx1.innamespace = True
vx1.ipv4 = IPNetwork('192.168.1.254/24')
vx1.apply()
# vxbackend implied (check for it)
if not 'vxbackend' in get_all_ifaces():
print('Error: vxbackend doensn\'t exist')
backend = Bridge('vxbackend')
backend.create()
backend.connect('dummy0')
disable_ipv6('dummy0')
print('\n')
print('vx2, ff32')
vx2 = VXNet('ff32')
vx2.innamespace = False
vx2.inbridge = True
vx2.router = True
vx2.ipv4 = IPNetwork('192.168.1.254/24')
vx2.apply()
print('\n')
print('vxlan, 3456')
vxlan = VXNet(3456)
vxlan.ipv4 = IPNetwork('10.101.111.9/24')
vxlan.ipv6 = IPNetwork('2a02:578:f33:a01::1/64')
vxlan.backend = 'p2p1'
vxlan.innamespace = False
vxlan.inbridge = False
vxlan.apply()
vx1.destroy()
vx2.destroy()
vxlan.destroy()
| {
"repo_name": "Jumpscale/jumpscale_core8",
"path": "lib/JumpScale/sal/openvswitch/VXNet/tests.py",
"copies": "1",
"size": "1331",
"license": "apache-2.0",
"hash": 4956388046200989000,
"line_mean": 22.350877193,
"line_max": 52,
"alpha_frac": 0.5972952667,
"autogenerated": false,
"ratio": 2.9317180616740086,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4029013328374008,
"avg_score": null,
"num_lines": null
} |
__author__ = 'delandtj'
from netaddr import *
from netclasses import *
from systemlist import *
command_name = sys.argv[0]
class NetLayout:
    """Snapshot of the host's NIC/bridge layout (filled by get_nic_params())."""

    def __init__(self):
        self.interfaces = get_all_ifaces()  # name -> MAC
        self.nicdetail = {}  # name -> per-NIC dict, populated by load()
        self.bridges = {}

    def load(self):
        self.nicdetail = get_nic_params()

    def reload(self):
        self.load()

    def is_phys(self, interface):
        # 'detail' is [type, mac, carrier, speed]; type is 'PHYS' for pci NICs.
        if 'PHYS' in self.nicdetail[interface]['detail']:
            return True
        return False

    def has_ip(self, interface, ipnetobj):
        for i in self.nicdetail[interface]['ipaddr']:
            if ipnetobj.ip == i.ip:
                return True
        return False

    def exist_ip(self, ipobj):
        # BUG FIX: 'ipaddr' holds a list of address objects (see has_ip), so it
        # must be iterated; calling `.ip` on the list raised AttributeError.
        for interface in self.nicdetail:
            for addr in self.nicdetail[interface]['ipaddr']:
                if addr.ip == ipobj.ip:
                    return True
        return False

    def exist_interface(self, interface):
        if interface in self.interfaces:
            return True
        return False
class VXNet(object):
    """A VXLAN-backed network: the tunnel itself plus, optionally, a bridge
    and/or a namespace that carries its IP configuration."""

    def __init__(self, netid, backend='vxbackend'):
        self.netid = NetID(netid)
        self.ipv6 = None
        self.ipv4 = None
        self.backend = backend
        self.existing = NetLayout()
        # BUG FIX: these mode flags were never initialized, so apply() raised
        # AttributeError unless the caller set both by hand. Default to the
        # plain "IP directly on the vxlan" mode (backward compatible: callers
        # that set them explicitly are unaffected).
        self.innamespace = False
        self.inbridge = False

    def apply(self):
        """
        ethpairs : left always to bridge, right to namespace
        """
        self.existing.load()
        if self.innamespace == True:
            # IP in Namespace: vxlan -> bridge -> veth pair -> namespace
            vxlan = VXlan(self.netid, self.backend)
            if vxlan.name in self.existing.nicdetail:
                send_to_syslog('VXLan %s exists, not creating' % vxlan.name)
            else:
                vxlan.create()
                vxlan.no6()
            bridge = VXBridge(self.netid)
            self.bridge = bridge
            if bridge.name in self.existing.nicdetail:
                send_to_syslog('Bridge %s exists, not creating' % bridge.name)
            else:
                bridge.create()
                bridge.no6()
                bridge.connect(vxlan.name)
            namespace = VXNameSpace(self.netid)
            if namespace.name in self.existing.nicdetail:
                send_to_syslog('NameSpace %s exists, not creating' % namespace.name)
            else:
                namespace.create()
            veth = VethPair(self.netid)
            veth.create()
            bridge.connect(veth.left)
            namespace.connect(veth.right)
            addIPv4(veth.right, self.ipv4, namespace=namespace.name)
            if not self.ipv6 == None:
                addIPv6(veth.right, self.ipv6, namespace=namespace.name)
        elif self.inbridge == True:
            # IP on bridge
            vxlan = VXlan(self.netid, self.backend)
            vxlan.create()
            vxlan.no6()
            bridge = VXBridge(self.netid)
            self.bridge = bridge
            bridge.create()
            bridge.connect(vxlan.name)
            if self.ipv4 is not None:
                addIPv4(bridge.name, self.ipv4)
            if self.ipv6 is not None:
                addIPv6(bridge.name, self.ipv6)
        else:
            # no bridge, no namespace, just IP
            vxlan = VXlan(self.netid, self.backend)
            vxlan.create()
            addIPv4(vxlan.name, self.ipv4)
            addIPv6(vxlan.name, self.ipv6)

    def rebuild(self, netid):
        # destroy all connected with id
        pass

    def destroy(self, netid):
        # destroy all connected with id
        pass
| {
"repo_name": "Jumpscale/jumpscale6_core",
"path": "lib/JumpScale/lib/ovsnetconfig/VXNet/vxlan.py",
"copies": "1",
"size": "3426",
"license": "bsd-2-clause",
"hash": 2163112171545138200,
"line_mean": 28.7913043478,
"line_max": 95,
"alpha_frac": 0.5528312901,
"autogenerated": false,
"ratio": 3.9020501138952164,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9892858933667383,
"avg_score": 0.012404494065566684,
"num_lines": 115
} |
__author__ = 'delandtj'
from netaddr import *
from VXNet.netclasses import *
from VXNet.systemlist import *
command_name = sys.argv[0]
class NetLayout:
    """Snapshot of the host's NIC/bridge layout (filled by get_nic_params())."""

    def __init__(self):
        self.interfaces = get_all_ifaces()  # name -> MAC
        self.nicdetail = {}  # name -> per-NIC dict, populated by load()
        self.bridges = {}

    def load(self):
        self.nicdetail = get_nic_params()

    def reload(self):
        self.load()

    def is_phys(self, interface):
        # 'detail' is [type, mac, carrier, speed]; type is 'PHYS' for pci NICs.
        if 'PHYS' in self.nicdetail[interface]['detail']:
            return True
        return False

    def has_ip(self, interface, ipnetobj):
        for i in self.nicdetail[interface]['ipaddr']:
            if ipnetobj.ip == i.ip:
                return True
        return False

    def exist_ip(self, ipobj):
        # BUG FIX: 'ipaddr' holds a list of address objects (see has_ip), so it
        # must be iterated; calling `.ip` on the list raised AttributeError.
        for interface in self.nicdetail:
            for addr in self.nicdetail[interface]['ipaddr']:
                if addr.ip == ipobj.ip:
                    return True
        return False

    def exist_interface(self, interface):
        if interface in self.interfaces:
            return True
        return False
class VXNet:
    """A VXLAN-backed network: the tunnel itself plus, optionally, a bridge
    and/or a namespace that carries its IP configuration."""

    def __init__(self, netid, backend='vxbackend'):
        self.netid = NetID(netid)
        self.ipv6 = None
        self.ipv4 = None
        self.backend = backend
        self.existing = NetLayout()
        # BUG FIX: these mode flags were never initialized, so apply() raised
        # AttributeError unless the caller set both by hand. Default to the
        # plain "IP directly on the vxlan" mode (backward compatible: callers
        # that set them explicitly are unaffected).
        self.innamespace = False
        self.inbridge = False

    def apply(self):
        """
        ethpairs : left always to bridge, right to namespace
        """
        self.existing.load()
        if self.innamespace == True:
            # IP in Namespace: vxlan -> bridge -> veth pair -> namespace
            vxlan = VXlan(self.netid, self.backend)
            if vxlan.name in self.existing.nicdetail:
                send_to_syslog('VXLan %s exists, not creating' % vxlan.name)
            else:
                vxlan.create()
                vxlan.no6()
            bridge = VXBridge(self.netid)
            self.bridge = bridge
            if bridge.name in self.existing.nicdetail:
                send_to_syslog('Bridge %s exists, not creating' % bridge.name)
            else:
                bridge.create()
                bridge.no6()
                bridge.connect(vxlan.name)
            namespace = VXNameSpace(self.netid)
            if namespace.name in self.existing.nicdetail:
                send_to_syslog('NameSpace %s exists, not creating' % namespace.name)
            else:
                namespace.create()
            veth = VethPair(self.netid)
            veth.create()
            bridge.connect(veth.left)
            namespace.connect(veth.right)
            addIPv4(veth.right, self.ipv4, namespace=namespace.name)
            if not self.ipv6 is None:
                addIPv6(veth.right, self.ipv6, namespace=namespace.name)
        elif self.inbridge == True:
            # IP on bridge
            vxlan = VXlan(self.netid, self.backend)
            vxlan.create()
            vxlan.no6()
            bridge = VXBridge(self.netid)
            self.bridge = bridge
            bridge.create()
            bridge.connect(vxlan.name)
            if self.ipv4 is not None:
                addIPv4(bridge.name, self.ipv4)
            if self.ipv6 is not None:
                addIPv6(bridge.name, self.ipv6)
        else:
            # no bridge, no namespace, just IP
            vxlan = VXlan(self.netid, self.backend)
            vxlan.create()
            addIPv4(vxlan.name, self.ipv4)
            addIPv6(vxlan.name, self.ipv6)

    def rebuild(self, netid):
        # destroy all connected with id
        pass

    def destroy(self, netid):
        # destroy all connected with id
        pass
| {
"repo_name": "Jumpscale/jumpscale_core8",
"path": "lib/JumpScale/sal/openvswitch/VXNet/vxlan.py",
"copies": "1",
"size": "3483",
"license": "apache-2.0",
"hash": 547517932346788000,
"line_mean": 28.7692307692,
"line_max": 84,
"alpha_frac": 0.5455067471,
"autogenerated": false,
"ratio": 3.926719278466742,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4972226025566742,
"avg_score": null,
"num_lines": null
} |
__author__ = 'delandtj'
from utils import *
import fcntl
import time
import re
from netaddr import *
from utils import *
def acquire_lock(path):
    """
    little tool to do EAGAIN until lockfile released
    :param path:
    :return: path
    """
    lock_file = open(path, 'w')
    # Spin with 1-second backoff until an exclusive, non-blocking flock on the
    # file succeeds; the open file object is returned so the lock stays held.
    while True:
        send_to_syslog("attempting to acquire lock %s" % path)
        try:
            fcntl.lockf(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
            send_to_syslog("acquired lock %s" % path)
            return lock_file
        except IOError, e:
            send_to_syslog("failed to acquire lock %s because '%s' - waiting 1 second" % (path, e))
            time.sleep(1)
def wait_for_if(interface):
    # Placeholder: waiting for interface readiness is not implemented yet.
    pass
def pprint_dict(a):
    """Pretty-print any mapping as a plain dict."""
    import pprint as pp
    pp.pprint(dict(a.items()))
def get_nic_params():
    """Inventory every NIC (host plus all namespaces) by parsing
    `ip -o -d link show`; returns {name: {'params': ..., 'detail': ...,
    'namespace': ...}}."""
    nictypes = {}
    bridges = get_all_bridges()
    namespaces = get_all_namespaces()

    def populatenictypes(lines, namespace=None):
        # Classify each link line as vxlan / veth / tun / bridge / plain NIC.
        for l in lines:
            if not 'state' in l: continue
            entry = l.strip().split()
            intf = entry[1].rstrip(':')
            if intf == 'lo': continue
            nictypes[intf] = {}
            if 'vxlan' in entry:
                want = ('state', 'id', 'mtu', 'id', 'group', 'dev', 'port')
                params = parse_ipl_line(entry, want)
                params['type'] = 'vxlan'
            elif 'veth' in entry:
                want = ('state', 'id', 'mtu')
                params = parse_ipl_line(entry, want)
                params['peer'] = find_veth_peer(intf, ns=namespace)
                params['type'] = 'veth'
            elif 'tun' in entry:
                want = ('state', 'id', 'mtu')
                params = parse_ipl_line(entry, want)
                params['type'] = 'tun'
            elif intf in bridges:
                want = ('state', 'id', 'mtu')
                params = parse_ipl_line(entry, want)
                params['type'] = 'bridge'
            else:
                want = ('state', 'id', 'mtu')
                params = parse_ipl_line(entry, want)
            nictypes[intf]['params'] = params
            if namespace == None:
                # sysfs detail is only reachable for host interfaces
                nictypes[intf]['detail'] = get_nic_detail(intf)
                nictypes[intf]['namespace'] = None
            else:
                nictypes[intf]['namespace'] = namespace
        return nictypes

    # local
    cmd = '%s -o -d link show ' % ip
    intflist = dobigexec(cmd.split())
    lines = intflist[0].split('\n')
    nictypes = populatenictypes(lines)
    # all namespaces
    for ns in namespaces:
        cmd = '%s netns exec %s %s -o -d link show' % (ip, ns, ip)
        (r, s, e) = doexec(cmd.split())
        lines = s.readlines()
        nictypes = dict(populatenictypes(lines, namespace=ns).items() + nictypes.items())
    return nictypes
def parse_ipl_line(line, params):
    """
    Extract selected fields from one tokenized `ip -o -d link show` line.

    :param line: the line already split into tokens
    :param params: tuple of field names; 'state' and 'id' are positional,
                   any other name is looked up as keyword-followed-by-value
    :return: dict mapping each requested field to its parsed value
    """
    settings = {}
    for key in params:
        if key == 'state':
            # flags live in token 2, e.g. '<BROADCAST,UP,LOWER_UP>'
            settings[key] = line[2].lstrip('<').rstrip('>').split(',')
        elif key == 'id':
            settings[key] = line[0].rstrip(':')
        else:
            # value follows its keyword; may raise IndexError on short lines
            settings[key] = line[line.index(key) + 1]
    return settings
def get_nic_detail(interface):
    """Return [type, mac, carrier, speed] for *interface*; type is derived
    from the sysfs symlink target, carrier/speed from ethtool (PHYS only)."""
    prefix = '/sys/class/net'
    # every interface has a mac
    carrier = None; speed = None; peer = None
    with open(os.path.join(prefix, interface, "address")) as f:
        addr = f.readline().strip()
    # if linked to somewhere in pci it prolly is physical
    if 'pci' in os.readlink(os.path.join(prefix, interface)):
        typ = 'PHYS'
    elif 'virtual' in os.readlink(os.path.join(prefix, interface)):
        typ = 'VIRT'
    else:
        typ = 'UNKNOWN'
    if typ == 'PHYS':
        # verify if link has carrier
        (r, s, e) = doexec(['ethtool', interface])
        # Link Detected and speed
        out = s.readlines()
        for i in out:
            string = i.strip().split(':')
            if string[0] == 'Link detected':
                carrier = True if string[1].strip() == 'yes' else False
        if carrier == True:
            for i in out:
                string = i.strip().split(':')
                if string[0] == 'Speed': speed = string[1].strip()
    return [typ, addr, carrier, speed]
def find_veth_peer(interface, ns=None):
    """
    Left or right part of veth
    @param interface:
    @param ns: optional namespace to run ethtool in
    @return: the peer's interface index (int) from `ethtool -S`, or None
    """
    cmd = '%s -S %s' % (ethtool, interface)
    if ns != None:
        cmd = '%s netns exec %s ' % (ip, ns) + cmd
    r, s, e = doexec(cmd.split())
    a = s.readlines()
    # veth statistics expose a single 'peer_ifindex' line
    peer = [int(x.split(':')[1].rstrip()) for x in a if 'ifindex' in x]
    if len(peer) > 0:
        return peer[0]
    else:
        return None
def add_ips_to(physlayout):
    """Merge per-interface address info from get_ip_addrs() into a copy of
    *physlayout*; namespaced interfaces get their addresses resolved inside
    their namespace."""
    fullset = {}
    iplist = get_ip_addrs()
    for key in iplist:  # you can have interfaces without ip
        fullset[key] = physlayout[key]
        # NOTE(review): get_ip_addrs() values are already {'ipaddrs': [...]}
        # dicts, so this nests {'ipaddrs': {'ipaddrs': [...]}} — confirm
        # whether consumers expect the list or the wrapper dict.
        fullset[key]['ipaddrs'] = iplist[key]
    # merge rest
    for key in physlayout:
        if key not in fullset:
            fullset[key] = physlayout[key]
            if physlayout[key]['namespace'] != None:
                fullset[key]['ipaddrs'] = get_ip_addrs(namespace=physlayout[key]['namespace'])
    return fullset
def get_ip_addrs(onlypermanent=False, namespace=None):
    """
    Collect IP addresses per interface from `ip -o addr show`.

    :param onlypermanent: skip addresses without a 'forever' lifetime
    :param namespace: optional network namespace to inspect
    :return: {iface: {'ipaddrs': [IPNetwork, ...]}}, loopback excluded
    """
    if namespace == None:
        cmd = '%s -o addr show' % ip
    else:
        cmd = '%s netns exec %s %s -o addr show' % (ip, namespace, ip)
    (r, s, e) = doexec(cmd.split())
    lines = s.readlines()
    iplist = {}
    for l in lines:
        i = l.strip().split()
        if not 'forever' in l and onlypermanent:
            continue
        iface = i[1].rstrip(':')
        ipstr = i[3]
        if iface == 'lo':
            continue
        # The original if/else appended identically in both branches;
        # setdefault expresses that single intent directly.
        iplist.setdefault(iface, {'ipaddrs': []})['ipaddrs'].append(IPNetwork(ipstr))
    return iplist
def isup(interface):
    """Return True when *interface* carries the UP flag in `ip -o link show`."""
    # BUG FIX: the format string has two placeholders but was interpolated
    # with `% interface` only, raising TypeError on every call.
    cmd = '%s -o link show dev %s' % (ip, interface)
    r, s, e = doexec(cmd.split())
    line = s.readlines()
    l = line[0].strip().split()
    # flags token looks like '<BROADCAST,MULTICAST,UP,LOWER_UP>'
    state = l[2].lstrip('<').rstrip('>').split(',')
    if 'UP' in state:
        return True
    return False
def getnetworkstructure(onlypermanent=True, without_ip=False):
    """Parse `ip -o addr show` into a per-interface dict with id, address
    lists keyed by family ('inet'/'inet6'), nictype, mac and link state.
    @param onlypermanent: skip non-permanent addresses (unless without_ip)
    @param without_ip:
    @return:
    """
    (r, s, e) = doexec('ip -o addr show'.split())
    interfaces = s.readlines()
    s = {}
    for l in interfaces:
        i = l.split()
        if not 'forever' in l and onlypermanent and not without_ip: continue
        id = re.match('\d+', i[0]).group()
        intf = i[1]; inet = i[2]; ipstr = i[3]
        if not s.has_key(intf): s[intf] = {}
        s[intf]['id'] = id
        if not s[intf].has_key(inet): s[intf][inet] = []
        s[intf][inet].append(IPNetwork(ipstr))
        nictype, mac, carrier, speed = get_nic_detail(intf)
        s[intf]['nictype'] = nictype
        s[intf]['mac'] = mac
        if carrier:
            s[intf]['link'] = 'detected'
            s[intf]['speed'] = speed
    return s
def cleanup_flows(bridge_name, interface):
    """
    flows of which ports do not exist any more get removed (generic cleanup)
    @param bridge_name: bridge whose flow table is pruned
    @param interface: bridge/interface whose port list defines "active"
    """
    # BUG FIX: list_ports_in_of() requires its bridge argument; it was called
    # with none, raising TypeError on every invocation.
    flowports = list_ports_in_of(bridge_name)
    activeports = [int(get_vswitch_port(x)) for x in list_ports(interface)]
    ap = set(activeports)
    todelete = [x for x in flowports if x not in ap]
    for i in todelete:
        clear_vswitch_rules(bridge_name, i)
def list_ports_in_of(interface):
    """
    list ports in openFlow tables
    @param interface: bridge whose flow table is dumped
    @return: list of in_port numbers (ints) referenced by the flows
    """
    # extract the number out of e.g. 'in_port=12'
    ipm = re.compile('(?<=in_port\=)\d{1,5}')
    cmd = ofctl + " dump-flows " + interface
    (r, s, e) = doexec(cmd.split())
    li = [line.strip() for line in s.readlines() if 'in_port' in line]
    ports = [int(ipm.search(x).group(0)) for x in li]
    return ports
def get_attached_mac_port(virt_vif):
    """
    @param virt_vif:
    @return: port and mac
    """
    if virt_vif:
        cmd = vsctl + ' -f table -d bare --no-heading -- --columns=ofport,external_ids list Interface ' + virt_vif
        (r, s, e) = doexec(cmd.split())
        o = s.readline().split()
        port = o.pop(0)
        # external_ids entry is 'key=value'; keep the value part
        mac = o.pop(0).split('=')[1]
        return port, mac
    else:
        # NOTE(review): exits the process (with status 0) instead of raising —
        # kept as-is to preserve behavior.
        send_to_syslog("No matching virt port found in get_attached_mac_port(virt_vif)")
        sys.exit(0)
def get_bridge_name(vif_name):
    """Return the OVS bridge that *vif_name* is plugged into."""
    rc, out, err = doexec([vsctl, "port-to-br", vif_name])
    return out.readline().strip()
def list_ports(bridge_name):
    """Return the names of every port attached to *bridge_name*."""
    rc, out, err = doexec([vsctl, "list-ports", bridge_name])
    return [entry.strip() for entry in out.readlines()]
def get_vswitch_port(vif_name):
    """Return the OpenFlow port number of *vif_name* (as a string)."""
    rc, out, err = doexec([vsctl, "get", "interface", vif_name, "ofport"])
    return out.readline().strip()
def clear_vswitch_rules(bridge_name, port):
    """
    Delete every OpenFlow rule on *bridge_name* matching in_port=*port*.
    @param bridge_name:
    @param port:
    """
    doexec([ofctl, "del-flows", bridge_name, "in_port=%s" % port])
if __name__ == '__main__':
    # Smoke test: dump every NIC's parsed parameters.
    a = get_nic_params()
    pprint_dict(a)
| {
"repo_name": "Jumpscale/jumpscale6_core",
"path": "lib/JumpScale/lib/ovsnetconfig/VXNet/systemlist.py",
"copies": "1",
"size": "9345",
"license": "bsd-2-clause",
"hash": -6103297761821517000,
"line_mean": 29.0482315113,
"line_max": 114,
"alpha_frac": 0.5472445158,
"autogenerated": false,
"ratio": 3.4080962800875274,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44553407958875274,
"avg_score": null,
"num_lines": null
} |
__author__ = 'delandtj'
from utils import *
class VXlan(object):
    """A VXLAN tunnel interface named after its network id."""

    def __init__(self, oid, backend='vxbackend'):
        # Multicast group 239.0.<hi>.<lo> is derived from the 16-bit net id.
        hi, lo = oid.oid >> 8, oid.oid & 0xFF
        self.multicastaddr = '239.0.%s.%s' % (hi, lo)
        self.id = oid
        self.backend = backend
        self.name = 'vx-' + oid.tostring()

    def create(self):
        createVXlan(self.name, self.id.oid, self.multicastaddr, self.backend)

    def destroy(self):
        destroyVXlan(self.name)

    def no6(self):
        disable_ipv6(self.name)

    def verify(self):
        pass
class Bridge(object):
    """Thin wrapper around the OVS bridge helpers in utils."""

    def __init__(self, name):
        self.name = name

    def create(self):
        createBridge(self.name)

    def destroy(self):
        destroyBridge(self.name)

    def connect(self, interface):
        connectIfToBridge(self.name, interface)

    def no6(self):
        disable_ipv6(self.name)
class VXBridge(Bridge):
    """Bridge for a VXLAN network, named 'space_<hexid>'."""

    def __init__(self, oid):
        # Intentionally does not call Bridge.__init__ (which takes a plain
        # name); the name is derived from the net id instead.
        # The original `assert isinstance(oid.tostring, object)` was removed:
        # everything is an instance of object, so it could never fail.
        self.name = 'space_' + oid.tostring()
class BondBridge(object):
    """A bridge whose uplink is an LACP bond over several interfaces."""

    def __init__(self, name, interfaces, bondname=None, trunks=None):
        self.name = name
        self.interfaces = interfaces
        self.trunks = trunks
        # BUG FIX: the condition was inverted — an explicitly passed bondname
        # was discarded in favor of the default, and omitting it left
        # self.bondname as None. Default the name only when none is given.
        if bondname is None:
            self.bondname = "%s-Bond" % self.name
        else:
            self.bondname = bondname

    def create(self):
        createBridge(self.name)
        addBond(self.name, self.bondname, self.interfaces, trunks=self.trunks)

    def destroy(self):
        destroyBridge(self.name)
class NameSpace(object):
    """Thin wrapper around the network-namespace helpers in utils."""

    def __init__(self, name):
        self.name = name

    def create(self):
        createNameSpace(self.name)

    def destroy(self):
        destroyNameSpace(self.name)

    def connect(self, interface):
        connectIfToNameSpace(self.name, interface)
class VXNameSpace(NameSpace):
    # Namespace for a VXLAN network, named 'ns-<hexid>'.
    def __init__(self, oid):
        # Does not call NameSpace.__init__; the name comes from the net id.
        self.name = 'ns-' + oid.tostring()
class NetID(object):
    """A 16-bit network id, accepted either as an int or a hex string."""

    def __init__(self, oid):
        # Hex strings like 'ff33' are parsed base-16; ints pass through.
        self.oid = int(oid, 16) if type(oid) is str else oid

    def tostring(self):
        """Return the zero-padded 4-digit lowercase hex form."""
        return '%04x' % self.oid
class VethPair(object):
    """A veth pair for a network id; left plugs into the bridge, right into
    the namespace."""

    def __init__(self, oid):
        suffix = oid.tostring()
        self.left = 'veth-left-%s' % suffix
        self.right = 'veth-right-%s' % suffix

    def create(self):
        createVethPair(self.left, self.right)
        # left has never an ip
        disable_ipv6(self.left)

    def destroy(self):
        destroyVethPair(self.left)
| {
"repo_name": "Jumpscale/jumpscale6_core",
"path": "lib/JumpScale/lib/ovsnetconfig/VXNet/netclasses.py",
"copies": "1",
"size": "2587",
"license": "bsd-2-clause",
"hash": -6823635308117742000,
"line_mean": 25.1313131313,
"line_max": 77,
"alpha_frac": 0.5929648241,
"autogenerated": false,
"ratio": 3.481830417227456,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4574795241327456,
"avg_score": null,
"num_lines": null
} |
__author__ = 'delandtj'
from VXNet.utils import *
import fcntl
import re
from netaddr import *
def acquire_lock(path):
    """
    little tool to do EAGAIN until lockfile released
    :param path:
    :return: path
    """
    lock_file = open(path, 'w')
    # Spin with 1-second backoff until an exclusive, non-blocking flock on the
    # file succeeds; the open file object is returned so the lock stays held.
    while True:
        send_to_syslog("attempting to acquire lock %s" % path)
        try:
            fcntl.lockf(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
            send_to_syslog("acquired lock %s" % path)
            return lock_file
        except IOError as e:
            send_to_syslog("failed to acquire lock %s because '%s' - waiting 1 second" % (path, e))
            time.sleep(1)
def wait_for_if(interface):
    """Placeholder: waiting for interface readiness is not implemented yet."""
    pass
def pprint_dict(a):
    """Pretty-print any mapping as a plain dict."""
    from pprint import pprint
    plain = dict(a.items())
    pprint(plain)
def get_nic_params():
    """Inventory every NIC (host plus all namespaces) by parsing
    `ip -o -d link show`; returns {name: {'params': ..., 'detail': ...,
    'namespace': ...}}."""
    nictypes = {}
    bridges = get_all_bridges()
    namespaces = get_all_namespaces()

    def populatenictypes(lines, namespace=None):
        # Classify each link line as vxlan / veth / tun / bridge / plain NIC.
        for l in lines:
            if not 'state' in l:
                continue
            entry = l.strip().split()
            intf = entry[1].rstrip(':')
            if intf == 'lo':
                continue
            nictypes[intf] = {}
            if 'vxlan' in entry:
                want = ('state', 'id', 'mtu', 'id', 'group', 'dev', 'port')
                params = parse_ipl_line(entry, want)
                params['type'] = 'vxlan'
            elif 'veth' in entry:
                want = ('state', 'id', 'mtu')
                params = parse_ipl_line(entry, want)
                params['peer'] = find_veth_peer(intf, ns=namespace)
                params['type'] = 'veth'
            elif 'tun' in entry:
                want = ('state', 'id', 'mtu')
                params = parse_ipl_line(entry, want)
                params['type'] = 'tun'
            elif intf in bridges:
                want = ('state', 'id', 'mtu')
                params = parse_ipl_line(entry, want)
                params['type'] = 'bridge'
            else:
                want = ('state', 'id', 'mtu')
                params = parse_ipl_line(entry, want)
            nictypes[intf]['params'] = params
            if namespace is None:
                # sysfs detail is only reachable for host interfaces
                nictypes[intf]['detail'] = get_nic_detail(intf)
                nictypes[intf]['namespace'] = None
            else:
                nictypes[intf]['namespace'] = namespace
        return nictypes

    # local
    cmd = '%s -o -d link show ' % ip
    intflist = dobigexec(cmd.split())
    lines = intflist[0].decode().splitlines()
    nictypes = populatenictypes(lines)
    # all namespaces
    for ns in namespaces:
        cmd = '%s netns exec %s %s -o -d link show' % (ip, ns, ip)
        (r, s, e) = doexec(cmd.split())
        lines = s.readlines()
        nictypes = dict(list(populatenictypes(lines, namespace=ns).items()) + list(nictypes.items()))
    return nictypes
def parse_ipl_line(line, params):
    """
    Pull selected NIC settings out of one tokenized `ip -o -d link show` line.

    :param line: list of tokens from a single output line
    :param params: tuple of keywords to extract
    :return: dict mapping each requested keyword to its value
    """
    settings = {}
    for key in params:
        if key == 'state':
            # Flags live in the third token, e.g. '<BROADCAST,MULTICAST,UP>'.
            settings[key] = line[2].lstrip('<').rstrip('>').split(',')
        elif key == 'id':
            # The interface index is the first token, e.g. '2:'.
            settings[key] = line[0].rstrip(':')
        else:
            # Value follows its keyword; may raise if the keyword is absent.
            settings[key] = line[line.index(key) + 1]
    return settings
def get_nic_detail(interface):
    """
    Read sysfs/ethtool facts for `interface`.

    :return: [type, mac, carrier, speed] where type is 'PHYS', 'VIRT' or
             'UNKNOWN'; carrier/speed stay None unless the NIC is physical.
    """
    prefix = '/sys/class/net'
    # every interface has a mac
    carrier = None
    speed = None
    peer = None  # NOTE(review): assigned but never used or returned
    # TODO *2 Should it give errors if paths don't exist?
    addr = ''
    if os.path.exists(os.path.join(prefix, interface, "address")):
        with open(os.path.join(prefix, interface, "address")) as f:
            addr = f.readline().strip()
    # if linked to somewhere in pci it prolly is physical
    typ = 'UNKNOWN'
    if os.path.exists(os.path.join(prefix, interface)):
        if 'pci' in os.readlink(os.path.join(prefix, interface)):
            typ = 'PHYS'
        elif 'virtual' in os.readlink(os.path.join(prefix, interface)):
            typ = 'VIRT'
    if typ == 'PHYS':
        # verify if link has carrier
        (r, s, e) = doexec(['ethtool', interface])
        # Link Detected and speed
        out = s.readlines()
        for i in out:
            string = i.strip().split(':')
            if string[0] == 'Link detected':
                carrier = True if string[1].strip() == 'yes' else False
        if carrier == True:
            # Only physical NICs with carrier report a meaningful speed.
            for i in out:
                string = i.strip().split(':')
                if string[0] == 'Speed':
                    speed = string[1].strip()
    return [typ, addr, carrier, speed]
def find_veth_peer(interface, ns=None):
    """
    Look up the peer ifindex of a veth interface via `ethtool -S`.

    @param interface: veth interface name
    @param ns: optional network namespace to run the query in
    @return: peer ifindex as int, or None when not found
    """
    cmd = '%s -S %s' % (ethtool, interface)
    if ns is not None:
        cmd = '%s netns exec %s ' % (ip, ns) + cmd
    r, s, e = doexec(cmd.split())
    indexes = [int(entry.split(':')[1].rstrip())
               for entry in s.readlines() if 'ifindex' in entry]
    return indexes[0] if indexes else None
def add_ips_to(physlayout):
    """
    Merge IP address information into a physical layout dict.

    Interfaces that show up in the global address list get their 'ipaddrs'
    attached directly; the remaining layout entries are copied over, with
    namespaced interfaces queried per namespace.
    """
    merged = {}
    addressed = get_ip_addrs()
    for name in addressed:  # you can have interfaces without ip
        merged[name] = physlayout[name]
        merged[name]['ipaddrs'] = addressed[name]
    # bring over whatever had no global address
    for name in physlayout:
        if name in merged:
            continue
        merged[name] = physlayout[name]
        if physlayout[name]['namespace'] is not None:
            merged[name]['ipaddrs'] = get_ip_addrs(namespace=physlayout[name]['namespace'])
    return merged
def get_ip_addrs(onlypermanent=False, namespace=None):
    """
    Collect addresses per interface from `ip -o addr show`.

    @param onlypermanent: skip addresses whose line lacks 'forever'
    @param namespace: optional namespace to query instead of the host
    @return: dict of iface -> {'ipaddrs': [IPNetwork, ...]} (lo excluded)
    """
    if namespace is None:
        cmd = '%s -o addr show' % ip
    else:
        cmd = '%s netns exec %s %s -o addr show' % (ip, namespace, ip)
    (r, s, e) = doexec(cmd.split())
    addrs = {}
    for raw in s.readlines():
        fields = raw.strip().split()
        if onlypermanent and 'forever' not in raw:
            continue
        name = fields[1].rstrip(':')
        if name == 'lo':
            continue
        entry = addrs.setdefault(name, {'ipaddrs': []})
        entry['ipaddrs'].append(IPNetwork(fields[3]))
    return addrs
def isup(interface):
    """
    Return True when `interface` reports the UP flag in `ip -o link show`.

    Bug fix: the original formatted a two-placeholder string with a single
    value (`'%s -o link show dev %s' % interface`), which raises TypeError
    on every call; the `ip` binary path is now supplied as well.
    """
    cmd = '%s -o link show dev %s' % (ip, interface)
    r, s, e = doexec(cmd.split())
    line = s.readlines()
    l = line[0].strip().split()
    # Flags live in the third token, e.g. '<BROADCAST,UP,LOWER_UP>'.
    state = l[2].lstrip('<').rstrip('>').split(',')
    return 'UP' in state
def getnetworkstructure(onlypermanent=True, without_ip=False):
    """
    Build a dict keyed by interface name with its id, per-family address
    lists (keyed by the 'inet'/'inet6' token), NIC type, MAC and speed.

    @param onlypermanent: skip non-permanent addresses (lines lacking 'forever')
    @param without_ip: when True, disables the permanence filter entirely
    @return: dict of interface settings
    """
    (r, s, e) = doexec('ip -o addr show'.split())
    interfaces = s.readlines()
    # NOTE(review): `s` is reused as the result dict, shadowing the stdout
    # handle read just above.
    s = {}
    for l in interfaces:
        i = l.split()
        if not 'forever' in l and onlypermanent and not without_ip:
            continue
        id = re.match('\d+', i[0]).group()
        intf = i[1]
        inet = i[2]
        ipstr = i[3]
        if intf not in s:
            s[intf] = {}
            s[intf]['id'] = id
        if inet not in s[intf]:
            s[intf][inet] = []
        s[intf][inet].append(IPNetwork(ipstr))
        # sysfs/ethtool detail is re-fetched for every address line.
        nictype, mac, carrier, speed = get_nic_detail(intf)
        s[intf]['nictype'] = nictype
        s[intf]['mac'] = mac
        if carrier:
            s[intf]['link'] = 'detected'
            s[intf]['speed'] = speed
    return s
def cleanup_flows(bridge_name, interface):
    """
    Drop OpenFlow rules whose in_port no longer exists (generic cleanup).

    Bug fix: the original called list_ports_in_of() with no argument even
    though it requires the switch to dump flows from; the bridge whose
    rules are being pruned is now passed.

    @param bridge_name: bridge whose flow table is inspected and pruned
    @param interface: switch whose current ports count as active
    """
    flowports = list_ports_in_of(bridge_name)
    activeports = [int(get_vswitch_port(x)) for x in list_ports(interface)]
    ap = set(activeports)
    todelete = [x for x in flowports if x not in ap]
    for i in todelete:
        clear_vswitch_rules(bridge_name, i)
def list_ports_in_of(interface):
    """
    List the in_port numbers referenced by the OpenFlow table of `interface`.

    @return: list of port numbers (int)
    """
    port_pattern = re.compile('(?<=in_port\=)\d{1,5}')
    dump_cmd = ofctl + " dump-flows " + interface
    (r, s, e) = doexec(dump_cmd.split())
    flow_lines = [entry.strip() for entry in s.readlines() if 'in_port' in entry]
    return [int(port_pattern.search(entry).group(0)) for entry in flow_lines]
def get_attached_mac_port(virt_vif):
    """
    Resolve the ofport and MAC address attached to a virtual interface.

    Exits the process when no interface name is given (original behaviour).
    @param virt_vif: interface name
    @return: (port, mac) tuple of strings
    """
    if not virt_vif:
        send_to_syslog("No matching virt port found in get_attached_mac_port(virt_vif)")
        sys.exit(0)
    cmd = vsctl + ' -f table -d bare --no-heading -- --columns=ofport,external_ids list Interface ' + virt_vif
    (r, s, e) = doexec(cmd.split())
    fields = s.readline().split()
    port = fields.pop(0)
    # external_ids come back as key=value; keep only the value.
    mac = fields.pop(0).split('=')[1]
    return port, mac
def get_bridge_name(vif_name):
    """
    Return the bridge that `vif_name` is a port of.

    @param vif_name: port name
    @return: bridge name (str)
    """
    out = doexec([vsctl, "port-to-br", vif_name])[1]
    return out.readline().strip()
def list_ports(bridge_name):
    """
    @param bridge_name:
    @return: list of all port names on the bridge
    """
    out = doexec([vsctl, "list-ports", bridge_name])[1]
    return [entry.strip() for entry in out.readlines()]
def get_vswitch_port(vif_name):
    """
    @param vif_name:
    @return: the interface's ofport number, as the string ovs-vsctl prints
    """
    out = doexec([vsctl, "get", "interface", vif_name, "ofport"])[1]
    return out.readline().strip()
def clear_vswitch_rules(bridge_name, port):
    """
    Delete every OpenFlow rule on `bridge_name` matching in_port=`port`.

    @param bridge_name:
    @param port:
    """
    # Best effort: the ofctl return code is deliberately ignored.
    doexec([ofctl, "del-flows", bridge_name, "in_port=%s" % port])
if __name__ == '__main__':
    # Manual smoke test: dump the detected NIC parameter map for this host.
    a = get_nic_params()
    pprint_dict(a)
| {
"repo_name": "Jumpscale/jumpscale_core8",
"path": "lib/JumpScale/sal/openvswitch/VXNet/systemlist.py",
"copies": "1",
"size": "9763",
"license": "apache-2.0",
"hash": -8516178205446831000,
"line_mean": 28.5848484848,
"line_max": 114,
"alpha_frac": 0.5375396907,
"autogenerated": false,
"ratio": 3.4669744318181817,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4504514122518182,
"avg_score": null,
"num_lines": null
} |
__author__ = 'delandtj'
from VXNet.utils import *
class VXlan:
    """A VXLAN interface derived from a NetID, bound to a backend device."""

    def __init__(self, oid, backend='vxbackend'):
        """
        @param oid: NetID-like object exposing .oid (int) and .tostring()
        @param backend: device the vxlan is created on top of
        """
        # Fix: the original named this helper `bytes`, shadowing the builtin.
        def hi_lo(num):
            # Split a 16-bit id into its high and low bytes.
            return num >> 8, num & 0xFF
        # Fixed multicast scheme: 239.0.<hi>.<lo> of the network id.
        self.multicastaddr = '239.0.%s.%s' % hi_lo(oid.oid)
        self.id = oid
        self.backend = backend
        self.name = 'vx-' + oid.tostring()

    def create(self):
        createVXlan(self.name, self.id.oid, self.multicastaddr, self.backend)

    def destroy(self):
        destroyVXlan(self.name)

    def no6(self):
        disable_ipv6(self.name)

    def verify(self):
        # TODO(review): verification was never implemented upstream.
        pass
class Bridge:
    """Thin wrapper around an Open vSwitch bridge identified by name."""

    def __init__(self, name):
        self.name = name

    def create(self):
        """Create the bridge on the vswitch."""
        createBridge(self.name)

    def destroy(self):
        """Remove the bridge from the vswitch."""
        destroyBridge(self.name)

    def connect(self, interface):
        """Plug `interface` into this bridge."""
        connectIfToBridge(self.name, interface)

    def no6(self):
        """Disable IPv6 autoconfiguration on the bridge device."""
        disable_ipv6(self.name)
class VXBridge(Bridge):
    # Bridge whose name encodes a NetID: 'space_<hexid>'.
    def __init__(self, oid):
        # Duck-type check: accessing oid.tostring raises AttributeError when
        # missing; the isinstance(..., object) test itself is always true and
        # the whole line disappears under `python -O`.  NOTE(review).
        assert isinstance(oid.tostring, object)
        # Intentionally does not call Bridge.__init__; the name is derived here.
        self.name = 'space_' + oid.tostring()
class BondBridge:
    """An OVS bridge whose uplink is a bonded set of interfaces."""

    def __init__(self, name, interfaces, bondname=None, trunks=None):
        """
        @param name: bridge name
        @param interfaces: interfaces aggregated into the bond
        @param bondname: explicit bond port name; defaults to '<name>-Bond'
        @param trunks: allowed VLANs, passed through to addBond
        """
        self.name = name
        self.interfaces = interfaces
        self.trunks = trunks
        # Bug fix: the original condition was inverted - it discarded any
        # caller-supplied bondname and stored None when it was omitted.
        if bondname is None:
            self.bondname = "%s-Bond" % self.name
        else:
            self.bondname = bondname

    def create(self):
        createBridge(self.name)
        addBond(self.name, self.bondname, self.interfaces, trunks=self.trunks)

    def destroy(self):
        destroyBridge(self.name)
class NameSpace:
    """Wrapper around an `ip netns` network namespace."""

    def __init__(self, name):
        self.name = name

    def create(self):
        """Create the namespace."""
        createNameSpace(self.name)

    def destroy(self):
        """Delete the namespace."""
        destroyNameSpace(self.name)

    def connect(self, interface):
        """Move `interface` into this namespace."""
        connectIfToNameSpace(self.name, interface)
class VXNameSpace(NameSpace):
    """Namespace whose name encodes a NetID: 'ns-<hexid>'."""

    def __init__(self, oid):
        self.name = 'ns-%s' % oid.tostring()
class NetID:
    """Network id that accepts either an int or a hex string like 'ff00'."""

    def __init__(self, oid):
        # Hex strings are parsed base-16; ints pass straight through.
        self.oid = int(oid, 16) if isinstance(oid, str) else oid

    def tostring(self):
        """Return the id as a zero-padded 4-digit hex string."""
        return '%04x' % self.oid
class VethPair:
    """A veth pair named after a NetID; the left end never carries an IP."""

    def __init__(self, oid):
        suffix = oid.tostring()
        self.left = 'veth-left-%s' % suffix
        self.right = 'veth-right-%s' % suffix

    def create(self):
        """Create both ends and keep IPv6 off the left one."""
        createVethPair(self.left, self.right)
        # left has never an ip
        disable_ipv6(self.left)

    def destroy(self):
        """Deleting one end of a veth pair removes both."""
        destroyVethPair(self.left)
| {
"repo_name": "Jumpscale/jumpscale_core8",
"path": "lib/JumpScale/sal/openvswitch/VXNet/netclasses.py",
"copies": "1",
"size": "2582",
"license": "apache-2.0",
"hash": -8023947589560889000,
"line_mean": 20.5166666667,
"line_max": 78,
"alpha_frac": 0.5836560806,
"autogenerated": false,
"ratio": 3.517711171662125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9601367252262125,
"avg_score": 0,
"num_lines": 120
} |
__author__ = 'delandtj'
import os
import os.path
import re
import subprocess
import sys
import syslog
import time
command_name = sys.argv[0]
vsctl = "/usr/bin/ovs-vsctl"
ofctl = "/usr/bin/ovs-ofctl"
ip = "/sbin/ip"
ethtool = "/sbin/ethtool"
PHYSMTU = 2000
# TODO : errorhandling
def send_to_syslog(msg):
    # Logging is currently disabled: every variant below is commented out,
    # so this is a deliberate no-op kept for call-site compatibility.
    pass
    #print msg
    # pid = os.getpid()
    # print ("%s[%d] - %s" % (command_name, pid, msg))
    # syslog.syslog("%s[%d] - %s" % (command_name, pid, msg))
def doexec(args):
    """Execute a subprocess, then return its return code, stdout and stderr"""
    send_to_syslog(args)
    proc = subprocess.Popen(args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, bufsize=-1)
    # NOTE(review): wait() with PIPE handles can deadlock when the child
    # fills an OS pipe buffer before exiting; dobigexec() below uses
    # communicate() and is the safe choice for large-output commands.
    rc = proc.wait()
    # rc = proc.communicate()
    stdout = proc.stdout
    stderr = proc.stderr
    return rc, stdout, stderr
def dobigexec(args):
    """Execute a subprocess and return its (stdout, stderr) output tuple."""
    # Unlike doexec(), communicate() drains both pipes, so this is safe for
    # commands with large output.  Note it returns output bytes, not a
    # return code, despite the variable name.
    send_to_syslog(args)
    proc = subprocess.Popen(args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, bufsize=-1)
    rc = proc.communicate()
    return rc
def get_all_namespaces():
    """Return the names of all network namespaces known to `ip netns`."""
    r, s, e = doexec(('%s netns ls' % ip).split())
    return [entry.strip() for entry in s.readlines()]
def get_all_ifaces():
    """
    Map every network interface name to its MAC address, read from sysfs.

    @rtype : dict
    """
    netpath = '/sys/class/net'
    ifaces = {}
    for name in os.listdir(netpath):
        with open(os.path.join(netpath, name, "address")) as f:
            ifaces[name] = f.readline().strip()
    return ifaces
def get_all_bridges():
    """Return the names of all Open vSwitch bridges on this host."""
    r, s, e = doexec(('%s list-br' % vsctl).split())
    return [entry.strip() for entry in s.readlines()]
def ip_link_set(device, args):
    """Run `ip l set <device> <args>`, e.g. args='mtu 1500 up'."""
    doexec(("ip l set " + device + " " + args).split())
def createBridge(name):
    """Create OVS bridge `name` (idempotent); raise RuntimeError on failure."""
    r, s, e = doexec(('%s --may-exist add-br %s' % (vsctl, name)).split())
    if r:
        raise RuntimeError("Problem with creation of bridge %s, err was: %s" % (name, e))
def destroyBridge(name):
    """Delete OVS bridge `name` if present; raise RuntimeError on failure."""
    r, s, e = doexec(('%s --if-exists del-br %s' % (vsctl, name)).split())
    if r:
        raise RuntimeError("Problem with destruction of bridge %s, err was: %s" % (name, e))
def VlanPatch(parentbridge, vlanbridge, vlanid):
    """
    Wire `vlanbridge` to `parentbridge` through a pair of patch ports,
    tagging the parent-side port with `vlanid`.  Raises RuntimeError as
    soon as either add-port transaction fails.
    """
    parentpatchport = '%s-%s' % (vlanbridge, str(vlanid))
    bridgepatchport = '%s-%s' % (parentbridge, str(vlanid))
    commands = (
        '%s add-port %s %s tag=%s -- set Interface %s type=patch options:peer=%s' % (vsctl, parentbridge, parentpatchport, vlanid, parentpatchport, bridgepatchport),
        '%s add-port %s %s -- set Interface %s type=patch options:peer=%s' % (vsctl, vlanbridge, bridgepatchport, bridgepatchport, parentpatchport),
    )
    for cmd in commands:
        r, s, e = doexec(cmd.split())
        if r:
            raise RuntimeError("Add extra vlan pair failed %s" % (e.readlines()))
def addVlanPatch(parbr,vlbr,id, mtu=None):
    """
    Create vlan bridge `vlbr` and connect it to parent bridge `parbr`
    through a patch-port pair, tagging the parent side with vlan `id`.
    Everything runs as a single ovs-vsctl transaction.
    """
    parport = "{}-{!s}".format(vlbr,id)
    brport = "{}-{!s}".format(parbr,id)
    c = "{0} add-br {1} -- add-port {1} {3} -- set Interface {3} type=patch options:peer={2}".format(vsctl,vlbr,parport,brport)
    c = c + " -- add-port {0} {2} tag={3!s} -- set Interface {2} type=patch options:peer={1}".format(parbr,brport,parport,id)
    r,s,e = doexec(c.split())
    # NOTE(review): the MTU is applied before the error check below, so it
    # is attempted even when the vsctl transaction already failed.
    if mtu:
        ip_link_set(vlbr,'mtu {0}'.format(mtu))
    if r:
        raise RuntimeError("Add extra vlan pair failed %s" % (e.readlines()))
def createNameSpace(name):
    """Create network namespace `name` unless it already exists (then log)."""
    if name in get_all_namespaces():
        send_to_syslog('Namespace %s already exists, not creating' % name)
        return
    r, s, e = doexec(('%s netns add %s' % (ip, name)).split())
def destroyNameSpace(name):
    """Delete network namespace `name` if it exists; otherwise just log."""
    if name not in get_all_namespaces():
        send_to_syslog('Namespace %s doesn\'t exist, nothing done ' % name)
        return
    r, s, e = doexec(('%s netns delete %s' % (ip, name)).split())
def createVethPair(left,right):
    """
    Create a veth pair, bring both ends up and disable IPv6 on the left end
    (the right end may later move into a namespace).
    """
    cmd = '%s link add %s type veth peer name %s' %(ip, left, right)
    allifaces = get_all_ifaces()
    if left in allifaces or right in allifaces:
        # one of them already exists
        # NOTE(review): this only logs; creation is still attempted below.
        send_to_syslog("Problem with creation of vet pair %s, %s :one of them exists" %(left,right))
    r,s,e = doexec(cmd.split())
    # wait for it to come up
    time.sleep(.2)
    ip_link_set(left,'up')
    ip_link_set(right,'up') # when sent into namespace, it'll be down again
    disable_ipv6(left) # not right, as it can be used in a namespace
def destroyVethPair(left):
    """Delete a veth pair via its left end; raise RuntimeError on failure."""
    r, s, e = doexec(('%s link del %s ' % (ip, left)).split())
    if r:
        raise RuntimeError("Problem with destruction of Veth pair %s, err was: %s" % (left, e))
def createVXlan(vxname,vxid,multicast,vxbackend):
    """
    Always brought up too
    Created with no protocol, and upped (no ipv4, no ipv6)
    Fixed standard : 239.0.x.x, id
    # 0000-fe99 for customer vxlans, ff00-ffff for environments
    MTU of VXLAN = 1500
    """
    # NOTE(review): uses bare 'ip' instead of the module-level binary path,
    # and the failure check runs only after the link was already configured;
    # failure is logged, not raised.
    cmd = 'ip link add %s type vxlan id %s group %s ttl 60 dev %s' % (vxname, vxid, multicast, vxbackend)
    r,s,e = doexec(cmd.split())
    disable_ipv6(vxname)
    ip_link_set(vxname,'mtu 1500 up')
    if r:
        send_to_syslog("Problem with creation of vxlan %s, err was: %s" % (vxname ,e.readlines()))
def destroyVXlan(name):
    # Delete a vxlan link by name.
    cmd = '%s link del %s ' %(ip, name)
    r,s,e = doexec(cmd.split())
    if r:
        send_to_syslog("Problem with destruction of Veth pair %s, err was: %s" % (name,e.readlines()))
        # NOTE(review): terminates the whole process on failure instead of
        # raising - callers cannot recover from this.
        exit(1)
def addIPv4(interface, ipobj, namespace=None):
    """
    Add an IPv4 address to `interface`, optionally inside a namespace.

    @param ipobj: IPNetwork-like object exposing .ip and .prefixlen
    @return: (returncode, stderr) from the ip invocation
    """
    prefix = ipobj.prefixlen
    addr = ipobj.ip
    # if ip exists on interface, we assume all ok
    if namespace is not None:
        cmd = '%s netns exec %s ip addr add %s/%s dev %s' % (ip, namespace, addr, prefix, interface)
    else:
        cmd = '%s addr add %s/%s dev %s' % (ip, addr, prefix, interface)
    r, s, e = doexec(cmd.split())
    if r:
        send_to_syslog('Could not add IP %s to interface %s ' % (addr, interface))
    return r, e
def addIPv6(interface, ipobj, namespace=None):
    """
    Add an IPv6 address to `interface`, optionally inside a namespace.

    Bug fix: the namespace branch referenced an undefined global
    `allnamespaces`, raising NameError whenever a namespace was supplied;
    it now queries get_all_namespaces() directly.

    @param ipobj: IPNetwork-like object exposing .ip and .prefixlen
    @return: (returncode, stderr) from the ip invocation
    """
    netmask = ipobj.prefixlen
    ipv6addr = ipobj.ip
    # if ip exists on interface, we assume all ok
    if namespace is not None and namespace in get_all_namespaces():
        cmd = '%s netns exec %s ip addr add %s/%s dev %s' % (ip, namespace, ipv6addr, netmask, interface)
    else:
        cmd = '%s addr add %s/%s dev %s' % (ip, ipv6addr, netmask, interface)
    r, s, e = doexec(cmd.split())
    if r:
        send_to_syslog('Could not add IP %s to interface %s ' % (ipv6addr, interface))
    return r, e
def connectIfToBridge(bridge, interface):
    """Attach `interface` to `bridge`, dropping any stale port entry first."""
    doexec(('%s --if-exists del-port %s %s' % (vsctl, bridge, interface)).split())
    r, s, e = doexec(('%s --may-exist add-port %s %s' % (vsctl, bridge, interface)).split())
    if r:
        raise RuntimeError('Error adding port %s to bridge %s' % (interface, bridge))
def connectIfToNameSpace(nsname, interface):
    """Move `interface` into namespace `nsname`; raise RuntimeError on failure."""
    r, s, e = doexec(('%s link set %s netns %s' % (ip, interface, nsname)).split())
    if r:
        raise RuntimeError("Error moving %s to namespace %s" % (interface, nsname))
def disable_ipv6(interface):
    """Turn off IPv6 on `interface` via sysctl; no-op for unknown interfaces."""
    if interface not in get_all_ifaces():
        return
    r, s, e = doexec(('sysctl -w net.ipv6.conf.%s.disable_ipv6=1' % interface).split())
def setMTU(interface, mtu):
    """Set the MTU of `interface`; raise RuntimeError when the ip call fails."""
    r, s, e = doexec(('ip link set %s mtu %s' % (interface, mtu)).split())
    if r:
        raise RuntimeError('Could not set %s to MTU %s' % (interface, mtu))
def addBond(bridge, bondname, iflist, lacp="active", lacp_time="fast", mode="balance-tcp", trunks=None):
    """
    Add a bond to a bridge
    (bond_mode=balance-tcp lacp=active bond_fake_iface=false
     other_config:lacp-time=fast bond_updelay=2000 bond_downdelay=400)

    Bug fixes relative to the original:
      * `re` is now actually imported at module level (it never was);
      * the ovs-vsctl binary was missing from the built command, so the
        bare "add-bond ..." string could never execute;
      * `tr` was only defined when `trunks` was a string, crashing for the
        documented list/tuple form;
      * a string `iflist` was iterated character-by-character when
        disabling IPv6;
      * failure was detected via truthiness of the stderr pipe object
        (always true, so the call always raised); the return code is
        checked instead.

    :param bridge: BridgeName (string)
    :param bondname: Bondname (string)
    :param iflist: interfaces, as a list/tuple or a delimited string
    :param lacp: "active" or "passive"
    :param lacp_time: mode "fast" or "slow"
    :param mode: balance-tcp, balance-slb, active-passive
    :param trunks: allowed VLANS (list, tuple or delimited string)
    """
    interfaces = re.split(r'\W+', iflist) if isinstance(iflist, str) else list(iflist)
    buildup = "%s add-bond %s %s " % (vsctl, bridge, bondname)
    buildup += " ".join(set(interfaces)) + " lacp=%s " % lacp
    buildup += " -- set Port %s bond_mode=%s bond_fake_iface=false " % (bondname, mode)
    buildup += "other_config:lacp-time=%s bond_updelay=2000 bond_downdelay=400 " % lacp_time
    if trunks is not None:
        trunklist = re.split(r'\W+', trunks) if isinstance(trunks, str) else trunks
        buildup += "trunks=" + ",".join(str(t) for t in set(trunklist))
    # no use to autoconf ipv6, as this won't work anyway
    for intf in interfaces:
        disable_ipv6(intf)
    r, s, e = doexec(buildup.split())
    if r:
        raise RuntimeError("Could not create bond %s for bridge %s" % (bondname, bridge))
| {
"repo_name": "Jumpscale/jumpscale6_core",
"path": "lib/JumpScale/lib/ovsnetconfig/VXNet/utils.py",
"copies": "1",
"size": "8985",
"license": "bsd-2-clause",
"hash": 1908044100854457000,
"line_mean": 33.2938931298,
"line_max": 166,
"alpha_frac": 0.6208124652,
"autogenerated": false,
"ratio": 3.0436991869918697,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41645116521918696,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, unicode_literals
from django import forms
from django.forms.formsets import BaseFormSet
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, ButtonHolder, Submit, Fieldset, HTML, MultiField, Div, Field
from django.contrib.auth import get_user_model
from mmb_repo.mmb_data.models import Genre, Instrument
from .models import Band, BandMember, BandVacancy
from .app_settings import MEMBER_TYPE, CITIES
class BandVacancyForm(forms.ModelForm):
    """ModelForm for opening a BandVacancy (instrument + membership type)."""
    # Placeholder queryset; the real one is assigned in __init__ -
    # presumably to defer the DB query until instantiation.  TODO confirm.
    instrument = forms.ModelChoiceField(queryset=Instrument.objects.none())
    type = forms.ChoiceField(choices=MEMBER_TYPE)

    class Meta:
        model = BandVacancy
        fields = ('instrument', 'type')

    def __init__(self, *args, **kwargs):
        super(BandVacancyForm, self).__init__(*args, **kwargs)
        self.fields['instrument'].queryset = Instrument.objects.all()
        # crispy-forms layout: the two fields plus a submit button.
        self.helper = FormHelper()
        self.helper.layout = Layout(
            'instrument',
            'type',
            ButtonHolder(
                Submit('submit', 'Create Vacancy', css_class='button white')
            )
        )
class BandMemberForm(forms.Form):
    """Plain form binding a user + instrument + membership type to a band."""
    # NOTE(review): a tuple is not a valid queryset; this only works because
    # __init__ replaces it before the field is evaluated - confirm.
    member = forms.ModelChoiceField(queryset=((0,0),))
    instrument = forms.ModelChoiceField(queryset=Instrument.objects.none())
    type = forms.ChoiceField(choices=MEMBER_TYPE)

    def __init__(self, *args, **kwargs):
        super(BandMemberForm, self).__init__(*args, **kwargs)
        # Real querysets are bound at instantiation time.
        self.fields['member'].queryset = get_user_model().objects.all()
        self.fields['instrument'].queryset = Instrument.objects.all()
        # Bootstrap/chosen styling applied directly to the widgets.
        self.fields['member'].widget.attrs['class'] = 'controls textInput form-control chosen'
        self.fields['instrument'].widget.attrs['class'] = 'controls textInput form-control chosen'
        self.fields['type'].widget.attrs['class'] = 'controls textInput form-control chosen'
class BandForm(forms.ModelForm):
    """ModelForm for a Band plus a free-standing multi-select genre field."""
    genre = forms.MultipleChoiceField(label='Genre',
                                      widget=forms.SelectMultiple(attrs={'class': 'controls textInput form-control chosen'}))

    class Meta:
        model = Band
        fields = ('name', 'location', 'label', 'year', 'desc')

    def __init__(self, *args, **kwargs):
        super(BandForm, self).__init__(*args, **kwargs)
        # Genre choices are loaded from the DB at instantiation time.
        self.fields['genre'].choices = [(i.genre, i.genre) for i in Genre.objects.all()]
        # Replace/patch the default widgets so every field carries the
        # bootstrap form-control classes.
        self.fields['name'].widget = forms.TextInput(attrs={'class': 'controls textInput form-control'})
        self.fields['label'].widget = forms.TextInput(attrs={'class': 'controls textInput form-control'})
        self.fields['desc'].widget = forms.Textarea(attrs={'class': 'controls textInput form-control', 'rows': 4})
        self.fields['location'].widget.attrs['class'] = 'controls textInput form-control'
        self.fields['year'].widget.attrs['class'] = 'controls textInput form-control'
class BaseBandFormset(BaseFormSet):
    def clean(self):
        """Skip cross-form validation when any member form already failed."""
        if any(self.errors):
            return
# class BandUploadSongForm(forms.ModelForm):
#
# class Meta:
# model = BandSong
# fields = ("tags", "name", "upload",)
#
# def __init__(self, *args, **kwargs):
# super(BandUploadSongForm, self).__init__(*args, **kwargs)
# self.helper = FormHelper()
# self.helper.layout = Layout(
# Field('upload', css_class="btn btn-success form-control "),
# HTML("""
# <br>
# <p>
# Name must be relevant to song, <strong>please set name accordingly.</strong></p>
# """),
# Field('name'),
# Field('tags'),
# ButtonHolder(
# Submit('submit', 'Submit', css_class='button white')
# )
# )
#
# def clean_upload(self):
# cleaned_data = super(BandUploadSongForm, self).clean()
# file = cleaned_data.get('upload',False)
# if file:
# if file._size > 15*1024*1024:
# raise ValidationError("Audio file too large ( > 15mb )")
# if not file.content_type in ["audio/mpeg","video/mp4","audio/mp3"]:
# raise ValidationError("Content-Type is not mpeg")
# if not os.path.splitext(file.name)[-1] in [".mp3",".wav",".mp4"]:
# raise ValidationError("Doesn't have proper extension")
# # Here we need to now to read the file and see if it's actually
# # a valid audio file. I don't know what the best library is to
# # to do this
# # if not some_lib.is_audio(file.content):
# # raise ValidationError("Not a valid audio file")
# return file
# else:
# raise ValidationError("Couldn't read uploaded file")
| {
"repo_name": "ajay2611/mmb",
"path": "mmb_repo/bands/forms.py",
"copies": "1",
"size": "4801",
"license": "bsd-3-clause",
"hash": 2665529842853038600,
"line_mean": 40.0341880342,
"line_max": 125,
"alpha_frac": 0.6084149136,
"autogenerated": false,
"ratio": 3.884304207119741,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9943499070928011,
"avg_score": 0.009844009958345851,
"num_lines": 117
} |
__author__ = "Dell-Ray Sackett"
__version__ = "0.1"
import pickle
from Crypto.PublicKey import RSA
from Crypto.Hash import SHA256
from Crypto.Cipher import AES
from Crypto import Random
import base64
class Message:
    """
    Container for one encrypted message plus everything needed to decrypt
    and verify it.  Designed purely to be pickled and stored.
    """

    def __init__(self, public_key, symmetric_key, signature, message):
        """
        Keyword Arguments:
        public_key -- sender's public key (importable by pyCrypto).
        symmetric_key -- asymmetrically encrypted AES-256 session key.
        signature -- signature over the message.
        message -- the encrypted payload.
        """
        self._publicKey = public_key
        self._symmetricKey = symmetric_key
        self._signature = signature
        self._message = message

    # The four fields are only ever consumed together, so a single
    # accessor is provided instead of individual getters.
    def get_message(self):
        """Return [public_key, symmetric_key, signature, message]."""
        return [self._publicKey, self._symmetricKey, self._signature, self._message]

    def dump_message(self):
        """Pickle this message and return the serialized form."""
        return pickle.dumps(self)

    @staticmethod
    def load_message(message):
        """
        Unpickle a serialized message and return the Message object.

        Mandatory Arguments:
        message -- a pickled message string.
        """
        return pickle.loads(message)
class CryptoHelper:
    """
    This class will do the encryption and decryption of a message object.
    It will be almost completely static hah!
    """
    """
    I took pad and unpad from a stack exchange post.
    http://stackoverflow.com/a/12525165
    """
    # Define the symmetric block size as a static variable.
    BS = 16

    # Static Methods
    @staticmethod
    def pad(s):
        """
        Takes string s and returns it padded to blocksize CryptoHelper.BS.
        """
        # PKCS#7-style padding: append N copies of chr(N).
        return s + (CryptoHelper.BS - len(s) % CryptoHelper.BS) * chr(
            CryptoHelper.BS - len(s) % CryptoHelper.BS)

    @staticmethod
    def unpad(s):
        """
        Takes padded string s and returns it sans padding.
        """
        # The last byte encodes the pad length.
        return s[:-ord(s[len(s) - 1:])]

    @staticmethod
    def generate_keys(filename, passphrase, modulus=2048):
        """
        Generates a RSA keypair and returns it.
        Manditory Arguments:
        filename -- The name of the file to save the key to.
        passphrase -- The passphrase for the key. If you think your key
        doesn't need a key. You are dumb.
        Optional Arguments:
        modulus -- The size modulus to use. (String, default=2048)
        """
        key = RSA.generate(modulus)
        if passphrase == "" or passphrase is None:
            raise ValueError("Passphrase cannot be empty")
        if filename[len(filename) - 4:] != ".pem":
            filename += ".pem"
        try:
            keyfile = open(filename, "w")
            pubkeyfile = open(filename[:len(filename) - 4] + "_publiconly.pem", "w")
        except Exception as e:
            raise e
        # Private key is written passphrase-protected; the public half goes
        # to a separate "_publiconly.pem" file.
        keyfile.write(key.exportKey(format="PEM", passphrase=passphrase))
        pubkeyfile.write(key.exportKey(format="PEM", pkcs=8))
        keyfile.close()
        pubkeyfile.close()
        return key

    @staticmethod
    def import_keys(filename, passphrase):
        """
        Import a key from file and return a RSA key object
        Mandatory Arguments:
        filename -- The filename of the key to import.
        passphrase -- The passphrase for your key
        """
        try:
            keyfile = open(filename, "r")
        except Exception as e:
            raise e
        return RSA.importKey(keyfile.read(), passphrase)

    @staticmethod
    def get_public_key(key_pair):
        """Return the PEM encoded public key"""
        return key_pair.publickey().exportKey()

    @staticmethod
    def symmetric_encrypt(plaintext):
        """
        Takes a string and encrypts it. Returning a tuple with the IV,
        the symmetric key (plaintext) and the encrypted
        string. The IV and the ciphertext will be concatenated together with
        the IV in front.
        Mandatory Arguments:
        plaintext -- A string to be encrypted.
        """
        paddedplaintext = CryptoHelper.pad(plaintext)
        IV = Random.new().read(AES.block_size)
        # Session key: SHA-256 digest of 1024 random bytes (32-byte AES key).
        key = SHA256.new(Random.new().read(1024)).digest()
        cryptor = AES.new(key, AES.MODE_CBC, IV)
        return (key, base64.b64encode(
            IV + cryptor.encrypt(paddedplaintext)))

    @staticmethod
    def symmetric_decrypt(key, ciphertext):
        """
        Takes a key and base64 encoded ciphertext with an IV concatenated at
        the beginning and returns a plaintext string.
        """
        decodedciphertext = base64.b64decode(ciphertext)
        # First 16 bytes are the IV prepended by symmetric_encrypt().
        IV = decodedciphertext[:16]
        cryptor = AES.new(key, AES.MODE_CBC, IV)
        return CryptoHelper.unpad(cryptor.decrypt(decodedciphertext[16:]))

    @staticmethod
    def encrypt_message(message, encryption_key_filename, signing_key_filename, signing_key_passphrase):
        """
        Takes a String message and encrypts it with the publickey from
        the RSA publickey in the file from encryptionKeyFilename. Also signs
        the message with the RSA keypair from file signingKeyFilename. Returns
        a Message object.
        Mandatory Arguments:
        message -- A message in the form of a string.
        encryptionKeyFilename -- Filename of the publickey to use for
        encryption as a String.
        signingKeyFilename -- Filename of the RSA keypair to use for
        signing the message as a string
        signingKeyPassphrase -- The passphrase to the singing keypair.
        """
        # Public-only key file: imported with an empty passphrase.
        enckey = CryptoHelper.import_keys(encryption_key_filename, "")
        sigkey = CryptoHelper.import_keys(signing_key_filename,
                                          signing_key_passphrase)
        myhash = SHA256.new()
        myhash.update(message)
        cipheredmessage = CryptoHelper.symmetric_encrypt(message)
        # NOTE(review): raw key.sign()/key.encrypt() are the textbook-RSA
        # primitives pyCrypto exposes; no padding scheme is applied here.
        messagesig = base64.b64encode(str(sigkey.sign(myhash.digest(), "")[0]))
        symmetrickey = base64.b64encode(enckey.encrypt(cipheredmessage[0], 32)[0])
        pubkey = CryptoHelper.get_public_key(sigkey)
        return Message(pubkey, symmetrickey, messagesig, cipheredmessage[1])

    @staticmethod
    def decrypt_message(message_object, decryption_key_filename, decryption_key_passphrase):
        """
        Takes a message Object and a string containing the filename of the
        decryption keypair. Decrypts and verifies the message. If the message
        is verified returns a string containing the plaintext message.
        Mandatory Arguments:
        messageObject -- A Message object containing the encrypted message.
        With senders publicKey and a signature.
        deryptionKeyFilename -- String containing the filename of the RSA
        keypair to be used for decryption.
        decryptionKeyPassphrase -- String containing the passphrase for
        decrypting the decryption key.
        """
        try:
            decryptkey = CryptoHelper.import_keys(decryption_key_filename, decryption_key_passphrase)
        except Exception as e:
            raise e
        # A list with [publicKey, symmetricKey, signature, message]
        expandedmessage = message_object.get_message()
        sigkey = RSA.importKey(expandedmessage[0])
        symmetrickey = decryptkey.decrypt(base64.b64decode(expandedmessage[1]))
        plaintext = CryptoHelper.symmetric_decrypt(symmetrickey, expandedmessage[3])
        messagehash = SHA256.new(plaintext).digest()
        # NOTE(review): long() is Python 2 only - this raises NameError on
        # Python 3.
        signature = (long(base64.b64decode(expandedmessage[2])),)
        if not sigkey.verify(messagehash, signature):
            raise ValueError("The message could not be verified")
        else:
            return plaintext
| {
"repo_name": "lospheris/steganographer",
"path": "message.py",
"copies": "1",
"size": "8337",
"license": "mit",
"hash": 5174927510728330000,
"line_mean": 33.5933609959,
"line_max": 104,
"alpha_frac": 0.6280436608,
"autogenerated": false,
"ratio": 4.494339622641509,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5622383283441509,
"avg_score": null,
"num_lines": null
} |
__author__ = 'demi'
# By Ashwath from forums
# Given a list of lists representing a n * n matrix as input,
# define a procedure that returns True if the input is an identity matrix
# and False otherwise.
# An IDENTITY matrix is a square matrix in which all the elements
# on the principal/main diagonal are 1 and all the elements outside
# the principal diagonal are 0.
# (A square matrix is a matrix in which the number of rows
# is equal to the number of columns)
def is_identity_matrix(matrix):
    """
    Return True iff `matrix` (a list of lists) is an identity matrix:
    1 everywhere on the main diagonal, 0 everywhere else.

    An empty matrix counts as an identity; a matrix whose first-row length
    differs from its row count is rejected immediately.
    """
    size = len(matrix)
    if size == 0:
        return True
    if len(matrix[0]) != size:
        return False
    return all(matrix[row][col] == (1 if row == col else 0)
               for row in range(size)
               for col in range(size))
# Test Cases (the expected output is shown after each print call):
matrix1 = [[1,0,0,0],
           [0,1,0,0],
           [0,0,1,0],
           [0,0,0,1]]
print(is_identity_matrix(matrix1))
#>>>True
matrix2 = [[1,0,0],
           [0,1,0],
           [0,0,0]]
print(is_identity_matrix(matrix2))
#>>>False
matrix3 = [[2,0,0],
           [0,2,0],
           [0,0,2]]
print(is_identity_matrix(matrix3))
#>>>False
# Non-square (3 rows of length 4) - rejected by the shape check.
matrix4 = [[1,0,0,0],
           [0,1,1,0],
           [0,0,0,1]]
print(is_identity_matrix(matrix4))
#>>>False
matrix5 = [[1,0,0,0,0,0,0,0,0]]
print(is_identity_matrix(matrix5))
#>>>False
matrix6 = [[1,0,0,0],
           [0,1,0,1],
           [0,0,1,0],
           [0,0,0,1]]
print(is_identity_matrix(matrix6))
#>>>False
matrix7 = [[1, -1, 1],
           [0, 1, 0],
           [0, 0, 1]]
print(is_identity_matrix(matrix7))
#>>>False | {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Lesson03/is_identity_matrix.py",
"copies": "1",
"size": "1735",
"license": "mit",
"hash": -6356846708647035000,
"line_mean": 19.6666666667,
"line_max": 74,
"alpha_frac": 0.5187319885,
"autogenerated": false,
"ratio": 2.9357021996615904,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8763089942129845,
"avg_score": 0.0382688492063492,
"num_lines": 84
} |
__author__ = 'demi'
import string
# Write a procedure, rotate which takes as its input a string of lower case
# letters, a-z, and spaces, and an integer n, and returns the string constructed
# by shifting each of the letters n steps, and leaving the spaces unchanged.
# Note that 'a' follows 'z'. You can use an additional procedure if you
# choose to as long as rotate returns the correct string.
# Note that n can be positive, negative or zero.
def rotate(str, n):
    """
    Shift every lowercase letter in `str` by n positions (wrapping past
    'z'); spaces are left unchanged.  n may be positive, negative or zero.
    """
    return ''.join(shift_n_letters(ch, n) for ch in str)
def shift_n_letters(letter, n):
    """
    Shift a single lowercase letter by n places, wrapping around past 'z';
    a space is returned unchanged.  n may be negative or zero.

    Bug fix: the original had stray pasted text ("Recursive Factorial")
    fused onto the `counter += 1` line, which is a syntax error.
    """
    if letter == ' ':
        return letter
    alphabet = string.ascii_lowercase
    index = alphabet.find(letter)
    if index < 0:
        # Preserve the original fallback for characters outside a-z:
        # an unknown letter behaves as position 26 (== 0 mod 26).
        index = 26
    return alphabet[(index + n) % len(alphabet)]
# Smoke tests; the expected value follows each call.
print(rotate('sarah', 13))
#>>> 'fnenu'
print(rotate('fnenu', 13))
#>>> 'sarah'
print(rotate('dave', 5))
#>>>'ifaj'
print(rotate('ifaj', -5))
#>>>'dave'
print(rotate(("zw pfli tfuv nfibj tfiivtkcp pfl jyflcu "
              "sv rscv kf ivru kyzj"), -17))
# decodes to: 'if your code works correctly you should be able to read this'
#>>> ??? | {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Lesson05/Rotate.py",
"copies": "1",
"size": "1167",
"license": "mit",
"hash": -4900166669832249000,
"line_mean": 26.1627906977,
"line_max": 80,
"alpha_frac": 0.618680377,
"autogenerated": false,
"ratio": 3.2416666666666667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43603470436666664,
"avg_score": null,
"num_lines": null
} |
__author__ = 'demi'
# 1 Gold Star
# The built-in <string>.split() procedure works
# okay, but fails to find all the words on a page
# because it only uses whitespace to split the
# string. To do better, we should also use punctuation
# marks to split the page into words.
# Define a procedure, split_string, that takes two
# inputs: the string to split and a string containing
# all of the characters considered separators. The
# procedure should return a list of strings that break
# the source string up by the characters in the
# splitlist.
def split_string(source, splitlist):
    """Split *source* at every character that appears in *splitlist*.

    Returns the non-empty fragments in left-to-right order (consecutive
    separators produce no empty strings).

    The original implementation called res.remove()/res.insert() on the
    very list it was iterating, which mutates a list mid-iteration
    (undefined visiting order); this version makes one linear pass.
    As a side effect of filtering empties consistently, an empty source
    now yields [] rather than [''].
    """
    pieces = []
    current = ''
    for ch in source:
        if ch in splitlist:
            if current:          # close the fragment, dropping empties
                pieces.append(current)
            current = ''
        else:
            current += ch
    if current:                  # trailing fragment
        pieces.append(current)
    return pieces
# Manual checks; expected results are in the #>>> comments.
out = split_string("This is a test-of the,string separation-code!", " ,!-")
print(out)
#>>> ['This', 'is', 'a', 'test', 'of', 'the', 'string', 'separation', 'code']
out = split_string("After the flood ... all the colors came out.", " .")
print(out)
#>>> ['After', 'the', 'flood', 'all', 'the', 'colors', 'came', 'out']
out = split_string("First Name,Last Name,Street Address,City,State,Zip Code",",")
print(out)
#>>>['First Name', 'Last Name', 'Street Address', 'City', 'State', 'Zip Code'] | {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Lesson04/split_string.py",
"copies": "1",
"size": "1487",
"license": "mit",
"hash": -1974656770260861700,
"line_mean": 32.0666666667,
"line_max": 81,
"alpha_frac": 0.6213853396,
"autogenerated": false,
"ratio": 3.5404761904761903,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46618615300761906,
"avg_score": null,
"num_lines": null
} |
__author__ = 'demi'
# 2 Gold Stars
# One way search engines rank pages
# is to count the number of times a
# searcher clicks on a returned link.
# This indicates that the person doing
# the query thought this was a useful
# link for the query, so it should be
# higher in the rankings next time.
# (In Unit 6, we will look at a different
# way of ranking pages that does not depend
# on user clicks.)
# Modify the index such that for each url in a
# list for a keyword, there is also a number
# that counts the number of times a user
# clicks on that link for this keyword.
# The result of lookup(index,keyword) should
# now be a list of url entries, where each url
# entry is a list of a url and a number
# indicating the number of times that url
# was clicked for this query keyword.
# You should define a new procedure to simulate
# user clicks for a given link:
# record_user_click(index,word,url)
# that modifies the entry in the index for
# the input word by increasing the count associated
# with the url by 1.
# You also will have to modify add_to_index
# in order to correctly create the new data
# structure, and to prevent the repetition of
# entries as in homework 4-5.
def record_user_click(index, keyword, url):
    """Increment the click count stored with *url* under *keyword* (in-place)."""
    for word, url_entries in index:
        if word != keyword:
            continue
        for url_entry in url_entries:
            if url in url_entry:
                url_entry[1] += 1   # url_entry is [url, click_count]
                return
def add_to_index(index, keyword, url):
    """Register *url* under *keyword* with a zero click count; skip duplicates."""
    for entry in index:
        if entry[0] != keyword:
            continue
        if any(url in url_entry for url_entry in entry[1]):
            return               # already indexed for this keyword
        entry[1].append([url, 0])
        return
    # keyword not seen before: start a fresh entry
    index.append([keyword, [[url, 0]]])
def get_page(url):
    """Return canned HTML for the four known test urls, '' otherwise."""
    # NOTE(review): the bare `except:` is almost certainly dead code here —
    # the string comparisons below can't realistically raise — and a bare
    # except would also swallow KeyboardInterrupt; consider removing it.
    try:
        if url == "http://www.udacity.com/cs101x/index.html":
            return '''<html> <body> This is a test page for learning to crawl!
<p> It is a good idea to
<a href="http://www.udacity.com/cs101x/crawling.html">
learn to crawl</a> before you try to
<a href="http://www.udacity.com/cs101x/walking.html">walk</a> or
<a href="http://www.udacity.com/cs101x/flying.html">fly</a>.</p></body></html>'''
        elif url == "http://www.udacity.com/cs101x/crawling.html":
            return '''<html> <body> I have not learned to crawl yet, but I am
quite good at <a href="http://www.udacity.com/cs101x/kicking.html">kicking</a>.
</body> </html>'''
        elif url == "http://www.udacity.com/cs101x/walking.html":
            return '''<html> <body> I cant get enough
<a href="http://www.udacity.com/cs101x/index.html">crawling</a>!</body></html>'''
        elif url == "http://www.udacity.com/cs101x/flying.html":
            return '<html><body>The magic words are Squeamish Ossifrage!</body></html>'
    except:
        return ""
    return ""
def union(a, b):
    """Destructively add to list *a* each element of *b* it does not contain."""
    for elem in b:
        if elem in a:
            continue
        a.append(elem)
def get_next_target(page):
    """Find the first '<a href=' link in *page*.

    Returns (url, index_of_closing_quote); (None, 0) when no link remains.
    """
    anchor = page.find('<a href=')
    if anchor == -1:
        return None, 0
    open_quote = page.find('"', anchor)
    close_quote = page.find('"', open_quote + 1)
    return page[open_quote + 1:close_quote], close_quote
def get_all_links(page):
    """Return every href url in *page*, in document order."""
    found = []
    url, endpos = get_next_target(page)
    while url:
        found.append(url)
        page = page[endpos:]        # continue scanning past this link
        url, endpos = get_next_target(page)
    return found
def crawl_web(seed):
    """Crawl every page reachable from *seed*; return the keyword index built."""
    frontier = [seed]
    visited = []
    index = []
    while frontier:
        url = frontier.pop()
        if url in visited:
            continue
        content = get_page(url)
        add_page_to_index(index, url, content)
        union(frontier, get_all_links(content))
        visited.append(url)
    return index
def add_page_to_index(index, url, content):
    """Index every whitespace-separated word of *content* under *url*."""
    for word in content.split():
        add_to_index(index, word, url)
def lookup(index, keyword):
    """Return the url-entry list recorded for *keyword*, or None if absent."""
    for word, url_entries in index:
        if word == keyword:
            return url_entries
    return None
#Here is an example showing a sequence of interactions:
# (each url entry is [url, click_count]; the click on crawling.html
#  below bumps its count from 0 to 1)
index = crawl_web('http://www.udacity.com/cs101x/index.html')
print(lookup(index, 'good'))
#>>> [['http://www.udacity.com/cs101x/index.html', 0],
#>>> ['http://www.udacity.com/cs101x/crawling.html', 0]]
record_user_click(index, 'good', 'http://www.udacity.com/cs101x/crawling.html')
print(lookup(index, 'good'))
#>>> [['http://www.udacity.com/cs101x/index.html', 0],
#>>> ['http://www.udacity.com/cs101x/crawling.html', 1]]
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Lesson04/Counting clicks.py",
"copies": "1",
"size": "4532",
"license": "mit",
"hash": 8789291507669038000,
"line_mean": 29.6283783784,
"line_max": 87,
"alpha_frac": 0.6206972639,
"autogenerated": false,
"ratio": 3.276934201012292,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4397631464912292,
"avg_score": null,
"num_lines": null
} |
__author__ = 'demi'
# 6. In video 28. Update, it was suggested that some of the duplicate code in
# lookup and update could be avoided by a better design. We can do this by
# defining a procedure that finds the entry corresponding to a given key, and
# using that in both lookup and update.
# Here are the original procedures:
def hashtable_update(htable, key, value):
    """Set *key* to *value*: overwrite an existing entry, else append a new one."""
    entry = find_entry(htable, key)
    if entry is None:
        hashtable_get_bucket(htable, key).append([key, value])
    else:
        entry[1] = value
def hashtable_lookup(htable, key):
    """Return the value stored under *key*, or None when absent."""
    entry = find_entry(htable, key)
    return entry[1] if entry else None
def make_hashtable(size):
    """Create a hash table: a list of *size* independent empty buckets."""
    return [[] for _ in range(size)]
def hash_string(s, size):
    """Hash string *s* to a bucket number in range(size) by summing char codes."""
    return sum(ord(c) for c in s) % size
def hashtable_get_bucket(htable, key):
    """Return the bucket list that *key* hashes into."""
    bucket_number = hash_string(key, len(htable))
    return htable[bucket_number]
def find_entry(htable, key):
    """Return the [key, value] pair stored for *key*, or None if absent."""
    for candidate in hashtable_get_bucket(htable, key):
        if candidate[0] == key:
            return candidate
    return None
# Whenever we have duplicate code like the loop that finds the entry in
# hashtable_update and hashtable_lookup, we should think if there is a better way
# to write this that would avoid the duplication. We should be able to rewrite
# these procedures to be shorter by defining a new procedure and rewriting both
# hashtable_update and hashtable_lookup to use that procedure.
# Modify the code for both hashtable_update and hashtable_lookup to have the same
# behavior they have now, but using fewer lines of code in each procedure. You
# should define a new procedure to help with this. Your new version should have
# approximately the same running time as the original version, but neither
# hashtable_update or hashtable_lookup should include any for or while loop, and
# the block of each procedure should be no more than 6 lines long.
# Your procedures should have the same behavior as the originals. For example,
# Manual check: the second update for 'Python' must overwrite the first.
table = make_hashtable(10)
hashtable_update(table, 'Python', 'Monty')
hashtable_update(table, 'CLU', 'Barbara Liskov')
hashtable_update(table, 'JavaScript', 'Brendan Eich')
hashtable_update(table, 'Python', 'Guido van Rossum')
print(hashtable_lookup(table, 'Python'))
#>>> Guido van Rossum
| {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Lesson05/Refactoring.py",
"copies": "1",
"size": "2359",
"license": "mit",
"hash": 1393976746018527000,
"line_mean": 33.6911764706,
"line_max": 81,
"alpha_frac": 0.7108944468,
"autogenerated": false,
"ratio": 3.4640234948604993,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4674917941660499,
"avg_score": null,
"num_lines": null
} |
__author__ = 'demi'
# A list is symmetric if the first row is the same as the first column,
# the second row is the same as the second column and so on. Write a
# procedure, symmetric, which takes a list as input, and returns the
# boolean True if the list is symmetric and False if it is not.
def symmetric(lists):
    """Return True iff the square grid *lists* equals its own transpose."""
    n = len(lists)
    if n == 0:
        return True          # the empty grid is trivially symmetric
    if n != len(lists[0]):
        return False         # not square, cannot be symmetric
    for row in range(n):
        for col in range(n):
            if lists[row][col] != lists[col][row]:
                return False
    return True
# Manual checks; expected results are in the #>>> comments.
print(symmetric([[1, 2, 3],
                 [2, 3, 4],
                 [3, 4, 1]]))
#>>> True
print(symmetric([["cat", "dog", "fish"],
                 ["dog", "dog", "fish"],
                 ["fish", "fish", "cat"]]))
#>>> True
print(symmetric([["cat", "dog", "fish"],
                 ["dog", "dog", "dog"],
                 ["fish", "fish", "cat"]]))
#>>> False
print(symmetric([[1, 2],
                 [2, 1]]))
#>>> True
print(symmetric([[1, 2, 3, 4],
                 [2, 3, 4, 5],
                 [3, 4, 5, 6]]))
#>>> False
print(symmetric([[1, 2, 3],
                 [2, 3, 1]]))
#>>> False | {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Lesson03/symmetric.py",
"copies": "1",
"size": "1473",
"license": "mit",
"hash": 2356242675527159000,
"line_mean": 25.3214285714,
"line_max": 82,
"alpha_frac": 0.4976238968,
"autogenerated": false,
"ratio": 3.5408653846153846,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4538489281415385,
"avg_score": null,
"num_lines": null
} |
__author__ = 'demi'
# By Dimitris_GR from forums
# Modify Problem Set 31's (Optional) Symmetric Square to return True
# if the given square is antisymmetric and False otherwise.
# An nxn square is called antisymmetric if A[i][j]=-A[j][i]
# for each i=0,1,...,n-1 and for each j=0,1,...,n-1.
def antisymmetric(A):
    """Return True iff A is square and A[i][j] == -A[j][i] for every i, j."""
    size = len(A)
    if size == 0:
        return True
    if size != len(A[0]):
        return False
    # Diagonal cells must be 0 (x == -x); the generator covers them too.
    return all(A[r][c] == -A[c][r]
               for r in range(size)
               for c in range(size))
# Test Cases:
# Manual checks; expected results are in the #>>> comments.
print(antisymmetric([[0, 1, 2],
                     [-1, 0, 3],
                     [-2, -3, 0]]))
#>>> True
print(antisymmetric([[0, 0, 0],
                     [0, 0, 0],
                     [0, 0, 0]]))
#>>> True
print(antisymmetric([[0, 1, 2],
                     [-1, 0, -2],
                     [2, 2, 3]]))
#>>> False
print(antisymmetric([[1, 2, 5],
                     [0, 1, -9],
                     [0, 0, 1]]))
#>>> False | {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Lesson03/antisymmetric.py",
"copies": "1",
"size": "1066",
"license": "mit",
"hash": 8649133717669489000,
"line_mean": 20.7755102041,
"line_max": 68,
"alpha_frac": 0.4240150094,
"autogenerated": false,
"ratio": 3.144542772861357,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4068557782261357,
"avg_score": null,
"num_lines": null
} |
__author__ = 'demi'
# Define a procedure,
#
# hashtable_add(htable,key,value)
#
# that adds the key to the hashtable (in
# the correct bucket), with the correct
# value and returns the new hashtable.
#
# (Note that the video question and answer
# do not return the hashtable, but your code
# should do this to pass the test cases.)
def hashtable_add(htable, key, value):
    """Append [key, value] to the bucket *key* hashes into and return htable.

    The spec comment above explicitly requires the hashtable to be
    returned ("your code should do this to pass the test cases"); the
    original body returned None. Returning the mutated table is
    backward-compatible — existing callers ignore the return value.
    """
    hashtable_get_bucket(htable, key).append([key, value])
    return htable
def hashtable_get_bucket(htable,keyword):
    """Return the bucket that *keyword* hashes into."""
    bucket_index = hash_string(keyword, len(htable))
    return htable[bucket_index]
def hash_string(keyword,buckets):
    """Map *keyword* to a bucket number in range(buckets).

    Summing first and reducing once is arithmetically identical to the
    running `(out + ord(s)) % buckets` form.
    """
    return sum(ord(ch) for ch in keyword) % buckets
def make_hashtable(nbuckets):
    """Build a hash table as *nbuckets* distinct empty bucket lists."""
    return [[] for _ in range(nbuckets)]
# Manual check: entries land in the bucket their name hashes to.
table = make_hashtable(5)
hashtable_add(table,'Bill', 17)
hashtable_add(table,'Coach', 4)
hashtable_add(table,'Ellis', 11)
hashtable_add(table,'Francis', 13)
hashtable_add(table,'Louis', 29)
hashtable_add(table,'Nick', 2)
hashtable_add(table,'Rochelle', 4)
hashtable_add(table,'Zoe', 14)
print(table)
#>>> [[['Ellis', 11], ['Francis', 13]], [], [['Bill', 17], ['Zoe', 14]],
#>>> [['Coach', 4]], [['Louis', 29], ['Nick', 2], ['Rochelle', 4]]]
| {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Lesson05/Adding Keywords.py",
"copies": "1",
"size": "1225",
"license": "mit",
"hash": -2600142782262828000,
"line_mean": 25.6304347826,
"line_max": 72,
"alpha_frac": 0.6530612245,
"autogenerated": false,
"ratio": 2.7904328018223237,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8780704373675086,
"avg_score": 0.03255793052944747,
"num_lines": 46
} |
__author__ = 'demi'
# Define a procedure,
# hashtable_lookup(htable,key)
# that takes two inputs, a hashtable
# and a key (string),
# and returns the value associated
# with that key.
def hashtable_lookup(htable, key):
    """Return the value stored under *key*, or None when absent."""
    for entry_key, entry_value in hashtable_get_bucket(htable, key):
        if entry_key == key:
            return entry_value
    return None
def hashtable_add(htable,key,value):
    """Append a new [key, value] pair to key's bucket (no duplicate check)."""
    hashtable_get_bucket(htable, key).append([key, value])
def hashtable_get_bucket(htable,keyword):
    """Return the bucket list that *keyword* hashes into."""
    return htable[hash_string(keyword, len(htable))]
def hash_string(keyword,buckets):
    """Hash *keyword* into range(buckets) by summing character codes."""
    total = sum(map(ord, keyword))
    return total % buckets
def make_hashtable(nbuckets):
    """Return a new table of *nbuckets* independent empty buckets."""
    return [[] for _unused in range(nbuckets)]
# Manual checks against a pre-built 5-bucket table.
table = [[['Ellis', 11], ['Francis', 13]], [], [['Bill', 17], ['Zoe', 14]],
         [['Coach', 4]], [['Louis', 29], ['Nick', 2], ['Rochelle', 4]]]
print(hashtable_lookup(table, 'Francis'))
#>>> 13
print(hashtable_lookup(table, 'Louis'))
#>>> 29
print(hashtable_lookup(table, 'Zoe'))
#>>> 14
| {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Lesson05/Lookup.py",
"copies": "1",
"size": "1166",
"license": "mit",
"hash": -797689363073214300,
"line_mean": 20.2,
"line_max": 75,
"alpha_frac": 0.614922813,
"autogenerated": false,
"ratio": 2.9370277078085643,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40519505208085643,
"avg_score": null,
"num_lines": null
} |
__author__ = 'demi'
# Define a procedure,
# hashtable_update(htable,key,value)
# that updates the value associated with key. If key is already in the
# table, change the value to the new value. Otherwise, add a new entry
# for the key and value.
# Hint: Use hashtable_lookup as a starting point.
# Make sure that you return the new htable
def hashtable_update(htable, key, value):
    """Set *key* to *value*: overwrite if present, otherwise add. Returns htable.

    Behavior is unchanged, but the original wrote through the loop
    variable `entry` *after* the loop (relying on Python's loop-variable
    leakage, and on short-circuiting to dodge a NameError on an empty
    bucket) while using `found` both as a flag and as the entry. The
    overwrite now happens inside the loop, where the entry is in scope.
    """
    bucket = hashtable_get_bucket(htable, key)
    for entry in bucket:
        if entry[0] == key:
            entry[1] = value        # overwrite existing entry in place
            return htable
    hashtable_add(htable, key, value)
    return htable
def hashtable_lookup(htable,key):
    """Return the value recorded for *key*, or None."""
    matches = [pair[1] for pair in hashtable_get_bucket(htable, key)
               if pair[0] == key]
    return matches[0] if matches else None
def hashtable_add(htable,key,value):
    """Unconditionally append [key, value] to the appropriate bucket."""
    target_bucket = hashtable_get_bucket(htable, key)
    target_bucket.append([key, value])
def hashtable_get_bucket(htable,keyword):
    """Select *keyword*'s bucket out of the table."""
    n_buckets = len(htable)
    return htable[hash_string(keyword, n_buckets)]
def hash_string(keyword,buckets):
    """Sum the character codes of *keyword* modulo *buckets*."""
    code_sum = 0
    for character in keyword:
        code_sum += ord(character)
    return code_sum % buckets
def make_hashtable(nbuckets):
    """Allocate *nbuckets* fresh, independent buckets."""
    table = []
    while len(table) < nbuckets:
        table.append([])
    return table
# Manual check: Bill/Rochelle get overwritten, Zed is newly inserted.
table = [[['Ellis', 11], ['Francis', 13]], [], [['Bill', 17], ['Zoe', 14]],
         [['Coach', 4]], [['Louis', 29], ['Nick', 2], ['Rochelle', 4]]]
hashtable_update(table, 'Bill', 42)
hashtable_update(table, 'Rochelle', 94)
hashtable_update(table, 'Zed', 68)
print(table)
#>>> [[['Ellis', 11], ['Francis', 13]], [['Zed', 68]], [['Bill', 42],
#>>> ['Zoe', 14]], [['Coach', 4]], [['Louis', 29], ['Nick', 2],
#>>> ['Rochelle', 94]]]
| {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Lesson05/Update.py",
"copies": "1",
"size": "1718",
"license": "mit",
"hash": -48380783061770296,
"line_mean": 25.4307692308,
"line_max": 75,
"alpha_frac": 0.6094295693,
"autogenerated": false,
"ratio": 3.008756567425569,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4118186136725569,
"avg_score": null,
"num_lines": null
} |
__author__ = 'demi'
# define a procedure that takes in a string of numbers from 1-9 and
# outputs a list with the following parameters:
# Every number in the string should be inserted into the list.
# If a number x in the string is less than or equal
# to the preceding number y, the number x should be inserted
# into a sublist. Continue adding the following numbers to the
# sublist until reaching a number z that
# is greater than the number y.
# Then add this number z to the normal list and continue.
# Hint - "int()" turns a string's element into a number
def numbers_in_lists(string):
    """Parse a digit string into a list with grouped sublists.

    A digit goes into the current sublist while it is <= the most recent
    top-level digit; a larger digit closes the sublist (if any) and is
    appended at the top level.

    Fixes: the original used `prec = False` with `if prec != False:` as
    its "no previous digit" test — a truthiness trap, since `0 != False`
    is False, so a leading-'0' input would misbehave. A None sentinel is
    unambiguous. The duplicated first-digit append logic is also merged
    into the main branch.
    """
    result = []
    sublist = []
    last_top = None   # last digit placed at the top level; None = none yet
    for ch in string:
        digit = int(ch)
        if last_top is not None and digit <= last_top:
            sublist.append(digit)
        else:
            if sublist:                  # close the open sublist first
                result.append(sublist)
                sublist = []
            result.append(digit)
            last_top = digit
    if sublist:                          # flush a trailing sublist
        result.append(sublist)
    return result
# testcases
# Each print shows whether the computed value equals the expected `result`.
string = '543987'
result = [5, [4, 3], 9, [8, 7]]
print(repr(string), numbers_in_lists(string) == result)
string = '987654321'
result = [9, [8, 7, 6, 5, 4, 3, 2, 1]]
print(repr(string), numbers_in_lists(string) == result)
string = '455532123266'
result = [4, 5, [5, 5, 3, 2, 1, 2, 3, 2], 6, [6]]
print(repr(string), numbers_in_lists(string) == result)
print(numbers_in_lists(string))
string = '123456789'
result = [1, 2, 3, 4, 5, 6, 7, 8, 9]
print(repr(string), numbers_in_lists(string) == result)
| {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Lesson03/numbers_in_lists.py",
"copies": "1",
"size": "1607",
"license": "mit",
"hash": 8659992919514006000,
"line_mean": 27.6964285714,
"line_max": 67,
"alpha_frac": 0.5948973242,
"autogenerated": false,
"ratio": 3.3134020618556703,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.440829938605567,
"avg_score": null,
"num_lines": null
} |
__author__ = 'demi'
# Dictionaries of Dictionaries (of Dictionaries)
# The next several questions concern the data structure below for keeping
# track of Udacity's courses (where all of the values are strings):
# { <hexamester>, { <class>: { <property>: <value>, ... },
# ... },
# ... }
# For example,
# Sample catalog: hexamester -> course id -> {property: value} (all strings).
courses = {
    'feb2012': {
        'cs101': {'name': 'Building a Search Engine',
                  'teacher': 'Dave',
                  'assistant': 'Peter C.'},
        'cs373': {'name': 'Programming a Robotic Car',
                  'teacher': 'Sebastian',
                  'assistant': 'Andy'},
    },
    'apr2012': {
        'cs101': {'name': 'Building a Search Engine',
                  'teacher': 'Dave',
                  'assistant': 'Sarah'},
        'cs212': {'name': 'The Design of Computer Programs',
                  'teacher': 'Peter N.',
                  'assistant': 'Andy',
                  'prereq': 'cs101'},
        'cs253': {'name': 'Web Application Engineering - Building a Blog',
                  'teacher': 'Steve',
                  'prereq': 'cs101'},
        'cs262': {'name': 'Programming Languages - Building a Web Browser',
                  'teacher': 'Wes',
                  'assistant': 'Peter C.',
                  'prereq': 'cs101'},
        'cs373': {'name': 'Programming a Robotic Car',
                  'teacher': 'Sebastian'},
        'cs387': {'name': 'Applied Cryptography',
                  'teacher': 'Dave'},
    },
    'jan2044': {
        'cs001': {'name': 'Building a Quantum Holodeck',
                  'teacher': 'Dorina'},
        'cs003': {'name': 'Programming a Robotic Robotics Teacher',
                  'teacher': 'Jasper'},
    },
}
# For the following questions, you will find the
# for <key> in <dictionary>:
# <block>
# construct useful. This loops through the key values in the Dictionary. For
# example, this procedure returns a list of all the courses offered in the given
# hexamester:
def courses_offered(courses, hexamester):
    """Return the list of course ids offered in *hexamester*."""
    return list(courses[hexamester])
# Define a procedure, involved(courses, person), that takes
# as input a courses structure and a person and returns a Dictionary that
# describes all the courses the person is involved in. A person is involved
# in a course if they are a value for any property for the course. The output
# Dictionary should have hexamesters as its keys, and each value should be a
# list of courses that are offered that hexamester (the courses in the list
# can be in any order).
def involved(courses, person):
    """Map hexamester -> courses in which *person* is any property value."""
    found = {}
    for hexamester, offerings in courses.items():
        for course, properties in offerings.items():
            # a person is "involved" if they appear as any property value
            if person in properties.values():
                found.setdefault(hexamester, []).append(course)
    return found
# For example:
# Manual checks; expected results are in the #>>> comments.
print(involved(courses, 'Dave'))
#>>> {'apr2012': ['cs101', 'cs387'], 'feb2012': ['cs101']}
print(involved(courses, 'Peter C.'))
#>>> {'apr2012': ['cs262'], 'feb2012': ['cs101']}
print(involved(courses, 'Dorina'))
#>>> {'jan2044': ['cs001']}
print(involved(courses, 'Peter'))
#>>> {}
print(involved(courses, 'Robotic'))
#>>> {}
print(involved(courses, ''))
#>>> {}
| {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Lesson05/Involved.py",
"copies": "1",
"size": "3674",
"license": "mit",
"hash": -3867942097067552000,
"line_mean": 34.6699029126,
"line_max": 80,
"alpha_frac": 0.5038105607,
"autogenerated": false,
"ratio": 4.041804180418042,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5045614741118042,
"avg_score": null,
"num_lines": null
} |
__author__ = 'demi'
# Double Gold Star
# Khayyam Triangle
# The French mathematician, Blaise Pascal, who built a mechanical computer in
# the 17th century, studied a pattern of numbers now commonly known in parts of
# the world as Pascal's Triangle (it was also previously studied by many Indian,
# Chinese, and Persian mathematicians, and is known by different names in other
# parts of the world).
# The pattern is shown below:
# 1
# 1 1
# 1 2 1
# 1 3 3 1
# 1 4 6 4 1
# ...
# Each number is the sum of the number above it to the left and the number above
# it to the right (any missing numbers are counted as 0).
# Define a procedure, triangle(n), that takes a number n as its input, and
# returns a list of the first n rows in the triangle. Each element of the
# returned list should be a list of the numbers at the corresponding row in the
# triangle.
def triangle(n):
    """Return the first *n* rows of the Khayyam/Pascal triangle as lists."""
    rows = []
    for r in range(n):
        prev = rows[-1] if rows else []
        row = [1] * (r + 1)           # edges are always 1
        for c in range(1, r):         # interior cells: sum of the two above
            row[c] = prev[c - 1] + prev[c]
        rows.append(row)
    return rows
#For example:
# Manual checks; expected results are in the #>>> comments.
print(triangle(0))
#>>> []
print(triangle(1))
#>>> [[1]]
print(triangle(2))
#>>> [[1], [1, 1]]
print(triangle(3))
#>>> [[1], [1, 1], [1, 2, 1]]
print(triangle(6))
#>>> [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1], [1, 5, 10, 10, 5, 1]] | {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Lesson06/Khayyam Triangle.py",
"copies": "1",
"size": "1635",
"license": "mit",
"hash": -3550361104564382000,
"line_mean": 24.9682539683,
"line_max": 82,
"alpha_frac": 0.5431192661,
"autogenerated": false,
"ratio": 3.2058823529411766,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42490016190411767,
"avg_score": null,
"num_lines": null
} |
__author__ = 'demi'
#Feeling Lucky
#In Unit 6, we implemented a page ranking algorithm, but didn't finish the final
#step of using it to improve our search results. For this question, you will use
#the page rankings to produce the best output for a given query.
#Define a procedure, lucky_search, that takes as input an index, a ranks
#dictionary (the result of compute_ranks), and a keyword, and returns the one
#URL most likely to be the best site for that keyword. If the keyword does not
#appear in the index, lucky_search should return None.
def lucky_search(index, ranks, keyword):
    """Return the highest-ranked url indexed for *keyword*, or None.

    Ties keep the earliest url in the index list (max returns the first
    maximal element, matching the original strict-> comparison).
    """
    urls = lookup(index, keyword)
    if not urls:
        return None
    return max(urls, key=lambda url: ranks[url])
# Test fixture: static HTML body for each crawlable url (no network needed).
cache = {
   'http://udacity.com/cs101x/urank/index.html': """<html>
<body>
<h1>Dave's Cooking Algorithms</h1>
<p>
Here are my favorite recipies:
<ul>
<li> <a href="http://udacity.com/cs101x/urank/hummus.html">Hummus Recipe</a>
<li> <a href="http://udacity.com/cs101x/urank/arsenic.html">World's Best Hummus</a>
<li> <a href="http://udacity.com/cs101x/urank/kathleen.html">Kathleen's Hummus Recipe</a>
</ul>
For more expert opinions, check out the
<a href="http://udacity.com/cs101x/urank/nickel.html">Nickel Chef</a>
and <a href="http://udacity.com/cs101x/urank/zinc.html">Zinc Chef</a>.
</body>
</html>
""",
   'http://udacity.com/cs101x/urank/zinc.html': """<html>
<body>
<h1>The Zinc Chef</h1>
<p>
I learned everything I know from
<a href="http://udacity.com/cs101x/urank/nickel.html">the Nickel Chef</a>.
</p>
<p>
For great hummus, try
<a href="http://udacity.com/cs101x/urank/arsenic.html">this recipe</a>.
</body>
</html>
""",
   'http://udacity.com/cs101x/urank/nickel.html': """<html>
<body>
<h1>The Nickel Chef</h1>
<p>
This is the
<a href="http://udacity.com/cs101x/urank/kathleen.html">
best Hummus recipe!
</a>
</body>
</html>
""",
   'http://udacity.com/cs101x/urank/kathleen.html': """<html>
<body>
<h1>
Kathleen's Hummus Recipe
</h1>
<p>
<ol>
<li> Open a can of garbonzo beans.
<li> Crush them in a blender.
<li> Add 3 tablesppons of tahini sauce.
<li> Squeeze in one lemon.
<li> Add salt, pepper, and buttercream frosting to taste.
</ol>
</body>
</html>
""",
   'http://udacity.com/cs101x/urank/arsenic.html': """<html>
<body>
<h1>
The Arsenic Chef's World Famous Hummus Recipe
</h1>
<p>
<ol>
<li> Kidnap the <a href="http://udacity.com/cs101x/urank/nickel.html">Nickel Chef</a>.
<li> Force her to make hummus for you.
</ol>
</body>
</html>
""",
   'http://udacity.com/cs101x/urank/hummus.html': """<html>
<body>
<h1>
Hummus Recipe
</h1>
<p>
<ol>
<li> Go to the store and buy a container of hummus.
<li> Open it.
</ol>
</body>
</html>
""",
}
def get_page(url):
    """Return the cached page body for *url*, or '' for unknown urls."""
    return cache.get(url, "")
def get_next_target(page):
    """Locate the next '<a href=' link in *page*.

    Returns (url, index_of_closing_quote), or (None, 0) if none remains.
    """
    link_at = page.find('<a href=')
    if link_at == -1:
        return None, 0
    q_open = page.find('"', link_at)
    q_close = page.find('"', q_open + 1)
    return page[q_open + 1:q_close], q_close
def get_all_links(page):
    """Collect every href url in *page*, preserving document order."""
    collected = []
    url, after = get_next_target(page)
    while url:
        collected.append(url)
        page = page[after:]
        url, after = get_next_target(page)
    return collected
def union(a, b):
    """In-place: append to *a* every element of *b* not already present."""
    for candidate in b:
        if candidate not in a:
            a.append(candidate)
def add_page_to_index(index, url, content):
    """Record *url* under every whitespace-separated word of *content*."""
    for token in content.split():
        add_to_index(index, token, url)
def add_to_index(index, keyword, url):
    """Append *url* to keyword's url list, creating the list on first use."""
    index.setdefault(keyword, []).append(url)
def lookup(index, keyword):
    """Return the url list indexed for *keyword*, or None when absent."""
    return index.get(keyword)
def crawl_web(seed): # returns index, graph of inlinks
    """Crawl from *seed*; return (keyword index, url -> outlinks graph)."""
    frontier = [seed]
    seen = []
    graph = {}   # <url>, [list of pages it links to]
    index = {}
    while frontier:
        url = frontier.pop()
        if url in seen:
            continue
        content = get_page(url)
        add_page_to_index(index, url, content)
        outlinks = get_all_links(content)
        graph[url] = outlinks
        union(frontier, outlinks)
        seen.append(url)
    return index, graph
def compute_ranks(graph):
    """Run 10 damped page-rank iterations over *graph* (url -> outlinks).

    Returns a dict mapping each url to its rank score.
    """
    damping = 0.8
    iterations = 10
    npages = len(graph)
    # every page starts with an equal share of rank
    ranks = {page: 1.0 / npages for page in graph}
    for _ in range(iterations):
        next_ranks = {}
        for page in graph:
            rank = (1 - damping) / npages
            for node in graph:
                if page in graph[node]:
                    # same float grouping as the original: d * (r / deg)
                    rank = rank + damping * (ranks[node] / len(graph[node]))
            next_ranks[page] = rank
        ranks = next_ranks
    return ranks
#Here's an example of how your procedure should work on the test site:
# End-to-end run over the cached test site; expected urls in #>>> comments.
index, graph = crawl_web('http://udacity.com/cs101x/urank/index.html')
ranks = compute_ranks(graph)
print(lucky_search(index, ranks, 'Hummus'))
#>>> http://udacity.com/cs101x/urank/kathleen.html
print(lucky_search(index, ranks, 'the'))
#>>> http://udacity.com/cs101x/urank/nickel.html
print(lucky_search(index, ranks, 'babaganoush'))
#>>> None
| {
"repo_name": "dmitry-izmerov/Udacity-Intro-to-computer-science",
"path": "Lesson06/Feeling Lucky.py",
"copies": "1",
"size": "5297",
"license": "mit",
"hash": -6536172430794465000,
"line_mean": 20.8884297521,
"line_max": 89,
"alpha_frac": 0.6237492921,
"autogenerated": false,
"ratio": 2.774751178627554,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38985004707275533,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.