text stringlengths 0 1.05M | meta dict |
|---|---|
__author__ = 'cpaulson'
import pyKriging
from pyKriging.krige import kriging
from pyKriging.samplingplan import samplingplan

# The Kriging model starts by defining a sampling plan; we use an optimal
# Latin Hypercube over two design variables here.
sp = samplingplan(2)
X = sp.optimallhc(15)

# Next, we define the problem we would like to solve.
testfun = pyKriging.testfunctions().paulson1
y = testfun(X)

# We can choose between a ga and a pso optimizer here.
optimizer = 'ga'

# Now that we have our initial data, we can create an instance of a kriging
# model.  print() calls keep this runnable under both Python 2 and 3 (the
# original used Python-2-only print statements).
print('Setting up the Kriging Model')
k = kriging(X, y, testfunction=testfun, name='simple_ei', testPoints=300)
k.train(optimizer=optimizer)
k.snapshot()

# Add five points based on model error reduction (the loop runs 5 times,
# adding one infill point each pass; the original comment said 10).
for i in range(5):
    newpoints = k.infill(1, method='error')
    for point in newpoints:
        print('Adding point {}'.format(point))
        k.addPoint(point, testfun(point)[0])
    k.train(optimizer=optimizer)
    k.snapshot()

# Infill five points based on the expected improvement criterion.
for i in range(5):
    newpoints = k.infill(1, method='ei')
    for point in newpoints:
        print('Adding point {}'.format(point))
        k.addPoint(point, testfun(point)[0])
    k.train(optimizer=optimizer)
    k.snapshot()

# And plot the results.
print('Now plotting final results...')
k.plot()
| {
"repo_name": "DailyActie/Surrogate-Model",
"path": "01-codes/pyKriging-master/examples/2D_simple_train_expected_improvement.py",
"copies": "1",
"size": "1321",
"license": "mit",
"hash": 7022152080519651000,
"line_mean": 29.7209302326,
"line_max": 94,
"alpha_frac": 0.7161241484,
"autogenerated": false,
"ratio": 3.29426433915212,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.451038848755212,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cpaulson'
import numpy as np
from numpy.matlib import ones, eye
from pyKriging import kriging
class coKriging():
    """Co-kriging surrogate model combining a cheap data set (Xc, yc) with
    an expensive data set (Xe, ye).

    NOTE(review): this class appears to be work in progress -- updatePsi()
    calls exit() partway through (debugging left in), and neglnlikehood()
    reads attributes (self.PsicXeXc, self.PsidXe) that no method ever
    assigns.
    """

    def __init__(self, Xc, yc, Xe, ye):
        """Store the samples, align the two data sets and set hyperparameters.

        :param Xc: cheap sample locations (assumes 1-D input; it is run
            through np.atleast_2d(...).T -- TODO confirm intended shapes)
        :param yc: cheap responses
        :param Xe: expensive sample locations
        :param ye: expensive responses
        """
        # Create the data arrays
        self.Xc = np.atleast_2d(Xc).T
        self.yc = yc
        self.nc = self.Xc.shape[0]  # number of cheap samples
        self.Xe = np.atleast_2d(Xe).T
        self.ye = ye
        self.ne = self.Xe.shape[0]  # number of expensive samples
        # rho regression parameter (scaling between expensive and cheap data)
        self.rho = 1.9961
        self.reorder_data()
        # self.traincheap()
        self.k = self.Xc.shape[1]  # number of design variables
        # if self.Xe.shape[1] != self.Xc.shape[1]:
        #     print 'Xc and Xe must have the same number of design variables. Fatal error -- Exiting...'
        #     exit()
        # Configure the hyperparameter arrays
        self.thetad = np.ones(self.k)
        # NOTE(review): thetac stays None unless set externally (e.g. from a
        # trained cheap kriging model); updatePsi() multiplies by it and
        # raises TypeError while it is None.
        self.thetac = None
        # self.thetac = self.kc.theta
        self.pd = np.ones(self.k) * 2.
        # self.pc = self.kc.pl
        self.pc = np.ones(self.k) * 2.
        # Matrix Operations
        self.one = ones([self.ne + self.nc, 1])
        self.y = [self.yc, self.ye]
        print 'here1'

    def reorder_data(self):
        """Reorder the cheap data so points shared with Xe sit at the end,
        and build the difference data yd = ye - rho * yc on shared points.

        NOTE(review): the membership test `entry in self.Xe` and the
        comparison `entry == test` only behave as intended for scalar
        (k == 1) design points; with k > 1 the array comparisons become
        ambiguous -- confirm before using in higher dimensions.
        """
        xe = []
        ye = []
        xc = []
        yc = []
        Xd = []
        yd = []
        for enu, entry in enumerate(self.Xc):
            if entry in self.Xe:
                print 'Found this value in XE!!'
                for enu1, test in enumerate(self.Xe):
                    # if entry[0] == test[0] and entry[1] == test[1]:
                    if entry == test:
                        xe.append(test.tolist())
                        ye.append(self.ye[enu1].tolist())
                        xc.append(entry.tolist())
                        yc.append(self.yc[enu].tolist())
                        Xd.append(entry.tolist())
                        yd.append(self.ye[enu1].tolist() - self.rho * self.yc[enu].tolist())
                        break
            else:
                # Unshared cheap points are pushed to the front.
                xc.insert(0, entry.tolist())
                yc.insert(0, self.yc[enu].tolist())
        self.Xe = np.array(xe)
        self.ye = np.array(ye)
        self.Xc = np.array(xc)
        self.yc = np.array(yc)
        self.Xd = np.array(Xd)
        self.yd = np.atleast_2d(np.array(yd))

    def updateData(self):
        """Refresh the sample counts and recompute all distance arrays.

        NOTE(review): distanceXc/distanceXe/distanceXcXe assign attributes
        with the same names as the methods, shadowing them after the first
        call -- a second call to updateData() raises TypeError.
        """
        self.nc = self.Xc.shape[0]
        self.ne = self.Xe.shape[0]
        self.distanceXc()
        self.distanceXe()
        self.distanceXcXe()

    def traincheap(self):
        """Fit an ordinary kriging model to the cheap data only."""
        self.kc = kriging(self.Xc, self.yc)
        self.kc.train()
        print

    def distanceXc(self):
        """Upper-triangular |Xc[i] - Xc[j]| distances, per design variable."""
        # NOTE(review): this assignment replaces the bound method of the
        # same name with the result array.
        self.distanceXc = np.zeros((self.nc, self.nc, self.k))
        for i in range(self.nc):
            for j in xrange(i + 1, self.nc):
                self.distanceXc[i][j] = np.abs((self.Xc[i] - self.Xc[j]))

    def distanceXe(self):
        """Upper-triangular |Xe[i] - Xe[j]| distances, per design variable."""
        self.distanceXe = np.zeros((self.ne, self.ne, self.k))
        for i in range(self.ne):
            for j in xrange(i + 1, self.ne):
                self.distanceXe[i][j] = np.abs((self.Xe[i] - self.Xe[j]))

    def distanceXcXe(self):
        """Full |Xc[i] - Xe[j]| cross-distances, per design variable."""
        self.distanceXcXe = np.zeros((self.nc, self.ne, self.k))
        for i in range(self.nc):
            for j in xrange(self.ne):
                self.distanceXcXe[i][j] = np.abs((self.Xc[i] - self.Xe[j]))

    def updatePsi(self):
        """Build the Gaussian correlation matrices from the distance arrays.

        NOTE(review): contains a hard exit() after the Xc block, so the Xe
        and XcXe sections below never run.
        """
        self.PsicXc = np.zeros((self.nc, self.nc), dtype=np.float)
        self.PsicXe = np.zeros((self.ne, self.ne), dtype=np.float)
        self.PsicXcXe = np.zeros((self.nc, self.ne), dtype=np.float)
        #
        # print self.thetac
        # print self.pc
        # print self.distanceXc
        newPsicXc = np.exp(-np.sum(self.thetac * np.power(self.distanceXc, self.pc), axis=2))
        print newPsicXc[0]
        # Symmetrise the strict upper triangle and regularise the diagonal
        # with machine epsilon before the Cholesky factorisation.
        self.PsicXc = np.triu(newPsicXc, 1)
        self.PsicXc = self.PsicXc + self.PsicXc.T + np.mat(eye(self.nc)) + np.multiply(np.mat(eye(self.nc)),
                                                                                       np.spacing(1))
        self.UPsicXc = np.linalg.cholesky(self.PsicXc)
        self.UPsicXc = self.UPsicXc.T
        print self.PsicXc[0]
        print self.UPsicXc
        exit()
        newPsicXe = np.exp(-np.sum(self.thetac * np.power(self.distanceXe, self.pc), axis=2))
        self.PsicXe = np.triu(newPsicXe, 1)
        # NOTE(review): result stored in self.PsiXe -- looks like a typo for
        # PsicXe; the Cholesky below therefore factorises the strictly
        # upper-triangular PsicXe instead of the symmetrised matrix.
        self.PsiXe = self.PsicXe + self.PsicXe.T + np.mat(eye(self.ne)) + np.multiply(np.mat(eye(self.ne)),
                                                                                      np.spacing(1))
        self.UPsicXe = np.linalg.cholesky(self.PsicXe)
        self.UPsicXe = self.UPsicXe.T
        newPsiXeXc = np.exp(-np.sum(self.thetad * np.power(self.distanceXcXe, self.pd), axis=2))
        self.PsicXcXe = np.triu(newPsiXeXc, 1)

    def neglnlikehood(self):
        """Compute the concentrated likelihood quantities (mu, sigma^2) for
        the cheap and difference models via triangular solves.

        NOTE(review): references self.PsicXeXc and self.PsidXe, which are
        never assigned anywhere in this class -- the final np.array
        construction cannot succeed as written.
        """
        # mu_c = (1' Psi^-1 y) / (1' Psi^-1 1), using the Cholesky factors.
        a = np.linalg.solve(self.UPsicXc.T, np.matrix(self.yc).T)
        b = np.linalg.solve(self.UPsicXc, a)
        c = ones([self.nc, 1]).T * b
        d = np.linalg.solve(self.UPsicXc.T, ones([self.nc, 1]))
        e = np.linalg.solve(self.UPsicXc, d)
        f = ones([self.nc, 1]).T * e
        self.muc = c / f
        # This only works if yc is transposed, then its a scalar under two layers of arrays. Correct? Not sure
        print 'y', self.yd.T
        a = np.linalg.solve(self.UPsicXe.T, self.yd)
        print 'a', a
        b = np.linalg.solve(self.UPsicXe, a)
        print 'b', b
        # NOTE(review): ones(...) is NOT transposed here, unlike the muc
        # computation above, so c is a matrix rather than a scalar --
        # confirm which is intended.
        c = ones([self.ne, 1]) * b
        print 'c', c
        d = np.linalg.solve(self.UPsicXe.T, ones([self.ne, 1], dtype=float))
        print d
        e = np.linalg.solve(self.UPsicXe, d)
        print e
        f = ones([self.ne, 1]).T * e
        print f
        self.mud = c / f
        # sigma^2 estimates for the cheap and difference models.
        a = np.linalg.solve(self.UPsicXc.T, (self.yc - ones([self.nc, 1]) * self.muc)) / self.nc
        b = np.linalg.solve(self.UPsicXc, a)
        self.SigmaSqrc = (self.yc - ones([self.nc, 1]) * self.muc).T * b
        print self.ne
        print self.mud
        print self.UPsicXe.T
        a = np.linalg.solve(self.UPsicXe.T, (self.yd - ones([self.ne, 1]) * self.mud)) / self.ne
        b = np.linalg.solve(self.UPsicXe, a)
        self.SigmaSqrd = (self.yd - ones([self.ne, 1]) * self.mud).T * b
        # Assemble the co-kriging covariance matrix C (block structure).
        self.C = np.array([self.SigmaSqrc * self.PsicXc, self.rho * self.SigmaSqrc * self.PsicXcXe,
                           self.rho * self.SigmaSqrc * self.PsicXeXc,
                           np.power(self.rho, 2) * self.SigmaSqrc * self.PsicXe + self.SigmaSqrd * self.PsidXe])
        np.reshape(c, [2, 2])
        self.UC = np.linalg.cholesky(self.C)
        # self.mu=(self.one.T *(self.UC\(self.UC.T\y)))/(one'*(ModelInfo.UC\(ModelInfo.UC'\one)));
def fc(X):
    """Cheap test function: x0^2 + x0 + x1^2 + x1, evaluated row-wise."""
    x0, x1 = X[:, 0], X[:, 1]
    return x0 * x0 + x0 + x1 * x1 + x1
def fe(X):
    """Expensive test function: x0^2 + x1^2, evaluated row-wise."""
    first, second = X[:, 0], X[:, 1]
    return first * first + second * second
if __name__ == '__main__':
    import samplingplan
    import random

    # 20-point optimal Latin hypercube over two design variables; six of
    # those points double as the "expensive" sample set.
    plan = samplingplan.samplingplan(2)
    cheap_X = plan.optimallhc(20)
    exp_X = np.array(random.sample(cheap_X, 6))

    cheap_y = fc(cheap_X)
    exp_y = fe(exp_X)

    # Build the co-kriging model and run the set-up pipeline.
    model = coKriging(cheap_X, cheap_y, exp_X, exp_y)
    model.updateData()
    model.updatePsi()
    model.neglnlikehood()
| {
"repo_name": "DailyActie/Surrogate-Model",
"path": "01-codes/pyKriging-master/pyKriging/coKriging.py",
"copies": "1",
"size": "7053",
"license": "mit",
"hash": 3295755510962204700,
"line_mean": 32.1126760563,
"line_max": 112,
"alpha_frac": 0.5185027648,
"autogenerated": false,
"ratio": 2.9060568603213843,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8921071855450546,
"avg_score": 0.0006975539341677225,
"num_lines": 213
} |
__author__ = 'cpaulson'
import numpy as np
from numpy.matlib import rand,zeros,ones,empty,eye
from pyKriging import kriging
class coKriging():
    """Co-kriging surrogate model combining a cheap data set (Xc, yc) with
    an expensive data set (Xe, ye).

    NOTE(review): this class appears to be work in progress -- updatePsi()
    calls exit() partway through (debugging left in), and neglnlikehood()
    reads attributes (self.PsicXeXc, self.PsidXe) that no method ever
    assigns.
    """

    def __init__(self, Xc, yc, Xe, ye):
        """Store the samples, align the two data sets and set hyperparameters.

        :param Xc: cheap sample locations (assumes 1-D input; it is run
            through np.atleast_2d(...).T -- TODO confirm intended shapes)
        :param yc: cheap responses
        :param Xe: expensive sample locations
        :param ye: expensive responses
        """
        # Create the data arrays
        self.Xc = np.atleast_2d(Xc).T
        self.yc = yc
        self.nc = self.Xc.shape[0]  # number of cheap samples
        self.Xe = np.atleast_2d(Xe).T
        self.ye = ye
        self.ne = self.Xe.shape[0]  # number of expensive samples
        # rho regression parameter (scaling between expensive and cheap data)
        self.rho = 1.9961
        self.reorder_data()
        # self.traincheap()
        self.k = self.Xc.shape[1]  # number of design variables
        # if self.Xe.shape[1] != self.Xc.shape[1]:
        #     print 'Xc and Xe must have the same number of design variables. Fatal error -- Exiting...'
        #     exit()
        # Configure the hyperparameter arrays
        self.thetad = np.ones(self.k)
        # NOTE(review): thetac stays None unless set externally (e.g. from a
        # trained cheap kriging model); updatePsi() multiplies by it and
        # raises TypeError while it is None.
        self.thetac = None
        # self.thetac = self.kc.theta
        self.pd = np.ones(self.k) * 2.
        # self.pc = self.kc.pl
        self.pc = np.ones(self.k) * 2.
        # Matrix Operations
        self.one = ones([self.ne + self.nc, 1])
        self.y = [self.yc, self.ye]
        print('here1')

    def reorder_data(self):
        """Reorder the cheap data so points shared with Xe sit at the end,
        and build the difference data yd = ye - rho * yc on shared points.

        NOTE(review): the membership test `entry in self.Xe` and the
        comparison `entry == test` only behave as intended for scalar
        (k == 1) design points; with k > 1 the array comparisons become
        ambiguous -- confirm before using in higher dimensions.
        """
        xe = []
        ye = []
        xc = []
        yc = []
        Xd = []
        yd = []
        for enu, entry in enumerate(self.Xc):
            if entry in self.Xe:
                print('Found this value in XE!!')
                for enu1, test in enumerate(self.Xe):
                    # if entry[0] == test[0] and entry[1] == test[1]:
                    if entry == test:
                        xe.append(test.tolist())
                        ye.append(self.ye[enu1].tolist())
                        xc.append(entry.tolist())
                        yc.append(self.yc[enu].tolist())
                        Xd.append(entry.tolist())
                        yd.append(self.ye[enu1].tolist() - self.rho * self.yc[enu].tolist())
                        break
            else:
                # Unshared cheap points are pushed to the front.
                xc.insert(0, entry.tolist())
                yc.insert(0, self.yc[enu].tolist())
        self.Xe = np.array(xe)
        self.ye = np.array(ye)
        self.Xc = np.array(xc)
        self.yc = np.array(yc)
        self.Xd = np.array(Xd)
        self.yd = np.atleast_2d(np.array(yd))

    def updateData(self):
        """Refresh the sample counts and recompute all distance arrays.

        NOTE(review): distanceXc/distanceXe/distanceXcXe assign attributes
        with the same names as the methods, shadowing them after the first
        call -- a second call to updateData() raises TypeError.
        """
        self.nc = self.Xc.shape[0]
        self.ne = self.Xe.shape[0]
        self.distanceXc()
        self.distanceXe()
        self.distanceXcXe()

    def traincheap(self):
        """Fit an ordinary kriging model to the cheap data only."""
        self.kc = kriging(self.Xc, self.yc)
        self.kc.train()
        print()

    def distanceXc(self):
        """Upper-triangular |Xc[i] - Xc[j]| distances, per design variable."""
        # NOTE(review): this assignment replaces the bound method of the
        # same name with the result array.
        self.distanceXc = np.zeros((self.nc, self.nc, self.k))
        for i in range(self.nc):
            for j in range(i + 1, self.nc):
                self.distanceXc[i][j] = np.abs((self.Xc[i] - self.Xc[j]))

    def distanceXe(self):
        """Upper-triangular |Xe[i] - Xe[j]| distances, per design variable."""
        self.distanceXe = np.zeros((self.ne, self.ne, self.k))
        for i in range(self.ne):
            for j in range(i + 1, self.ne):
                self.distanceXe[i][j] = np.abs((self.Xe[i] - self.Xe[j]))

    def distanceXcXe(self):
        """Full |Xc[i] - Xe[j]| cross-distances, per design variable."""
        self.distanceXcXe = np.zeros((self.nc, self.ne, self.k))
        for i in range(self.nc):
            for j in range(self.ne):
                self.distanceXcXe[i][j] = np.abs((self.Xc[i] - self.Xe[j]))

    def updatePsi(self):
        """Build the Gaussian correlation matrices from the distance arrays.

        NOTE(review): contains a hard exit() after the Xc block, so the Xe
        and XcXe sections below never run.  Also, np.float is deprecated in
        modern numpy in favour of the builtin float.
        """
        self.PsicXc = np.zeros((self.nc, self.nc), dtype=np.float)
        self.PsicXe = np.zeros((self.ne, self.ne), dtype=np.float)
        self.PsicXcXe = np.zeros((self.nc, self.ne), dtype=np.float)
        #
        # print self.thetac
        # print self.pc
        # print self.distanceXc
        newPsicXc = np.exp(-np.sum(self.thetac * np.power(self.distanceXc, self.pc), axis=2))
        print(newPsicXc[0])
        # Symmetrise the strict upper triangle and regularise the diagonal
        # with machine epsilon before the Cholesky factorisation.
        self.PsicXc = np.triu(newPsicXc, 1)
        self.PsicXc = self.PsicXc + self.PsicXc.T + np.mat(eye(self.nc)) + np.multiply(np.mat(eye(self.nc)),
                                                                                       np.spacing(1))
        self.UPsicXc = np.linalg.cholesky(self.PsicXc)
        self.UPsicXc = self.UPsicXc.T
        print(self.PsicXc[0])
        print(self.UPsicXc)
        exit()
        newPsicXe = np.exp(-np.sum(self.thetac * np.power(self.distanceXe, self.pc), axis=2))
        self.PsicXe = np.triu(newPsicXe, 1)
        # NOTE(review): result stored in self.PsiXe -- looks like a typo for
        # PsicXe; the Cholesky below therefore factorises the strictly
        # upper-triangular PsicXe instead of the symmetrised matrix.
        self.PsiXe = self.PsicXe + self.PsicXe.T + np.mat(eye(self.ne)) + np.multiply(np.mat(eye(self.ne)),
                                                                                      np.spacing(1))
        self.UPsicXe = np.linalg.cholesky(self.PsicXe)
        self.UPsicXe = self.UPsicXe.T
        newPsiXeXc = np.exp(-np.sum(self.thetad * np.power(self.distanceXcXe, self.pd), axis=2))
        self.PsicXcXe = np.triu(newPsiXeXc, 1)

    def neglnlikehood(self):
        """Compute the concentrated likelihood quantities (mu, sigma^2) for
        the cheap and difference models via triangular solves.

        NOTE(review): references self.PsicXeXc and self.PsidXe, which are
        never assigned anywhere in this class -- the final np.array
        construction cannot succeed as written.
        """
        # mu_c = (1' Psi^-1 y) / (1' Psi^-1 1), using the Cholesky factors.
        a = np.linalg.solve(self.UPsicXc.T, np.matrix(self.yc).T)
        b = np.linalg.solve(self.UPsicXc, a)
        c = ones([self.nc, 1]).T * b
        d = np.linalg.solve(self.UPsicXc.T, ones([self.nc, 1]))
        e = np.linalg.solve(self.UPsicXc, d)
        f = ones([self.nc, 1]).T * e
        self.muc = c / f
        # This only works if yc is transposed, then its a scalar under two layers of arrays. Correct? Not sure
        print('y', self.yd.T)
        a = np.linalg.solve(self.UPsicXe.T, self.yd)
        print('a', a)
        b = np.linalg.solve(self.UPsicXe, a)
        print('b', b)
        # NOTE(review): ones(...) is NOT transposed here, unlike the muc
        # computation above, so c is a matrix rather than a scalar --
        # confirm which is intended.
        c = ones([self.ne, 1]) * b
        print('c', c)
        d = np.linalg.solve(self.UPsicXe.T, ones([self.ne, 1], dtype=float))
        print(d)
        e = np.linalg.solve(self.UPsicXe, d)
        print(e)
        f = ones([self.ne, 1]).T * e
        print(f)
        self.mud = c / f
        # sigma^2 estimates for the cheap and difference models.
        a = np.linalg.solve(self.UPsicXc.T, (self.yc - ones([self.nc, 1]) * self.muc)) / self.nc
        b = np.linalg.solve(self.UPsicXc, a)
        self.SigmaSqrc = (self.yc - ones([self.nc, 1]) * self.muc).T * b
        print(self.ne)
        print(self.mud)
        print(self.UPsicXe.T)
        a = np.linalg.solve(self.UPsicXe.T, (self.yd - ones([self.ne, 1]) * self.mud)) / self.ne
        b = np.linalg.solve(self.UPsicXe, a)
        self.SigmaSqrd = (self.yd - ones([self.ne, 1]) * self.mud).T * b
        # Assemble the co-kriging covariance matrix C (block structure).
        self.C = np.array([self.SigmaSqrc * self.PsicXc,
                           self.rho * self.SigmaSqrc * self.PsicXcXe,
                           self.rho * self.SigmaSqrc * self.PsicXeXc,
                           np.power(self.rho, 2) * self.SigmaSqrc * self.PsicXe + self.SigmaSqrd * self.PsidXe])
        np.reshape(c, [2, 2])
        self.UC = np.linalg.cholesky(self.C)
        # self.mu=(self.one.T *(self.UC\(self.UC.T\y)))/(one'*(ModelInfo.UC\(ModelInfo.UC'\one)));
def fc(X):
    """Cheap analytic test function, row-wise on an (n, 2) array:
    f(x) = x0^2 + x0 + x1^2 + x1.
    """
    col0 = X[:, 0]
    col1 = X[:, 1]
    return col0 ** 2 + col0 + col1 ** 2 + col1
def fe(X):
    """Expensive analytic test function, row-wise: f(x) = x0^2 + x1^2."""
    return X[:, 0] ** 2 + X[:, 1] ** 2
if __name__ == '__main__':
    from . import samplingplan
    import random

    # 20-point optimal Latin hypercube over two design variables; six of
    # those points double as the "expensive" sample set.
    plan = samplingplan.samplingplan(2)
    cheap_X = plan.optimallhc(20)
    exp_X = np.array(random.sample(cheap_X, 6))

    cheap_y = fc(cheap_X)
    exp_y = fe(exp_X)

    # Build the co-kriging model and run the set-up pipeline.
    model = coKriging(cheap_X, cheap_y, exp_X, exp_y)
    model.updateData()
    model.updatePsi()
    model.neglnlikehood()
| {
"repo_name": "capaulson/pyKriging",
"path": "pyKriging/coKriging.py",
"copies": "1",
"size": "6760",
"license": "mit",
"hash": -7312813251629930000,
"line_mean": 30.0091743119,
"line_max": 207,
"alpha_frac": 0.5431952663,
"autogenerated": false,
"ratio": 2.7739023389413213,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38170976052413214,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cpierret'
import io
import os
import struct
_bpk = bytes(b'PK\003\004')    # ZIP local file header signature (OOXML container)
_dirpk = bytes(b'PK\001\002')  # ZIP central directory file header signature
# First path component expected right after the ZIP header in an OOXML file.
_firstHeader = {
    '[': 'Content_Types].xml',
    'x': 'l/',
    'd': 'ocProps/',
    '_': 'rels/.rels'
}
# Magic byte strings found at fixed offsets in pre-2007 Excel (BIFF) files.
# NOTE(review): the key 2080 is duplicated, so the first entry
# (b"Microsoft Excel 5.0 Worksheet") is silently discarded when this dict
# literal is evaluated -- only the Italian variant is ever checked.
_excelOffsetStringDict = {
    0: b'\x09\x04\x06\x00\x00\x00\x10\x00',
    2080: b"Microsoft Excel 5.0 Worksheet",
    2080: b"Foglio di lavoro Microsoft Exce",
    2114: b"Biff5",
    2121: b"Biff5"
}
_msoffice = b'\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1'  # OLE2 compound file magic
_workbook = b'W\x00o\x00r\x00k\x00b\x00o\x00o\x00k\x00'  # "Workbook" in UTF-16-LE
_unpacker = struct.Struct('<HII')  # EOCD fields: entry count, dir size, dir offset
def isExcel(file):
    """
    Detects if a file is in an Excel 2007+ (MS OpenXml format) or another Excel version binary file (OLE CF format).
    Adapted and improved from libmagic rule files: msdos and msooxml
    False negatives:
    - MS Office HTML Excel files
    - Some files generated by non Microsoft 3rd party applications may also not be recognized
    - encrypted Excel files
    :param file: the path to the file to be tested
    :return: True if Excel file detected, False otherwise (may still be a valid Excel file)
    """
    try:
        with open(file, "rb") as f:
            # Renamed from "bytes" so the builtin is not shadowed.
            header = f.read(4)
            if header != _bpk:
                # Check for formats prior to Excel 2007.
                # .items()/range() instead of the Python-2-only
                # .iteritems()/xrange() so the module runs on 2 and 3.
                for offset, magic in _excelOffsetStringDict.items():
                    f.seek(offset)
                    if f.read(len(magic)) == magic:
                        return True
                # OLE2 compound file: look for the UTF-16 "Workbook" stream
                # name at its usual position, then near the end of the file.
                f.seek(0)
                if f.read(len(_msoffice)) == _msoffice:
                    f.seek(0x480)
                    if f.read(len(_workbook)) == _workbook:
                        return True
                    f.seek(-500, io.SEEK_END)
                    if f.read(500).find(_workbook) != -1:
                        return True
                return False
            # possibly OpenXML Excel2007+, parse zip central directory.
            # os is now imported at module level; the original imported it
            # only under __main__, so this branch raised NameError when the
            # function was used as a library.
            statinfo = os.stat(file)
            filesize = min(statinfo.st_size, 65534)
            blocksize = 2114
            data = b''
            f.seek(-blocksize, io.SEEK_END)
            while filesize >= blocksize:
                data = bytearray(f.read(blocksize)) + data
                # Search backwards for the End Of Central Directory record.
                position = data.find(b'PK\x05\x06')
                if position != -1:
                    # EOCD: total entry count (2 bytes) at +10, central
                    # directory size (4) at +12 and offset (4) at +16.
                    # (A dead no-op statement "position + 12" was removed.)
                    direntries, dirsize, diroffset = _unpacker.unpack(
                        data[position + 10:position + 20])
                    if diroffset + dirsize > statinfo.st_size:
                        return False
                    f.seek(diroffset)
                    directory = bytearray(f.read(dirsize))
                    pos = 0
                    for i in range(direntries):
                        if directory[pos + 0:pos + 4] != _dirpk:
                            return False
                        # NOTE(review): these length fields are read one
                        # byte early and big-endian; they only work for
                        # names < 256 bytes with small extra/comment fields
                        # -- verify against the ZIP central-directory layout.
                        curs = pos + 0x1b
                        sizes = (directory[curs] * 256 + directory[curs + 1],
                                 directory[curs + 2] * 256 + directory[curs + 3],
                                 directory[curs + 4] * 256 + directory[curs + 5])
                        # Compare against bytes: under Python 3 a bytearray
                        # never equals a str, so the original str literal
                        # could never match.
                        if directory[pos + 0x2E:pos + 0x2E + sizes[0]] == b"xl/workbook.xml":
                            return True
                        pos = pos + 0x2E + sum(sizes)
                        if pos + 0x2E > len(directory):
                            return False
                    return False
                blocksize *= 2
                # Widen the window, still measured from the end of the file.
                # The original seek lacked the SEEK_END whence and so sought
                # to a negative absolute offset (an IOError).
                f.seek(-blocksize, io.SEEK_END)
                filesize -= blocksize
            return False
    except IOError as e:
        return False
if __name__ == '__main__':
    import sys, os
    _xlsExt = ['.xls', '.xlsx', '.xlsm', '.xlam']
    root = 'c:\\'
    # print() calls instead of the Python-2-only print statements so this
    # driver also runs under Python 3.
    print("Finding all files with an Excel extension that are not detected as an Excel file in " + root)
    for path, subdirs, files in os.walk(root):
        for name in files:
            # Skip Office temp/lock files ("~..." and "$...").
            if name[0] != '~' and name[0] != '$' and os.path.splitext(name)[1].lower() in _xlsExt:
                filename = os.path.join(path, name)
                if not isExcel(filename):
                    print(filename)
| {
"repo_name": "cpierret/pyxl-magic",
"path": "pyxl-magic.py",
"copies": "1",
"size": "4051",
"license": "apache-2.0",
"hash": 5605617627699429000,
"line_mean": 39.1089108911,
"line_max": 151,
"alpha_frac": 0.5104912367,
"autogenerated": false,
"ratio": 3.8325449385052033,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9823258605826556,
"avg_score": 0.0039555138757295,
"num_lines": 101
} |
__author__ = 'cpt'
"""Initial extraction of sas queries from wrdsapi.
Idea to make into a general class to generate queries to be passed through
wrdsapi.
"""
from . import utility as wrds_util
import os
import re
def wrds_sas_script(download_path, dataset, year, month=0, day=0, rows=[]):
    """Generates a .sas file.

    To be executed on the WRDS server to produce the desired dataset.
    e.g. sample request.

        DATA new_data;
        SET crsp.dsf (where = ((year(date) between 2008 and 2008) and
        (month(date) between 2 and 2) and (day(date) between 2 and 2)));
        IF (1<= _N_<= 10000000);
        proc export data = new_data
        outfile = "~/crsp_dsf20080202rows1to10000000.tsv"
        dbms = tab
        replace;
        putnames = yes;
        run;

    :param download_path: path for local sas script.
    :param dataset: WRDS dataset name, e.g. 'crsp.dsf'.
    :param year: four-digit year, or 'all' to omit the date restriction.
    :param month: 1-12, or 0 to omit the month restriction.
    :param day: 1-31, or 0 to omit the day restriction.
    :param rows: [first_row, last_row] restriction, or [] for all rows.
        (Mutable default is safe here: it is never mutated.)
    :return [sas_file, output_file, dataset]:
    """
    # Build the yyyymmdd piece of the file name; boolean-times-string
    # multiplication yields '' for components that were not requested.
    ystr = '' + ('_' + str(year)) * (year != 'all')
    mstr = '' + (month != 0) * ('0' * (month < 10) + str(month))
    dstr = '' + (day != 0) * ('0' * (day < 10) + str(day))
    ymdstr = ystr + mstr + dstr
    # Raw string for the regex so '\.' is an explicit escaped dot.
    sas_file = 'wrds_export_' + re.sub(r'\.', '_', dataset)
    if rows:
        row_str = 'rows' + str(rows[0]) + 'to' + str(rows[1])
        sas_file += ymdstr + row_str
    else:
        sas_file += ymdstr
    sas_file += '.sas'
    [dataset, output_file] = \
        wrds_util.fix_input_name(dataset, year, month, day, rows)
    # Text mode ('w'): the script body is built from str objects, which
    # cannot be written to a binary-mode ('wb') file under Python 3.
    with open(os.path.join(download_path, sas_file), 'w') as fd:
        fd.write('DATA new_data;\n')
        fd.write('\tSET ' + dataset)
        if year != 'all':
            # Restrict by year, and optionally month and day.
            where_query = ' (where = ('
            year_query = ('(year(' + wrds_util.wrds_datevar(dataset) + ')'
                          + ' between ' + str(year) + ' and ' + str(year) + ')')
            where_query += year_query
            if month != 0:
                month_query = (' and (month(' + wrds_util.wrds_datevar(dataset)
                               + ') between ' + str(month) + ' and ' + str(month) + ')')
                where_query += month_query
            if day != 0:
                day_query = (' and (day(' + wrds_util.wrds_datevar(dataset)
                             + ') between ' + str(day) + ' and ' + str(day) + ')')
                where_query += day_query
            where_query += '));\n'
            fd.write(where_query)
        else:
            fd.write(';\n')
        if rows:
            # Row-range restriction via SAS observation counter _N_.
            row_query = ('\tIF (' + str(rows[0]) + '<= _N_<= ' + str(rows[1]) +
                         '); \n')
            fd.write(row_query)
        fd.write('\n')
        fd.write('proc export data = new_data\n')
        fd.write(('\toutfile = "~/' + output_file + '" \n'
                  + '\tdbms = tab \n'
                  + '\treplace; \n'
                  + '\tputnames = yes; \n'
                  + 'run; \n'))
    return [sas_file, output_file, dataset]
| {
"repo_name": "Craig-PT/pywrds",
"path": "pywrds/sas_query.py",
"copies": "1",
"size": "2980",
"license": "bsd-3-clause",
"hash": 5161933224125479000,
"line_mean": 31.3913043478,
"line_max": 79,
"alpha_frac": 0.4872483221,
"autogenerated": false,
"ratio": 3.3596392333709133,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43468875554709135,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cpt'
import os
import json
import re
import datetime
import time
import math
import shutil
import paramiko
from _wrds_db_descriptors import WRDS_DOMAIN, _GET_ALL, FIRST_DATES, \
FIRST_DATE_GUESSES, AUTOEXEC_TEXT, WRDS_USER_QUOTA
from pywrds import sshlib
from pywrds import utility as wrds_util
from . import sas_query
class WrdsSession(object):
"""
Class to hold a WRDS session. Extracts user info and initialises SSH
connection.
"""
    def __init__(self):
        """Load the user's WRDS configuration from user_info.txt and open an
        SSH/SFTP connection to the WRDS server.

        NOTE(review): if user_info.txt is absent (or unparseable),
        self.user_info is never assigned and the attribute accesses further
        down raise AttributeError.
        """
        # if username is not None:
        #     self._username = username
        # Read in the user info from file and assign variables.
        this_file = os.path.abspath(__file__)
        self.user_path = os.path.join(this_file.split('pywrds')[0], 'pywrds')
        self.user_info_filename = os.path.join(self.user_path, 'user_info.txt')
        if os.path.exists(self.user_info_filename):
            with open(self.user_info_filename, 'r') as f:  # r instead of rb for Python3
                # compatibility #
                content = f.read()
                # Normalise "smart quotes" that would break json parsing.
                content = content.replace(u'\xe2\x80\x9c', u'"')
                content = content.replace(u'\xe2\x80\x9d', u'"')
                try:
                    self.user_info = json.loads(content)
                except ValueError:
                    print('WrdsSession warning: user_info.txt file does not '
                          + 'conform to json format. Please address this '
                          + 'and reload ectools.')
        else:
            print('WrdsSession warning: Please create a user_info.txt '
                  + 'file conforming to the format given in the '
                  + 'user_info_example.txt file.')
        # Where downloaded files land; may be overridden by user_info.
        self.download_path = os.path.join(self.user_path, 'output')
        if 'download_path' in self.user_info:
            self.download_path = self.user_info['download_path']
        self.wrds_institution = []
        if 'wrds_institution' in self.user_info.keys():
            self.wrds_institution = self.user_info['wrds_institution']
        self.wrds_username = []
        if 'wrds_username' in self.user_info.keys():
            self.wrds_username = self.user_info['wrds_username']
        if 'last_wrds_download' not in self.user_info.keys():
            self.user_info['last_wrds_download'] = {}
        self.last_wrds_download = self.user_info['last_wrds_download']
        # Cache "today" for the date-range helpers below.
        self.now = time.localtime()
        [self.this_year, self.this_month, self.today] = \
            [self.now.tm_year, self.now.tm_mon, self.now.tm_mday]
        # Initialise SSH Client. Only works with key authentication setup.
        # TODO: Generalise login to cases without key authentication
        try:
            [self.ssh, self.sftp] = \
                sshlib.getSSH(ssh=None, sftp=None, domain=WRDS_DOMAIN,
                              username=self.wrds_username)
        except:
            raise Warning("Need to implement login without key authentication")
def get_ymd_range(self, min_date, dataset, weekdays=1):
"""Gets a list of tuples [year, month, date] over which to iterate in
wrds_loop.
Some datasets include very large files and need to be queried at a
monthly or daily frequency to prevent giant files from causing
problems on the server.
:param min_date:
:param dataset:
:param weekdays:
:return ymdrange:
"""
[min_year, min_month, min_day] = self.min_ymd(min_date, dataset)
ymdrange = []
years = xrange(min_year, self.now.tm_year+1)
for year in years:
frequency = wrds_util.get_loop_frequency(dataset, year)
if frequency == 'Y':
new_ymd = [year, 0, 0]
ymdrange.append(new_ymd)
elif frequency == 'M':
new_ymd = [[year, x, 0] for x in range(1, 13)]
ymdrange = ymdrange + new_ymd
elif frequency == 'D':
new_ymd = [[year, x, y] for x in range(1, 13) for y in range(
1, 32)]
new_ymd = wrds_util.fix_weekdays(new_ymd, weekdays)
ymdrange = ymdrange + new_ymd
ymdrange = [x for x in ymdrange if x <= [self.this_year,
self.this_month, self.today]]
ymdrange = [x for x in ymdrange if x >= [min_year, min_month, min_day]]
return ymdrange
def update_user_info(self, n_files, new_files, fname, dataset, year,
month=0, day=0):
"""update_user_info(n_files, new_files, fname, dataset, year, month=0, day=0)
amends the user_info file to reflect the most recent download dates
for wrds files.
return
"""
if new_files > 0:
n_files = n_files + new_files
if 'last_wrds_download' not in self.user_info.keys():
self.user_info['last_wrds_download'] = {}
self.user_info['last_wrds_download'][dataset] = \
year*10000 + month*100 + day
with open(self.user_info_filename, 'wb') as fd:
fd.write(json.dumps(self.user_info, indent=4))
else:
print ('Could not retrieve: ' + fname)
return
    def min_ymd(self, min_date, dataset):
        """Finds (year,month,day) at which to start wrds_loop when
        downloading the entirety of a dataset.

        It checks user_info to find what files have already been downloaded.

        :param min_date: 0 to resume from the download history, or an
            integer date (yyyy, yyyymm or yyyymmdd)
        :param dataset: WRDS dataset name, e.g. 'crsp.dsf'
        :return [min_year, min_month, min_day]: month/day may be 0 when the
            date is only resolved to a coarser granularity
        """
        # Datasets downloaded whole need no date loop.
        if dataset in _GET_ALL:
            return [-1, -1, -1]
        if 'last_wrds_download' not in self.user_info:
            self.user_info['last_wrds_download'] = {}
        if dataset not in self.user_info['last_wrds_download']:
            if dataset in FIRST_DATES:
                self.user_info['last_wrds_download'][dataset] = FIRST_DATES[
                    dataset]
            else:
                # Sentinel "1800-00-00": earlier than any real data.
                self.user_info['last_wrds_download'][dataset] = 18000000
        if not isinstance(min_date, (int, float)):
            min_date = 0
        if min_date == 0:
            # Resume from the last recorded download date.
            min_date = self.user_info['last_wrds_download'][dataset]
            min_date = str(min_date)
            if not min_date.isdigit() or len(min_date) != 8:
                min_date = 0
                # NOTE(review): min_date is the int 0 here, so this string
                # concatenation raises TypeError if the stored value is
                # malformed, as would the slicing below.
                print('user_info["last_wrds_download"]["' + dataset + '"]='
                      + min_date + ' error, should be an eight digit integer.')
            min_year = int(float(min_date[:4]))
            min_month = int(float(min_date[4:6]))
            min_day = int(float(min_date[6:]))
            # Step one unit past the last downloaded date.
            if min_month == min_day == 0:
                min_year += 1
            elif min_day == 0:
                min_month += 1
                if min_month == 13:
                    min_month = 1
                    min_year += 1
            else:
                min_day += 1
                try:
                    # NOTE(review): datetime.date(year, month, day) -- the
                    # first argument passed here is min_month, not min_year;
                    # looks like a typo, making the rollover below fire for
                    # the wrong reasons.
                    wday = datetime.date(min_month, min_month,
                                         min_day).weekday()
                except:
                    # Invalid date: roll over to the 1st of the next month.
                    min_day = 1
                    min_month += 1
                    if min_month == 13:
                        min_month = 1
                        min_year += 1
        if min_date != 0:
            # User-supplied (or stored) date, interpreted by magnitude as
            # yyyy, yyyymm or yyyymmdd.
            # NOTE(review): when the resume branch above ran, min_date is a
            # str here; these mixed-type comparisons rely on Python 2
            # ordering rules and raise TypeError under Python 3.
            if min_date < 1880:
                min_day = 0
                min_month = 0
                min_year = 1880
                print('Setting min_year = 1880. This will result in '
                      + 'many empty data files and unnecessary looping. '
                      + 'This can be prevented by a) inputting a higher '
                      + 'min_date or b) finding the first date at which '
                      + 'this dataset is available on WRDS and letting '
                      + 'Brock know so he can update the code appropriately.')
            elif min_date < 2050:
                min_day = 0
                min_month = 0
                min_year = int(min_date)
            elif 188000 < min_date < 1880000:
                # yyyymm form.  NOTE(review): "/" is float division on
                # Python 3; these yielded ints on Python 2.
                min_month = min_date % 100
                min_year = (min_date - (min_date % 100)) / 100
            elif min_date < 20500000:
                # yyyymmdd form.
                min_day = min_date % 100
                min_month = (min_date % 10000 - min_day) / 100
                min_year = (min_date - (min_date % 10000)) / 10000
        if min_date == 0:
            # No usable history: fall back to known or guessed first dates.
            if dataset in FIRST_DATES.keys():
                min_day = FIRST_DATES[dataset] % 100
                min_month = ((FIRST_DATES[dataset] - min_day) % 10000) / 100
                min_year = (FIRST_DATES[dataset] - 100 * min_month -
                            min_day) / 10000
            elif any(re.search(x, dataset) for x in FIRST_DATE_GUESSES.keys()):
                key = [x for x in FIRST_DATE_GUESSES.keys()
                       if re.search(x, dataset)][0]
                if dataset in FIRST_DATE_GUESSES.keys():
                    key = dataset
                if FIRST_DATE_GUESSES[key] == -1:
                    return [-1, -1, -1]
                min_day = FIRST_DATE_GUESSES[key] % 100
                min_month = ((FIRST_DATE_GUESSES[key] - min_day) % 10000) / 100
                min_year = (FIRST_DATE_GUESSES[key] - 100 * min_month - min_day) / 10000
            else:
                min_day = 0
                min_month = 0
                min_year = 1880
        return [min_year, min_month, min_day]
def setup_wrds_key(self):
"""setup_wrds_key() sets up a key-based authentication on
the wrds server, so that the user can log in without a
password going forward.
return [ssh, sftp]
"""
if not self.wrds_username:
print('setup_wrds_key() cannot run until wrds_username is '
+'specified in the user_info.txt file.')
return [None, None]
institution = self.get_wrds_institution()
return [self.ssh, self.sftp]
def get_wrds_institution(self):
"""Returns the institution associated with the user's account on the
wrds server.
:return institution_path:
"""
if not self.sftp:
return None
try:
wrds_path = self.sftp.normalize(path='.')
except IOError:
print ('sftp cannot resolve a path on the wrds server')
return None
institution_path = re.sub('/home/', '', wrds_path).split('/')[0]
if self.wrds_institution != institution_path:
if not self.wrds_institution:
self.wrds_institution = institution_path
self.user_info['wrds_institution'] = self.wrds_institution
with open(self.user_info_filename, 'wb') as fd:
fd.write(json.dumps(self.user_info, indent=4))
else:
print ('user_info["wrds_institution"] does not '
+ 'match the directory "' + institution_path + '" '
+ 'found on the wrds server. '
+ 'This mismatch may cause errors '
+ 'in the download process.')
return institution_path
def find_wrds(self, filename):
"""Query WRDS for a list of tables available from dataset_name.
E.g. setting dataset_name = 'crsp' returns a file with a list of names
including "dsf" (daily stock file) and "msf" (monthly stock file).
:param filename:
:return: [file_list, ssh, sftp]
"""
tic = time.time()
local_sas_file = os.path.join(self.download_path, 'wrds_dicts.sas')
with open(local_sas_file, 'wb') as fd:
fd.write('\tproc sql;\n')
fd.write('\tselect memname\n')
# optional: "select distinct memname" #
fd.write('\tfrom dictionary.tables\n')
fd.write('\twhere libname = "' + filename.upper() + '";\n')
fd.write('\tquit;\n')
for fname in ['wrds_dicts.sas', 'wrds_dicts.lst', 'wrds_dicts.log']:
try:
self.sftp.remove(fname)
except KeyboardInterrupt:
raise KeyboardInterrupt
except: # TODO: Handle case when file doesn't exist explicitly.
pass
[put_success] = self._try_put(local_sas_file, 'wrds_dicts.sas')
sas_command = 'sas -noterminal wrds_dicts.sas'
[stdin, stdout, stderr] = self.ssh.exec_command(sas_command)
exit_status = -1
while exit_status == -1:
time.sleep(10)
exit_status = stdout.channel.recv_exit_status()
local_path = os.path.join(self.download_path, filename + '_dicts.lst')
remote_path = ('/home/' + self.wrds_institution + '/' +
self.wrds_username + '/wrds_dicts.lst')
remote_files = self._try_listdir('.')
remote_list = remote_files.keys()
if exit_status in [0, 1] and 'wrds_dicts.lst' in remote_list:
[get_success, dt] = self._try_get(local_path, remote_path)
else:
print('find_wrds did not generate a wrds_dicts.lst '
+ 'file for input: ' + repr(filename))
try:
self.sftp.remove('wrds_dicts.sas')
except (IOError, EOFError, paramiko.SSHException):
pass
os.remove(local_sas_file)
flist = []
if os.path.exists(local_path):
with open(local_path, 'rb') as fd:
flist = fd.read().splitlines()
flist = [x.strip() for x in flist]
flist = [x for x in flist if x != '']
dash_line = [x for x in range(len(flist)) if flist[x].strip('- ') == '']
if dash_line:
dnum = dash_line[0]
flist = flist[dnum:]
return [flist]
    def get_wrds(self, dataset, Y, M=0, D=0, recombine=1):
        """Remotely download a file from the WRDS server. For example,
        the command

            x = get_wrds('crsp.msf', 2010, 6)

        will log in to the WRDS server, issue a query to generate
        a tab-separated(*) file containing the entire CRSP Monthly
        Stock File dataset for June 2010, then download that file
        to your download_path (which you can edit in the user
        information section above). The output x is a pair
        [indicator, elapsed_time] where indicator is a one if the
        download was successful, zero otherwise.

        The arguments Y, M, D stand for Year, Month, Day, respectively.
        Ommitting the month argument

            get_wrds(dataset_name, year)

        will retrieve a single file for the entire year.

        (*) Tab-separated files (tsv) tend to work slightly
        better than comma-separated files (csv) because sometimes
        company names have commas e.g. Company Name, Inc.

        :param dataset: WRDS dataset name, e.g. 'crsp.msf'
        :param Y: year
        :param M: month (0 = whole year)
        :param D: day (0 = whole month)
        :param recombine: if 1, chunked row-range files are recombined
        :return [n_files, total_rows, time_elapsed]:
        """
        keep_going = 1
        [startrow, n_files, total_rows, tic] = [1, 0, 0, time.time()]
        # Large requests are split into row-range chunks of this size.
        rows_per_file = wrds_util.rows_per_file_adjusted(dataset)
        [dset2, outfile] = wrds_util.fix_input_name(dataset, Y, M, D, [])
        # Check if output file in local dir, if not send request.
        if os.path.exists(os.path.join(self.download_path, outfile)):
            keep_going = 0
        while keep_going:
            # Request the next chunk [startrow, startrow+rows_per_file-1].
            R = [startrow, startrow - 1 + rows_per_file]
            [dset2, outfile] = wrds_util.fix_input_name(dataset, Y, M, D, R)
            if not os.path.exists(os.path.join(self.download_path, outfile)):
                [keep_going, dt] = self._get_wrds_chunk(dataset, Y, M, D, R)
            if keep_going > 0:
                n_files += 1
                if os.path.exists(os.path.join(self.download_path, outfile)):
                    # Cross-check the rows in the file against the SAS log.
                    log_lines = wrds_util.get_n_lines_from_log(
                        outfile, dname=self.download_path)
                    n_lines = wrds_util.get_n_lines(os.path.join(
                        self.download_path, outfile))
                    if log_lines > n_lines:
                        print('get_wrds error: file "%s" has %s lines, but %s '
                              'were expected.',
                              (outfile, str(n_lines), str(log_lines)))
                        keep_going = 0
                    total_rows += n_lines
                    # A short chunk means the dataset is exhausted.
                    if n_lines < rows_per_file:
                        keep_going = 0
                        if log_lines == n_lines < rows_per_file:
                            keep_going = 0
                        if not (log_lines == -1 or log_lines == n_lines):
                            print('get_wrds warning: '
                                  + 'log_lines = ' + str(log_lines))
                        if startrow == 1:
                            # Single chunk covered everything: drop the
                            # rows-range suffix from the file name.
                            subfrom = 'rows1to' + str(rows_per_file)
                            newname = re.sub(subfrom, '', outfile)
                            newp2f = os.path.join(self.download_path, newname)
                            oldp2f = os.path.join(self.download_path, outfile)
                            os.rename(oldp2f, newp2f)
                        else:
                            # Final partial chunk: correct the end-row in
                            # the file name to the actual last row.
                            subfrom = 'to' + str(R[-1])
                            subto = 'to' + str(R[0] - 1 + n_lines)
                            newname = re.sub(subfrom, subto, outfile)
                            oldp2f = os.path.join(self.download_path, outfile)
                            newp2f = os.path.join(self.download_path, newname)
                            os.rename(oldp2f, newp2f)
                        if recombine == 1:
                            # Stitch all row-range chunks back together.
                            subfrom = 'rows[0-9]*to[0-9]*\.tsv'
                            recombine_name = re.sub(subfrom, '', outfile)
                            wrds_util.recombine_files(recombine_name,
                                                      dname=self.download_path)
                    else:
                        # Full chunk: advance to the next row range.
                        startrow += rows_per_file
                        newname = outfile
                else:
                    keep_going = 0
        return [n_files, total_rows, time.time() - tic]
    def _get_wrds_chunk(self, dataset, Y, M=0, D=0, R=[]):
        """Helper fn to manage server data storage limits.
        Some files requested by get_wrds are too large to fit in a user's
        allotted space on the wrds server. For these files, get_wrds will
        split the request into multiple smaller requests to retrieve multiple
        files and run each of them through _get_wrds_chunk.
        If the argument "recombine" is set to its default value of 1,
        these files will be recombined once the loop completes.

        NOTE(review): ``R=[]`` is a mutable default argument; it is only
        passed through here, but ``None`` would be safer.

        :param dataset: WRDS dataset name, e.g. 'crsp.dsf'.
        :param Y: year requested, or 'all'.
        :param M: month requested (0 = entire year).
        :param D: day requested (0 = entire month).
        :param R: optional [first_row, last_row] range for this chunk.
        :return [success, time_elapsed]:
        """
        tic = time.time()
        # Generate the SAS script for this request and upload it.
        [sas_file, outfile, dataset] = \
            sas_query.wrds_sas_script(self.download_path, dataset, Y, M, D, R)
        log_file = re.sub('\.sas$', '.log', sas_file)
        put_success = self._put_sas_file(outfile, sas_file)
        # Run the script remotely; exit status 0 = success, 1 = warnings.
        exit_status = self._sas_step(sas_file, outfile)
        exit_status = self._handle_sas_failure(exit_status, outfile, log_file)
        if exit_status in [0, 1]:
            remote_files = self._try_listdir('.')
            file_list = remote_files.keys()
            if outfile not in file_list:
                print('exit_status in [0, 1] suggests SAS succeeded, but the '
                      'desired output_file %s is not present in the file '
                      'list:', outfile)
                print(file_list)
            else:
                # Wait for SAS to finish writing, then download and verify
                # the local copy against the remote size.
                remote_size = self._wait_for_sas_file_completion(outfile)
                [get_success, dt] = self._retrieve_file(outfile, remote_size)
                local_size \
                    = wrds_util.wait_for_retrieve_completion(outfile, get_success)
                compare_success = \
                    self._compare_local_to_remote(outfile, remote_size,
                                                  local_size)
        # Always fetch the log and clean up the generated SAS scripts.
        got_log = self._get_log_file(log_file, sas_file)
        checkfile = os.path.join(self.download_path, outfile)
        if os.path.exists(checkfile) or exit_status == 0:
            return [1, time.time()-tic]
        return [0, time.time()-tic]
def _rename_after_download(self):
return NotImplementedError
def wrds_loop(self, dataset, min_date=0, recombine=1):
"""Executes get_wrds(database_name,...) over all years and months for
which data is available for the specified data set. File separated
into chunks for downloading will be recombined into their original
forms if recombine is set to its default value 1.
:param dataset:
:param min_date:
:param recombine:
:return [n_files, time_elapsed]:
"""
tic = time.time()
[n_files, n_lines, n_lines0] = [0, 0, 0]
[min_year, min_month, min_day] = self.min_ymd(min_date, dataset)
flist = os.listdir(self.download_path)
if [min_year, min_month, min_day] == [-1, -1, -1]:
Y = 'all'
get_output = self.get_wrds(dataset, Y, M=0, D=0, recombine=recombine)
[new_files, total_lines, dt] = self.get_output
if new_files > 0:
n_files += 1
return [n_files, time.time()-tic]
for ymd in self.get_ymd_range(min_date, dataset, 1):
[Y, M, D] = ymd
[dset2, outfile] = wrds_util.fix_input_name(dataset, Y, M, D, [])
if outfile in flist:
continue
get_output = self.get_wrds(dataset, Y, M=M, D=D, recombine=recombine)
[new_files, total_lines, dt] = self.get_output
n_files += new_files
self.update_user_info(n_files, new_files, fname=outfile,
dataset=dataset, year=Y, month=M, day=D)
return [n_files, time.time()-tic]
    def _put_sas_file(self, outfile, sas_file):
        """Puts sas_file in home directory on wrds server, checks autoexec
        and removes existing run and log scripts.
        Checks autoexec file present, and adds if not. Removes all previous sas
        and log files with the wrds_export prefix and any result files with the
        rows<x>to<y>.tsv format.
        1. Removes old files which may interfere with the new files.
        Assumes export files in format wrds_export_<outfile>.sas,
        2. Checks enough space in user account on wrds server to run sas_file.
        3. Checks necessary autoexec.sas files are present in the directory.

        :param outfile: name of the output file the SAS run will produce.
        :param sas_file: name of the SAS script to upload.
        :return put_success (bool):
        """
        remote_files = self._try_listdir('.')
        # NOTE(review): ``.values()`` must be a list for the ``.remove()``
        # calls below -- on Python 3 this is a view and would fail; confirm
        # the intended interpreter version.
        initial_files = remote_files.values()
        # 1. Removes old files, both .sas and .log files with wrds_export prefix
        old_export_files = \
            [x for x in initial_files
             if re.search('wrds_export.*sas$', x.filename)
             or re.search('wrds_export.*log$', x.filename)
             or x.filename == sas_file]
        for old_file in old_export_files:
            try:
                self.sftp.remove(old_file.filename)
            except (IOError, EOFError, paramiko.SSHException):
                # Best-effort removal; a failure here is not fatal.
                pass
            initial_files.remove(old_file)
        # Catch the row fragment of any output files, e.g. rows1to1000000.tsv.
        pattern = '[0-9]*rows[0-9]+to[0-9]+\.tsv$'
        old_outfiles = [x for x in initial_files
                        if re.sub(pattern, '', x.filename) == re.sub(pattern, '', outfile)]
        for old_file in old_outfiles:
            try:
                self.sftp.remove(old_file.filename)
            except (IOError, EOFError, paramiko.SSHException):
                pass
            initial_files.remove(old_file)
        # TODO: see if the file is something want before deleting it.
        # 2. Check available space on remote.
        file_sizes = [initial_file.st_size for initial_file in initial_files]
        total_file_size = sum(file_sizes)
        if total_file_size > WRDS_USER_QUOTA:
            # Warn only; the upload still proceeds.
            MBs = int(float(total_file_size)/1000000)
            print('You are using approximately ' + str(MBs) + ' megabytes of'
                  ' your 1 GB quota on the WRDS server. This may cause '
                  'WrdsSession.get_wrds to operate incorrectly. The files '
                  'present are: ')
            print([x.filename for x in initial_files])
        # 3. Check necessary autoexec.sas files are present on remote.
        auto_names = ['autoexec.sas', '.autoexecsas']
        autoexecs = [x.filename for x in initial_files if x.filename in auto_names]
        if autoexecs == ['.autoexecsas']:
            # if 'autoexec.sas' is not present, the sas program will fail a
            # backup copy is stored by default in .autoexecsas
            ssh_command = 'cp .autoexecsas autoexec.sas'
            [exec_succes, stdin, stdout, stderr] = self._try_exec(ssh_command)
        elif autoexecs == ['autoexec.sas']:
            # Keep a hidden backup copy of the autoexec file.
            ssh_command = 'cp autoexec.sas .autoexecsas'
            [exec_succes, stdin, stdout, stderr] = self._try_exec(ssh_command)
        elif autoexecs == []:
            # Neither copy exists: create it locally, upload it, back it up
            # remotely, then remove the local temporary.
            with open('autoexec.sas', 'wb') as fd:
                fd.write(AUTOEXEC_TEXT)
            local_path = 'autoexec.sas'
            remote_path = 'autoexec.sas'
            [put_success] = self._try_put(local_path, remote_path)
            ssh_command = 'cp autoexec.sas .autoexecsas'
            [exec_succes, stdin, stdout, stderr] = self._try_exec(ssh_command)
            os.remove('autoexec.sas')
        # Finally upload the SAS script itself.
        local_path = os.path.join(self.download_path, sas_file)
        remote_path = sas_file
        return self._try_put(local_path, remote_path)
    def _sas_step(self, sas_file, outfile):
        """Wraps running of sas command (_run_sas_command), retrying up to
        three times when the exit status indicates a network error.

        TODO: Retrying and re-initializing the network connection if necessary.

        :param sas_file: remote SAS script to run.
        :param outfile: expected output file name on the server.
        :return exit_status: exit code of the SAS run (0 = success).
        """
        [sas_completion, n_sas_trys, max_sas_trys] = [0, 0, 3]
        while sas_completion == 0 and n_sas_trys < max_sas_trys:
            exit_status = self._run_sas_command(sas_file, outfile)
            n_sas_trys += 1
            sas_completion = 1
            if exit_status in [42, 104]:
                # 42 = network read failed, 104 = connection reset by peer
                # TODO: Deal with reinitiating a session - this will break.
                sas_completion = 0
                if not self.sftp:
                    return exit_status
                # Even after a network error the run may have completed:
                # infer success/failure from what is now on the server.
                remote_files = self._try_listdir('.')
                if outfile in remote_files.keys():
                    exit_status = 0
                    sas_completion = 1
                elif 'log_file' in remote_files.keys():
                    # NOTE(review): this tests for the literal name
                    # 'log_file', not the actual log file's name -- verify.
                    exit_status = -1
                    sas_completion = 1
        return exit_status
def _run_sas_command(self, sas_file, outfile):
"""Executes sas_file on wrds server. Waits for return of exit status.
:param sas_file:
:param outfile:
:return exit_status:
"""
sas_command = ('sas -noterminal ' + sas_file)
[stdin, stdout, stderr] = self.ssh.exec_command(sas_command)
[exit_status, exit_status2, waited, maxwait] = [-1, -1, 0, 1200]
while exit_status == -1 and waited < maxwait:
time.sleep(10)
waited += 10
exit_status = stdout.channel.recv_exit_status()
if waited >= maxwait:
print('get_wrds stopped waiting for SAS completion at step 1: '
+ outfile)
return exit_status
    def _handle_sas_failure(self, exit_status, outfile, log_file):
        """Checks sas exit status returned by wrds server and responds
        appropriately to any statuses other than success.

        Statuses 0 (success) and 1 ("SAS issued warnings") are treated as
        fine.  Status 2 is forgiven when the log shows the dataset simply
        does not exist for the requested period.

        :param exit_status: exit code reported for the SAS run.
        :param outfile: expected output file name on the server.
        :param log_file: name of the SAS log file on the server.
        :return exit_status: the exit status, unchanged.
        """
        real_failure = 1
        remote_files = self._try_listdir('.')
        if exit_status == 2 and log_file in remote_files.keys():
            # Status 2 with a "file does not exist" message in the log just
            # means there is no data for this period -- not a real failure.
            with self.sftp.file(log_file) as fd:
                logcontent = fd.read()
            if re.search('error: file .* does not exist.', logcontent,
                         flags=re.I):
                real_failure = 0
        if exit_status not in [0, 1] and real_failure == 1:
            # 1 is "SAS system issued warnings", non-fatal #
            if outfile in remote_files.keys():
                # SAS reported failure but produced the output file anyway:
                # download it so the user can inspect it.
                print('SAS is apparently returning an incorrect exit status: '
                      + str(exit_status) + ', ' + outfile + '. ectools is ' +
                      'downloading the file for user inspection.')
                remote_path = outfile
                local_path = os.path.join(self.download_path, outfile)
                [get_success, dt] = self._try_get(local_path, remote_path)
                if get_success == 0:
                    print('File download failure.')
            else:
                print('get_wrds failed on file "' + outfile + '"\n' +
                      'exit_status = ' + str(exit_status) + '\n' + 'For '
                      'details, see log file "' + log_file + '"')
        return exit_status
def _wait_for_sas_file_completion(self, outfile):
"""Checks size of outfile on the wrds server within get_wrds.
Until it observes two successive measurements with the same file
size, it infers that the sas script is still writing the file.
:param outfile:
:return remote_size:
"""
[remote_size, remote_size_delayed, mtime, total_wait, max_wait] \
= [0, 1, time.time(), 0, 1200]
while self.sftp and ((total_wait < max_wait) and
remote_size != remote_size_delayed):
remote_size = remote_size_delayed
time.sleep(10)
total_wait += 10
try:
output_stat = self.sftp.stat(outfile)
remote_size_delayed = output_stat.st_size
mtime = output_stat.st_mtime
except (IOError, EOFError, paramiko.SSHException):
raise NotImplementedError
# TODO: ssh reconnect
if total_wait >= max_wait:
print('get_wrds stopped waiting for SAS completion at step 2',
+ remote_size + remote_size_delayed + mtime)
remote_size = 0
# should i remove the file in this case?
return remote_size
    def _retrieve_file(self, outfile, remote_size):
        """Retrieves the outfile produced on the wrds server in
        get_wrds, including correct handling of several common network errors.

        Downloads into a hidden '~/.<outfile>--writing' file so a partial
        download is never mistaken for a complete one.

        :param outfile: name of the file to download from the server.
        :param remote_size: size of the remote file in bytes (0 = skip).
        :return [get_success, time_elapsed]:
        """
        tic = time.time()
        if remote_size == 0:
            return [0, time.time()-tic]
        if remote_size >= 10**7:
            # skip messages for small files
            print('starting retrieve_file: ' + outfile + ' (' + repr(
                remote_size) + ') bytes')
        # Ensure there is enough free local disk space before downloading.
        # NOTE(review): os.statvfs is POSIX-only -- this breaks on Windows.
        vfs = os.statvfs(self.download_path)
        free_local_space = vfs.f_bavail * vfs.f_frsize
        if remote_size > free_local_space:
            print('get_wrds cannot download file ' + outfile + ', only '
                  + str(free_local_space)+' bytes available on drive for '
                  + str(remote_size) + '-byte file.')
            return [0, time.time()-tic]
        remote_path = ('/home/' + self.wrds_institution + '/' +
                       self.wrds_username + '/' + outfile)
        write_file = '.' + outfile + '--writing'
        local_path = os.path.join(os.path.expanduser('~'), write_file)
        [get_success, dt] = self._try_get(local_path, remote_path)
        print('retrieve_file: ' + repr(outfile) + ' ('+repr(remote_size) +
              ' bytes) ' + ' time elapsed=' + repr(time.time()-tic))
        return [get_success, time.time()-tic]
    def _compare_local_to_remote(self, outfile, remote_size, local_size):
        """Compares the size of the file "outfile" downloaded (local_size) to
        the size of the file as listed on the server (remote_size) to
        check download completed properly.

        On success, the remote copy is removed and the local '--writing'
        file is moved into the download directory under its final name.

        :param outfile: name of the downloaded file.
        :param remote_size: size reported by the server, in bytes.
        :param local_size: size of the downloaded file, in bytes.
        :return compare_success (bool):
        """
        compare_success = 0
        write_file = '.' + outfile + '--writing'
        local_path = os.path.join(os.path.expanduser('~'), write_file)
        if remote_size == local_size != 0:
            # Sizes agree: delete the server copy and install the file.
            [exec_succes, stdin, stdout, stderr] = \
                self._try_exec('rm ' + outfile)
            to_path = os.path.join(self.download_path, outfile)
            shutil.move(local_path, to_path)
            compare_success = 1
        elif local_size != 0:
            print(['remote_size != local_size', outfile, remote_size, local_size])
            # A download stopping at an exact power of two suggests a
            # transfer-size limit rather than a data problem.
            log_size = math.log(local_size, 2)
            if log_size == int(log_size):
                print('The error appears to involve '
                      +'the download stopping at 2^' + repr(log_size) + ' bytes.')
            # NOTE(review): the partial download was written to the
            # '--writing' path above, but this moves a '--size_error' file
            # that is never created anywhere -- verify whether local_path
            # was intended as the source here.
            error_file = '.' + outfile + '--size_error'
            from_file = os.path.join(os.path.expanduser('~'), error_file)
            to_file = os.path.join(self.download_path, outfile)
            shutil.move(from_file, to_file)
            compare_success = 0
        return compare_success
def _get_log_file(self, log_file, sas_file):
"""Attempts to retrieve SAS log file generated by _get_wrds_chunk from
the WRDS server.
Also removes the sas_file from the local directory, though strictly
speaking this belongs in a separate function.
:param log_file:
:param sas_file:
:return success (bool):
"""
success = 0
remote_path = ('/home/' + self.wrds_institution + '/' +
self.wrds_username + '/' + log_file)
local_path = os.path.join(self.download_path, log_file)
[success, dt] = \
self._try_get(local_path, remote_path)
[exec_succes, stdin, stdout, stderr] = self._try_exec('rm ' + sas_file)
[exec_succes, stdin, stdout, stderr] = self._try_exec('rm wrds_export*')
saspath = os.path.join(self.download_path, sas_file)
if os.path.exists(saspath):
os.remove(saspath)
return [success]
def _try_put(self, local_path, remote_path, domain=None, username=None, ports=[22]):
"""Transfers file from local_path to remote_path using the sftp client.
TODO: Reinitiating the ssh connection if needbe.
:param local_path:
:param remote_path:
:param domain:
:param username:
:param ports:
:return success:
"""
local_stat = os.stat(local_path)
[success, n_tries, max_tries] = [0, 0, 3]
while not success and n_tries < max_tries:
try:
remote_attrs = self.sftp.put(local_path, remote_path)
# Check file transferred is same as local version
if remote_attrs.st_size == local_stat.st_size:
success = 1
except KeyboardInterrupt:
try:
self.sftp.remove(remote_path)
except:
pass
raise KeyboardInterrupt
except (IOError, EOFError, paramiko.SSHException):
# TODO: Handle sftp error, try to reconnect.
try:
self.sftp.remove(remote_path)
except (IOError, EOFError, paramiko.SSHException):
pass
n_tries += 1
return [success]
def _try_get(self, local_path, remote_path, domain=None, username=None, ports=[22]):
"""Tries three times to download file from remote_path to local_path
using the sftp client.
TODO: If a connection error occurs, it is re-established.
Does *not* check that the remote file exists, that the local_path is
not already in use, or that there is enough space free on the local
disk to complete the download.
:param local_path:
:param remote_path:
:param domain:
:param username:
:param ports:
:return [success (bool), time_elapsed]:
"""
tic = time.time()
[success, n_tries, max_tries] = [0, 0, 3]
while not success and n_tries < max_tries:
try:
self.sftp.get(remotepath=remote_path, localpath=local_path)
success = 1
except (paramiko.SSHException, paramiko.SFTPError, IOError,
EOFError):
if os.path.exists(local_path):
os.remove(local_path)
# TODO: Handle sftp error, try to reconnect.
n_tries += 1
except KeyboardInterrupt:
if os.path.exists(local_path):
os.remove(local_path)
raise KeyboardInterrupt
return [success, time.time()-tic]
def _try_exec(self, command, domain=None, username=None, ports=[22]):
"""
:param command:
:param domain:
:param username:
:param ports:
:return [success, stdin, stdout, stderr]:
"""
[success, n_tries, max_tries] = [0, 0, 3]
[stdin, stdout, stderr] = [None, None, None]
while not success and n_tries < max_tries:
try:
[stdin, stdout, stderr] = self.ssh.exec_command(command)
success = 1
except (IOError, EOFError, paramiko.SSHException):
# TODO: Handle sftp error, try to reconnect.
n_tries += 1
return [success, stdin, stdout, stderr]
def _try_listdir(self, remote_dir, domain=None, username=None, ports=[22]):
"""Tries three times to get a a list of files and their attributes
from the directory remote_dir on the remote server.
TODO: reinitiating the ssh connection if needbe.
:param remote_dir:
:param domain:
:param username:
:param ports:
:return (dict): {filename: [attributes]} across files in the remote
directory
"""
remote_list = []
[success, n_tries, max_tries] = [0, 0, 3]
while not success and n_tries < max_tries:
try:
remote_list = self.sftp.listdir_attr(remote_dir)
success = 1
except (IOError, EOFError, paramiko.SSHException):
# TODO: Handle sftp error
n_tries += 1
return {x.filename: x for x in remote_list}
| {
"repo_name": "Craig-PT/pywrds",
"path": "pywrds/wrdsapi.py",
"copies": "1",
"size": "38936",
"license": "bsd-3-clause",
"hash": -4342997565028721000,
"line_mean": 39.5583333333,
"line_max": 88,
"alpha_frac": 0.535006164,
"autogenerated": false,
"ratio": 3.932134922237932,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9956108839167084,
"avg_score": 0.0022064494141695507,
"num_lines": 960
} |
__author__ = 'cpt'
"""
Utility functions for WRDS manipulations.
Generally, taking outside of wrdsapi any static methods.
"""
import re
import datetime
import os
import time
def rows_per_file_adjusted(dataset):
    """Choose a number of rows to query per file so that the files produced
    do not approach the 1 GB server limit on WRDS.

    For most datasets 10^7 rows per file is not a problem; for
    optionm.opprcd and optionm.optionmnames the number is dropped to 10^6.
    This list is subject to change with further use cases.

    :param dataset: WRDS dataset name, e.g. 'optionm.opprcd'.
    :return rows_per_file: number of rows to request per file.
    """
    small_row_datasets = ('optionm_opprcd', 'optionm_optionmnames')
    if dataset.replace('.', '_') in small_row_datasets:
        return 10**6
    return 10**7
def get_loop_frequency(dataset, year):
    """Find the best frequency at which to query the server for the given
    dataset so as to avoid producing problematically large files.

    :param dataset: WRDS dataset name.
    :param year: year being queried; some datasets grow over time.
    :return frequency: 'Y' (yearly), 'M' (monthly) or 'D' (daily).
    """
    if dataset.startswith('optionm'):
        # Option metrics data becomes daily-sized from 2008 onwards.
        return 'M' if year < 2008 else 'D'
    if re.search('det_xepsus', dataset, flags=re.I):
        return 'M' if year > 2005 else 'Y'
    if re.search('det_xepsint', dataset, flags=re.I):
        return 'M' if year > 2003 else 'Y'
    if re.search('taq', dataset, flags=re.I):
        # Trade-and-quote data always needs daily queries.
        return 'D'
    return 'Y'
def fix_weekdays(ymds, weekdays=1):
    """Filter a list of [year, month, day] triples, dropping any that are
    not valid calendar days (e.g. June 31, February 30).

    When ``weekdays`` is 1 (the default), Saturdays and Sundays are also
    dropped.

    :param ymds: iterable of [year, month, day] lists.
    :param weekdays: 1 to keep weekdays only, 0 to keep any valid day.
    :return ymds: list of surviving [year, month, day] triples.
    """
    kept = []
    for [year, month, day] in ymds:
        try:
            weekday = datetime.date(year, month, day).weekday()
        except ValueError:
            # Not a real calendar date.
            weekday = -1
        if weekdays == 1:
            # Monday=0 ... Friday=4.
            if 0 <= weekday <= 4:
                kept.append([year, month, day])
        elif weekdays == 0 and weekday != -1:
            kept.append([year, month, day])
    return kept
def fix_input_name(dataset, year, month, day, rows=[]):
    """Adjusts the user-supplied dataset name to use the same upper/lower
    case conventions as WRDS does, and derives the local output file name.

    Several dataset families (optionm, taq) embed the date directly in the
    remote dataset name; those suffixes are appended here.

    NOTE(review): ``rows=[]`` is a mutable default argument; it is only
    read here, but ``None`` would be safer.

    :param dataset: WRDS dataset name, e.g. 'taq.ct'.
    :param year: year requested, or 'all' for the full history.
    :param month: month requested (0 = entire year).
    :param day: day requested (0 = entire month).
    :param rows: optional [first_row, last_row] range for chunked queries.
    :return [dataset, output_file]:
    """
    [Y, M, D, R] = [year, month, day, rows]
    if year != 'all':
        # Boolean arithmetic: each piece is included only when its flag is
        # truthy, with single-digit months/days zero-padded.
        # NOTE(review): the '_' prefix is added only when the dataset name
        # ends in a digit, and the month padding tests ``month < 10`` while
        # the day padding tests ``D < 10`` -- confirm both are intended.
        ystr = '_' * (dataset[-1].isdigit()) + str(Y)
        mstr = '' + (M != 0) * ('0' * (month < 10) + str(M))
        dstr = (D != 0)*('0'*(D < 10) + str(D))
        ymdstr = ystr + mstr + dstr + '.tsv'
        output_file = re.sub('\.', '_', dataset) + ymdstr
    else:
        output_file = re.sub('\.', '_', dataset) + '.tsv'
    if dataset.lower() == 'optionm.opprcd':
        # optionm.opprcd is stored as one remote dataset per year.
        dataset += str(year)
    elif dataset.lower() in ['taq.cq', 'taq.ct']:
        # TAQ quote/trade datasets are upper-case and dated to the day.
        dataset = re.sub('cq', 'CQ', dataset)
        dataset = re.sub('ct', 'CT', dataset)
        ystr = '_' + str(Y)
        mstr = '' + (M != 0)*('0'*(M < 10) + str(M))
        dstr = '' + (D != 0)*('0'*(D < 10) + str(D))
        ymdstr = ystr + mstr + dstr
        dataset += ymdstr
    elif dataset.lower() in ['taq.mast', 'taq.div']:
        # Master/dividend files are monthly.
        ymdstr = '_' + str(Y) + (M != 0)*('0'*(M < 10) + str(M))
        dataset += ymdstr
    elif dataset.lower() == 'taq.rgsh':
        # Two-digit year plus month, e.g. taq.RGSH9702.
        ymdstr = str(100*Y + M)[2:]
        dataset = 'taq.RGSH' + ymdstr
    if R:
        # Chunked request: record the row range in the output file name.
        rowstr = 'rows' + str(R[0]) + 'to' + str(R[1]) + '.tsv'
        output_file = re.sub('.tsv$', '', output_file) + rowstr
    return [dataset, output_file]
def wrds_datevar(filename):
    """Return the name of the date variable used by the given WRDS dataset.

    Different datasets use different names for their date variables; this
    mapping may need periodic updating. Crowdsourcing is welcome.

    :param filename: WRDS dataset name, e.g. 'crsp.dsf'.
    :return date_var: name of the dataset's date column.
    """
    if filename in ('tfn.s12', 'tfn.s34'):
        return 'fdate'
    # Order matters: 'optionm.opprcd' must be matched before the generic
    # 'optionm' prefix.
    prefix_map = [
        ('^crsp', 'date'),
        ('^comp', 'DATADATE'),
        ('^optionm\\.opprcd', 'date'),
        ('^optionm', 'effect_date'),
        ('^ibes', 'anndats'),
    ]
    for pattern, date_var in prefix_map:
        if re.search(pattern, filename):
            return date_var
    return 'date'
def wait_for_retrieve_completion(outfile, get_success, max_wait=1200):
    """Poll the size of the partially-downloaded outfile until two
    successive checks give the same result, inferring until then that the
    download is still in progress.

    The in-progress download is expected at ~/.<outfile>--writing.

    :param outfile: name of the file being downloaded.
    :param get_success: 0 if the download already failed (returns 0).
    :param max_wait: maximum seconds to wait before giving up.
    :return local_size: final size in bytes, or 0 on failure/timeout.
    """
    if get_success == 0:
        return 0
    [total_wait, local_size, local_size_delayed, mtime2] = \
        [0, 0, 1, time.time()]
    write_file = '.' + outfile + '--writing'
    local_path = os.path.join(os.path.expanduser('~'), write_file)
    while total_wait < max_wait and local_size != local_size_delayed:
        local_size = local_size_delayed
        time.sleep(5)
        total_wait += 5
        local_stat = os.stat(local_path)
        local_size_delayed = local_stat.st_size
        mtime2 = local_stat.st_mtime
    if total_wait >= max_wait:
        # Bug fix: the original passed a %-format string and its argument
        # tuple as two separate arguments to print(), so the placeholders
        # were never substituted.
        print('get_wrds stopped waiting for SAS completion at step 3, '
              '%s, %s, %s' % (local_size, local_size_delayed, mtime2))
        local_size = 0
    return local_size
def get_n_lines(path2file):
    """Count the lines in the text file at path2file, excluding the first
    (header) line.

    :param path2file: path to a tab-separated text file.
    :return n_lines: number of lines after the header.
    """
    count = 0
    with open(path2file, 'rb') as fd:
        end = os.stat(fd.name).st_size
        # Consume the header row; only the remaining lines are counted.
        fd.readline()
        while fd.tell() < end:
            fd.readline()
            count += 1
    return count
def get_n_lines_from_log(outfile, dname):
    """Reads SAS log file created in get_wrds to find the number of
    lines which the wrds server says should be in a downloaded
    file "outfile".
    This number can then be checked against the number actually found in
    the file.

    :param outfile: name of the downloaded .tsv file.
    :param dname: directory containing the log file.
    :return logfile_lines: expected record count, or -1 if no log file or
        no recognizable count line was found.
    """
    log_lines = -1
    sasfile = 'wrds_export_' + re.sub('\.tsv$', '.log', outfile)
    if not os.path.exists(os.path.join(dname, sasfile)):
        # Fall back to alternative log-file naming schemes.
        partial_fname = re.sub('[0-9]*rows.*', '', sasfile)
        sasfile2 = partial_fname+'_'+re.sub(partial_fname, '', sasfile)
        if os.path.exists(os.path.join(dname, sasfile2)):
            sasfile = sasfile2
        all_fname = re.sub('rows', '_allrows', sasfile)
        if os.path.exists(os.path.join(dname, all_fname)):
            sasfile = all_fname
    if os.path.exists(os.path.join(dname, sasfile)):
        with open(os.path.join(dname, sasfile)) as fd:
            fsize = os.stat(fd.name).st_size
            while fd.tell() < fsize:
                fline = fd.readline()
                # Pattern A: "<n> records created in ..."
                if re.search('^[0-9]* records created in ', fline):
                    log_lines = re.split(' records created in ', fline)[0]
                    log_lines = int(float(log_lines))
                    break
                # Pattern B: "NOTE: The data set WORK.NEW_DATA has <n>
                # observations".
                pattern0 = ('NOTE: The data set WORK\.NEW_DATA '
                            +'has [0-9]* observations')
                if re.search(pattern0, fline):
                    pattern01 = 'NOTE: The data set WORK\.NEW_DATA has '
                    pattern02 = ' observations'
                    split_log = re.split(pattern02, fline)[0]
                    log_lines = re.split(pattern01, split_log)[-1]
                    log_lines = int(float(log_lines))
                    break
                # Pattern C: "NOTE: <n> records were written to the file".
                pattern1 = 'NOTE: [0-9]* records were written to the file'
                if re.search(pattern1, fline):
                    split_log = re.split('NOTE: ', fline)[-1]
                    log_lines = re.split('records', split_log)[0]
                    log_lines = int(float(log_lines))
                    break
                # The numbers given by the pattern below are often #
                # one row lower than the numbers given by the above #
                # patterns, the latter being the desired answer.   #
                # This code is kept as an option to re-implement    #
                # should their arise cases where none of the other  #
                # patterns are found.                               #
                #pattern2 = 'NOTE: There were [0-9]* observations read'
                #if re.search(pattern2,fline):
                #    split_log = re.split(' observations read',fline)[0]
                #    log_lines = re.split('NOTE: There were ',split_log)[-1]
                #    log_lines = int(float(log_lines))
                #    break
    return log_lines
def _recombine_ready(fname, dname=None, suppress=0):
    """Checks files downloaded by get_wrds to see if the loop has
    completed successfully and the files are ready to be recombined.
    If dname == None, the directory defaults to os.getcwd().

    A set of chunk files is "ready" when the combined file does not
    already exist, at least one chunk is present, no chunk is missing
    from the row sequence, and the final chunk is verifiably complete.

    :param fname: chunk file name (or combined-name prefix).
    :param dname: directory holding the chunks; defaults to os.getcwd().
    :param suppress: 1 to silence diagnostic messages.
    :return isready (bool):
    """
    if not dname:
        dname = os.getcwd()
    isready = 1
    # Strip the rows<x>to<y> fragment to get the combined-file prefix.
    fname0 = re.sub('rows[0-9][0-9]*to[0-9][0-9]*\.tsv', '', fname)
    if os.path.exists(os.path.join(dname, fname + '.tsv')):
        # The combined file already exists; nothing to do.
        isready = 0
    rows_per_file = rows_per_file_adjusted(fname0)
    # Collect the chunk files and map each to its starting row number.
    flist0 = os.listdir(dname)
    flist0 = [x for x in flist0 if x.endswith('.tsv')]
    flist0 = [x for x in flist0 if re.search(fname0, x)]
    fdict = {x: x.split('rows')[-1] for x in flist0}
    fdict = {x: re.split('_?to_?',fdict[x])[0] for x in fdict}
    fdict = {x: float(fdict[x]) for x in fdict if fdict[x].isdigit()}
    flist = [[fdict[x], x] for x in fdict]
    if isready and flist == []:
        isready = 0
        if suppress == 0:
            print('recombine_ready: No such files found: ' + fname)
    # Every chunk's start row must be 1 or follow a preceding chunk.
    numlist = [x[0] for x in sorted(flist)]
    missing_nums = [x for x in numlist if x != 1]
    missing_nums = [x for x in missing_nums if x-rows_per_file not in numlist]
    if isready and missing_nums != []:
        isready = 0
        if suppress == 0:
            print('recombine_ready: ' + fname
                  + ' missing_nums ' + repr(missing_nums+numlist))
    # If the last chunk ends at an exact multiple of rows_per_file it may
    # be truncated, so cross-check its line count against the SAS log.
    end_nums = [re.sub('\.tsv$', '', x[1]) for x in flist]
    end_nums = [re.split('to', x)[-1] for x in end_nums]
    end_nums = [float(x) for x in end_nums]
    if isready and end_nums != [] and max(end_nums)%rows_per_file == 0:
        max_num = int(max(end_nums))
        flist2 = [x[1] for x in flist if x[1].endswith(repr(max_num)
                                                      + '.tsv')]
        if len(flist2) == 1:
            outfile = flist2[0]
            n_lines = get_n_lines(os.path.join(dname, outfile))
            log_n_lines = get_n_lines_from_log(outfile, dname)
            if n_lines != log_n_lines:
                isready = 0
                print('recombine_ready: '+outfile
                      +' n_lines!=log_n_lines: '
                      +repr([n_lines, log_n_lines]))
        else:
            isready = 0
            if suppress == 0:
                print('recombine_ready: ' + fname + ' appears incomplete: '
                      + repr(max(end_nums)))
    return isready
def recombine_files(fname, dname=None, suppress=0):
    """Reads the files downloaded by get_wrds and combines them
    back into the single file of interest.
    If dname==None, the directory defaults to os.getcwd().

    The per-chunk files are removed after a successful recombination.

    :param fname: chunk file name (or combined-name prefix).
    :param dname: directory holding the chunks; defaults to os.getcwd().
    :param suppress: 1 to silence diagnostic messages.
    :return num_combined_files: number of chunk files merged (0 on failure).
    """
    if not dname:
        dname = os.getcwd()
    combined_files = 0
    if not _recombine_ready(fname, dname, suppress):
        return combined_files
    fname0 = re.sub('rows[0-9][0-9]*to[0-9][0-9]*\.tsv', '', fname)
    rows_per_file = rows_per_file_adjusted(fname0)
    # Sort the chunk files by their starting row number.
    flist0 = [x for x in os.listdir(dname) if re.search(fname0, x)]
    flist0 = [x for x in flist0 if x.endswith('.tsv')]
    fdict = {x: x.split('rows')[-1] for x in flist0}
    fdict = {x: re.split('_?to_?',fdict[x])[0] for x in fdict}
    fdict = {x: float(fdict[x]) for x in fdict if fdict[x].isdigit()}
    flist = [[fdict[x], x] for x in fdict]
    flist = [x[1] for x in sorted(flist)]
    # Sanity-check that the last chunk is not a full rows_per_file file
    # (which would suggest the download is incomplete).
    # NOTE(review): ``fd.tell() > fsize`` is never true at the start of a
    # file, so this loop body never runs and nlines stays 0, making the
    # check below a no-op -- verify whether '<' was intended.
    with open(os.path.join(dname, flist[-1]), 'rb') as fd:
        fsize = os.stat(fd.name).st_size
        nlines = 0
        while fd.tell() > fsize:
            fd.readline()
            nlines += 1
        if nlines >= rows_per_file:
            print([fname, flist[-1],
                   'len(flines)=' + repr(nlines),
                   'should_be=' + repr(rows_per_file)])
            return combined_files
    # Concatenate the chunks, writing the header row only once and
    # normalizing line endings to '\n'.
    with open(os.path.join(dname, fname0+'.tsv'), 'wb') as fd:
        headers = []
        found_problem = 0
        for fname1 in flist:
            fd1 = open(os.path.join(dname, fname1), 'rb')
            fsize1 = os.stat(fd1.name).st_size
            headers1 = fd1.readline().strip('\r\n')
            if headers == []:
                headers = headers1
                fd.write(headers1 + '\n')
            if headers1 != headers:
                # Chunks with mismatched headers cannot be merged safely.
                print('Problem with header matching:' + fname1)
                found_problem = 1
            if found_problem == 0:
                try:
                    while fd1.tell() < fsize1:
                        fd.write(fd1.readline().strip('\r\n') + '\n')
                    fd1.close()
                except KeyboardInterrupt:
                    # Remove the partial combined file before propagating.
                    fd1.close()
                    fd.close()
                    os.remove(fd.name)
                    raise KeyboardInterrupt
                combined_files += 1
        if found_problem == 0:
            # All chunks merged cleanly; delete the per-chunk files.
            for fname1 in flist:
                os.remove(os.path.join(dname, fname1))
    return combined_files
| {
"repo_name": "Craig-PT/pywrds",
"path": "pywrds/utility.py",
"copies": "1",
"size": "14052",
"license": "bsd-3-clause",
"hash": 4018626762907152000,
"line_mean": 32.7788461538,
"line_max": 79,
"alpha_frac": 0.5450469684,
"autogenerated": false,
"ratio": 3.4730598121601584,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4518106780560158,
"avg_score": null,
"num_lines": null
} |
# Animation timing and physics constants.
TICKER = 1  # Tkinter after() delay between frames, in milliseconds.
WAVE_VELOCITY = 10  # Radial growth of each wave front per tick, in pixels.
from Tkinter import *
# Build the main window and a white canvas to draw on.
top = Tk()
top.title('Doppler effect')
ca = Canvas(bd=0,highlightthickness=0,bg="white")
ca.pack(expand=YES,fill=BOTH)
ca.config(width=640,height=480)
class Wave:
    """An expanding circular wave front drawn on the canvas.

    Each Wave grows by ``vel`` pixels per tick and deletes its canvas
    item after 100 ticks.
    """
    def __init__(self, ca, vel, x, y):
        # Number of ticks this wave has been alive.
        self.life = 0
        self.canvas = ca
        # Radial expansion speed in pixels per tick.
        self.vel = vel
        self.pos = [x,y]
        self.__radius = 0
        self.__index = self.__get_index()
        self.update()
    def __get_index(self):
        # Create the oval canvas item and return its item id.
        return self.canvas.create_oval(*self.get_coords(),outline="#00aacc")
    def get_coords(self):
        # Bounding box of the circle centred on self.pos.
        return [
            self.pos[0]-self.__radius, # x1
            self.pos[1]-self.__radius, # y1
            self.pos[0]+self.__radius, # x2
            self.pos[1]+self.__radius
        ]
    def update(self):
        """Grow the wave one step and reschedule, or delete when expired."""
        self.canvas.coords(self.__index, *self.get_coords())
        self.__radius += self.vel
        self.life += 1
        if self.life <= 100:
            # Reschedule the next animation step.
            self.canvas.after(TICKER,self.update)
        else:
            self.canvas.delete(self.__index)
class Particle:
    """The moving wave source (red dot) of the Doppler demonstration.

    Emits a new Wave on every tick while moving across the canvas.
    When ``kick`` is true the particle bounces off the edges; otherwise
    it wraps around to the opposite side.
    """
    def __init__(self, **kws):
        self.canvas = kws.get('canvas')
        # True: bounce off the edges; False: wrap around them.
        self.kick = True
        self.pos = kws.get('pos',[])
        # NOTE(review): 'freq' is stored but never used below -- confirm
        # whether an emission frequency was intended.
        self.freq = kws.get('freq', 0)
        self.vel = kws.get('vel',[0,0])
        self.__radius = 5
        self.__index = self.__get_index()
        self.update()
    def get_coords(self):
        # Bounding box of the particle's circle centred on self.pos.
        return [
            self.pos[0]-self.__radius,
            self.pos[1]-self.__radius,
            self.pos[0]+self.__radius,
            self.pos[1]+self.__radius
        ]
    def __get_index(self):
        # Create the canvas item for the particle and return its item id.
        return self.canvas.create_oval(*self.get_coords(),fill='red')
    def update(self):
        """Advance one step, emit a wave, and handle canvas edges."""
        self.canvas.update_idletasks()
        self.pos[0] += self.vel[0]
        self.pos[1] += self.vel[1]
        self.canvas.coords(self.__index, *self.get_coords())
        # Reschedule the next animation step.
        self.canvas.after(TICKER, self.update)
        # Emit a wave from the particle's current position.
        Wave(self.canvas, WAVE_VELOCITY, self.pos[0], self.pos[1])
        if not self.kick:
            # Wrap-around behaviour at each edge.
            if self.pos[0] >= self.canvas.winfo_width():
                self.pos[0] = 0
            if self.pos[0] < 0:
                self.pos[0] = self.canvas.winfo_width()
            if self.pos[1] >= self.canvas.winfo_height():
                self.pos[1] = 0
            if self.pos[1] < 0:
                self.pos[1] = self.canvas.winfo_height()
        else:
            # Bounce: reverse the velocity component at each edge.
            if self.pos[0] >= self.canvas.winfo_width():
                self.vel[0] *= -1
            if self.pos[0] < 0:
                self.vel[0] *= -1
            if self.pos[1] >= self.canvas.winfo_height():
                self.vel[1] *= -1
            if self.pos[1] < 0:
                self.vel[1] *= -1
# Launch a single bouncing particle; Escape closes the window.
Particle(pos=[100,240],canvas=ca,vel=[5,5])
top.bind('<Escape>', lambda e : top.destroy(), '+')
top.mainloop() | {
"repo_name": "cptx032/miniprojects",
"path": "doppler.py",
"copies": "1",
"size": "2402",
"license": "unlicense",
"hash": -1530097305278883300,
"line_mean": 25.7,
"line_max": 70,
"alpha_frac": 0.6236469609,
"autogenerated": false,
"ratio": 2.4839710444674252,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8368983458990393,
"avg_score": 0.04772690927540618,
"num_lines": 90
} |
__author__ = 'CQC'
# -*- coding:utf-8 -*-
import urllib
import urllib2
import re
import thread
import time
# Crawler class for Qiushibaike (a Chinese joke-sharing site).
# NOTE: this is Python 2 code (print statements, urllib2, raw_input).
class QSBK:
    # Initializer: set up state variables.
    def __init__(self):
        self.pageIndex = 1
        self.user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
        # Initialize the request headers.
        self.headers = { 'User-Agent' : self.user_agent }
        # Holds the jokes; each element is the list of jokes from one page.
        self.stories = []
        # Whether the program should keep running.
        self.enable = False
    # Fetch the HTML source of the page with the given index.
    def getPage(self,pageIndex):
        try:
            url = 'http://www.qiushibaike.com/hot/page/' + str(pageIndex)
            # Build the request.
            request = urllib2.Request(url,headers = self.headers)
            # Fetch the page with urlopen.
            response = urllib2.urlopen(request)
            # Decode the page as UTF-8.
            pageCode = response.read().decode('utf-8')
            return pageCode
        except urllib2.URLError, e:
            if hasattr(e,"reason"):
                print u"连接糗事百科失败,错误原因",e.reason
                return None
    # Parse one page's HTML; return the page's jokes that have no image.
    def getPageItems(self,pageIndex):
        pageCode = self.getPage(pageIndex)
        if not pageCode:
            print "页面加载失败...."
            return None
        # Capture groups: author, publish time, content, an image marker
        # section, and the number of likes.
        pattern = re.compile('<div.*?class="author.*?>.*?<a.*?</a>.*?<a.*?>(.*?)</a>.*?<div.*?class'+
                             '="content".*?title="(.*?)">(.*?)</div>(.*?)<div class="stats.*?class="number">(.*?)</i>',re.S)
        items = re.findall(pattern,pageCode)
        # Stores this page's jokes.
        pageStories = []
        # Iterate over the regex matches.
        for item in items:
            # Does this item contain an image?
            haveImg = re.search("img",item[3])
            # Keep only items without images.
            if not haveImg:
                # item[0] is the author, item[1] the publish time, item[2]
                # the content, item[4] the number of likes.
                pageStories.append([item[0].strip(),item[1].strip(),item[2].strip(),item[4].strip()])
        return pageStories
    # Load and parse a page, appending its jokes to the buffer.
    def loadPage(self):
        # If fewer than two unread pages are buffered, load another page.
        if self.enable == True:
            if len(self.stories) < 2:
                # Fetch a new page.
                pageStories = self.getPageItems(self.pageIndex)
                # Store the page's jokes in the shared list.
                if pageStories:
                    self.stories.append(pageStories)
                    # Advance the page index for the next fetch.
                    self.pageIndex += 1
    # Print one joke per Enter keypress.
    def getOneStory(self,pageStories,page):
        # Iterate over one page's jokes.
        for story in pageStories:
            # Wait for user input.
            # NOTE(review): 'input' shadows the Python builtin of the same
            # name within this method.
            input = raw_input()
            # On each Enter, check whether a new page needs loading.
            self.loadPage()
            # Typing Q quits the program.
            if input == "Q":
                self.enable = False
                return
            print u"第%d页\t发布人:%s\t发布时间:%s\n%s\n赞:%s\n" %(page,story[0],story[1],story[2],story[3])
    # Entry point.
    def start(self):
        print u"正在读取糗事百科,按回车查看新段子,Q退出"
        # Enable the main loop.
        self.enable = True
        # Preload one page.
        self.loadPage()
        # Tracks which page we are currently reading.
        nowPage = 0
        while self.enable:
            if len(self.stories)>0:
                # Take one page of jokes from the buffer.
                pageStories = self.stories[0]
                # Increment the current page count.
                nowPage += 1
                # Remove the consumed page from the buffer.
                del self.stories[0]
                # Print this page's jokes.
                self.getOneStory(pageStories,nowPage)
# Script entry point: start the interactive joke reader.
spider = QSBK()
spider.start()
"repo_name": "zhangmhao/crawl-house",
"path": "prototype/spider-qiushibaike.py",
"copies": "1",
"size": "4279",
"license": "mit",
"hash": -2761944601421572600,
"line_mean": 29.8545454545,
"line_max": 120,
"alpha_frac": 0.5204833481,
"autogenerated": false,
"ratio": 2.311307901907357,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8213464504841448,
"avg_score": 0.023665349033181817,
"num_lines": 110
} |
__author__ = 'CQC'
# -*- coding:utf-8 -*-
import urllib
import urllib2
import re
# Cleans scraped Tieba HTML fragments into plain text.
class Tool:
    """Strip/convert HTML tags in post content to readable plain text.

    Fix: the removeImg pattern ended with a dangling '|', creating an empty
    alternative that matched at every position (a dead, wasteful branch);
    removing it does not change the substitution result.
    """
    # <img ...> tags and runs of 7 spaces (Tieba's indent filler).
    removeImg = re.compile('<img.*?>| {7}')
    # Opening/closing anchor tags (the link text itself is kept).
    removeAddr = re.compile('<a.*?>|</a>')
    # Tags that imply a line break.
    replaceLine = re.compile('<tr>|<div>|</div>|</p>')
    # Table cells become tabs.
    replaceTD = re.compile('<td>')
    # Paragraph openers become newline plus indent.
    replacePara = re.compile('<p.*?>')
    # <br> runs collapse to a single newline.
    replaceBR = re.compile('<br><br>|<br>')
    # Anything that still looks like a tag is dropped.
    removeExtraTag = re.compile('<.*?>')
    def replace(self, x):
        """Return x with markup stripped/converted and surrounding space trimmed."""
        x = re.sub(self.removeImg, "", x)
        x = re.sub(self.removeAddr, "", x)
        x = re.sub(self.replaceLine, "\n", x)
        x = re.sub(self.replaceTD, "\t", x)
        x = re.sub(self.replacePara, "\n ", x)
        x = re.sub(self.replaceBR, "\n", x)
        x = re.sub(self.removeExtraTag, "", x)
        # strip() drops leading/trailing whitespace introduced above.
        return x.strip()
# Baidu Tieba (forum thread) crawler.
class BDTB:
    # Init with the thread base URL and whether to show only the original poster.
    def __init__(self,baseUrl,seeLZ):
        self.baseURL = baseUrl
        # see_lz=1 filters the thread to the original poster's floors.
        self.seeLZ = '?see_lz='+str(seeLZ)
        self.tool = Tool()
    # Fetch the HTML of the given page of the thread; None on network error.
    def getPage(self,pageNum):
        try:
            url = self.baseURL+ self.seeLZ + '&pn=' + str(pageNum)
            request = urllib2.Request(url)
            response = urllib2.urlopen(request)
            return response.read().decode('utf-8')
        except urllib2.URLError, e:
            if hasattr(e,"reason"):
                print u"连接百度贴吧失败,错误原因",e.reason
            return None
    # Extract the thread title from page 1; None when not found.
    def getTitle(self):
        page = self.getPage(1)
        pattern = re.compile('<h1 class="core_title_txt.*?>(.*?)</h1>',re.S)
        result = re.search(pattern,page)
        if result:
            # print result.group(1)  # debug output
            return result.group(1).strip()
        else:
            return None
    # Total number of pages in the thread (returned as a string); None if absent.
    def getPageNum(self):
        page = self.getPage(1)
        pattern = re.compile('<li class="l_reply_num.*?</span>.*?<span.*?>(.*?)</span>',re.S)
        result = re.search(pattern,page)
        if result:
            # print result.group(1)  # debug output
            return result.group(1).strip()
        else:
            return None
    # Print one floor's content extracted from the given page HTML.
    def getContent(self,page):
        pattern = re.compile('<div id="post_content_.*?>(.*?)</div>',re.S)
        items = re.findall(pattern,page)
        #for item in items:
        #    print item
        # NOTE(review): only items[1] (the second match) is printed - confirm intent.
        print self.tool.replace(items[1])
# Demo: fetch page 1 of this thread (original poster only) and print a floor.
baseURL = 'http://tieba.baidu.com/p/3138733512'
bdtb = BDTB(baseURL,1)
bdtb.getContent(bdtb.getPage(1))
"repo_name": "zhangmhao/crawl-house",
"path": "prototype/spider-baiduTieba.py",
"copies": "1",
"size": "2901",
"license": "mit",
"hash": 1816679850483613200,
"line_mean": 28.1590909091,
"line_max": 93,
"alpha_frac": 0.546588694,
"autogenerated": false,
"ratio": 2.4639769452449567,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3510565639244957,
"avg_score": null,
"num_lines": null
} |
__author__ = 'CQC'
# -*- coding:utf-8 -*-
import urllib
import urllib2
import cookielib
import re
import webbrowser
import tool
# Simulated Taobao login (Python 2: urllib2/cookielib).
class Taobao:
    """Log into Taobao and list the account's bought items.

    NOTE(review): `ua` and `password2` are pre-captured values of Taobao's
    client-side fingerprint/password encryption; they are account- and
    session-specific and will not work as-is.
    """
    # Initialise URLs, payload, proxies, cookie jars and openers.
    def __init__(self):
        # Login endpoint.
        self.loginURL = "https://login.taobao.com/member/login.jhtml"
        # Proxy address, to keep our own IP from being banned.
        self.proxyURL = 'http://120.193.146.97:843'
        # Headers sent with the login POST.
        self.loginHeaders = {
            'Host':'login.taobao.com',
            'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:35.0) Gecko/20100101 Firefox/35.0',
            'Referer' : 'https://login.taobao.com/member/login.jhtml',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Connection' : 'Keep-Alive'
        }
        # Account name.
        self.username = 'cqcre'
        # "ua" string computed by Taobao's ua algorithm: encodes a timestamp,
        # browser, screen resolution, random numbers, mouse moves/clicks,
        # keyboard activity and so on.
        self.ua = '191UW5TcyMNYQwiAiwTR3tCf0J/QnhEcUpkMmQ=|Um5Ockt0TXdPc011TXVKdyE=|U2xMHDJ+H2QJZwBxX39Rb1d5WXcrSixAJ1kjDVsN|VGhXd1llXGNaYFhkWmJaYl1gV2pIdUtyTXRKfkN4Qn1FeEF6R31TBQ==|VWldfS0TMw8xDjYWKhAwHiUdOA9wCDEVaxgkATdcNU8iDFoM|VmNDbUMV|V2NDbUMV|WGRYeCgGZhtmH2VScVI2UT5fORtmD2gCawwuRSJHZAFsCWMOdVYyVTpbPR99HWAFYVMpUDUFORshHiQdJR0jAT0JPQc/BDoFPgooFDZtVBR5Fn9VOwt2EWhCOVQ4WSJPJFkHXhgoSDVIMRgnHyFqQ3xEezceIRkmahRqFDZLIkUvRiEDaA9qQ3xEezcZORc5bzk=|WWdHFy0TMw8vEy0UIQE0ADgYJBohGjoAOw4uEiwXLAw2DThuOA==|WmBAED5+KnIbdRh1GXgFQSZbGFdrUm1UblZqVGxQa1ZiTGxQcEp1I3U=|W2NDEz19KXENZwJjHkY7Ui9OJQsre09zSWlXY1oMLBExHzERLxsuE0UT|XGZGFjh4LHQdcx5zH34DRyBdHlFtVGtSaFBsUmpWbVBkSmpXd05zTnMlcw==|XWdHFzl5LXUJYwZnGkI/VitKIQ8vEzMKNws3YTc=|XmdaZ0d6WmVFeUB8XGJaYEB4TGxWbk5yTndXa0tyT29Ta0t1QGBeZDI='
        # Password after Taobao's client-side (256-byte) encryption; NOT the
        # clear-text password.
        self.password2 = '7511aa6854629e45de220d29174f1066537a73420ef6dbb5b46f202396703a2d56b0312df8769d886e6ca63d587fdbb99ee73927e8c07d9c88cd02182e1a21edc13fb8e140a4a2a4b53bf38484bd0e08199e03eb9bf7b365a5c673c03407d812b91394f0d3c7564042e3f2b11d156aeea37ad6460118914125ab8f8ac466f'
        self.post = post = {
            'ua':self.ua,
            'TPL_checkcode':'',
            'CtrlVersion': '1,0,0,7',
            'TPL_password':'',
            'TPL_redirect_url':'http://i.taobao.com/my_taobao.htm?nekot=udm8087E1424147022443',
            'TPL_username':self.username,
            'loginsite':'0',
            'newlogin':'0',
            'from':'tb',
            'fc':'default',
            'style':'default',
            'css_style':'',
            'tid':'XOR_1_000000000000000000000000000000_625C4720470A0A050976770A',
            'support':'000001',
            'loginType':'4',
            'minititle':'',
            'minipara':'',
            'umto':'NaN',
            'pstrong':'3',
            'llnick':'',
            'sign':'',
            'need_sign':'',
            'isIgnore':'',
            'full_redirect':'',
            'popid':'',
            'callback':'',
            'guf':'',
            'not_duplite_str':'',
            'need_user_id':'',
            'poy':'',
            'gvfdcname':'10',
            'gvfdcre':'',
            'from_encoding ':'',
            'sub':'',
            'TPL_password_2':self.password2,
            'loginASR':'1',
            'loginASRSuc':'1',
            'allp':'',
            'oslanguage':'zh-CN',
            'sr':'1366*768',
            'osVer':'windows|6.1',
            'naviVer':'firefox|35'
        }
        # URL-encode the POST payload.
        self.postData = urllib.urlencode(self.post)
        # HTTP proxy handler.
        self.proxy = urllib2.ProxyHandler({'http':self.proxyURL})
        # Cookie jar for the login exchange.
        self.cookie = cookielib.LWPCookieJar()
        self.cookieHandler = urllib2.HTTPCookieProcessor(self.cookie)
        # Opener used during login; its open() plays the role of urllib2.urlopen.
        self.opener = urllib2.build_opener(self.cookieHandler,self.proxy,urllib2.HTTPHandler)
        # J_HToken harvested from a successful login response.
        self.J_HToken = ''
        # Fresh cookie jar and opener used once login has succeeded.
        self.newCookie = cookielib.CookieJar()
        self.newOpener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.newCookie))
        # Helper that parses the goods pages.
        self.tool = tool.Tool()
    # First login attempt; decides whether a captcha is needed. Returns the
    # page HTML when a captcha is required, False when the token came straight
    # back, None when the request failed. The response varies between calls.
    def needCheckCode(self):
        # First login attempt: build the request...
        request = urllib2.Request(self.loginURL,self.postData,self.loginHeaders)
        # ...send it...
        response = self.opener.open(request)
        # ...and read the body.
        content = response.read().decode('gbk')
        status = response.getcode()
        # 200 means the exchange itself worked.
        if status == 200:
            print u"获取请求成功"
            # These escapes spell "please enter the captcha" in the response.
            pattern = re.compile(u'\u8bf7\u8f93\u5165\u9a8c\u8bc1\u7801',re.S)
            result = re.search(pattern,content)
            # Finding that phrase means a captcha is required.
            if result:
                print u"此次安全验证异常,您需要输入验证码"
                return content
            # Otherwise look for J_HToken directly: verification passed.
            else:
                tokenPattern = re.compile('id="J_HToken" value="(.*?)"')
                tokenMatch = re.search(tokenPattern,content)
                if tokenMatch:
                    self.J_HToken = tokenMatch.group(1)
                print u"此次安全验证通过,您这次不需要输入验证码"
                return False
        else:
            print u"获取请求失败"
            return None
    # Pull the captcha image URL out of the login page; False when absent.
    def getCheckCode(self,page):
        pattern = re.compile('<img id="J_StandardCode_m.*?data-src="(.*?)"',re.S)
        matchResult = re.search(pattern,page)
        # Matched and the captcha image link is non-empty.
        if matchResult and matchResult.group(1):
            return matchResult.group(1)
        else:
            print u"没有找到验证码内容"
            return False
    # Ask the user for the captcha and retry; on success stores and returns
    # J_HToken, otherwise returns False.
    def loginWithCheckCode(self):
        # Prompt the user for the captcha text.
        checkcode = raw_input('请输入验证码:')
        # Add it to the POST payload and re-encode.
        self.post['TPL_checkcode'] = checkcode
        self.postData = urllib.urlencode(self.post)
        try:
            # Second login attempt, now with the captcha filled in.
            request = urllib2.Request(self.loginURL,self.postData,self.loginHeaders)
            response = self.opener.open(request)
            content = response.read().decode('gbk')
            # These escapes spell "captcha incorrect".
            pattern = re.compile(u'\u9a8c\u8bc1\u7801\u9519\u8bef',re.S)
            result = re.search(pattern,content)
            if result:
                print u"验证码输入错误"
                return False
            else:
                # A J_HToken in the response means the captcha was accepted.
                tokenPattern = re.compile('id="J_HToken" value="(.*?)"')
                tokenMatch = re.search(tokenPattern,content)
                if tokenMatch:
                    print u"验证码输入正确"
                    self.J_HToken = tokenMatch.group(1)
                    return tokenMatch.group(1)
                else:
                    # No token found even though the captcha was not rejected.
                    print u"J_Token获取失败"
                    return False
        except urllib2.HTTPError, e:
            print u"连接服务器出错,错误原因",e.reason
            return False
    # Exchange J_HToken for an "st" single-sign-on code via alipay.
    def getSTbyToken(self,token):
        tokenURL = 'https://passport.alipay.com/mini_apply_st.js?site=0&token=%s&callback=stCallback6' % token
        request = urllib2.Request(tokenURL)
        response = urllib2.urlopen(request)
        # The st code is embedded in a small JSON snippet.
        pattern = re.compile('{"st":"(.*?)"}',re.S)
        result = re.search(pattern,response.read())
        if result:
            print u"成功获取st码"
            st = result.group(1)
            return st
        else:
            print u"未匹配到st"
            return False
    # Log in with the st code; True when the redirect target page is reached.
    def loginByST(self,st,username):
        stURL = 'https://login.taobao.com/member/vst.htm?st=%s&TPL_username=%s' % (st,username)
        headers = {
            'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:35.0) Gecko/20100101 Firefox/35.0',
            'Host':'login.taobao.com',
            'Connection' : 'Keep-Alive'
        }
        request = urllib2.Request(stURL,headers = headers)
        response = self.newOpener.open(request)
        content = response.read().decode('gbk')
        # A "top.location" assignment in the body signals a successful login.
        pattern = re.compile('top.location = "(.*?)"',re.S)
        match = re.search(pattern,content)
        if match:
            print u"登录网址成功"
            location = match.group(1)
            return True
        else:
            print "登录失败"
            return False
    # Fetch one page of the "bought items" list (logged-in opener required).
    def getGoodsPage(self,pageIndex):
        goodsURL = 'http://buyer.trade.taobao.com/trade/itemlist/listBoughtItems.htm?action=itemlist/QueryAction&event_submit_do_query=1' + '&pageNum=' + str(pageIndex)
        response = self.newOpener.open(goodsURL)
        page = response.read().decode('gbk')
        return page
    # Print every bought item across all pages.
    def getAllGoods(self,pageNum):
        print u"获取到的商品列表如下"
        for x in range(1,int(pageNum)+1):
            page = self.getGoodsPage(x)
            self.tool.getGoodsInfo(page)
    # Main flow: captcha decision -> token -> st code -> login -> list goods.
    def main(self):
        # HTML page when a captcha is needed, False when not, None on failure.
        needResult = self.needCheckCode()
        if not needResult ==None:
            if not needResult == False:
                print u"您需要手动输入验证码"
                checkCode = self.getCheckCode(needResult)
                # Got the captcha image link: show it and ask for the text.
                if not checkCode == False:
                    print u"验证码获取成功"
                    print u"请在浏览器中输入您看到的验证码"
                    webbrowser.open_new_tab(checkCode)
                    self.loginWithCheckCode()
                # Empty/invalid captcha link.
                else:
                    print u"验证码获取失败,请重试"
            else:
                print u"不需要输入验证码"
        else:
            print u"请求登录页面失败,无法确认是否需要验证码"
        # Verify the token was obtained by one of the branches above.
        if not self.J_HToken:
            print "获取Token失败,请重试"
            return
        # Token -> st code -> final login.
        st = self.getSTbyToken(self.J_HToken)
        result = self.loginByST(st,self.username)
        if result:
            # Page 1 carries the total page count; then walk all pages.
            page = self.getGoodsPage(1)
            pageNum = self.tool.getPageNum(page)
            self.getAllGoods(pageNum)
        else:
            print u"登录失败"
# Script entry point: run the full login-and-list flow.
taobao = Taobao()
taobao.main()
"repo_name": "zhangmhao/crawl-house",
"path": "prototype/spider-taoballist.py",
"copies": "1",
"size": "12386",
"license": "mit",
"hash": 7391967095079775000,
"line_mean": 35.7333333333,
"line_max": 772,
"alpha_frac": 0.5762323271,
"autogenerated": false,
"ratio": 2.3108167770419428,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3387049104141943,
"avg_score": null,
"num_lines": null
} |
####AUTHOR/CREDIT: O'Brien, Oliver (o.obrien@ucl.ac.uk)
#Paramters: startID (in order of the london.csv file - not necc numerical order!), maxID
import csv
import os
import sys
def routing():
    """Batch-run the Routino bicycle router between pairs of London stations.

    argv[1] = id of the first station to process (in london.csv file order),
    argv[2] = id at which to stop, argv[3] = highest station id to consider.
    Already-produced route files are skipped, so runs can be resumed.
    """
    locations = []
    maxID = int(sys.argv[3])
    csvreader = csv.reader(open('london.csv', 'r'))
    for row in csvreader:
        # Keep station id, latitude, longitude.
        locations.append([row[0], row[3], row[4]])
    started = False
    for start in locations:
        startID = int(start[0])
        # Toggle processing on at argv[1] and off at argv[2] (file order,
        # not numerical order).
        if int(sys.argv[1]) == startID:
            started = True
        if int(sys.argv[2]) == startID:
            started = False
        if started == True:
            print "***************" + str(startID) + "***************"
            for end in locations:
                endID = int(end[0])
                if startID != endID and startID <= maxID and endID <= maxID:
                    fname = str(startID) + "_" + str(endID) + ".txt"
                    # Skip pairs that already have a result file (resume support).
                    if os.path.exists("../routes/results_londonr6/" + fname) == False:
                        print "Trying " + str(startID) + " to " + str(endID)
                        cmd = "../routes/routino-2.7.2/web/bin/router --transport=bicycle --quiet --lon1=" + start[2] + " --lat1=" + start[1] + " --lon2=" + end[2] + " --lat2=" + end[1] + " --shortest --output-text-all"
                        os.system(cmd)
                        # The router always writes shortest-all.txt; move it
                        # into place under the pair's name.
                        os.rename("shortest-all.txt", "../routes/results_londonr6/" + fname)
| {
"repo_name": "oscarechobravo/ftc2050-toolchain",
"path": "routino.py",
"copies": "1",
"size": "1343",
"license": "apache-2.0",
"hash": -8411698330467659000,
"line_mean": 35.2972972973,
"line_max": 203,
"alpha_frac": 0.5368577811,
"autogenerated": false,
"ratio": 3.1378504672897196,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9152857373636067,
"avg_score": 0.004370174950730325,
"num_lines": 37
} |
__author__ = 'cristian'
import MySQLdb
import MySQLdb.cursors
class MySql:
    """Thin MySQLdb wrapper with lazy connection and a query() helper.

    After each call: `error` holds the last error message ('' on success),
    `last_id` the last AUTO_INCREMENT id, `num_rows` the affected/row count.
    """
    _host = ''
    _user = ''
    _password = ''
    _database = ''
    # Cached connection; opened lazily on the first query.
    _con = None
    error = ''
    last_id = 0
    num_rows = 0
    def __init__(self, mysql_host, mysql_user, mysql_pass, mysql_db):
        self._host = mysql_host
        self._user = mysql_user
        self._password = mysql_pass
        self._database = mysql_db
    # Open a connection; returns it, or None with self.error set.
    def open(self):
        self.error = ''
        try:
            # DictCursor makes every row a column-name -> value dict.
            con = MySQLdb.connect(self._host, self._user, self._password, self._database,
                                  cursorclass=MySQLdb.cursors.DictCursor)
        except MySQLdb.Error, e:
            try:
                con = None
                self.error = "MySQL Error [%d]: %s" % (e.args[0], e.args[1])
            except IndexError:
                # Some errors carry no (code, message) pair.
                con = None
                self.error = "MySQL Error: %s" % str(e)
        return con
    # Run a statement; returns the fetched rows, or False on error.
    def query(self, query):
        self.error = ''
        self.last_id = 0
        self.num_rows = 0
        # Connect lazily on first use.
        if self._con is None:
            con = self.open()
            if self.error:
                return False
            self._con = con
        # Reconnect when the cached connection has been closed.
        if self._con.open is False:
            con = self.open()
            if self.error:
                return False
            self._con = con
        with self._con:
            try:
                curs = self._con.cursor()
                curs.execute(query)
                rows = curs.fetchall()
                # Record the new AUTO_INCREMENT id for INSERTs.
                if curs.lastrowid:
                    self.last_id = curs.lastrowid
                self.num_rows = curs.rowcount
            except MySQLdb.Error, e:
                try:
                    rows = False
                    self.error = "MySQL Error [%d]: %s" % (e.args[0], e.args[1])
                except IndexError:
                    rows = False
                    self.error = "MySQL Error: %s" % str(e)
        return rows
"repo_name": "ceakki/ddns-server",
"path": "dns/mysql.py",
"copies": "1",
"size": "2003",
"license": "mit",
"hash": 3517876505594465300,
"line_mean": 23.4390243902,
"line_max": 89,
"alpha_frac": 0.4583125312,
"autogenerated": false,
"ratio": 4.207983193277311,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5166295724477311,
"avg_score": null,
"num_lines": null
} |
__author__ = 'croxis'
from datetime import datetime
from flask import flash, redirect, render_template, request, url_for
from flask.ext.login import current_user, login_required, login_user, logout_user
from werkzeug.contrib.atom import AtomFeed
from . import user
from ..models import User
from .forms import LoginForm, UserEditForm, UserRegisterForm
@user.route('/login', methods=['GET', 'POST'])
#@oid.loginhandler
def login():
    """Log a user in by name and password.

    Fixes two defects in the original: a bare ``except:`` that swallowed every
    exception (including SystemExit/KeyboardInterrupt) around the user lookup,
    and a duplicated "Name and password do not match" flash when the username
    was unknown.
    """
    form = LoginForm()
    if form.validate_on_submit():
        # .first() returns None for an unknown name (same lookup pattern used
        # elsewhere in this app), so no exception handling is needed here.
        user = User.objects(name=form.username.data).first()
        if user is not None and user.verify_password(form.password.data):
            login_user(user, form.remember_me.data)
            return redirect(request.args.get('next') or url_for('main.index'))
        # Single flash for both "unknown user" and "wrong password" -
        # deliberately indistinguishable to the visitor.
        flash('Name and password do not match')
    return render_template('login.html', title='Login', form=form)
@user.route('/logout')
@login_required
def logout():
    """End the current session and send the visitor back to the front page."""
    logout_user()
    flash('You have been logged out')
    destination = url_for('main.index')
    return redirect(destination)
@user.route('/profile/edit', methods=['GET', 'POST'])
@login_required
def profile_edit():
    """Let the logged-in user change their email and/or password.

    The old password must verify before anything is saved.
    NOTE(review): a wrong old password silently re-renders the form without
    any flash message - confirm whether that is intended.
    """
    form = UserEditForm()
    if form.validate_on_submit() and current_user.verify_password(form.old_password.data):
        # Only overwrite the fields the user actually filled in.
        if form.email.data:
            current_user.email = form.email.data
        if form.password.data:
            current_user.password = form.password.data
        current_user.save()
        flash('Your profile has been updated')
        return redirect(url_for('user.profile'))
    # GET (or failed POST): pre-fill the form with the current email.
    form.email.data = current_user.email
    return render_template('user.html', title=current_user.name, form=form)
@user.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account; the very first account ever created becomes admin."""
    form = UserRegisterForm()
    if form.validate_on_submit():
        user = User(name=form.username.data,
                    email=form.email.data,
                    registered=datetime.utcnow())
        # Assigned after construction because `password` is a setter property
        # (it hashes rather than stores the raw value).
        user.password = form.password.data  # Has to be separate due to setter property
        # Empty collection means this is the first user -> grant admin.
        if not User.objects:
            user.level = 'admin'
        user.save()
        flash('You can now login. Email authentication will be added later.')
        return redirect(url_for('user.login'))
    return render_template('register_user.html', title='New User!', form=form)
@user.route('/<name>/feed')
def user_feed(name):
    """Atom feed of the latest releases from the mods this user subscribes to."""
    # NOTE(review): user_agent is read but never used - leftover?
    user_agent = request.headers.get('User-Agent')
    user = User.objects.get_or_404(name=name)
    feed = AtomFeed(name + " following these mods.", feed_url=request.url, url=request.host_url)
    if user.subscribed_mods:
        #TODO: Try and make the sorting done via mongo instead
        mods = list(user.subscribed_mods)
        # Newest-updated mods first.
        mods.sort(key=lambda r: r.updated_timestamp, reverse=True)
        for mod in mods:
            # The most recent release supplies the entry body and author.
            feed.add(mod.name,
                     mod.downloads[-1].change_log,
                     content_type='html',
                     author=mod.downloads[-1].author.name,
                     url=url_for('mod.mod_page', name=mod.name, _external=True),
                     updated=mod.updated_timestamp)
    return feed.get_response()
@user.route('/<name>')
def user_page(name):
    """Public profile page for the given user (404 when the name is unknown)."""
    # NOTE(review): user_agent is read but never used - leftover?
    user_agent = request.headers.get('User-Agent')
    user = User.objects.get_or_404(name=name)
    return render_template('user.html', title=user.name, user=user)
"repo_name": "croxis/kmr",
"path": "app/user/views.py",
"copies": "1",
"size": "3597",
"license": "mit",
"hash": 15622878305283932,
"line_mean": 37.2765957447,
"line_max": 98,
"alpha_frac": 0.6427578538,
"autogenerated": false,
"ratio": 3.8103813559322033,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4953139209732203,
"avg_score": null,
"num_lines": null
} |
__author__ = 'croxis'
from datetime import datetime
import mimetypes
import random
from flask import abort, flash, make_response, redirect, render_template, request, url_for
from flask.ext.login import current_user, login_required
from . import mod
from .forms import ModDownloadUpdateForm, ModEditForm, ModRegisterForm, ModUploadForm
from ..models import Mod, ModDownload, ModDownloadStat, User, UserDownloadStat, mongo
@mod.route('/register', methods=['GET', 'POST'])
@login_required
def register_mod():
    """Create a new mod together with its cover image and its first release."""
    # Require a file to prevent the site from being cluttered with "in planning" mods that never have a release
    # I'm looking at you bukkit!
    form = ModRegisterForm()
    # NOTE(review): bare `request.data` has no effect - likely debug leftover.
    request.data
    if form.validate_on_submit():
        mod = Mod(
            name=form.name.data,
            install_instructions=form.install_instructions.data,
            summary=form.summary.data,
            description=form.description.data,
            registered_timestamp=datetime.utcnow(),
            updated_timestamp=datetime.utcnow(),
            # Random sort key - presumably used for random sampling; confirm.
            random=random.random()
        )
        #mod.images.append(form.image_path.data)
        #return str((form.image_path.data, request.files['image_path']))
        '''image = request.files['image_path']
        print(type(image), dir(image))
        f = mongo.GridFSProxy()
        fi = mongo.ImageGridFsProxy()
        f.put(image)
        fi.put(image)
        print(type(f), dir(f))
        print(type(fi), dir(fi))
        mod.images.append(f)
        #mod.images.append(fi)'''
        # Store the cover image in GridFS, guessing the MIME type from the name.
        f = mongo.GridFSProxy()
        f.put(request.files['image_path'], content_type=mimetypes.guess_type(form.image_path.data.filename))
        mod.images.append(f)
        mod.save()
        # Record ownership on the creating user.
        current_user.owned_mods.append(mod)
        current_user.save()
        mod_download = ModDownload(
            change_log=form.change_log.data,
            mod_version=form.mod_version.data,
            ksp_version=form.ksp_version.data,
            upload_timestamp=datetime.utcnow(),
            author=User.objects(name=current_user.name).first()
        )
        # Which should take precedence, local or remote hosting?
        # TODO: Validate zip file as a zip file and ksp mod. Validate in form?
        mod_download.zip_file.put(request.files['file_path'], content_type='application/zip')
        mod.downloads.append(mod_download)
        mod.save()
        return redirect(url_for('mod.mod_page', name=mod.name))
    return render_template('register_mod.html', title='New Mod!', form=form)
@mod.route('/list')
def list():
    """Paginated listing of every mod, 25 per page.

    NOTE(review): `query` is read but never used - looks like an unfinished
    search feature. The view also shadows the builtin `list`; renaming it
    would change the Flask endpoint name, so it is left as-is.
    """
    query = request.args.get('query')
    page = request.args.get('page', 1, type=int)
    pagination = Mod.objects.paginate(page=page, per_page=25)
    mods = pagination.items
    return render_template('list.html', title='All Mods.', mods=mods, pagination=pagination)
@mod.route('/<name>/upload', methods=['GET', 'POST'])
@login_required
def mod_upload(name):
    """Attach a new downloadable release to an existing mod.

    Access: the mod's owner or an admin. Bug fix: the original condition used
    ``or`` (`not owner or not admin`), which also locked out legitimate owners
    who were not admins; it now rejects only users who are neither.
    """
    mod = Mod.objects.get_or_404(name=name)
    if mod not in current_user.owned_mods and not current_user.is_admin():
        flash("You do not own this mod")
        return redirect(url_for('mod.mod_page', name=name))
    form = ModUploadForm()
    if form.validate_on_submit():
        mod_download = ModDownload(
            change_log=form.change_log.data,
            mod_version=form.mod_version.data,
            ksp_version=form.ksp_version.data,
            upload_timestamp=datetime.utcnow(),
            author=User.objects(name=current_user.name).first()
        )
        # Which should take precedence, local or remote hosting?
        # TODO: Validate zip file as a zip file and ksp mod. Validate in form?
        mod_download.zip_file.put(request.files['file_path'], content_type='application/zip')
        mod.downloads.append(mod_download)
        mod.save()
        return redirect(url_for('mod.mod_page', name=mod.name))
    return render_template('upload_mod.html', title='Upload Mod!', name=name, form=form)
@mod.route('/<name>/<version>/delete')
@login_required
def mod_download_delete(name, version):
    """Delete the release(s) matching a version string from a mod.

    'None' in the URL matches releases whose version was never set.
    Fixes: the owner-or-admin check used ``or`` (locking out non-admin
    owners); the list was mutated while being iterated (skips elements);
    an unused local was removed.
    """
    mod = Mod.objects.get_or_404(name=name)
    if mod not in current_user.owned_mods and not current_user.is_admin():
        flash("You do not own this mod")
        return redirect(url_for('mod.mod_page', name=mod.name))
    # Iterate a snapshot so removal does not skip elements.
    for download in list(mod.downloads):
        if version == download.mod_version or (version == 'None' and download.mod_version is None):
            mod.downloads.remove(download)
            mod.save()
    # NOTE(review): flashes "deleted" even when no release matched (preserved).
    flash(name + " " + version + " deleted.")
    return redirect(url_for('mod.mod_edit', name=mod.name))
@mod.route('/<name>/<version>/download')
def mod_download_version(name, version):
    """Stream one specific release's zip and record download statistics."""
    # NOTE(review): user_agent is read but never used - leftover?
    user_agent = request.headers.get('User-Agent')
    mod = Mod.objects.get_or_404(name=name)
    for download in mod.downloads:
        if download.mod_version == version:
            response = make_response(download.zip_file.read())
            response.mimetype = download.zip_file.content_type
            # e.g. "My Mod" version 1.2 -> My_Mod-1.2.zip
            response.headers["Content-Disposition"] = ("attachment; filename=" +
                                                       name.replace(" ", "_") +
                                                       "-" + version + ".zip")
            # Per-release download counter, tied to the user when logged in.
            mod_stat = ModDownloadStat(timestamp=datetime.utcnow())
            if current_user.is_authenticated():
                mod_stat.user = User.objects(name=current_user.name).first()
                # Mirror the event on the user's own download history.
                user_stat = UserDownloadStat(
                    mod=mod,
                    version=version,
                    timestamp=datetime.utcnow()
                )
                current_user.downloaded.append(user_stat)
                current_user.save()
            download.downloaded.append(mod_stat)
            mod.save()
            return response
    # No release carries that version string.
    abort(404)
@mod.route('/<name>/<version>/edit', methods=['GET', 'POST'])
@login_required
def mod_download_edit(name, version):
    """Edit the version strings and changelog of one release; 404 when the
    version is unknown.

    Fixes: the owner-or-admin check used ``or`` (locking out non-admin
    owners); a leftover debug print and an unused local were removed.
    """
    mod = Mod.objects.get_or_404(name=name)
    if mod not in current_user.owned_mods and not current_user.is_admin():
        flash("You do not own this mod")
        return redirect(url_for('mod.mod_page', name=mod.name))
    for download in mod.downloads:
        if download.mod_version == version:
            form = ModDownloadUpdateForm()
            if form.validate_on_submit():
                download.mod_version = form.mod_version.data
                download.ksp_version = form.ksp_version.data
                download.change_log = form.change_log.data
                mod.save()
                return redirect(url_for('mod.mod_edit', name=name))
            # GET: pre-fill the form with the release's current values.
            form.mod_version.data = download.mod_version
            form.ksp_version.data = download.ksp_version
            form.change_log.data = download.change_log
            return render_template('edit_download_mod.html', title='Edit file!', name=name, form=form)
    abort(404)
@mod.route('/<name>/delete')
@login_required
def mod_delete(name):
    """Delete a whole mod; only its owner may do so.

    (Removed an unused `user_agent` local left over from copy-paste.
    NOTE(review): unlike the edit/upload views, admins are NOT allowed here -
    confirm whether that is intended.)
    """
    mod = Mod.objects.get_or_404(name=name)
    if mod not in current_user.owned_mods:
        flash("You do not own this mod")
        return redirect(url_for('mod.mod_page', name=mod.name))
    mod.delete()
    flash(name + " deleted.")
    return redirect(url_for('main.index'))
@mod.route('/<name>/download')
def mod_download_latest(name):
    """Stream the newest release's zip and record download statistics.

    As far as I can read it is not possible to query and return embedded
    documents in mongo. Consider making ModDownload a Document.
    """
    # NOTE(review): user_agent is read but never used - leftover?
    user_agent = request.headers.get('User-Agent')
    mod = Mod.objects.get_or_404(name=name)
    # TODO: This is fugly. Consider making ModDownload a document
    d = mod.get_latest_download()
    response = make_response(d.zip_file.read())
    response.mimetype = d.zip_file.content_type
    # e.g. "My Mod" version 1.2 -> My_Mod-1.2.zip
    response.headers["Content-Disposition"] = "attachment; filename=" + mod.name.replace(" ", "_") + "-" + d.mod_version + ".zip"
    # Per-release download counter, tied to the user when logged in.
    mod_stat = ModDownloadStat(timestamp=datetime.utcnow())
    if current_user.is_authenticated():
        mod_stat.user = User.objects(name=current_user.name).first()
        # Mirror the event on the user's own download history.
        user_stat = UserDownloadStat(
            mod=mod,
            version=d.mod_version,
            timestamp=datetime.utcnow()
        )
        current_user.downloaded.append(user_stat)
        current_user.save()
    d.downloaded.append(mod_stat)
    mod.save()
    return response
@mod.route('/<name>/edit', methods=['GET', 'POST'])
@login_required
def mod_edit(name):
    """Edit a mod's name, summary, description and install instructions.

    Fixes: the owner-or-admin check used ``or``, which rejected owners who
    were not admins; an unused `user_agent` local was removed.
    """
    mod = Mod.objects.get_or_404(name=name)
    if mod not in current_user.owned_mods and not current_user.is_admin():
        flash("You do not own this mod")
        return redirect(url_for('mod.mod_page', name=mod.name))
    form = ModEditForm()
    if form.validate_on_submit():
        mod.name = form.name.data
        mod.summary = form.summary.data
        mod.description = form.description.data
        mod.install_instructions = form.install_instructions.data
        mod.save()
        return redirect(url_for('mod.mod_page', name=name))
    # GET: pre-fill the form with the mod's current values.
    form.name.data = mod.name
    form.summary.data = mod.summary
    form.description.data = mod.description
    form.install_instructions.data = mod.install_instructions
    return render_template('mod_edit.html', title='Edit: ' + mod.name, mod=mod, form=form)
@mod.route('/<name>')
def mod_page(name):
    """Public page for one mod (404 when the name is unknown)."""
    # NOTE(review): user_agent is read but never used - leftover?
    user_agent = request.headers.get('User-Agent')
    mod = Mod.objects.get_or_404(name=name)
    return render_template('mod.html', title=mod.name, mod=mod)
"repo_name": "croxis/kmr",
"path": "app/mod/views.py",
"copies": "1",
"size": "9657",
"license": "mit",
"hash": 3099745709020508000,
"line_mean": 40.4506437768,
"line_max": 135,
"alpha_frac": 0.6301128715,
"autogenerated": false,
"ratio": 3.7113758647194466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48414887362194464,
"avg_score": null,
"num_lines": null
} |
__author__ = 'croxis'
from flask_wtf import Form
from flask_wtf.file import FileField, FileAllowed, FileRequired
from wtforms import SubmitField, TextField, TextAreaField, ValidationError
from wtforms.validators import Optional, Required
from flask.ext.pagedown.fields import PageDownField
from ..models import Mod
class ModDownloadUpdateForm(Form):
    """Edit the metadata (version strings, changelog) of an existing release."""
    mod_version = TextField('Mod Version', validators=[Required()])
    ksp_version = TextField('KSP Version', validators=[Required()])
    change_log = PageDownField(validators=[Required()])
    submit = SubmitField('Update')
class ModEditForm(Form):
    """Edit a mod's descriptive fields (no files involved)."""
    name = TextField('Mod Name', validators=[Required()])
    summary = TextAreaField('Summary -- a short description of your mod', validators=[Required()])
    description = PageDownField('Extended Description', validators=[Required()])
    install_instructions = PageDownField('Install Instructions', validators=[Required()])
    submit = SubmitField('Update')
class ModUploadForm(Form):
    """Upload a new release (zip) for an existing mod."""
    mod_version = TextField('Mod Version', validators=[Required()])
    ksp_version = TextField('KSP Version', validators=[Required()])
    file_path = FileField('Zip File', validators=[Required(), FileRequired(), FileAllowed(['zip'], 'Zip files only!')])
    change_log = PageDownField(validators=[Required()])
    submit = SubmitField('Upload')
class ModRegisterForm(Form):
    """Register a brand-new mod: metadata, cover image, and a mandatory first release."""
    # TODO: Make a form that properly inherits from both. CSRF token missing when I try to do so
    name = TextField('Mod Name', validators=[Required()])
    # NOTE(review): Optional() combined with FileRequired() is contradictory - confirm intent.
    image_path = FileField('Mod Image File', validators=[Optional(), FileRequired(), FileAllowed(['png', 'jpg', 'gif'], 'Image files only!')])
    summary = TextAreaField('Summary -- a short description of your mod', validators=[Required()])
    description = PageDownField('Extended Description', validators=[Required()])
    install_instructions = PageDownField('Install Instructions', validators=[Required()])
    mod_version = TextField('Mod Version', validators=[Required()])
    ksp_version = TextField('KSP Version', validators=[Required()])
    file_path = FileField('Zip File', validators=[Required(), FileRequired(), FileAllowed(['zip'], 'Zip files only!')])
    change_log = PageDownField(validators=[Required()])
    submit = SubmitField('Upload')
    # wtforms inline validator: reject names already taken by another mod.
    def validate_name(self, field):
        if Mod.objects(name=field.data).first():
            raise ValidationError("Mod name already in use.")
"repo_name": "croxis/kmr",
"path": "app/mod/forms.py",
"copies": "1",
"size": "2440",
"license": "mit",
"hash": 2784830012214971400,
"line_mean": 47.82,
"line_max": 142,
"alpha_frac": 0.7155737705,
"autogenerated": false,
"ratio": 4.326241134751773,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0023495599016414744,
"num_lines": 50
} |
__author__ = 'croxis'
from io import BytesIO
import random
import re
from PIL import Image, ImageDraw, ImageFont
import requests
import lib.transforms as transforms
import lib.utils as utils
from lib.manalib import Manatext
from . import magic_image
from . import img_manager
try:
    import textwrap
    import nltk.data
    sent_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
    # This crazy thing is actually invoked as an unpass, so newlines are still
    # encoded.
    def sentencecase(s):
        # Protect the card-name marker so the tokenizer does not split on it.
        s = s.replace(utils.x_marker, utils.reserved_marker)
        lines = s.split(utils.newline)
        clines = []
        for line in lines:
            if line:
                # Capitalize each detected sentence within the line.
                sentences = sent_tokenizer.tokenize(line)
                clines += [' '.join([sent.capitalize() for sent in sentences])]
        # Rejoin with encoded newlines and restore the protected marker.
        return utils.newline.join(clines).replace(utils.reserved_marker,
                                                  utils.x_marker)
except ImportError:
    # Fallback when nltk is unavailable: capitalize the first character only.
    def sentencecase(s):
        return s.capitalize()
def get_fonts():
    """Load the card-rendering fonts and return them keyed by role."""
    title_font = ImageFont.truetype("fonts/beleren-bold_P1.01.ttf", size=18)
    type_font = ImageFont.truetype("fonts/beleren-bold_P1.01.ttf", size=16)
    body_font = ImageFont.truetype("fonts/mplantin.ttf", size=18)
    return {
        'font_title': title_font,
        'font_type': type_font,
        'font': body_font,
    }
def draw_costs(image, draw, fonts, card):
    """Paste the mana-cost icons onto the card image, right-aligned at the
    title bar, colored pips first and the generic (colorless) number last.

    Bug fix: the original pasted the black, green and red icons using the
    *blue* icon as their alpha mask (an apparent copy-paste slip - the white
    and blue branches masked with their own icon); each icon now masks with
    itself.
    """
    cost = get_cost(card)
    # All pips share the white icon's height for vertical centering.
    w, h = img_manager.get_icon('white').size
    x_offset = 0
    # One icon per point of colored mana, in WUBRG file order.
    for color in ('white', 'blue', 'black', 'green', 'red'):
        icon = img_manager.get_icon(color)
        for _ in range(cost[color]):
            image.paste(icon, (321 - x_offset, 42 - h // 2), icon)
            x_offset += 23
    if cost['colorless']:
        # Stamp the generic-cost number onto a copy of the colorless icon.
        colorless_mana = img_manager.get_icon('colorless')
        draw_colorless = ImageDraw.Draw(colorless_mana)
        w, h = draw_colorless.textsize(str(cost['colorless']))
        W, H = colorless_mana.size
        draw_colorless.text(((W - w) // 2 - 2, (H - h) // 2 - 5),
                            str(cost['colorless']),
                            fill=(0, 0, 0, 255),
                            font=fonts['font_title'])
        # NOTE(review): y = 36 - h//2 here uses the *text* height, while the
        # loop above used the icon height - preserved from the original.
        image.paste(colorless_mana,
                    (321 - x_offset, 36 - h // 2),
                    colorless_mana)
        colorless_mana.close()
def draw_title(image, draw, fonts, card):
    """Render the card's name, vertically centered on y=38."""
    title = card.name.title()
    _, text_height = draw.textsize(title)
    draw.text((35, 38 - text_height // 2),
              title,
              fill=(0, 0, 0, 255),
              font=fonts['font_title'])
def draw_types(image, draw, fonts, card):
    """Render the type line ("Supertypes Types - Subtypes"), centered on y=304."""
    parts = []
    if card.supertypes:
        parts.append(' '.join(card.supertypes).title())
    parts.append(' '.join(card.types).title())
    typeline = ' '.join(parts)
    if card.subtypes:
        typeline += ' - ' + ' '.join(card.subtypes).title()
    _, text_height = draw.textsize(typeline)
    draw.text((35, 304 - text_height // 2),
              typeline,
              fill=(0, 0, 0, 255),
              font=fonts['font_type'])
def get_card_text(card):
    """Decode the generator's internal text encoding into printable rules text.

    Applies the transform "unpass" steps in order, substituting our own
    sentence capitalization, then re-wraps the result in a Manatext so the
    mana-cost placeholders are formatted back in.
    """
    mtext = card.text.text
    # Undo the ML-friendly encoding passes one at a time.
    mtext = transforms.text_unpass_1_choice(mtext, delimit=True)
    mtext = transforms.text_unpass_2_counters(mtext)
    mtext = transforms.text_unpass_3_uncast(mtext)
    mtext = transforms.text_unpass_4_unary(mtext)
    mtext = transforms.text_unpass_5_symbols(mtext, for_forum=False)
    # Capitalize sentences ourselves (sentencecase above) so that the
    # cardname substitution below keeps its title-cased form.
    mtext = sentencecase(mtext)
    mtext = transforms.text_unpass_6_cardname(mtext, card.name.title())
    mtext = transforms.text_unpass_7_newlines(mtext)
    mtext = transforms.text_unpass_8_unicode(mtext)
    # Rebuild a Manatext so .format() reinserts the stored mana costs.
    new_text = Manatext('')
    new_text.text = mtext
    new_text.costs = card.text.costs
    card_text = new_text.format()
    return card_text
def draw_card_text(image, draw, fonts, card):
    """Render the rules text box, replacing {w}/{u}/{b}/{r}/{g}/{t} tokens
    with small mana icons and {N} tokens with a numbered colorless circle.

    Fixes over the original: each icon is pasted with itself as the alpha
    mask (the original reused the blue icon's mask for every color), and
    the bare ``except:`` around the digit parse is narrowed to the two
    exceptions ``int()``/indexing can actually raise.
    """
    lines = textwrap.wrap(get_card_text(card), 37, replace_whitespace=False)
    # Token -> icon name, checked in the original's branch order.
    symbol_icons = (('{w}', 'white'), ('{b}', 'black'), ('{u}', 'blue'),
                    ('{r}', 'red'), ('{g}', 'green'), ('{t}', 'tap'))
    token_re = re.compile('(\\{.*?\\})', re.IGNORECASE | re.DOTALL)
    y_offset = 0
    for line in lines:
        for sub_line in line.split('\n'):
            x_offset = 0
            # split() with a capturing group keeps the {tokens} in the list.
            for chunk in token_re.split(sub_line):
                if not chunk:
                    continue
                x = 36 + x_offset
                y = 335 + y_offset - 3
                if token_re.match(chunk):
                    lowered = chunk.lower()
                    name = next((icon for token, icon in symbol_icons
                                 if token in lowered), None)
                    if name is not None:
                        icon = img_manager.get_icon_text(name)
                        image.paste(icon, (x, y), icon)
                        x_offset += 21
                    else:
                        # Possibly a generic cost like "{2}": draw the digit
                        # onto a copy of the colorless circle.  Unknown
                        # tokens are silently skipped, as before.
                        try:
                            int(chunk[1])
                        except (ValueError, IndexError):
                            pass
                        else:
                            colorless = img_manager.get_icon_text('colorless')
                            draw_colorless = ImageDraw.Draw(colorless)
                            w, h = draw_colorless.textsize(chunk[1])
                            draw_colorless.text(
                                ((18 - w) // 2 - 2, (18 - h) // 2 - 4),
                                chunk[1],
                                fill=(0, 0, 0, 255),
                                font=fonts['font_title'])
                            image.paste(colorless, (x, y), colorless)
                            colorless.close()
                            x_offset += 21
                else:
                    # Plain text: draw it and advance by its pixel width.
                    draw.text((35 + x_offset, 335 + y_offset),
                              chunk,
                              fill=(0, 0, 0, 255),
                              font=fonts['font'])
                    x_offset += fonts['font'].getsize(chunk)[0]
            y_offset += 19
def draw_card_copywrite(image, draw, fonts, card):
    """Stamp the placeholder copyright line near the card's bottom edge."""
    placeholder = "Copy, right?"
    draw.text((60, 484), placeholder,
              fill=(0, 0, 0, 255),
              font=fonts['font'])
def draw_power_toughness(image, draw, fonts, card):
    """Render the power/toughness box for creature cards; no-op otherwise.

    Power and toughness are stored in the generator's unary encoding
    (runs of '^'), so the counts are the numeric values.
    """
    if not card.pt:
        return
    power = str(card.pt_p.count('^'))
    toughness = str(card.pt_t.count('^'))
    colors = card.cost.colors
    # Pick the P/T box artwork code: 'a' (artifact) when colorless,
    # 'm' (multicolor) for more than one color, else the single color's
    # letter.  Bug fix: the original guarded with ``len(c) == ''`` which
    # compares an int to a str and is always False; the empty case was
    # only rescued by a later ``if not c`` check.
    if not colors:
        box_code = 'a'
    elif len(colors) > 1:
        box_code = 'm'
    else:
        box_code = colors.lower()
    pt_image = Image.open('app/card_parts/magic-new.mse-style/' +
                          box_code +
                          'pt.jpg')
    image.paste(pt_image, (271, 461))
    draw.text((295, 470), power + " / " + toughness,
              fill=(0, 0, 0, 255),
              font=fonts['font_title'])
def draw_rarity(image, draw, fonts, card):
    # Placeholder: rarity symbols are not rendered yet.
    pass
def create_card_img(card, google):
    """Compose the full card image and return it as a PIL Image.

    When *google* is truthy, artwork is searched for online; otherwise
    (or on failure) the bundled default portrait is used.
    """
    image = img_manager.get_background(get_background_color(card))
    fonts = get_fonts()
    draw = ImageDraw.Draw(image)
    # Render each layer in order onto the frame.
    render_steps = (draw_costs, draw_title, draw_types, draw_card_text,
                    draw_card_copywrite, draw_power_toughness, draw_rarity)
    for step in render_steps:
        step(image, draw, fonts, card)
    art, art_w, art_h = get_card_art(card, google)
    draw_card_art(image, draw, fonts, card, art, art_w, art_h)
    return image
def draw_card_art(image, draw, fonts, card, art, w, h):
    """Paste the artwork horizontally centered, vertically centered on y=175."""
    left = (image.size[0] - w) // 2
    top = 175 - h // 2
    image.paste(art, (left, top))
def get_cost(card):
    """Tally the card's mana cost into per-color counts.

    Returns a dict with keys 'white'/'blue'/'black'/'red'/'green'
    (symbol counts) and 'colorless' (the first numeric component of the
    cost string, 0 if there is none).
    """
    # Hoisted: the original recomputed card.cost.format().lower() for
    # every color.  Digits are unaffected by lower(), so the colorless
    # search over the lowered string is equivalent.
    cost_str = card.cost.format().lower()
    cost = {
        'white': cost_str.count('w'),
        'blue': cost_str.count('u'),
        'black': cost_str.count('b'),
        'red': cost_str.count('r'),
        'green': cost_str.count('g'),
        'colorless': 0,
    }
    match = re.search(r'(\d+)', cost_str)
    if match:
        cost['colorless'] = int(match.group(1))
    return cost
def get_background_color(card):
    """Map the card's color identity string to a background frame name.

    Empty identity -> 'artifact'; more than one color -> 'multicolor';
    a single W/U/B/R/G letter -> its color name; anything else -> None.
    """
    colors = card.cost.get_colors()
    if colors == "":
        return 'artifact'
    if len(colors) > 1:
        return 'multicolor'
    single = {'W': 'white', 'U': 'blue', 'B': 'black',
              'R': 'red', 'G': 'green'}
    return single.get(colors)
def get_card_art(card, google):
    """Return (art, width, height): googled artwork when allowed and found,
    otherwise the bundled default portrait."""
    if google:
        found = google_card_art(card)
        if found is not None:
            return found
    return get_default_card_art(card)
def get_default_card_art(card):
    """Crop the bundled fallback portrait and return (art, width, height)."""
    art = img_manager.default_portrait.crop((0, 0, 311, 228))
    width, height = art.size
    return (art, width, height)
def google_card_art(card):
    """Search the web for artwork matching the card.

    Returns (art, width, height) on success, or None when no usable
    image was found or it could not be decoded.
    """
    # Candidate search phrases from the card; each term's last element
    # appears to be a colour key -- TODO confirm against magic_image.
    terms = magic_image.find_search_terms(card)
    random.shuffle(terms)
    img_url = None
    # Try at most five of the shuffled terms.
    for term in terms[:5]:
        color = term[-1]
        query = "+".join(term[:-1])
        if color == 'u':
            # presumably the fetcher expects the long colour name; verify.
            color = 'blue'
        img_url = magic_image.fetch(query + '+"fantasy"+paintings+-card',
                                    color)
        if img_url:
            break
    if img_url:
        with BytesIO(requests.get(img_url).content) as reader:
            reader.seek(0)
            try:
                art = Image.open(reader)
                # Scale down to fit, then crop to the art box size.
                art.thumbnail((311, 311))
                art = art.crop((0, 0, 311, 229))
                w, h = art.size
                return (art, w, h)
            except OSError:
                # PIL raises OSError for unidentifiable image data.
                print("Unable to handle this kind of image.")
    return None
| {
"repo_name": "croxis/mtgai",
"path": "app/card_visual.py",
"copies": "1",
"size": "12141",
"license": "mit",
"hash": 1580799061882881800,
"line_mean": 35.1339285714,
"line_max": 79,
"alpha_frac": 0.4854624825,
"autogenerated": false,
"ratio": 3.750695088044486,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4736157570544486,
"avg_score": null,
"num_lines": null
} |
__author__ = 'croxis'
import os.path
from PIL import Image
# Single-letter frame/icon file codes keyed by human-readable color name.
COLORS = {'white': 'w',
          'black': 'b',
          'blue': 'u',
          'green': 'g',
          'red': 'r',
          'colorless': 'c',
          'artifact': 'a',
          'multicolor': 'm'}
# Reverse lookup: file code -> color name.
C = {v: k for k, v in COLORS.items()}
class ImageManager:
    """Loads and caches every card frame and mana icon at construction time.

    Two icon sizes are kept: 22px "large" icons for the cost line
    (``icons``) and 18px "small" icons for inline rules text
    (``icons_text``).
    """

    def __init__(self,
                 root_path='app/card_parts/',
                 card_path='magic-new.mse-style/',
                 icon_path='magic-mana-beveled.mse-symbol-font/',
                 icon_text_path='magic-mana-small.mse-symbol-font/'):
        self.card_path = os.path.join(root_path, card_path)
        self.icon_path = os.path.join(root_path, icon_path)
        self.icon_text_path = os.path.join(root_path, icon_text_path)
        # Caches keyed by color name; icon dicts also hold 'tap' and
        # 'colorless'.
        self.cards = {}
        self.icons = {}
        self.icons_text = {}
        for color in COLORS:
            self.cards[color] = Image.open(self.card_path + COLORS[color] + 'card.jpg')
        for name in ['w', 'b', 'u', 'g', 'r']:
            self.icons[C[name]] = Image.open(self.icon_path + 'mana_' + name + '.png')
        self.icons['tap'] = Image.open(self.icon_path + 'mana_t.png')
        self.icons['colorless'] = Image.open(self.icon_path + 'mana_circle.png')
        for icon in self.icons.values():
            # Large cost-line icons are capped at 22px.
            icon.thumbnail((22, 22))
        for name in ['w', 'b', 'u', 'g', 'r']:
            self.icons_text[C[name]] = Image.open(self.icon_text_path + 'mana_' + name + '.png')
        self.icons_text['tap'] = Image.open(self.icon_text_path + 'mana_t.png')
        self.icons_text['colorless'] = Image.open(self.icon_text_path + 'mana_circle.png')
        for icon in self.icons_text.values():
            # Small in-text icons are capped at 18px.
            icon.thumbnail((18, 18))
        self.default_portrait = Image.open(os.path.join(root_path, 'Magic_the_gathering_pentagon.png'))
        self.default_portrait.thumbnail((311, 311))

    def get_background(self, color):
        """Return a fresh copy of the frame for *color* (callers draw on it)."""
        return self.cards[color].copy()

    def get_icon(self, name):
        """Return the large mana icon; 'colorless' is copied because
        callers draw a digit onto it."""
        if name == 'colorless':
            return self.icons[name].copy()
        return self.icons[name]

    def get_icon_text(self, name):
        """Return the small in-text mana icon; 'colorless' is copied because
        callers draw a digit onto it."""
        if name == 'colorless':
            return self.icons_text[name].copy()
        return self.icons_text[name]
| {
"repo_name": "croxis/mtgai",
"path": "app/image_manager.py",
"copies": "1",
"size": "2261",
"license": "mit",
"hash": 6680155407458835000,
"line_mean": 31.3,
"line_max": 103,
"alpha_frac": 0.5444493587,
"autogenerated": false,
"ratio": 3.2025495750708215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42469989337708214,
"avg_score": null,
"num_lines": null
} |
__author__ = 'croxis'
from datetime import datetime
import mimetypes
import random
from flask import abort, current_app, jsonify, make_response, render_template, request
from flask.ext.login import current_user
from . import main
from ..models import Mod, mongo
@main.route('/')
def index():
    """Front page: a random sample of mods, the newest updates, and any
    updates pending for the current user's subscriptions."""
    user_agent = request.headers.get('User-Agent')
    # Idea: if tags are used, maybe the random sample can be done via one
    # or two tags.  Also return the newest version of KSP.
    try:
        rand_mods = random.sample(list(Mod.objects), 4)
    except ValueError:
        # Fewer than four mods exist -- show them all.
        rand_mods = list(Mod.objects)
    updated_mods = Mod.objects.order_by('-updated_timestamp')[:5]
    subscribed_updated = []
    if current_user.is_authenticated():
        for mod in current_user.subscribed_mods:
            for download in mod.downloads:
                # A download is "new" if any of the user's recorded
                # downloads predates its upload.
                if any(user_download.timestamp < download.upload_timestamp
                       for user_download in current_user.downloaded):
                    subscribed_updated.append((mod, download))
    return render_template(
        'index.html',
        current_time=datetime.utcnow(),
        current_user=current_user,
        rand_mods=rand_mods,
        subscribed_updates=subscribed_updated,
        updated_mods=updated_mods
    )
@main.route('/images/<mod>/<index>')
def images(mod, index):
    """Serve the stored image at position *index* for the named mod."""
    stored_image = Mod.objects.get_or_404(name=mod).images[int(index)]
    response = make_response(stored_image.read())
    response.mimetype = stored_image.content_type[0]
    return response
@main.route('/search_results')
def search_results():
    """JSON endpoint backing the live search box (mongo full-text search)."""
    # TODO: unhardcode the pymongo db name.
    query_string = request.args.get('search_string')
    if not query_string:
        return jsonify(results=[])
    results = mongo.connection[current_app.config['MONGODB_SETTINGS']['DB']].command(
        'text',
        'mod',
        search=query_string,
        limit=10
    )
    sending_results = [
        {
            'image_url': '/images/' + hit['obj']['name'] + '/0',
            'name': hit['obj']['name'],
            'summary': hit['obj']['summary'],
            'updated_timestamp': hit['obj']['updated_timestamp']
        }
        for hit in results['results']
    ]
    return jsonify(results=sending_results)
@main.route('/search')
def search():
    """Render the search page; the query itself is served asynchronously
    via /search_results."""
    # The original read request.args.get('query') into an unused local;
    # dropped (request.args.get has no side effects).
    return render_template('search.html', title='Search!')
@main.route('/toggle_following')
def toggle_following():
    """Flip the current user's subscription to a mod.

    Responds with {"following": bool}; anonymous users get a 404.
    """
    if not current_user.is_authenticated():
        abort(404)
    mod = Mod.objects.get_or_404(name=request.args.get('mod_name'))
    if mod in current_user.subscribed_mods:
        current_user.subscribed_mods.remove(mod)
        following = False
    else:
        current_user.subscribed_mods.append(mod)
        following = True
    current_user.save()
    return jsonify(following=following)
"repo_name": "croxis/kmr",
"path": "app/main/views.py",
"copies": "1",
"size": "3100",
"license": "mit",
"hash": -7740487037489334000,
"line_mean": 30.6428571429,
"line_max": 89,
"alpha_frac": 0.5938709677,
"autogenerated": false,
"ratio": 4.133333333333334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017179564909669185,
"num_lines": 98
} |
__author__ = 'croxis'
from flask.ext.login import AnonymousUserMixin, LoginManager, UserMixin
from flask.ext.mongoengine import MongoEngine
from werkzeug.security import generate_password_hash, check_password_hash
import bcrypt
# Break the cycle of circular dependency: the extensions are created here
# unbound and initialised against the app elsewhere.
mongo = MongoEngine()
login = LoginManager()
@login.user_loader
def load_user(user_id):
    """Flask-Login hook: resolve a session's stored id to a User, or None.

    Looks up by document id, never by user name.
    """
    return User.objects(id=user_id).first()
class Mod(mongo.Document):
    """A hosted KSP mod: metadata, images, and its uploaded downloads."""
    name = mongo.StringField(required=True, unique=True, default='')
    dependencies = mongo.ListField(mongo.ReferenceField('self'))
    description = mongo.StringField(required=True)
    downloads = mongo.ListField(mongo.EmbeddedDocumentField('ModDownload'))
    #images = mongo.ListField(mongo.ImageField(thumbnail_size=(100, 100, True)))
    images = mongo.ListField(mongo.FileField())
    install_instructions = mongo.StringField(required=True, default='')
    random = mongo.FloatField(required=True, default=0)
    registered_timestamp = mongo.DateTimeField(required=True)
    summary = mongo.StringField(required=True)
    tags = mongo.ListField(mongo.ReferenceField('Tag'))
    updated_timestamp = mongo.DateTimeField(required=True)

    def get_latest_download(self):
        """Return the most recently uploaded ModDownload, or None if there
        are no downloads.

        Ties keep the earliest-listed entry, as the original manual scan did.
        """
        if not self.downloads:
            return None
        return max(self.downloads, key=lambda d: d.upload_timestamp)

    def get_total_downloads(self):
        """Total download count across every uploaded version."""
        return sum(len(download.downloaded) for download in self.downloads)
class ModDownload(mongo.EmbeddedDocument):
    """Specific mod download (one uploaded version of a mod).

    It looks like it is not possible to query mongo and return just an
    embedded document.  Consider making ModDownload a Document with a
    reference to the Mod?

    Author's note: I chose to store the file in the database using gridfs
    as convenience for the prototype. I have not researched into seeing if
    this is A Good Idea™. My guess is that uploading to the file system and
    serving directly via nginx will probably scale better.

    It might also be worth refactoring this to a generic file class with
    moddownload, image files, and craft files inheriting.

    Apparently nginx can be compiled to serve gridfs as well.
    """
    mod_version = mongo.StringField(required=True)
    ksp_version = mongo.StringField(required=True)
    url = mongo.StringField()
    zip_file = mongo.FileField()
    upload_timestamp = mongo.DateTimeField(required=True)
    author = mongo.ReferenceField('User', required=True)
    change_log = mongo.StringField(default='')
    # Per-download statistics: one entry each time a user fetches this file.
    downloaded = mongo.ListField(mongo.EmbeddedDocumentField('ModDownloadStat'))
class ModDownloadStat(mongo.EmbeddedDocument):
    """One download event: who fetched the file and when."""
    user = mongo.ReferenceField('User')
    timestamp = mongo.DateTimeField()
class User(mongo.Document, UserMixin):
    """Account document; doubles as the Flask-Login user class."""
    name = mongo.StringField(required=True, unique=True, default='')
    email = mongo.StringField(required=True)  # Not email field due to new tld
    password_hash = mongo.StringField()
    level = mongo.StringField(default='standard')
    subscribed_mods = mongo.ListField(mongo.ReferenceField(Mod))
    owned_mods = mongo.ListField(mongo.ReferenceField(Mod))
    downloaded = mongo.ListField(mongo.EmbeddedDocumentField('UserDownloadStat'))
    registered = mongo.DateTimeField()

    @property
    def password(self):
        """Write-only: the plaintext password is never stored or readable."""
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        self.password_hash = bcrypt.hashpw(password, bcrypt.gensalt(log_rounds=12))

    def verify_password(self, password):
        """Check a candidate password against the stored bcrypt hash."""
        return bcrypt.checkpw(password, self.password_hash)

    def is_admin(self):
        """True when the account's level is 'admin' (case-insensitive)."""
        return self.level.lower() == 'admin'
class AnonymousUser(AnonymousUserMixin):
    """Anonymous visitor; mirrors User.is_admin so templates can call it."""
    def is_admin(self):
        return False
class UserDownloadStat(mongo.EmbeddedDocument):
    """Entry in a list of mods, versions, and when a user downloaded a file."""
    mod = mongo.ReferenceField(Mod)
    version = mongo.StringField()
    timestamp = mongo.DateTimeField()
class Tag(mongo.Document):
    """User defined tags"""
    name = mongo.StringField(required=True)


# Register our anonymous-user class with Flask-Login, which instantiates
# it for unauthenticated requests.
login.anonymous_user = AnonymousUser
"repo_name": "croxis/kmr",
"path": "app/models.py",
"copies": "1",
"size": "4495",
"license": "mit",
"hash": -2229914786500480800,
"line_mean": 34.952,
"line_max": 118,
"alpha_frac": 0.7122190073,
"autogenerated": false,
"ratio": 4.191231343283582,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5403450350583582,
"avg_score": null,
"num_lines": null
} |
__author__ = 'croxis'
from flask import abort, flash, jsonify, make_response, redirect, render_template, request, url_for
from flask.ext.openid import OpenID # OpenID support broken until fix: https://github.com/mitsuhiko/flask-openid/pull/34
from flask.ext.pagedown.fields import PageDownField
from flask_wtf import Form
from flask_wtf.file import FileField, FileAllowed, FileRequired
from werkzeug.security import generate_password_hash, check_password_hash
from wtforms import BooleanField, PasswordField, SubmitField, TextField, TextAreaField, ValidationError
from wtforms.validators import Email, EqualTo, Optional, Required, URL
import mimetypes
import random
from datetime import datetime
'''@oid.after_login
def after_login(resp):
if not resp.email:
flash('Invalid login. Please try again.')
return redirect(url_for('login'))
print("test")'''
########
# Routes
########
if __name__=='__main__':
    # Development helpers, intentionally left disabled:
    #Mod.drop_collection()
    #User.drop_collection()
    #app.run(debug=True, host='0.0.0.0')
    pass
"repo_name": "croxis/kmr",
"path": "app/main/kmr.py",
"copies": "1",
"size": "1047",
"license": "mit",
"hash": 3628862929456666600,
"line_mean": 24.5609756098,
"line_max": 121,
"alpha_frac": 0.7258834766,
"autogenerated": false,
"ratio": 3.6228373702422147,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.980238503311287,
"avg_score": 0.009267162745868824,
"num_lines": 41
} |
__author__ = 'croxis'
from flask_wtf import Form
from flask_wtf.file import FileField
from wtforms import TextField, TextAreaField, ValidationError
from wtforms.validators import Required
from flask.ext.pagedown.fields import PageDownField
from ..models import Mod, User
'''class OIDLoginForm(Form):
"""OID user is currently broken at the moment"""
openid = TextField('openid', validators = [Required()])
remember_me = BooleanField('remember_me', default = False)'''
class ModDownloadForm(Form):
    """Upload form for a new version (zip file) of an existing mod."""
    mod_version = TextField('Mod Version', validators=[Required()])
    ksp_version = TextField('KSP Version', validators=[Required()])
    file_path = FileField('Zip File', validators=[Required()])
    #submit = SubmitField('Upload')
class ModForm(Form):
    """Registration form for a new mod."""
    name = TextField('Mod Name', validators=[Required()])
    # The attribute stays `summery` (templates reference the field by
    # name), but the user-facing label typo is fixed.
    summery = TextAreaField('Summary -- a short description of your mod',
                            validators=[Required()])
    description = TextAreaField('Extended Description', validators=[Required()])

    def validate_name(self, field):
        """WTForms inline validator: reject names already registered."""
        if Mod.objects(name=field.data).first():
            raise ValidationError("Mod name already in use.")
| {
"repo_name": "croxis/kmr",
"path": "app/main/forms.py",
"copies": "1",
"size": "1159",
"license": "mit",
"hash": 4696400715043045000,
"line_mean": 34.1212121212,
"line_max": 98,
"alpha_frac": 0.7100949094,
"autogenerated": false,
"ratio": 4.066666666666666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.03182498384518587,
"num_lines": 33
} |
__author__ = 'croxis'
from flask_wtf import Form
from wtforms import BooleanField, PasswordField, SubmitField, TextField, ValidationError
from wtforms.validators import Email, EqualTo, Required
from ..models import User
class LoginForm(Form):
    """Username/password sign-in form."""
    username = TextField('User Name', validators=[Required()])
    password = PasswordField('Password', validators=[Required()])
    remember_me = BooleanField('Stay logged in')
    submit = SubmitField('Log In')
class UserEditForm(Form):
    """Profile-update form: change email and/or password.

    NOTE(review): old_password is collected but not verified by this
    form itself -- confirm the handling view checks it before applying
    changes.
    """
    email = TextField('Email', validators=[Email()])
    old_password = PasswordField('Current Password', validators=[Required()])
    password = PasswordField('New Password', validators=[EqualTo('password2', message='Passwords must match')])
    password2 = PasswordField('Confirm new password')
    submit = SubmitField('Update')
class UserRegisterForm(Form):
    """New-account form with a username uniqueness check."""
    username = TextField('User Name', validators=[Required()])
    email = TextField('Email', validators=[Required(), Email()])
    password = PasswordField('Password', validators=[Required(), EqualTo('password2', message='Passwords must match')])
    password2 = PasswordField('Confirm password', validators=[Required()])
    submit = SubmitField('Register')

    def validate_username(self, field):
        # WTForms inline-validator convention: validate_<fieldname>.
        if User.objects(name=field.data).first():
            raise ValidationError("User name already in use.")
"repo_name": "croxis/kmr",
"path": "app/user/forms.py",
"copies": "1",
"size": "1365",
"license": "mit",
"hash": -3902717611878918700,
"line_mean": 39.1764705882,
"line_max": 119,
"alpha_frac": 0.7157509158,
"autogenerated": false,
"ratio": 4.431818181818182,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5647569097618181,
"avg_score": null,
"num_lines": null
} |
__author__ = 'croxis'
"""
Python source: https://gist.github.com/petrklus/b1f427accdf7438606a6
Original pseudo code:
Set Temperature = Temperature \ 100
Calculate Red:
If Temperature <= 66 Then
Red = 255
Else
Red = Temperature - 60
Red = 329.698727446 * (Red ^ -0.1332047592)
If Red < 0 Then Red = 0
If Red > 255 Then Red = 255
End If
Calculate Green:
If Temperature <= 66 Then
Green = Temperature
Green = 99.4708025861 * Ln(Green) - 161.1195681661
If Green < 0 Then Green = 0
If Green > 255 Then Green = 255
Else
Green = Temperature - 60
Green = 288.1221695283 * (Green ^ -0.0755148492)
If Green < 0 Then Green = 0
If Green > 255 Then Green = 255
End If
Calculate Blue:
If Temperature >= 66 Then
Blue = 255
Else
If Temperature <= 19 Then
Blue = 0
Else
Blue = Temperature - 10
Blue = 138.5177312231 * Ln(Blue) - 305.0447927307
If Blue < 0 Then Blue = 0
If Blue > 255 Then Blue = 255
End If
End If
"""
import math
from panda3d.core import Vec3
def convert_K_to_RGB_float(temperature):
    """Like convert_K_to_RGB, but with each channel scaled to [0, 1]."""
    rgb = convert_K_to_RGB(temperature)
    return rgb[0] / 255.0, rgb[1] / 255.0, rgb[2] / 255.0
def convert_K_to_RGB(colour_temperature):
    """
    Converts from K to RGB, algorithm courtesy of
    http://www.tannerhelland.com/4435/convert-temperature-rgb-algorithm-code/
    """
    def clamp(channel):
        # Clip a raw channel value into the displayable 0..255 range.
        if channel < 0:
            return 0
        if channel > 255:
            return 255
        return channel

    # The fit is only valid for 1000K..40000K; clip out-of-range input.
    kelvin = min(max(colour_temperature, 1000), 40000)
    t = kelvin / 100.0

    # red channel
    if t <= 66:
        red = 255
    else:
        red = clamp(329.698727446 * math.pow(t - 60, -0.1332047592))

    # green channel: two fits, split at 66
    if t <= 66:
        green = clamp(99.4708025861 * math.log(t) - 161.1195681661)
    else:
        green = clamp(288.1221695283 * math.pow(t - 60, -0.0755148492))

    # blue channel: saturated above 66, zero below 19
    if t >= 66:
        blue = 255
    elif t <= 19:
        blue = 0
    else:
        blue = clamp(138.5177312231 * math.log(t - 10) - 305.0447927307)

    return Vec3(red, green, blue)
"repo_name": "croxis/Panda-Core-Technology",
"path": "Planets/utils/blackbody.py",
"copies": "1",
"size": "2901",
"license": "mit",
"hash": 7705592892069374000,
"line_mean": 23.593220339,
"line_max": 79,
"alpha_frac": 0.5429162358,
"autogenerated": false,
"ratio": 3.5814814814814815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46243977172814815,
"avg_score": null,
"num_lines": null
} |
__author__ = 'croxis'
import sandbox
from direct.directnotify.DirectNotify import DirectNotify
log = DirectNotify().newCategory("SpaceDrive-ClientNet")
#PROPOSAL! {server entity id: client entity id} and reverse lookup dict too
class ClientNetworkSystem(sandbox.UDPNetworkSystem):
def init2(self):
self.packetCount = 0
self.accept('login', self.sendLogin)
self.accept('requestStations', self.requestStations)
self.accept('requestThrottle', self.requestThrottle)
self.accept('requestCreateShip', self.requestCreateShip)
self.accept('requestTarget', self.requestTarget)
def process_packet(self, msgID, remotePacketCount, ack, acks, hashID, serialized, address):
#If not in our protocol range then we just reject
if msgID < 0 or msgID > 200:
return
data = protocol.readProto(msgID, serialized)
if msgID == protocol.CONFIRM_STATIONS:
sandbox.send('shipUpdate', [data, True])
sandbox.send('setShipID', [data])
sandbox.send('makeStationUI', [data])
elif msgID == protocol.PLAYER_SHIPS:
sandbox.send('shipUpdates', [data])
sandbox.send('shipSelectScreen', [data])
elif msgID == protocol.POS_PHYS_UPDATE:
sandbox.send('shipUpdates', [data])
elif msgID == protocol.SHIP_CLASSES:
sandbox.send('shipClassList', [data])
def sendLogin(self, serverAddress):
self.serverAddress = serverAddress
datagram = self.generateGenericPacket(protocol.LOGIN)
universals.log.debug("sending login")
self.send(datagram)
def requestCreateShip(self, shipName, className):
datagram = protocol.requestCreateShip(shipName, className)
self.send(datagram)
def requestStations(self, shipid, stations):
datagram = protocol.requestStations(shipid, stations)
self.send(datagram)
def requestThrottle(self, throttle, heading):
datagram = protocol.requestThrottle(throttle, heading)
self.send(datagram)
def requestTarget(self, targetID):
datagram = protocol.requestTurretTarget(targetID)
self.send(datagram)
def send(self, datagram):
self.send_data(datagram, self.serverAddress)
class ServerComponent:
"""Theoretical component for server generated and sent entities"""
serverEntityID = 0
lastServerUpdate = 0
| {
"repo_name": "croxis/SpaceDrive",
"path": "spacedrive/networking/client_networking.py",
"copies": "1",
"size": "2496",
"license": "mit",
"hash": 3542728348685860400,
"line_mean": 35.2537313433,
"line_max": 95,
"alpha_frac": 0.6574519231,
"autogenerated": false,
"ratio": 4.146179401993355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5303631325093355,
"avg_score": null,
"num_lines": null
} |
__author__ = 'croxis'
"""
Python source: https://gist.github.com/petrklus/b1f427accdf7438606a6
Original pseudo code:
Set Temperature = Temperature \ 100
Calculate Red:
If Temperature <= 66 Then
Red = 255
Else
Red = Temperature - 60
Red = 329.698727446 * (Red ^ -0.1332047592)
If Red < 0 Then Red = 0
If Red > 255 Then Red = 255
End If
Calculate Green:
If Temperature <= 66 Then
Green = Temperature
Green = 99.4708025861 * Ln(Green) - 161.1195681661
If Green < 0 Then Green = 0
If Green > 255 Then Green = 255
Else
Green = Temperature - 60
Green = 288.1221695283 * (Green ^ -0.0755148492)
If Green < 0 Then Green = 0
If Green > 255 Then Green = 255
End If
Calculate Blue:
If Temperature >= 66 Then
Blue = 255
Else
If Temperature <= 19 Then
Blue = 0
Else
Blue = Temperature - 10
Blue = 138.5177312231 * Ln(Blue) - 305.0447927307
If Blue < 0 Then Blue = 0
If Blue > 255 Then Blue = 255
End If
End If
"""
import math
from panda3d.core import Vec3
def convert_K_to_RGB_float(temperature):
    """Like convert_K_to_RGB, but with each channel scaled to [0, 1]."""
    rgb = convert_K_to_RGB(temperature)
    return rgb[0] / 255.0, rgb[1] / 255.0, rgb[2] / 255.0
def convert_K_to_RGB(colour_temperature):
    """
    Converts from K to RGB, algorithm courtesy of
    http://www.tannerhelland.com/4435/convert-temperature-rgb-algorithm-code/
    """
    def clamp(channel):
        # Clip a raw channel value into the displayable 0..255 range.
        if channel < 0:
            return 0
        if channel > 255:
            return 255
        return channel

    # The fit is only valid for 1000K..40000K; clip out-of-range input.
    kelvin = min(max(colour_temperature, 1000), 40000)
    t = kelvin / 100.0

    # red channel
    if t <= 66:
        red = 255
    else:
        red = clamp(329.698727446 * math.pow(t - 60, -0.1332047592))

    # green channel: two fits, split at 66
    if t <= 66:
        green = clamp(99.4708025861 * math.log(t) - 161.1195681661)
    else:
        green = clamp(288.1221695283 * math.pow(t - 60, -0.0755148492))

    # blue channel: saturated above 66, zero below 19
    if t >= 66:
        blue = 255
    elif t <= 19:
        blue = 0
    else:
        blue = clamp(138.5177312231 * math.log(t - 10) - 305.0447927307)

    return Vec3(red, green, blue)
"repo_name": "croxis/SpaceDrive",
"path": "spacedrive/utils/blackbody.py",
"copies": "1",
"size": "3018",
"license": "mit",
"hash": -7012207628510967000,
"line_mean": 23.593220339,
"line_max": 79,
"alpha_frac": 0.5218687873,
"autogenerated": false,
"ratio": 3.7121771217712176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4734045909071217,
"avg_score": null,
"num_lines": null
} |
import requests
import optparse
import sys
import threading
import time
import Queue
import re
from progressbar import ProgressBar
# Browser-like request headers so the scanner's probes are less likely to
# be filtered as an automated client.
header = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    'Accept-Encoding': 'gzip, deflate, compress',
    'Cache-Control': 'max-age=0',
    'Connection': 'keep-alive',
    "Accept-Language": "zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3",
    "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:35.0) Gecko/20100101 Firefox/35.0"
}
class DirScan:
def __init__(self, target, threads_num, ext):
self.target = target.strip()
self.threads_num = threads_num
self.ext = ext
self.lock = threading.Lock()
#outfile
self.__load_dir_dict()
self.errorpage = r'无法加载模块|[nN]ot [fF]ound|不存在|未找到|Error|Welcome to nginx!|404|美容|百姓大事件|功能入口'
self.regex = re.compile(self.errorpage)
def __load_dir_dict(self):
self.queue = Queue.Queue()
ext = self.ext
target = self.target
hostuser = target.split('.')
hostuser = hostuser[len(hostuser)-2]
#print hostuser
bak = ['/'+hostuser+'.rar','/'+hostuser+'.zip','/'+hostuser+hostuser+'.rar','/'+hostuser+'.rar','/'+hostuser+'.tar.gz','/'+hostuser+'.tar','/'+hostuser+'123.zip','/'+hostuser+'123.tar.gz','/'+hostuser+hostuser+'.zip','/'+hostuser+hostuser+'.tar.gz','/'+hostuser+hostuser+'.tar','/'+hostuser+'.bak']
for j in range(len(bak)):
BAK = bak[j]
self.queue.put(BAK)
with open('mulu.txt') as f:
for line in f:
mulu = line.replace('$ext$',ext).strip()
if mulu:
#print mulu
self.queue.put(mulu)
def _scan(self):
#print "[*]%s Scaning...." % self.target
try:
while self.queue.qsize() > 0:
sub = self.queue.get(timeout=1.0)
#try:
#print sub
domain = self.target + sub
#print domain
r = requests.get(domain, headers = header, allow_redirects=False, timeout=5)
code = r.status_code
text = r.content
lens = len(text)
if code == 200 and not self.regex.findall(text) and lens != 0 :
try:
title = re.findall(r"<title>(.+?)</title>",text)
print "[*] %s =======> 200 (Title:%s)\n" %(domain, title[0]),
except Exception,e:
print "[*] %s =======> 200\n" %domain,
except Exception,e:
pass
def run(self):
self.start_time = time.time()
t_sequ = []
t_pro = threading.Thread(target=self.progress, name="progress")
t_pro.setDaemon(True)
t_pro.start()
for i in range(self.threads_num):
t = threading.Thread(target=self._scan, name=str(i))
t.setDaemon(True)
t.start()
while self.thread_count > 0:
time.sleep(0.01)
if __name__ == '__main__':
    # Command-line parsing (optparse) is currently disabled; scan targets
    # are read from target.txt instead, one host per line.
    # Bug fix: the file handle was never closed -- use a context manager.
    with open('target.txt', 'r') as f:
        hosts = f.read().split()
    for host in hosts:
        scanner = DirScan(target='http://' + host, threads_num=10, ext='php')
        scanner.run()
| {
"repo_name": "RicterZ/moescan",
"path": "tools/DirScan/pldirscan.py",
"copies": "1",
"size": "4027",
"license": "mit",
"hash": -2029998308000329200,
"line_mean": 34.8648648649,
"line_max": 307,
"alpha_frac": 0.5342878674,
"autogenerated": false,
"ratio": 3.3037344398340247,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43380223072340246,
"avg_score": null,
"num_lines": null
} |
__author__ = 'CSPF'
from ..config import db
class User(db.Model):
    """ORM model for an application user; implements the flask-login
    user-object protocol (is_authenticated / is_active / ... / get_id).

    NOTE(review): passwords are stored in plain text -- should be hashed
    (this repo is a deliberately-vulnerable demo, so confirm before fixing).
    """
    __tablename__ = "users"
    id = db.Column("id", db.Integer, primary_key=True, autoincrement=True)
    username = db.Column('username', db.String(20), unique=True, index=True)
    password = db.Column('password', db.String(50))

    def __init__(self, username, password):
        self.username = username
        self.password = password

    def is_authenticated(self):
        # flask-login hook: every persisted user counts as authenticated.
        return True

    def is_active(self):
        return True

    def is_anonymous(self):
        return False

    def get_id(self):
        # flask-login requires a unicode id (Python 2 codebase).
        return unicode(self.id)

    def create(self):
        """Insert this user unless the username is taken.

        :return: True when inserted, False when the username already exists.
        """
        row = self.query.filter_by(username=self.username).first()
        if row:
            return False
        db.session.add(self)
        db.session.commit()
        return True

    def change_password(self, password):
        """Persist a new password for this user.

        Bug fix: the original wrote ``self.password`` (the OLD password)
        back to the database and ignored the ``password`` argument, so the
        password never actually changed.

        :param password: the new password to store.
        :return: True when a row was updated, False otherwise.
        """
        if self.query.filter_by(username=self.username).update(dict(password=password)):
            self.password = password
            db.session.commit()
            return True
        return False

    def __repr__(self):
        return '<User %r>' % self.username
| {
"repo_name": "breakthesec/DodoVulnerableBank",
"path": "DodoRESTAPI/app/model/Users.py",
"copies": "1",
"size": "1139",
"license": "mit",
"hash": 6843217516838618000,
"line_mean": 24.3111111111,
"line_max": 93,
"alpha_frac": 0.5899912204,
"autogenerated": false,
"ratio": 3.9411764705882355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5031167690988235,
"avg_score": null,
"num_lines": null
} |
__author__ = 'CSPF'
from ..config import app
from .. model.Transaction import Transaction
from .. model.Account import Account
from flask import request,jsonify
@app.route("/transaction/statement/<int:customer_id>",methods=['POST',"GET"])
def view_statement(customer_id):
    """Return every transaction of ``customer_id`` as a JSON object keyed
    by transaction id; respond 204 (no body) when there are none."""
    transactions = Transaction.query.filter_by(customer_id=customer_id).all()
    if not transactions:
        return "", 204
    statement = {
        txn.id: {
            "amount": txn.amount,
            "remarks": txn.remarks,
            "details": txn.transaction_details,
            "sender_account_number": txn.sender_account_number,
            "receiver_account_number": txn.receiver_account_number,
        }
        for txn in transactions
    }
    return jsonify(statement)
@app.route("/transaction/transfer",methods=['POST',"GET"])
def transfer_amount():
    """Move ``amount`` from the posted customer's account to ``to_account``.

    NOTE(review): no input validation, no balance check, and the sender is
    debited *before* the receiver is looked up -- if ``to_account`` does not
    exist the money disappears while the transfer is still recorded and
    reported as success. Presumably intentional in this deliberately
    vulnerable demo app; confirm before reusing this code.
    """
    customer_id = request.form['customer_id']
    to_account = request.form['to_account']
    amount = request.form['amount']
    account = Account.query.filter_by(id=customer_id).first()
    if account is None:
        return jsonify({"success":False})
    else:
        # Debit first; the credit only happens when the receiver row exists.
        account.debit(amount)
        receiver_account= Account.query.filter_by(account_number=to_account).first()
        if receiver_account:
            receiver_account.credit(amount)
        # The transfer is logged regardless of whether the credit happened.
        transaction = Transaction(account.id,amount,account.account_number,to_account,
                                  remarks="Transferred",
                                  transaction_details="Transferred from "+account.account_number+" to "+to_account)
        transaction.insert()
    return jsonify({"success":True})
| {
"repo_name": "breakthesec/DodoVulnerableBank",
"path": "DodoRESTAPI/app/controller/transaction.py",
"copies": "1",
"size": "1850",
"license": "mit",
"hash": -2944468790624310000,
"line_mean": 44.3,
"line_max": 134,
"alpha_frac": 0.5994594595,
"autogenerated": false,
"ratio": 4.579207920792079,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.567866738029208,
"avg_score": null,
"num_lines": null
} |
__author__ = 'CSPF'
from ..config import db
class Account(db.Model):
__tablename__ = "accounts"
id = db.Column("id",db.Integer,primary_key=True)
branch = db.Column('branch',db.String(50))
account_number = db.Column('account_number',db.String(50))
balance = db.Column('balance',db.Integer)
def __init__(self,id,account_number,balance=0,branch="Arkham"):
self.id = id
self.account_number = account_number
self.balance = balance
self.branch = branch
def get_id(self):
return unicode(self.id)
def create(self):
row = self.query.filter_by(id=self.id).first()
if row:
return False
else:
db.session.add(self)
db.session.commit()
return True
def debit(self,debit_amount):
new_amount = self.balance - int(debit_amount)
if self.query.filter_by(id=self.id).update(dict(balance=new_amount)):
db.session.commit()
print "New Balance : "+str(self.balance)+"\n"
def credit(self,debit_amount):
new_amount = self.balance + int(debit_amount)
if self.query.filter_by(id=self.id).update(dict(balance=new_amount)):
db.session.commit()
print "New Balance : "+str(self.balance)+"\n"
def __repr__(self):
return '<Account %r>' % self.account_number
| {
"repo_name": "breakthesec/DodoVulnerableBank",
"path": "DodoRESTAPI/app/model/Account.py",
"copies": "1",
"size": "1420",
"license": "mit",
"hash": -7613404034541715000,
"line_mean": 30.2727272727,
"line_max": 77,
"alpha_frac": 0.5725352113,
"autogenerated": false,
"ratio": 3.641025641025641,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47135608523256406,
"avg_score": null,
"num_lines": null
} |
__author__ = 'CSPF'
from ..config import db
class Transaction(db.Model):
    """ORM model recording a single money transfer between two accounts.

    NOTE(review): the physical column name 'sender_accoutn_number' is
    misspelled; fixing it would require a schema migration, so it is kept.
    """
    __tablename__ = "transaction"
    id = db.Column("id", db.Integer, primary_key=True, autoincrement=True)
    # Id of the customer who initiated the transfer.
    customer_id = db.Column("customer_id", db.Integer)
    amount = db.Column('amount', db.Integer)
    remarks = db.Column('remarks', db.String(100))
    transaction_details = db.Column('transaction_details', db.String(200))
    sender_account_number = db.Column('sender_accoutn_number', db.String(50))
    receiver_account_number = db.Column('receiver_account_number', db.String(50))

    def __init__(self, customer_id, amount, sender_account_number, receiver_account_number, remarks="",
                 transaction_details=""):
        """Build an unsaved transaction row; call insert() to persist it."""
        self.customer_id = customer_id
        self.amount = amount
        self.sender_account_number = sender_account_number
        self.receiver_account_number = receiver_account_number
        self.remarks = remarks
        self.transaction_details = transaction_details

    def get_id(self):
        # Unicode id, flask-login style (Python 2 codebase).
        return unicode(self.id)

    def insert(self):
        """Persist this transaction. Always returns True."""
        db.session.add(self)
        db.session.commit()
        return True

    def __repr__(self):
        return '<Transaction %r>' % self.id
| {
"repo_name": "breakthesec/DodoVulnerableBank",
"path": "DodoRESTAPI/app/model/Transaction.py",
"copies": "1",
"size": "1250",
"license": "mit",
"hash": 481864319006227140,
"line_mean": 35.8787878788,
"line_max": 103,
"alpha_frac": 0.6328,
"autogenerated": false,
"ratio": 3.8580246913580245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49908246913580245,
"avg_score": null,
"num_lines": null
} |
__author__ = 'CSPF'
import os
from OpenSSL import crypto
import socket, ssl
from utils import get_home_dir
class SSLCertifiacate:
def __init__(self,host=None):
self.host = host
ssl_dir = os.path.join(get_home_dir(), "ssl")
self.key_path = os.path.join(ssl_dir, "dodo.key")
self.cert_path = os.path.join(ssl_dir, "dodo.crt")
if not os.path.exists(ssl_dir):
os.makedirs(ssl_dir)
def generate(self):
if not self.host:
self.host = socket.gethostname()
print "SSL Host used for Certificate Generation: "+self.host
key = crypto.PKey()
key.generate_key(crypto.TYPE_RSA, 2048)
cert = crypto.X509()
cert.get_subject().C = "IN"
cert.get_subject().ST = "TN"
cert.get_subject().L = "dodo"
cert.get_subject().O = "dodo"
cert.get_subject().OU = "dodo"
cert.get_subject().CN = self.host
cert.set_serial_number(1111)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(key)
cert.sign(key, "sha1")
with open(self.cert_path, "w") as f:
f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
with open(self.key_path, "w") as f:
f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, key))
def get(self):
if not os.path.exists(self.cert_path) or not os.path.exists(self.cert_path):
self.generate()
# context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2) #For some reason android not able to communicate with stupid python on TLSv1_2
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(self.cert_path, self.key_path)
return context
| {
"repo_name": "breakthesec/DodoVulnerableBank",
"path": "DodoRESTAPI/app/digital_certificate.py",
"copies": "1",
"size": "1868",
"license": "mit",
"hash": -2807401258273301000,
"line_mean": 33.9230769231,
"line_max": 135,
"alpha_frac": 0.5867237687,
"autogenerated": false,
"ratio": 3.341681574239714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44284053429397136,
"avg_score": null,
"num_lines": null
} |
_author_ = 'ctippur'
import boto3
import os
from aws_requests_auth.aws_auth import AWSRequestsAuth
class credsaws():
    """Builds SigV4 request auth for an AWS-hosted Elasticsearch endpoint
    from the default boto3 session credentials."""
    ## init
    session = boto3.session.Session()
    #es_host='search-enrichment-service-ebin6h6q5xc7ehx2snhwbtyl3m.us-west-2.es.amazonaws.com'
    #es_host=os.environ['es_endpoint']
    # NOTE(review): both es_host assignments above are commented out, so
    # ``self.es_host`` below raises AttributeError at construction time --
    # confirm the intended configuration source before using this class.

    def __init__(self):
        # Frozen credentials: an immutable access-key/secret/token snapshot.
        credentials=self.session.get_credentials().get_frozen_credentials()
        self.awsauth = AWSRequestsAuth(
            aws_access_key=credentials.access_key,
            aws_secret_access_key=credentials.secret_key,
            aws_token=credentials.token,
            aws_host=self.es_host,
            aws_region=self.session.region_name,
            aws_service='es'
        )

    def getAuth(self):
        """Return the prepared AWSRequestsAuth object."""
        return self.awsauth

    def getESHost(self):
        """Return the Elasticsearch host name (see NOTE above)."""
        return self.es_host
class credslocal(object):
    """Local/test variant: builds a boto3 session with placeholder
    credentials and a dummy AWSRequestsAuth."""
    ## init
    session = boto3.session.Session(
        region_name='us-east-1',
        aws_access_key_id='example_key_id',
        aws_secret_access_key='my_super_secret_key'
    )
    awsauth = None

    def __init__(self, service):
        """Create a resource for ``service`` on the placeholder session.

        NOTE(review): the AWSRequestsAuth arguments below are literal
        strings ("credentials.access_key", ...), not real values -- this
        auth object cannot sign valid requests; presumably a stub for
        local testing. Confirm before relying on it.
        """
        print ("In init")
        print (boto3.__version__)
        self.session.resource(service)
        self.awsauth = AWSRequestsAuth(
            aws_access_key="credentials.access_key",
            aws_secret_access_key="credentials.secret_key",
            aws_token="credentials.token",
            aws_host="self.es_host",
            aws_region="us-east-1",
            aws_service=service
        )

    def getAuth(self):
        """Return the stub AWSRequestsAuth object."""
        return self.awsauth

    def getSession(self):
        """Return the placeholder boto3 session."""
        return self.session
| {
"repo_name": "ctippur/swaggeralliance",
"path": "src/python/flask/swagger_server/lib/aws/Creds.py",
"copies": "1",
"size": "1614",
"license": "apache-2.0",
"hash": -4570439810450338300,
"line_mean": 28.3454545455,
"line_max": 94,
"alpha_frac": 0.6059479554,
"autogenerated": false,
"ratio": 3.45610278372591,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9374977765008567,
"avg_score": 0.03741459482346852,
"num_lines": 55
} |
__author__ = 'Călin Sălăgean'
class Person():
__id_class = 0
def __init__(self, first_name = None, last_name = None, address = None, id = None):
'''
Person class constructor
:param name:
:param address:
:param event:
:return: Class instance
'''
self.validate(first_name, last_name, address)
self.__first_name, self.__last_name, self.__address = first_name, last_name, address
self.__id = self.__set_id(id)
@staticmethod
def validate(first_name, last_name, address):
'''
Validation method for Person class
:param name:
:param address:
:param event:
:return: None
:raise AttributeError:
'''
if not first_name or not last_name:
raise AttributeError("Please provide full name")
if not address:
raise AttributeError("Please provide an address")
# TODO: Implement validating existing events
def __set_id(self, id):
'''
Increments the class _id attribute
:param id:
:return: None
'''
if id is None:
instance_id = Person.__id_class
else:
instance_id = id
Person.__id_class += 1
return instance_id
def get_id(self):
'''
Returns person ID
:return id:
'''
return self.__id
def get_name(self):
'''
Returns person name
:return name:
'''
return self.__first_name + ' ' + self.__last_name
def get_address(self):
'''
Returns person address
:return address:
'''
return self.__address
def update(self, first_name = None, last_name = None, address = None):
'''
Update person instance attributes
:param first_name:
:param last_name:
:param address:
:return: None
'''
if first_name is None:
first_name = self.__first_name
if last_name is None:
last_name = self.__last_name
if address is None:
address = self.__address
self.validate(first_name, last_name, address)
self.__first_name, self.__last_name, self.__address = first_name, last_name, address
def get_serialization(self):
'''
Return JSON serialization
:return: JSON Object
'''
dict = {
'id': self.__id,
'first_name': self.__first_name,
'last_name': self.__last_name,
'address': self.__address,
}
return dict
@staticmethod
def set_class_id(id):
'''
Set class _id attribute
:param id:
:return: None
'''
Person.__id_class = id | {
"repo_name": "dooma/Events",
"path": "events/models/person.py",
"copies": "1",
"size": "2816",
"license": "mit",
"hash": 2847937633247938000,
"line_mean": 24.5818181818,
"line_max": 92,
"alpha_frac": 0.5151084252,
"autogenerated": false,
"ratio": 4.307810107197549,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5322918532397549,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Călin Sălăgean'
from datetime import datetime
import re
class Event():
__id_class = 0
def __init__(self, date = None, time = None, description = None, id = None):
'''
Event class contructor
:param date:
:param time:
:param description:
:return: Instance variable
'''
self.validate(date, time, description)
self.__date, self.__time, self.__description, self.__id = date, time, description, self.__set_id(id)
def validate(self, date, time, description):
'''
Raise exceptions if parameters doesn't have right format
:param date:
:param time:
:param description:
:return: None
'''
if not date or not time or not description:
raise AttributeError("Please provide all fields")
try:
datetime.strptime(date, '%d/%m/%Y')
except ValueError:
raise ValueError("Date doesn't have the right format")
time = re.sub('\W', '', time)
if (len(time) != 4 and len(time) != 2) or not time.isdigit():
raise ValueError("Time doesn't have the right format")
def __set_id(self, id):
'''
Increments class _id attribute
:return: None
'''
if id is None:
instance_id = Event.__id_class
else:
instance_id = id
Event.__id_class += 1
return instance_id
def get_id(self):
'''
Returns event id
:return: id
'''
return self.__id
def get_date(self):
'''
Returns event date
:return: date
'''
return self.__date
def get_time(self):
'''
Returns event time
:return: time
'''
return self.__time
def get_description(self):
'''
Returns event description
:return: description
'''
return self.__description
def update(self, date = None, time = None, description = None):
'''
Updates event instance
:param date:
:param time:
:param description:
:return:
'''
if date is None:
date = self.__date
if time is None:
time = self.__time
if description is None:
description = self.__description
self.validate(date, time, description)
self.__date, self.__time, self.__description = date, time, description
def get_serialization(self):
'''
Returns JSON object
:return: json
'''
dict = {
'id': self.get_id(),
'date': self.get_date(),
'time': self.get_time(),
'description': self.get_description()
}
return dict
@staticmethod
def set_class_id(id):
'''
Sets class id
:param id:
:return:
'''
Event.__id_class = id | {
"repo_name": "dooma/Events",
"path": "events/models/event.py",
"copies": "1",
"size": "2981",
"license": "mit",
"hash": -1009934390293902100,
"line_mean": 23.825,
"line_max": 108,
"alpha_frac": 0.5080591001,
"autogenerated": false,
"ratio": 4.444776119402985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0027998827659700353,
"num_lines": 120
} |
__author__ = 'Călin Sălăgean'
from events.controllers.event import EventController
from events.controllers.person import PersonController
class Menu():
    """Console menu for the events app: maps numeric commands (Romanian
    user-facing strings) to PersonController / EventController actions."""

    # Runtime menu text shown to the user -- program output, kept verbatim.
    text = """
    1. Adaugare persoana
    2. Afisare persoane
    3. Afisare persoana selectata
    4. Stergere persaoana
    5. Editare persoana
    6. Inscriere persoana la eveniment
    7. Adaugare eveniment
    8. Afisare evenimente
    9. Afisare eveniment selectat
    10. Sterge eveniment
    11. Editare eveniment
    12. Cautare persoane
    13. Cautare evenimente
    14. Lista de evenimente dupa persoana
    15. Lista de persoane inscrise la evenimente ordonate dupa numarul de evenimente
    16. Listeaza primele 20% evenimente importante
    (Alta tasta). Iesire
    """

    def __init__(self):
        self.menu_text = Menu.text
        self.person_controller = PersonController()
        self.event_controller = EventController()
        # Dispatch table: menu option -> bound handler method.
        self.command_menu = {
            '1': self.add_person,
            '2': self.show_all_persons,
            '3': self.show_person,
            '4': self.delete_person,
            '5': self.edit_person,
            '6': self.associate_person,
            '7': self.add_event,
            '8': self.show_all_events,
            '9': self.show_event,
            '10': self.delete_event,
            '11': self.edit_event,
            '12': self.search_people,
            '13': self.search_events,
            '14': self.events_list_by_person,
            '15': self.people_most_events,
            '16': self.get_top_events
        }

    def print_menu(self):
        """Print the menu text."""
        print(self.menu_text)

    def execute_command(self):
        """Read one command, run its handler, then recurse for the next.

        NOTE(review): the bare ``except`` treats ANY error -- including bugs
        inside handlers -- as "exit", and each command deepens the
        recursion, so very long sessions can hit the recursion limit.
        """
        self.print_menu()
        command = input("Introduceti comanda ")
        try:
            self.command_menu[command]()
            self.execute_command()
        except:
            print("Iesire")

    def add_person(self):
        """Prompt for the person fields and create the person."""
        first_name = input("Introduceti prenume ")
        last_name = input("Introduceti nume de familie ")
        address = input("Introduceti adresa ")
        print(self.person_controller.insert(first_name, last_name, address))

    def show_all_persons(self):
        """List every person."""
        print(self.person_controller.index())

    def show_person(self):
        """Prompt for an id and show that person."""
        id = input("Introduceti id-ul ")
        print(self.person_controller.show(id))

    def delete_person(self):
        """Prompt for an id and delete that person."""
        id = input("Introduceti id-ul ")
        print(self.person_controller.delete(id))

    def edit_person(self):
        """Prompt for an id plus new field values and update the person."""
        id = input("Introduceti id-ul ")
        first_name = input("Introduceti prenume ")
        last_name = input("Introduceti nume de familie ")
        address = input("Introduceti adresa ")
        print(self.person_controller.update(id, first_name, last_name, address))

    def associate_person(self):
        """Prompt for a person id, event id and date; register the person."""
        person_id = input("Introduceti ID persoana ")
        event_id = input("Introduceti ID event ")
        data = input("Introduceti data ")
        print(self.person_controller.associate(person_id, event_id, data))

    def add_event(self):
        """Prompt for the event fields and create the event."""
        date = input("Introduceti data ")
        time = input("Introduceti ora ")
        description = input("Introduceti descriere ")
        print(self.event_controller.insert(date, time, description))

    def show_event(self):
        """Prompt for an id and show that event."""
        id = input("Introduceti id-ul ")
        print(self.event_controller.show(id))

    def show_all_events(self):
        """List every event."""
        print(self.event_controller.index())

    def delete_event(self):
        """Prompt for an id and delete that event."""
        id = input("Introduceti id-ul ")
        print(self.event_controller.delete(id))

    def edit_event(self):
        """Prompt for an id plus new field values and update the event."""
        id = input("Introduceti id-ul ")
        date = input("Introduceti data ")
        time = input("Introduceti ora ")
        description = input("Introduceti descriere ")
        print(self.event_controller.update(id, date, time, description))

    def search_people(self):
        """Prompt for a term and search people."""
        term = input("Introduceti termenul de cautare ")
        print(self.person_controller.search(term))

    def search_events(self):
        """Prompt for a term and search events."""
        term = input("Introduceti termenul de cautare ")
        print(self.event_controller.search(term))

    def events_list_by_person(self):
        """Prompt for a person id and list that person's events."""
        person_id = input("Introduceti ID ")
        print(self.person_controller.get_events(person_id))

    def people_most_events(self):
        """List people ordered by attendance count."""
        print(self.person_controller.get_top_persons())

    def get_top_events(self):
        """List events ordered by participant count."""
        print(self.event_controller.get_top_events())
menu = Menu()
menu.execute_command() | {
"repo_name": "dooma/Events",
"path": "start.py",
"copies": "1",
"size": "4508",
"license": "mit",
"hash": 1671441266844625400,
"line_mean": 29.8630136986,
"line_max": 88,
"alpha_frac": 0.6057713651,
"autogenerated": false,
"ratio": 3.533333333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.962970766684497,
"avg_score": 0.0018794063176726969,
"num_lines": 146
} |
__author__ = 'Călin Sălăgean'
from events.models.event import Event
from events.repositories.event import EventRepository
from events.repositories.person_event import PersonEventRepository
from events.repositories.person import PersonRepository
class EventController():
def __init__(self):
'''
EventController constructor
:return: object
'''
self.repository = EventRepository()
self.person_event_repository = PersonEventRepository()
self.person_repository = PersonRepository()
def index(self):
'''
Return all events in one string ready to display
:return: string
'''
events = self.repository.get_all()
output = ""
for event in events:
output += str(event.get_id()) + "\t" + event.get_date() + "\t" + event.get_time() + "\t" + event.get_description() + "\n"
return output
def show(self, id):
'''
Return a found event ready to display
:param id:
:return: string
'''
try:
id = int(id)
except ValueError:
return "Please enter an integer for ID"
try:
event = self.repository.get(id)
return str(event.get_id()) + "\t" + event.get_date() + "\t" + event.get_time() + "\t" + event.get_description() + "\n"
except (ValueError, AttributeError) as e:
return e
def insert(self, date = None, time = None, description = None):
'''
Insert an event
:param date:
:param time:
:param description:
:return: string
'''
try:
event = Event(date, time, description)
except (AttributeError, ValueError) as e:
return e
self.repository.insert(event)
return "Event inserted successfully"
def update(self, id, date = None, time = None, description = None):
'''
Update an event
:param id:
:param date:
:param time:
:param description:
:return: string
'''
try:
id = int(id)
except ValueError:
return "Please insert a valid ID"
try:
event = self.repository.get(id)
except ValueError as e:
return e
if date == '':
date = None
if time == '':
time = None
if description == '':
description = None
try:
event.update(date, time, description)
self.repository.update(event)
except (ValueError, AttributeError) as e:
return e
return "Event updated successfully!"
def delete(self, id):
'''
Delete an event
:param id:
:return: string
'''
try:
id = int(id)
except ValueError:
return "Please enter an integer for ID"
try:
event = self.repository.get(id)
self.repository.delete(event)
except (ValueError, AttributeError) as e:
return e
return "Event was deleted successfully"
def get_top_events(self):
'''
Show top events with most visitors
:return: string
'''
relations = self.get_instantiated_relation()
occurences = self.determine_occurences(relations)
relations.sort(
key=lambda rel: EventController.find_dict2(occurences, rel[0].get_id())[rel[0].get_id()],
reverse=True
)
# Unique relations
unique_relations = []
actual_event = -1
for rel in relations:
if rel[0].get_id() != actual_event:
actual_event = rel[0].get_id()
unique_relations.append(rel)
output = ""
for rel in unique_relations:
number_events = EventController.find_dict2(occurences, rel[0].get_id())[rel[0].get_id()]
output += str(rel[0].get_id()) + "\t" + rel[0].get_description() + "\tparticipa \t" + str(number_events) + " persoane\n"
return output
def get_instantiated_relation(self, person_id = None, event_id = None):
'''
Instantiate a Repository Many-to-Many relation
:param person_id:
:param event_id:
:return: array
'''
if person_id is None and event_id is None:
person_events = self.person_event_repository.get_all()
else:
person_events = self.person_event_repository.get_by_id(person_id, event_id)
result = []
for relation in person_events:
person = self.person_repository.get(relation.get_person_id())
event = self.repository.get(relation.get_event_id())
date = relation.get_date()
if date is None:
date = "Doesn't exist"
result.append((event, person, date))
return result
@staticmethod
def determine_occurences(relations):
'''
Find occurences in multiple tuples of realtions
:param relations:
:return: array
'''
occurences = []
for relation in relations:
event_id = relation[0].get_id()
try:
occurence = EventController.find_dict2(occurences, event_id)
except:
occurences.append({
event_id: 0
})
occurence = occurences[-1]
finally:
occurence[event_id] += 1
return occurences
@staticmethod
def find_dict(array, id):
'''
Find dictionary with given id
:param array:
:param id:
:return: dictionary
:raise: ValueError if id is not found
'''
for elem in array:
try:
elem[id]
return elem
except KeyError:
continue
raise ValueError
@staticmethod
def find_dict2(array, id):
'''
Find dictionary with given id
:param array:
:param id:
:return: dictionary
:raise: ValueError if id is not found
'''
if not len(array):
raise ValueError
try:
array[0][id]
return array[0]
finally:
return find_dict2(array[1:], id) | {
"repo_name": "dooma/Events",
"path": "events/controllers/event.py",
"copies": "1",
"size": "6395",
"license": "mit",
"hash": -3859223540747952600,
"line_mean": 26.3205128205,
"line_max": 133,
"alpha_frac": 0.5292553191,
"autogenerated": false,
"ratio": 4.3394433129667345,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5368698632066734,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Călin Sălăgean'
from events.models.person import Person
from utils.IO import IO
class PersonRepository():
def __init__(self, filename = 'person.json'):
'''
PersonRepository class constructor
:param filename: Default value 'person.json'
:return:
'''
self.__defaultfile = filename
Person.set_class_id(self.get_max_id())
def insert(self, person):
'''
Add a person object into disk storage
:param event:
:return: None
'''
operation = IO(self.__defaultfile)
try:
people = operation.get()
except ValueError:
people = []
people.append(person.get_serialization())
operation.set(people)
def update(self, person):
'''
Updates a person into disk storage
:param updated_event:
:return:
'''
operation = IO(self.__defaultfile)
people = operation.get()
for per in people:
if per['id'] == person.get_id():
people[people.index(per)] = person.get_serialization()
operation.set(people)
def delete(self, person):
'''
Deletes a person from disk storage
:param deleted_event:
:return:
'''
operation = IO(self.__defaultfile)
people = operation.get()
for per in people:
if per['id'] == person.get_id():
people.remove(per)
operation.set(people)
def get(self, id):
'''
Returns a person with provided ID from disk storage
:param id:
:return: Event instance
'''
for per in self.get_all():
if per.get_id() == id:
return per
raise ValueError('Person not found!')
def get_all(self):
'''
Returns all persons from disk storage
:return:
'''
operation = IO(self.__defaultfile)
people = []
for person in operation.get():
person_instance = Person(person['first_name'], person['last_name'], person['address'], person['id'])
people.append(person_instance)
return people
def get_max_id(self):
'''
Returns the maximum ID found on disk storage
:return:
'''
people = self.get_all()
max = -1
for person in people:
if max < person.get_id():
max = person.get_id()
return max + 1 | {
"repo_name": "dooma/Events",
"path": "events/repositories/person.py",
"copies": "1",
"size": "2520",
"license": "mit",
"hash": -5382964545277476000,
"line_mean": 24.18,
"line_max": 112,
"alpha_frac": 0.5252284466,
"autogenerated": false,
"ratio": 4.423550087873462,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001327656414382078,
"num_lines": 100
} |
__author__ = 'Călin Sălăgean'
from events.repositories.event import EventRepository
from events.repositories.person import PersonRepository
from datetime import datetime
class PersonEvent():
def __init__(self, person_id = None, event_id = None, date = None):
'''
PersonEvent constructor
:param person_id:
:param event_id:
:param date:
:return: object
'''
self.validate(person_id, event_id, date)
self.__person_id, self.__event_id, self.__date = person_id, event_id, date
@staticmethod
def validate(person_id, event_id, date):
'''
Validates PersonEvent params
:param person_id:
:param event_id:
:param date:
:return: None
:raise AttributeError:
:raise ValueError:
'''
if person_id is None or event_id is None:
raise AttributeError('Please provide all fields')
event_repository = EventRepository()
person_repository = PersonRepository()
try:
event = event_repository.get(event_id)
person = person_repository.get(person_id)
except ValueError:
raise ValueError('Invalid IDs! Please provide valid IDs')
if date is not None:
try:
datetime.strptime(date, '%d/%m/%Y')
except ValueError:
raise ValueError("Date doesn't have the right format")
def update(self, person_id = None, event_id = None, date = None):
'''
Updates realation between person and event
:param person_id:
:param event_id:
:param date:
:return: None
'''
if person_id is None:
person_id = self.__person_id
if event_id is None:
event_id = self.__event_id
if date is None:
date = self.__date
self.validate(person_id, event_id, date)
self.__person_id, self.__event_id, self.__date = person_id, event_id, date
def get_person_id(self):
'''
Returns person ID
:return person_id:
'''
return self.__person_id
def get_event_id(self):
'''
Returns event ID
:return event_id:
'''
return self.__event_id
def get_date(self):
'''
Returns date when relation was instantiated
:return date:
'''
return self.__date
def get_serialization(self):
'''
Returns a JSON serialization
:return: JSON Object
'''
dict = {
'person_id': self.__person_id,
'event_id': self.__event_id,
'date': self.__date
}
return dict | {
"repo_name": "dooma/Events",
"path": "events/models/person_event.py",
"copies": "1",
"size": "2731",
"license": "mit",
"hash": -7596357108319026000,
"line_mean": 26.5656565657,
"line_max": 82,
"alpha_frac": 0.5450879765,
"autogenerated": false,
"ratio": 4.302839116719243,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0029635821328909476,
"num_lines": 99
} |
__author__ = 'Călin Sălăgean'
from events.repositories.person_event import PersonEvent, PersonEventRepository
from events.repositories.person import PersonRepository
from events.repositories.event import EventRepository
from events.models.person_event import PersonEvent
import re
class PersonController():
    def __init__(self):
        '''
        Wire up the person, person-event and event repositories.
        :return: object
        '''
        self.repository = PersonRepository()
        self.person_event_repository = PersonEventRepository()
        self.event_repository = EventRepository()
def index(self):
'''
Return all persons
:return: string
'''
output = ""
for person in self.repository.get_all():
output += str(person.get_id()) + "\t" + person.get_name() + "\t" + person.get_address() + "\n"
return output
def show(self, id):
'''
Return a found person
:param id:
:return: string
'''
try:
id = int(id)
except ValueError:
return "Please enter an integer for ID"
try:
person = self.repository.get(id)
return str(person.get_id()) + "\t" + person.get_name() + "\t" + person.get_address() + "\n"
except (ValueError, AttributeError) as e:
return e
def insert(self, name = None, address = None, event = None):
'''
Insert a person
:param date:
:param time:
:param description:
:return: string
'''
try:
person = Person(name, address, event)
self.repository.insert(person)
except (ValueError, AttributeError) as e:
return e
return "Inserted successfully"
    def update(self, id, first_name = None, last_name = None, address = None):
        '''
        Update a person; empty-string fields mean "keep the current value".
        :param id:
        :param first_name:
        :param last_name:
        :param address:
        :return: status string, or the raised exception object on failure
        '''
        try:
            id = int(id)
        except ValueError:
            return "Please insert a valid ID"
        try:
            person = self.repository.get(id)
        except ValueError as e:
            return e
        # Empty strings come from the console prompts; map them to "unchanged".
        if first_name == '':
            first_name = None
        if last_name == '':
            last_name = None
        if address == '':
            address = None
        try:
            person.update(first_name, last_name, address)
            self.repository.update(person)
        except (ValueError, AttributeError) as e:
            return e
        return "Person updated successfully!"
def delete(self, id):
'''
Delete an event from instances array
:param id:
:return: string
'''
try:
id = int(id)
except ValueError:
return "Please enter an integer for ID"
try:
person = self.repository.get(id)
self.repository.delete(person)
except (ValueError, AttributeError) as e:
return e
return "Person was deleted successfully"
    def associate(self, person_id, event_id, data):
        '''
        Register a person to an event on a given date.
        :param person_id:
        :param event_id:
        :param data: signup date string (dd/mm/YYYY per PersonEvent.validate)
        :return: status string, or the raised exception object on failure
        '''
        try:
            person_id = int(person_id)
            event_id = int(event_id)
        except ValueError:
            return "Please enter an integer for ID"
        try:
            # PersonEvent.validate checks both ids exist and the date format.
            person_event = PersonEvent(person_id, event_id, data)
            self.person_event_repository.insert(person_event)
        except (ValueError, AttributeError) as e:
            return e
        return "Association created successfully"
def search(self, term):
'''
Search people by term
:param term:
:return: string
'''
found_people = []
for person in self.repository.get_all():
person_criteria = person.get_name() + person.get_address()
if (re.match(term.lower(), person_criteria.lower()) is not None):
found_people.append(person)
output = ""
for person in found_people:
output += str(person.get_id()) + "\t" + person.get_name() + "\t" + person.get_address() + "\n"
return output
def get_events(self, id):
'''
Return events that a person has participated in
:param id:
:return: string
'''
try:
id = int(id)
except ValueError:
return "Please enter an integer for ID"
output = ""
try:
relations = self.get_instantiated_relation(id)
except:
print("The id doesn't exist")
relations.sort(key=lambda rel: (rel[0].get_description(), rel[0].get_date()))
for relation in relations:
event = relation[0]
person = relation[1]
date = relation[2]
output += str(event.get_id()) + "\t" + event.get_description() + "\t Inscreire in data: " + str(date) + "\n"
return output
def get_top_persons(self):
'''
Return top people with most visits
:return: string
'''
relations = self.get_instantiated_relation()
occurences = self.determine_occurences(relations)
relations.sort(
key=lambda rel: PersonController.find_dict(occurences, rel[1].get_id())[rel[1].get_id()],
reverse=True
)
# Unique relations
unique_relations = []
actual_person = -1
for rel in relations:
if rel[1].get_id() != actual_person:
actual_person = rel[1].get_id()
unique_relations.append(rel)
output = ""
for rel in unique_relations:
number_events = PersonController.find_dict(occurences, rel[1].get_id())[rel[1].get_id()]
output += rel[1].get_name() + "\tparticipa la\t" + str(number_events) + "\n"
return output
def get_instantiated_relation(self, person_id = None, event_id = None):
'''
Reinstantiate a relation between person and event from repository
:param person_id:
:param event_id:
:return: string
'''
if person_id is None and event_id is None:
person_events = self.person_event_repository.get_all()
else:
person_events = self.person_event_repository.get_by_id(person_id, event_id)
result = []
for relation in person_events:
event = self.event_repository.get(relation.get_event_id())
person = self.repository.get(relation.get_person_id())
date = relation.get_date()
if date is None:
date = "Doesn't exist"
result.append((event, person, date))
return result
@staticmethod
def determine_occurences(relations):
'''
Find occurences in multiple tuples of realtions
:param relations:
:return: array
'''
occurences = []
for relation in relations:
person_id = relation[1].get_id()
try:
occurence = PersonController.find_dict(occurences, person_id)
except:
occurences.append({
person_id: 0
})
occurence = occurences[-1]
finally:
occurence[person_id] += 1
return occurences
    @staticmethod
    def find_dict(array, id):
        '''
        Find dictionary with given id
        :param array: list of single-entry dictionaries
        :param id: key to look for
        :return: dictionary
        :raise: ValueError if id is not found
        '''
        for elem in array:
            try:
                # EAFP membership probe: a hit returns this dict, a KeyError
                # means "not in this one, keep scanning".
                elem[id]
                return elem
            except KeyError:
                continue
        raise ValueError | {
"repo_name": "dooma/Events",
"path": "events/controllers/person.py",
"copies": "1",
"size": "7945",
"license": "mit",
"hash": -2761423835787932000,
"line_mean": 27.1666666667,
"line_max": 120,
"alpha_frac": 0.5333669101,
"autogenerated": false,
"ratio": 4.318651441000544,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5352018351100544,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Călin Sălăgean'
from utils.IO import IO
from events.models.event import Event
class EventRepository():
    def __init__(self, filename = 'event.json'):
        '''
        EventRepository class constructor
        :param filename: Default value 'event.json'
        :return:
        '''
        self.__datafile = filename
        # Seed the Event class id counter past the highest id already stored,
        # so newly created events receive unique ids.
        Event.set_class_id(self.get_max_id())
def insert(self, event):
'''
Add an event object into disk storage
:param event:
:return: None
'''
operation = IO(self.__datafile)
try:
data = operation.get()
except ValueError:
data = []
data.append(event.get_serialization())
operation.set(data)
def update(self, updated_event):
'''
Updates an event into disk storage
:param updated_event:
:return:
'''
operation = IO(self.__datafile)
events = operation.get()
for event in events:
if event['id'] == updated_event.get_id():
events[events.index(event)] = updated_event.get_serialization()
break
operation.set(events)
def delete(self, deleted_event):
'''
Deletes an event from disk storage
:param deleted_event:
:return:
'''
operation = IO(self.__datafile)
events = operation.get()
for event in events:
if event['id'] == deleted_event.get_id():
events.remove(event)
break
operation.set(events)
def get(self, id):
'''
Returns an event with provided ID from disk storage
:param id:
:return: Event instance
'''
operation = IO(self.__datafile)
for event in operation.get():
if event['id'] == id:
return Event(event['date'], event['time'], event['description'], event['id'])
raise ValueError('Event not found!')
def get_all(self):
'''
Returns all events from disk storage
:return:
'''
operation = IO(self.__datafile)
events = []
for event in operation.get():
instance = Event(event['date'], event['time'], event['description'], event['id'])
events.append(instance)
return events
    def get_max_id(self):
        '''
        Returns the maximum ID found on disk storage
        :return: highest stored id plus one (0 for an empty store)
        '''
        events = self.get_all()
        # NOTE(review): ``max`` shadows the builtin; it is only a local here.
        max = -1
        for event in events:
            if max < event.get_id():
                max = event.get_id()
        return max + 1 | {
"repo_name": "dooma/Events",
"path": "events/repositories/event.py",
"copies": "1",
"size": "2655",
"license": "mit",
"hash": -5491324509669188000,
"line_mean": 25.0098039216,
"line_max": 93,
"alpha_frac": 0.5214932127,
"autogenerated": false,
"ratio": 4.47972972972973,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0014465438012729271,
"num_lines": 102
} |
__author__ = 'Călin Sălăgean'
from utils.IO import IO
from events.models.person_event import PersonEvent
class PersonEventRepository():
    def __init__(self, filename = None):
        '''
        PersonEventRepository constructor.
        :param filename: storage file name; defaults to 'person_event.json'
        '''
        self.__defaultfile = filename or 'person_event.json'
def insert(self, person_event):
operation = IO(self.__defaultfile)
try:
person_events = operation.get()
except ValueError:
person_events = []
person_events.append(person_event.get_serialization())
operation.set(person_events)
def get_all(self):
operation = IO(self.__defaultfile)
person_events = []
for person_event in operation.get():
person_events.append(PersonEvent(person_event['person_id'], person_event['event_id'], person_event['date']))
return person_events
    def get_by_id(self, person_id = None, event_id = None):
        '''
        Filter stored links by person id and/or event id.
        :param person_id: optional person filter
        :param event_id: optional event filter
        :return: list of PersonEvent instances; when BOTH ids are given, the
                 first exact match is returned as a one-element list
        '''
        operation = IO(self.__defaultfile)
        person_events = []
        for person_event in operation.get():
            if person_id is not None and event_id is not None:
                # Exact pair lookup short-circuits on the first match.
                if person_event['person_id'] == person_id and person_event['event_id'] == event_id:
                    return [PersonEvent(person_id, event_id, person_event['date'])]
            elif person_id is None:
                if person_event['event_id'] == event_id:
                    person_events.append(PersonEvent(person_event['person_id'], event_id, person_event['date']))
            else:
                if person_event['person_id'] == person_id:
                    person_events.append(PersonEvent(person_id, person_event['event_id'], person_event['date']))
        return person_events | {
"repo_name": "dooma/Events",
"path": "events/repositories/person_event.py",
"copies": "1",
"size": "1682",
"license": "mit",
"hash": 2032470505474927000,
"line_mean": 34.7446808511,
"line_max": 120,
"alpha_frac": 0.5979749851,
"autogenerated": false,
"ratio": 3.9786729857819907,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004921017967252904,
"num_lines": 47
} |
__author__ = 'Călin Sălăgean'
import os, json
class IO():
def __init__(self, filename = None):
'''
IO Constructor
:param filename: Default None
:return: object
'''
self.validate(filename)
self.__filename = filename
def get(self):
'''
Returns parsed file content
:return: dictionary
:raise: ValueError if content is invalid
'''
file = open(IO.filepath(self.__filename))
try:
content = json.loads(file.read())
return content
except:
raise ValueError('The content of ' + self.__filename + ' is not valid!')
finally:
file.close()
def set(self, data):
'''
Writes data into file as JSON content
:param data: dictionary (JSON format)
:return: None
'''
temporary_file_prefix = 'tmp_'
temporary_file = open(IO.filepath(temporary_file_prefix + self.__filename), 'w')
json.dump(data, temporary_file)
temporary_file.close()
if temporary_file.closed:
os.remove(IO.filepath(self.__filename))
os.rename(temporary_file.name, IO.filepath(self.__filename))
@staticmethod
def filepath(filename):
'''
Static method that creates relative path for given filename
:param filename:
:return: string
'''
data_directory = 'data/'
return os.path.abspath(data_directory + filename)
    @staticmethod
    def validate(filename):
        '''
        Validates filename. If file does not exist, it will create the file
        :param filename: expected to look like ``name.json``
        :return:
        :raise: AttributeError if filename is invalid
        '''
        # NOTE(review): split('.')[1] raises IndexError (not AttributeError)
        # for names without a dot, and mis-handles names with several dots.
        if filename == None or not len(str(filename)) or filename.split('.')[1] != 'json':
            raise AttributeError('Please provide a valid file name!')
        if not os.path.exists(IO.filepath(filename)):
            # NOTE(review): the handle returned by open() is never closed.
            open(IO.filepath(filename), 'w') | {
"repo_name": "dooma/Events",
"path": "utils/IO.py",
"copies": "1",
"size": "2023",
"license": "mit",
"hash": -5437359181193553000,
"line_mean": 28.2898550725,
"line_max": 90,
"alpha_frac": 0.5658415842,
"autogenerated": false,
"ratio": 4.478935698447893,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004707532996841141,
"num_lines": 69
} |
__author__ = 'Călin Sălăgean'
import unittest
from events.models.event import Event
class TestEvent(unittest.TestCase):
def test_initialization(self):
event = Event('14/01/2015', '14:00', 'Colectiv')
self.assertIsInstance(event, Event)
def test_date_input_validation(self):
with self.assertRaises(AttributeError):
event = Event(time = '14:00', description = 'Colectiv')
def test_time_input_validation(self):
with self.assertRaises(AttributeError):
event = Event(date = '14/01/2015', description = 'Colectiv')
def test_valid_properties(self):
event = Event('14/01/2015', '14:00', 'Colectiv')
self.assertEqual(event.date, '14/01/2015')
self.assertEqual(event.time, '14:00')
self.assertEqual(event.description, 'Colectiv')
def test_date_format_validation(self):
with self.assertRaisesRegexp(ValueError, "Date doesn't have the right format"):
event = Event('14012015', '14:00', 'Colectiv')
    def test_time_format_validation(self):
        '''A time without separators must be rejected with a clear message.'''
        with self.assertRaisesRegex(ValueError, "Time doesn't have the right format"):
            event = Event('14/01/2015', '1400', 'Colectiv') | {
"repo_name": "dooma/Events",
"path": "spec/models/test_event.py",
"copies": "1",
"size": "1213",
"license": "mit",
"hash": 4103978191900530000,
"line_mean": 38.064516129,
"line_max": 87,
"alpha_frac": 0.652892562,
"autogenerated": false,
"ratio": 3.5798816568047336,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4732774218804734,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Călin Sălăgean'
import unittest
from events.models.person_event import PersonEvent
class TestPersonEvent(unittest.TestCase):
def test_instance(self):
with self.assertRaises(ValueError):
person_event = PersonEvent(1, 2)
self.assertIsInstance(person_event, PersonEvent)
def test_validation(self):
with self.assertRaisesRegex(AttributeError, 'Please provide all fields'):
person_event = PersonEvent()
person_event = PersonEvent(person_id=1)
person_event = PersonEvent(event_id=1)
with self.assertRaisesRegex(ValueError, 'Invalid IDs! Please provide valid IDs'):
person_event = PersonEvent(1, 1)
person_event = PersonEvent(25, 25)
def test_update(self):
person_event = PersonEvent(25, 25)
person_event.update(25,84)
self.assertEqual(person_event.get_person_id(), 25)
self.assertEqual(person_event.get_event_id(), 84)
    def test_get_serialization(self):
        '''get_serialization() exposes both ids under their dict keys.'''
        person_event = PersonEvent(25, 25)
        serialization = person_event.get_serialization()
        self.assertEqual(serialization['person_id'], 25)
        self.assertEqual(serialization['event_id'], 25) | {
"repo_name": "dooma/Events",
"path": "spec/models/test_person_event.py",
"copies": "1",
"size": "1239",
"license": "mit",
"hash": -7105311679982183000,
"line_mean": 32.4324324324,
"line_max": 89,
"alpha_frac": 0.6618122977,
"autogenerated": false,
"ratio": 4.012987012987013,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5174799310687013,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Călin Sălăgean'
import unittest, os
from utils.IO import IO
from events.repositories.event import EventRepository
from events.models.event import Event
class TestEventRepository(unittest.TestCase):
def test_insert(self):
io = IO('test.json')
io.set([])
event = Event('14/01/2015', '14:00', 'Colectiv')
EventRepository('test.json').insert(event)
content = io.get()
self.assertIs(type(content), list)
stored_event = content[0]
self.assertEqual(stored_event['date'], '14/01/2015')
self.assertEqual(stored_event['time'], '14:00')
self.assertEqual(stored_event['description'], 'Colectiv')
def test_get_all(self):
io = IO('test.json')
io.set([])
repo = EventRepository('test.json')
content = repo.get_all()
self.assertEqual(content, [])
event1 = Event('14/01/2015', '14:00', 'Colectiv')
event2 = Event('14/01/2015', '14:00', 'Colectiv')
repo.insert(event1)
repo.insert(event2)
content = repo.get_all()
self.assertIs(type(content), list)
self.assertEqual(len(content), 2)
stored_event1 = content[0]
stored_event2 = content[1]
self.assertEqual(stored_event1.get_date(), '14/01/2015')
self.assertEqual(stored_event1.get_time(), '14:00')
self.assertEqual(stored_event1.get_description(), 'Colectiv')
self.assertEqual(stored_event2.get_date(), '14/01/2015')
self.assertEqual(stored_event2.get_time(), '14:00')
self.assertEqual(stored_event2.get_description(), 'Colectiv')
def test_get(self):
Event.set_class_id(0)
io = IO('test.json')
io.set([])
repo = EventRepository('test.json')
event1 = Event('14/01/2015', '14:00', 'Colectiv')
event2 = Event('14/01/2015', '14:00', 'Colectiv')
repo.insert(event1)
repo.insert(event2)
content = repo.get(0)
self.assertIs(type(content), Event)
self.assertEqual(content.get_date(), '14/01/2015')
self.assertEqual(content.get_time(), '14:00')
self.assertEqual(content.get_description(), 'Colectiv')
with self.assertRaisesRegex(ValueError, 'Event not found!'):
content = repo.get(10)
def test_update(self):
Event.set_class_id(0)
io = IO('test.json')
io.set([])
repo = EventRepository('test.json')
event1 = Event('14/01/2015', '14:00', 'Colectiv')
event2 = Event('14/01/2015', '14:00', 'Colectiv')
repo.insert(event1)
repo.insert(event2)
event = repo.get(0)
self.assertIs(type(event), Event)
event.update('15/01/2015', '15:00', 'Untold festival')
repo.update(event)
updated_event = repo.get(0)
self.assertEqual(updated_event.get_date(), '15/01/2015')
self.assertEqual(updated_event.get_time(), '15:00')
self.assertEqual(updated_event.get_description(), 'Untold festival')
def test_get_max_id(self):
Event.set_class_id(0)
io = IO('test.json')
io.set([])
repo = EventRepository('test.json')
self.assertEqual(repo.get_max_id(), 0)
content = repo.get_all()
self.assertEqual(content, [])
event1 = Event('14/01/2015', '14:00', 'Colectiv')
event2 = Event('14/01/2015', '14:00', 'Colectiv')
repo.insert(event1)
repo.insert(event2)
content = repo.get_all()
self.assertEqual(repo.get_max_id(), 2)
    def test_delete(self):
        '''delete() removes the event so a later get() raises.'''
        Event.set_class_id(0)
        io = IO('test.json')
        io.set([])
        repo = EventRepository('test.json')
        event1 = Event('14/01/2015', '14:00', 'Colectiv')
        event2 = Event('14/01/2015', '14:00', 'Colectiv')
        repo.insert(event1)
        repo.insert(event2)
        event = repo.get(0)
        self.assertIs(type(event), Event)
        repo.delete(event)
        with self.assertRaisesRegex(ValueError, 'Event not found!'):
            updated_event = repo.get(0) | {
"repo_name": "dooma/Events",
"path": "spec/repositories/test_event.py",
"copies": "1",
"size": "4099",
"license": "mit",
"hash": -3537187086741714000,
"line_mean": 30.2748091603,
"line_max": 76,
"alpha_frac": 0.5842285156,
"autogenerated": false,
"ratio": 3.34094616639478,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9421457882144557,
"avg_score": 0.0007433599700443378,
"num_lines": 131
} |
__author__ = "cuongnb14@gmail.com"
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Float
from sqlalchemy.orm import relationship, backref
from sqlalchemy import ForeignKey
Base = declarative_base()
class WebApp(Base):
    """ORM mapping for the ``autoscaling_web_app`` table."""
    __tablename__ = 'autoscaling_web_app'
    id = Column(Integer, primary_key=True)
    name = Column(String, unique=True)
    uuid = Column(String, unique=True)
    # Scaling bounds used by the autoscaler when resizing the app.
    min_instances = Column(Integer)
    max_instances = Column(Integer)
    cpus = Column(Float)
    mem = Column(Float)
    # One-to-many link; the cascade removes a deleted app's policies as well.
    policies = relationship("Policy", order_by="Policy.id", backref="app", cascade="all, delete, delete-orphan")
class Policy(Base):
    """ORM mapping for the ``autoscaling_policies`` table."""
    __tablename__ = 'autoscaling_policies'
    id = Column(Integer, primary_key=True)
    # NOTE(review): declared String but it references an Integer primary
    # key -- confirm whether this should be Column(Integer, ...).
    web_app_id = Column(String, ForeignKey('autoscaling_web_app.id'))
    metric_type = Column(String)
    upper_threshold = Column(Float)
    lower_threshold = Column(Float)
    # presumably: instances added on scale-out / removed on scale-in, and the
    # cool-down waits between scaling actions -- verify against the scaler.
    instances_out = Column(Integer)
    instances_in = Column(Integer)
    scale_up_wait = Column(Integer)
    scale_down_wait = Column(Integer)
| {
"repo_name": "cuongnb14/autoscaling-paas",
"path": "project/autoscaling/autoscaling/model.py",
"copies": "1",
"size": "1175",
"license": "mit",
"hash": 3092280253413701600,
"line_mean": 32.5714285714,
"line_max": 112,
"alpha_frac": 0.7004255319,
"autogenerated": false,
"ratio": 3.753993610223642,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9938869205334767,
"avg_score": 0.0031099873577749688,
"num_lines": 35
} |
__author__ = 'cuongnb14@gmail.com'
import logging
class Scaler(object):
    """Grows or shrinks a Marathon app's instance count within its bounds."""

    def __init__(self, marathon_client, app):
        self.logger = logging.getLogger("Scaler")
        self.marathon_client = marathon_client
        self.app = app

    def scale(self, delta):
        """Change the app's instance count by ``delta``, clamped into
        [app.min_instances, app.max_instances].

        :param delta: instances to add (> 0) or remove (< 0)
        """
        marathon_id = "app-" + self.app.uuid
        current = self.marathon_client.get_app(marathon_id).instances
        # Clamp the requested count into the app's configured window.
        target = max(self.app.min_instances, min(self.app.max_instances, current + delta))
        if target != current:
            self.logger.info("scale to: %d", target)
            self.marathon_client.scale_app(marathon_id, target)
        else:
            self.logger.info("number instances to threshold, no scale!")
| {
"repo_name": "cuongnb14/autoscaling-paas",
"path": "project/autoscaling/autoscaling/scaler.py",
"copies": "1",
"size": "1070",
"license": "mit",
"hash": 8568042082940434000,
"line_mean": 38.6296296296,
"line_max": 88,
"alpha_frac": 0.6224299065,
"autogenerated": false,
"ratio": 3.7152777777777777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4837707684277778,
"avg_score": null,
"num_lines": null
} |
class dstat_plugin(dstat):
    """
    Per-client SLA latency plugin: one column per /tmp/dstat/sla/client<N>
    directory, showing the value read from that client's ``latency`` file.
    """
    def __init__(self):
        self.name = 'latency'
        self.type = 'p'
        self.width = 4
        self.scale = 34
    def vars(self):
        """Return the sorted list of client directory names to report on."""
        return sorted(os.path.basename(name)
                      for name in glob.glob('/tmp/dstat/sla/client[0-9]*'))
    def nick(self):
        """Column headers: the client names, lower-cased."""
        return [name.lower() for name in self.vars]
    def extract(self):
        """Read each client's latency file into self.val (0 when empty)."""
        for csla in self.vars:
            # ``with`` guarantees the handle is closed even if float() raises.
            with open('/tmp/dstat/sla/' + csla + '/latency', 'r') as f:
                st = f.readline().rstrip()
            self.val[csla] = 0 if st == "" else float(st)
    def check(self):
        """Fail early if any client's latency file is unreadable."""
        for csla in glob.glob('/tmp/dstat/sla/client[0-9]*'):
            if not os.access(csla + '/latency', os.R_OK):
                # Bug fix: the comma-form ``raise`` is Python-2-only syntax;
                # the call form behaves identically and also parses on Py3.
                raise Exception('Cannot access latency %s information' % os.path.basename(csla))
# vim:ts=4:sw=4:et
| {
"repo_name": "barzan/dbseer",
"path": "middleware_old/dstat_for_server/plugins/dstat_client_latency.py",
"copies": "4",
"size": "1026",
"license": "apache-2.0",
"hash": -3124171144379731500,
"line_mean": 24.65,
"line_max": 96,
"alpha_frac": 0.5019493177,
"autogenerated": false,
"ratio": 3.320388349514563,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5822337667214563,
"avg_score": null,
"num_lines": null
} |
# Pads a message (after a "1" sentinel) with spaces so its length is an exact
# multiple of PAD_TO_MULT.
def sim_pad(MESSAGE, PAD_TO_MULT):
    """Return MESSAGE + "1" padded with spaces up to the next multiple of
    PAD_TO_MULT.

    The "1" sentinel marks where the original message ends, so sim_unpad can
    strip the padding unambiguously.
    """
    MESSAGE = MESSAGE + "1"
    # Bug fix: ``//`` keeps this arithmetic integral on Python 3 as well;
    # the original ``/`` produced a float there and made range() fail.
    pad = (((len(MESSAGE) // PAD_TO_MULT) + 1) * PAD_TO_MULT) - len(MESSAGE)
    return MESSAGE + " " * pad
# Pads with a chosen single character instead of spaces, returning both the
# padded string and how many characters were added (needed for unpadding).
def sim_pad_with_char(MESSAGE, PAD_TO_MULT, CHAR_TO_PAD_WITH):
    """Pad MESSAGE with CHAR_TO_PAD_WITH up to the next multiple of
    PAD_TO_MULT.

    :return: (padded_message, pad_count) tuple
    :raise: ValueError unless CHAR_TO_PAD_WITH is exactly one character
    """
    if len(CHAR_TO_PAD_WITH) != 1:
        raise ValueError('ValueError: legnth of CHAR_TO_PAD_WITH must only be 1')
    # Bug fix: ``//`` keeps the arithmetic integral on Python 3; the original
    # ``/`` produced a float there and made range() fail.
    pad = (((len(MESSAGE) // PAD_TO_MULT) + 1) * PAD_TO_MULT) - len(MESSAGE)
    return (MESSAGE + CHAR_TO_PAD_WITH * pad, pad)
# Appends CHAR_TO_PAD_WITH to MESSAGE exactly PAD_AMOUNT times.
def sim_pad_with_amount(MESSAGE, CHAR_TO_PAD_WITH, PAD_AMOUNT):
    """Return MESSAGE extended with PAD_AMOUNT copies of CHAR_TO_PAD_WITH."""
    return MESSAGE + CHAR_TO_PAD_WITH * PAD_AMOUNT
# Reverses sim_pad: strip the trailing whitespace, then the "1" sentinel.
def sim_unpad(PADDED_MES):
    """Return the original message from a sim_pad-padded string."""
    without_padding = PADDED_MES.rstrip()
    return without_padding[:-1]
# Reverses sim_pad_with_char using the pad count it returned.
def sim_unpad_with_char(PADDED_MES, PAD_AMOUNT):
    """Drop the last PAD_AMOUNT characters from PADDED_MES.

    :param PAD_AMOUNT: pad count previously returned by sim_pad_with_char
    """
    # Bug fix: with PAD_AMOUNT == 0 the original computed PADDED_MES[:0]
    # and returned "" instead of the unchanged message.
    if PAD_AMOUNT <= 0:
        return PADDED_MES
    return PADDED_MES[:-PAD_AMOUNT]
# Duplicate of sim_unpad_with_char, kept for API compatibility.
def sim_unpad_with_amount(PADDED_MES, PAD_AMOUNT):
    """Drop the last PAD_AMOUNT characters from PADDED_MES."""
    # Bug fix: with PAD_AMOUNT == 0 the original computed PADDED_MES[:0]
    # and returned "" instead of the unchanged message.
    if PAD_AMOUNT <= 0:
        return PADDED_MES
    return PADDED_MES[:-PAD_AMOUNT]
| {
"repo_name": "Curiousteenager/simplepad",
"path": "simplepad.py",
"copies": "1",
"size": "1915",
"license": "mit",
"hash": -2354845574991672000,
"line_mean": 35.5490196078,
"line_max": 134,
"alpha_frac": 0.6804177546,
"autogenerated": false,
"ratio": 3.0837359098228663,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4264153664422866,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Curtis Mackay'
pe_index = 0;
import itertools
class PE(object):
"""
Data structure for storing PE dependency data
"""
newid = itertools.count().__next__
def __init__(self, operation, pe_inputs, pe_outputs, pe_id=None):
self.operation = operation
self.pe_inputs = pe_inputs
self.pe_outputs = pe_outputs
if pe_id is None:
self.pe_id = PE.newid()
else:
self.pe_id = pe_id
def __repr__(self):
return "PE %s" % self.pe_id
# return "PE inputs:%s, outputs:%s" % (self.pe_inputs, self.pe_outputs)
def __str__(self):
return "PE inputs:%s, outputs:%s" % (self.pe_inputs, self.pe_outputs)
def compress_mapping(pe_list, net_map):
for pe in pe_list:
for i in range(len(pe.pe_inputs)):
for net_set in net_map:
if pe.pe_inputs[i] in net_set:
pe.pe_inputs[i] = net_set[0]
for i in range(len(pe.pe_outputs)):
for net_set in net_map:
if pe.pe_outputs[i] in net_set:
pe.pe_outputs[i] = net_set[0]
def create_dependency_list(pe_list):
dep_list = list()
for pe in pe_list:
for pe_dep_candidate in pe_list:
input_set = set(pe.pe_inputs)
output_set = set(pe_dep_candidate.pe_outputs)
if input_set & output_set:
new_dependency = True
for dep in dep_list:
if dep[-1] is pe_dep_candidate:
dep.append(pe)
new_dependency = False
if new_dependency:
dep_list.append([pe_dep_candidate, pe])
# Add in any PE's that have no dependency
for pe in pe_list:
# optimized way to flatten list
flattened_dep_list = [item for sublist in dep_list for item in sublist]
if pe not in set(flattened_dep_list):
dep_list.append([pe])
return dep_list | {
"repo_name": "crnmrn/SPADE-ARCH",
"path": "compiler/dpda_compiler/parser/dependency_tracker.py",
"copies": "1",
"size": "1991",
"license": "bsd-3-clause",
"hash": 392705441356094300,
"line_mean": 31.6557377049,
"line_max": 79,
"alpha_frac": 0.5364138624,
"autogenerated": false,
"ratio": 3.555357142857143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9572226805638843,
"avg_score": 0.003908839923660002,
"num_lines": 61
} |
__author__ = 'cutylewiwi'
# -*- coding:utf-8 -*-
import urllib
import urllib2
import re
class Group:
    """A research group: its name plus the faculty names that belong to it."""
    def __init__(self, name, faculties):
        self.name, self.faculties = name, faculties
class GroupList:
def __init__(self, baseUrl):
self.baseUrl = baseUrl
self.groups = []
def getPage(self, _url):
try:
url = _url
request = urllib2.Request(url)
response = urllib2.urlopen(request)
# print response.read()
return response
except urllib2.URLError, e:
if hasattr(e,"reason"):
print u"Faild to get Prof List at University of Delaware",e.reason
return None
def getGroupList(self):
page = self.getPage(self.baseUrl)
regex = '<article>(.*?)</article>'
myItems = re.findall(regex, page.read(), re.S)
# print myItems
tmpStr = myItems[0]
regex = '<li>\n<h3.*?>(.*?)</h3>.*?</ul>.*?<p.*?>(.*?)</p>(.*?)</p>'
myItems = re.findall(regex, tmpStr, re.S)
print len(myItems)
print myItems
return
for item in myItems:
# print item
GroupName = item[0]
regex = '<a.*?>(.*?)</a>'
GroupFaculties = re.findall(regex, item[1], re.S)
self.groups.append(Group(GroupName, GroupFaculties))
# print ProfName
# print ProfPhotoUrl
# print ProfPUrl
# print ProfTitle
# print ProfArea
# print ProfOffice
# print " "
# self.profs.append(Prof(ProfName, ProfPhotoUrl, ProfPUrl, ProfTitle, ProfArea, ProfOffice))
def outPutProf(self):
result = "<?xml version=\"1.0\" ?>\n\t<institution>\n"
self.getGroupList()
for group in self.groups:
result += "\t\t<research>\n"
result += "\t\t\t<groupname>%s</groupname>\n" % (group.name)
for faculty in group.faculties:
tmpStr = faculty
if tmpStr[:3] == "Dr.":
tmpStr = tmpStr[4:]
result += "\t\t\t<professorname>%s</professorname>\n" % (tmpStr)
result += "\t\t</research>\n"
result += "\t</institution>\n"
# print result
file = open("UTA_research.xml","w")
file.writelines(result)
# Script entry point: scrape the UTA research areas and write UTA_research.xml.
baseURL = 'https://cse.uta.edu/research/research-areas.php'
pl = GroupList(baseURL)
# pl.getPage()
pl.outPutProf()
# pl.getGroupList()
| {
"repo_name": "doge-search/webdoge",
"path": "wuyudong/UTA/UTA_research.py",
"copies": "1",
"size": "2593",
"license": "unlicense",
"hash": 7706575861810142000,
"line_mean": 30.012345679,
"line_max": 104,
"alpha_frac": 0.5106054763,
"autogenerated": false,
"ratio": 3.5963938973647713,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46069993736647713,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cutylewiwi'
# -*- coding:utf-8 -*-
import urllib
import urllib2
import re
class Prof:
    """Record for one faculty member scraped from the KSU CIS directory."""
    def __init__(self, name, photoUrl, pUrl, title, area, office, phone, email):
        (self.name, self.photoUrl, self.pUrl, self.title,
         self.area, self.office, self.phone, self.email) = (
            name, photoUrl, pUrl, title, area, office, phone, email)
class ProfList:
def __init__(self, baseUrl):
self.baseUrl = baseUrl
self.profs = []
def getPage(self):
try:
url = self.baseUrl
request = urllib2.Request(url)
response = urllib2.urlopen(request)
# print response.read()
return response
except urllib2.URLError, e:
if hasattr(e,"reason"):
print u"Faild to get Prof List at KSU",e.reason
return None
def getProfList(self):
page = self.getPage()
# regex = '<div class=\"wdn-grid-set\">.*?<a class=\"wdn-button\" title=\"Web page for.*?\" href=\"(.*?)\".*?<strong>(.*?)</strong>.*?<div class=\"gs-fac-rsch\">(.*?)(<br />)?</div>'
# regex = '<div id="node-2964" class="node node-type-page build-mode-full clearfix">.*?<div class="content">.*?<div>(.*?)<div>.*?<div>.*?</div>'
regex = '<table class="views-view-grid col-2" summary="Faculty of the CIS Department">(.*?)</table>'
myItems = re.findall(regex, page.read(), re.S)
# print myItems[0]
# return
for item in myItems:
tmpStr = item
regex = '<div class="picture">.*?<img.*?src="(.*?)".*?>.*?</div>'
myTokens = re.findall(regex, tmpStr, re.S)
# print myTokens
ProfPhotoUrl = myTokens[0]
regex = '<div class="views-field-value">.*?<a.*?>(.*?)</a>.*?</div>'
myTokens = re.findall(regex, tmpStr, re.S)
ProfName = myTokens[0]
regex = '<div class="views-field-value-1">.*?<b>(.*?)</b>.*?</div>'
myTokens = re.findall(regex, tmpStr, re.S)
ProfTitle = myTokens[0]
regex = '<div class="views-field-name">.*?<a.*?>(.*?)</a>.*?</div>'
myTokens = re.findall(regex, tmpStr, re.S)
ProfPUrl = myTokens[0]
ProfArea = ""
regex = '<div class="views-field-value-2">.*?<span class="field-content">(.*?)</span>.*?</div>'
myTokens = re.findall(regex, tmpStr, re.S)
ProfOffice = myTokens[0]
regex = '<div class="views-field-value-3">.*?<span class="field-content">(.*?)</span>.*?</div>'
myTokens = re.findall(regex, tmpStr, re.S)
ProfPhone = myTokens[0]
regex = '<div class="views-field-mail">.*?<a.*?>(.*?)</a>.*?</div>'
myTokens = re.findall(regex, tmpStr, re.S)
ProfEmail = myTokens[0]
# print ProfName
# print ProfPhotoUrl
# print ProfPUrl
# print ProfTitle
# print ProfArea
# print ProfOffice
# print ProfPhone
# print ProfEmail
# print " "
self.profs.append(Prof(ProfName, ProfPhotoUrl, ProfPUrl, ProfTitle, ProfArea, ProfOffice, ProfPhone, ProfEmail))
def outPutProf(self):
result = "<?xml version=\"1.0\" ?>\n\t<institution>\n"
self.getProfList()
for prof in self.profs:
result += "\t\t<professor>\n"
result += "\t\t\t<name>%s</name>\n" % (prof.name)
result += "\t\t\t<title>%s</title>\n" % (prof.title)
result += "\t\t\t<office>%s</office>\n" % (prof.office)
result += "\t\t\t<email>%s</email>\n" % (prof.email)
result += "\t\t\t<phone>%s</phone>\n" % (prof.phone)
result += "\t\t\t<website>%s</website>\n" % (prof.pUrl)
result += "\t\t\t<image>%s</image>\n" % (prof.photoUrl)
result += "\t\t</professor>\n"
result += "\t</institution>\n"
# print result
fileName = "KSU.xml"
outputDir = "result"
file = open(fileName,"w")
file.writelines(result)
# Script entry point: scrape the KSU CIS faculty page and write KSU.xml.
baseURL = 'https://www.cis.ksu.edu/people/current-faculty'
pl = ProfList(baseURL)
pl.outPutProf()
# pl.getPage()
# pl.getProfList()
| {
"repo_name": "doge-search/webdoge",
"path": "wuyudong/KSU/KSU.py",
"copies": "1",
"size": "4385",
"license": "unlicense",
"hash": 9212551889555674000,
"line_mean": 38.2293577982,
"line_max": 190,
"alpha_frac": 0.5069555302,
"autogenerated": false,
"ratio": 3.404503105590062,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9397932742735269,
"avg_score": 0.0027051786109585888,
"num_lines": 109
} |
__author__ = 'cutylewiwi'
# -*- coding:utf-8 -*-
import urllib
import urllib2
import re
class Prof:
    """Record for one faculty member scraped from the UTK EECS directory."""
    def __init__(self, name, photoUrl, pUrl, title, area, office):
        (self.name, self.photoUrl, self.pUrl,
         self.title, self.area, self.office) = (name, photoUrl, pUrl, title, area, office)
class ProfList:
def __init__(self, baseUrl):
self.baseUrl = baseUrl
self.profs = []
def getPage(self):
try:
url = self.baseUrl
request = urllib2.Request(url)
response = urllib2.urlopen(request)
# print response.read()
return response
except urllib2.URLError, e:
if hasattr(e,"reason"):
print u"Faild to get Prof List at University of Delaware",e.reason
return None
def getProfList(self):
page = self.getPage()
#regex = '<div class=\"one-third column\">.*?<li>(.*?)</li>.*?</div>'
regex = '<div class="one-third column clear">.*?<h2>Full-Time</h2>.*?<ul>(.*?)</ul>.*?</div>'
#regex = '<tr>.*?<img src=\"(.*?)\".*?</tr>'
myItems = re.findall(regex, page.read(), re.S)
tmpStr = myItems[0]
# print tmpStr
regex = '<li>.*?<a.*?href="(.*?)">(.*?)</a>.*?</li>'
myItems = re.findall(regex, tmpStr, re.S)
# print myItems
# return
for item in myItems:
# print item
ProfName = item[1]
ProfPhotoUrl = ""
ProfPUrl = "http://www.eecs.utk.edu/people/faculty" + item[0]
ProfTitle = ""
ProfArea = ""
ProfOffice = ""
# print ProfName
# print ProfPhotoUrl
# print ProfPUrl
# print ProfTitle
# print ProfArea
# print ProfOffice
# print " "
self.profs.append(Prof(ProfName, ProfPhotoUrl, ProfPUrl, ProfTitle, ProfArea, ProfOffice))
def outPutProf(self):
result = "<?xml version=\"1.0\" ?>\n\t<institution>\n"
self.getProfList()
for prof in self.profs:
result += "\t\t<professor>\n"
result += "\t\t\t<name>%s</name>\n" % (prof.name)
result += "\t\t\t<title>%s</title>\n" % (prof.title)
result += "\t\t\t<office>%s</office>\n" % (prof.office)
result += "\t\t\t<email></email>\n"
result += "\t\t\t<phone></phone>\n"
result += "\t\t\t<website>%s</website>\n" % (prof.pUrl)
result += "\t\t\t<image>%s</image>\n" % (prof.photoUrl)
result += "\t\t</professor>\n"
result += "\t</institution>\n"
# print result
file = open("UTK.xml","w")
file.writelines(result)
# Entry point: scrape the UTK EECS faculty page and dump UTK.xml.
baseURL = 'http://www.eecs.utk.edu/people/faculty/'
pl = ProfList(baseURL)
# pl.getPage()
pl.outPutProf()
# pl.getProfList()
| {
"repo_name": "doge-search/webdoge",
"path": "wuyudong/UTK/UTK.py",
"copies": "1",
"size": "2980",
"license": "unlicense",
"hash": -5998241134554957000,
"line_mean": 31.4831460674,
"line_max": 102,
"alpha_frac": 0.4946308725,
"autogenerated": false,
"ratio": 3.401826484018265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43964573565182646,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cvdsouza'
#Developed at : SEFCOM Labs by JJ SEO and Clinton Dsouza
from bluetooth import *
import threading
class MyThread (threading.Thread):
def __init__ (self, socket):
self.socket = socket
threading.Thread.__init__(self)
def run (self):
try:
str = raw_input('Enter your input:')
self.socket.send (str)
except:
self.socket.close()
print "socket close"
# --- RFCOMM server: accept one client, log everything it sends ----------
server_sock = BluetoothSocket(RFCOMM)
server_sock.bind(("", PORT_ANY))
server_sock.listen(1)
port = server_sock.getsockname()[1]
# Service UUID advertised to clients (alternates kept for reference).
uuid = "a60f35f0-b93a-11de-8a39-08002009c666"
#uuid = "04c6093b-0000-1000-8000-00805f9b34fb"
#uuid = "84f39d29-7d6d-437d-973b-fba39e49d4ee"
advertise_service(server_sock, "SampleServer", service_id=uuid,
                  service_classes=[uuid, SERIAL_PORT_CLASS], profiles=[SERIAL_PORT_PROFILE],
                  # protocols = [ OBEX_UUID ]
                  )
print("Waiting for connection on RFCOMM channel %d" % port)
client_sock, client_info = server_sock.accept()
print("Accepted connection from ", client_info)
try:
    while True:
        # NOTE(review): a brand-new sender thread (blocking on raw_input)
        # is started on EVERY loop iteration — confirm this is intended
        # rather than starting one sender thread before the loop.
        MyThread(client_sock).start()
        data = client_sock.recv(1024)
        if len(data) == 0:
            break
        print("received [%s]" % data)
        # Append each received chunk to STS.json, newline-separated.
        with open("STS.json","a") as myfile:
            myfile.write('\n'+data)
except IOError:
    pass
print("disconnected")
client_sock.close()
server_sock.close()
print("all done")
| {
"repo_name": "jtromo/ASU-Thesis-RaspberryPiSmartKey",
"path": "RaspberryPiSmartKey/.sync/Archive/PyBlueServer.py",
"copies": "1",
"size": "1412",
"license": "apache-2.0",
"hash": 2074202821976899000,
"line_mean": 23.7719298246,
"line_max": 92,
"alpha_frac": 0.652266289,
"autogenerated": false,
"ratio": 2.966386554621849,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4118652843621849,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cvl'
def numberic_system(base_system):
    """Map each symbol of *base_system* to its first index in the string."""
    # str.index equals str.find for symbols that are present, so a repeated
    # symbol keeps its FIRST position — same as the original find()-loop.
    return dict((sym, base_system.index(sym)) for sym in base_system)
class CSC(object):
    """Positional numeral system over a custom symbol alphabet.

    Numbers are symbol strings stored with the LEAST significant digit
    first: increment() carries from index 0 upward.
    """
    # Class-level default; shadowed per instance in __init__.
    system = {}
    def __init__(self, alphabet):
        # Maps each alphabet symbol to its numeric value.
        self.system = numberic_system(alphabet)
    def csc(self, sym):
        # Convert a symbol string to the concatenated decimal values.
        result = ''
        for s in sym:
            result += str(self.system[s])
        return result
    def r_csc(self, num):
        # Reverse lookup: the symbol whose value equals int(num), or the
        # sentinel string 'out_of_range' when no symbol has that value.
        for key in self.system.keys():
            if self.system[key] == int(num):
                return key
        return 'out_of_range'
    def increment(self, csc_number):
        """Add one to *csc_number* (little-endian digits), with carry."""
        csc_len = len(csc_number)
        i = 0
        while 1:
            # NOTE(review): i grows by exactly 1 per carry and the loop
            # breaks at i == csc_len below, so this branch looks
            # unreachable (dead code) — confirm before removing.
            if i > csc_len:
                csc_number += '0'
            if i == csc_len:
                # Carry past the last digit: a new most-significant digit
                # is appended as '0'. NOTE(review): for a decimal alphabet
                # this makes increment('9') -> '00'; confirm the new digit
                # should not be the symbol for one instead.
                csc_number += '0'
                break
            num = csc_number[i]
            if num in self.system.keys():
                # Try to bump this digit's value by one.
                csc_result = self.r_csc(int(self.csc(num)) + 1)
                if csc_result != 'out_of_range':
                    csc_number = csc_number[:i] + csc_result + csc_number[i + 1:]
                    break
                else:
                    # Digit overflowed: reset it and carry into the next.
                    csc_number = csc_number[:i] + '0' + csc_number[i + 1:]
                    i += 1
            else:
                # Unknown symbol: reset it and carry on.
                csc_number = csc_number[:i] + '0' + csc_number[i + 1:]
                i += 1
        return csc_number
| {
"repo_name": "cludtk/freedomain",
"path": "csc.py",
"copies": "1",
"size": "1394",
"license": "mit",
"hash": 4554861451716396000,
"line_mean": 26.3333333333,
"line_max": 81,
"alpha_frac": 0.449784792,
"autogenerated": false,
"ratio": 3.7074468085106385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46572316005106384,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cwhelan'
# converts sam format to modil format
# only works on data from one chromsome for now, so please split first!
import sys
current_read_id = "NA"
read1_alignments = {}
read2_alignments = {}
def print_reads(read1_alignments, read2_alignments):
    """Emit one MODIL-format line per plausible pairing of the two mates.

    read1_alignments / read2_alignments map alignment position -> raw SAM
    line for the two mates of a single read pair. Pairings more than 25kb
    apart are discarded, and only forward(left)/reverse(right) orientation
    is emitted. Uses the module-global lib_insert_size.
    """
    for p1 in read1_alignments:
        for p2 in read2_alignments:
            if abs(p1-p2) <= 25000:
                a1 = read1_alignments[p1]
                a2 = read2_alignments[p2]
                # Order the two alignments left/right by position.
                if p1 < p2:
                    left_read = a1
                    right_read = a2
                else:
                    left_read = a2
                    right_read = a1
                left_fields = left_read.split("\t")
                right_fields = right_read.split("\t")
                left_flag = int(left_fields[1])
                right_flag = int(right_fields[1])
                # 0x10 is the SAM reverse-strand flag bit: keep only pairs
                # with the left mate forward and the right mate reversed.
                if not left_flag & 0x10 and right_flag & 0x10:
                    # End coordinates approximated as start + sequence
                    # length (ignores indels/clipping in the CIGAR).
                    left_read_start = int(left_fields[3])
                    left_read_end = left_read_start + len(left_fields[9])
                    right_read_start = int(right_fields[3])
                    right_read_end = right_read_start + len(right_fields[9])
                    span_of_mapping = right_read_end - left_read_start
                    print "\t".join(map(str, [left_read_end, right_read_start, left_fields[0], lib_insert_size - span_of_mapping, lib_insert_size, right_fields[0]]))
# The library insert size is the sole command-line argument.
lib_insert_size = int(sys.argv[1])
for line in sys.stdin:
    fields = line.split("\t")
    read_pair_name = fields[0]
    # Input is name-sorted SAM, so alignments arrive grouped by read name:
    # flush the previous pair's alignments whenever the name changes.
    if read_pair_name != current_read_id:
        if current_read_id != "NA":
            print_reads(read1_alignments, read2_alignments)
        read1_alignments = {}
        read2_alignments = {}
        current_read_id = read_pair_name
    flag = int(fields[1])
    pos = int(fields[3])
    # 0x40 is the SAM "first in pair" flag bit.
    if flag & 0x40:
        read1_alignments[pos] = line
    else:
        read2_alignments[pos] = line
# Flush the final read pair.
print_reads(read1_alignments, read2_alignments)
| {
"repo_name": "cwhelan/cloudbreak",
"path": "src/main/scripts/nameSortedSamToMODIL.py",
"copies": "1",
"size": "1992",
"license": "mit",
"hash": -2189905813125169000,
"line_mean": 36.5849056604,
"line_max": 165,
"alpha_frac": 0.546184739,
"autogenerated": false,
"ratio": 3.393526405451448,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9437362840591652,
"avg_score": 0.0004696607719593218,
"num_lines": 53
} |
__author__ = 'CwT'
from AbstractAnalysis import AbstractFlowAnalysis
class GlobalAnalysis(AbstractFlowAnalysis):
    """Collects declared globals and every operand that references them."""

    def __init__(self):
        # Maps global name -> list of operands mentioning '<$name>'.
        self._globalvars = {}
        AbstractFlowAnalysis.__init__(self)

    @property
    def GlobalSet(self):
        return self._globalvars

    def flowTransfer(self, block):
        """Register globals declared in *block* and record their uses."""
        for instruction in block.insns:
            handler, var = instruction.isGlobal()
            if handler == 'register':
                # A global declaration (Terminal_GlobalVar): ensure an entry
                # exists. setdefault replaces the 'not in .keys()' test.
                self._globalvars.setdefault(var, [])
            elif handler == 'check':
                for each in var:
                    # Extract the name from the '<$name>' marker.
                    start = each.find('<$')
                    name = each[start + 2:-1]
                    if name in self._globalvars and \
                            each not in self._globalvars[name]:
                        self._globalvars[name].append(each)

    def IgnoreFunc(self):
        # True exactly when the current graph is 'main'; the base class
        # interprets this flag (its semantics are not visible here).
        return self._graph._name == 'main'

    def printData(self):
        print(self._globalvars)
| {
"repo_name": "Panalyzer/Panalyzer",
"path": "src/GlobalAnalysis.py",
"copies": "1",
"size": "1063",
"license": "mit",
"hash": 741299972539474000,
"line_mean": 31.2121212121,
"line_max": 63,
"alpha_frac": 0.5305738476,
"autogenerated": false,
"ratio": 4.621739130434783,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006887052341597796,
"num_lines": 33
} |
__author__ = 'CwT'
from DexParse import *
# Hand-assembled Dalvik instruction templates used as junk code. Values are
# 16-bit code units; the placeholders (new-array type id, payload operands)
# in array2 are patched by addJunkopcode before insertion.
array = [0x32, 0xa,  # if-eq v0, v0, 8
         0x26, 0x3, 0x0,  # fill-array-data v0, 3
         0x0300, 0x2, 0x1, 0x0, 0x0  # fill-array-data-payload 0x0300, width, size, data[]
         ]
array2 = [0x32, 0xd,  # if-eq v0, v0, 8
          0x1023, 0x06d7,  # new-array v0, v1, typeid
          0x26, 0x3, 0x0,  # fill-array-data v0, 3
          0x0300, 0x1, 0x2, 0x0, 0x0,  # fill-array-data-payload 0x0300, width, size, data[]
          ]
def addJunkopcode(dexfile, class_name, method_name):
    """Prepend the array2 junk instructions to the named method's code.

    Raises:
        ValueError: if the dex contains neither a "[B" nor a "[C" type id
        (the junk new-array instruction needs one).
    """
    tmp_method = dexfile.getmethodItem(class_name, method_name)
    coderef = tmp_method["method"].coderef
    dexfile.verifyclass(tmp_method["defidx"])
    # Prefer byte[]; fall back to char[].
    index = dexfile.gettypeid("[B")
    if index < 0:
        index = dexfile.gettypeid("[C")
        if index < 0:
            # Was a silent 'pass', which let the invalid -1 index be written
            # into the generated bytecode; fail loudly instead.
            raise ValueError("neither '[B' nor '[C' type id found in dex")
    print("find index: ", index)
    # Insert the junk instructions at the front of the method.
    for i in range(0, len(array2)):
        coderef.insns.insert(i, array2[i])
    # Original code size in bytes (insns_size is still the pre-insertion
    # count of 16-bit units at this point).
    num = coderef.insns_size * 2
    # Patch placeholders: the new-array type id, and the two 16-bit halves
    # at payload positions 9/10 — presumably encoding that byte count into
    # the fill-array-data payload; confirm against the dex format.
    coderef.insns[3] = index
    coderef.insns[9] = num & 0xffff
    coderef.insns[10] = (num >> 16) & 0xffff
    coderef.insns_size += 12
def modifyopcode(dexfile, class_name, method_name):
    """Overwrite the code unit at index 4 of the named method with 0x3a."""
    item = dexfile.getmethodItem(class_name, method_name)
    item["method"].coderef.insns[4] = 0x3a
if __name__ == '__main__':
    # Load a dex, inject junk opcodes into one method, and save a copy.
    dexfile = DexFile("classes.dex")
    # modifyopcode(dexfile, "Lcom/cc/test/MainActivity;", "onCreate")
    addJunkopcode(dexfile, "Lcom/cc/test/MainActivity;", "onCreateOptionsMenu")
    dexfile.copytofile("classescp.dex")
| {
"repo_name": "CvvT/DexParse",
"path": "AddJunk.py",
"copies": "2",
"size": "1572",
"license": "apache-2.0",
"hash": 1338007297107412500,
"line_mean": 36.4285714286,
"line_max": 92,
"alpha_frac": 0.6164122137,
"autogenerated": false,
"ratio": 2.673469387755102,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42898816014551017,
"avg_score": null,
"num_lines": null
} |
__author__ = 'CwT'
from ForwardFlowAnalysis import ForwardFlowAnalysis
class SSAliasAnalysis(ForwardFlowAnalysis):
def __init__(self):
self._AliasSetBefore = {}
self._AliasSetAfter = {}
ForwardFlowAnalysis.__init__(self)
def flowTransfer(self, block):
set = self.getAliasSet(block.Id)
out = dict(set)
vars = self._graph.getTypeMap()
for instruction in block.insns:
a, b = instruction.getAlias(vars)
if a:
self.eliminate(a, out)
self.add(a, b, out)
result = self.getAliasSet(block.Id, False)
if len(result['Must']) == len(out['Must']) and \
len(result['May']) == len(out['May']):
return False
for entity in ['Must', 'May']:
result[entity][:] = out[entity]
return True
def add(self, vara, varb, set):
for entity in ['Must', 'May']:
additems = []
for pair in set[entity]:
if varb in pair:
other = pair[0] if pair[0] != varb else pair[1]
if (vara, other) not in set[entity] or (other, vara) not in set[entity]:
additems.append((vara, other))
for pair in additems:
set[entity].append(pair)
if (vara, varb) not in set['Must'] or (varb, vara) not in set['Must']:
set['Must'].append((vara, varb))
def eliminate(self, var, set):
for entity in ['Must', 'May']:
Godie = []
for pair in set[entity]:
if var in pair:
Godie.append(pair)
for pair in Godie:
set[entity].remove(pair)
Godie[:] = []
# Merge: Intersection
def mergeInto(self, sourceBlock, destBlock):
sourceSet = self.getAliasSet(sourceBlock.Id)
destSet = self.getAliasSet(destBlock.Id, False)
for pair in destSet['Must']:
reversed = (pair[1], pair[0])
if pair not in sourceSet['Must'] and reversed not in sourceSet['Must']:
destSet['May'] += [pair]
destSet['Must'].remove(pair)
for pair in sourceSet['May']:
reserved = (pair[1], pair[0])
if pair not in destSet and reserved not in destSet:
destSet['May'].append(pair)
def copy(self, sourceBlock, destBlock):
# destFlow and sourceFLow must be set
# copy to
destSet = self.getAliasSet(destBlock.Id)
srcSet = self.getAliasSet(sourceBlock.Id, False)
for entity in ['Must', 'May']:
destSet[entity][:] = srcSet[entity]
def getAliasSet(self, Id, BeforeOrAfter=True):
Target = self._AliasSetBefore if BeforeOrAfter else self._AliasSetAfter
if Id not in Target.keys():
Target[Id] = dict({'Must': [], 'May': []})
return Target[Id]
def printData(self):
print "Function Alias:"
for key, value in self._AliasSetAfter.items():
print(key)
print(" Must Alias Pair")
print " ", value['Must']
print(" May Alias Pair")
print " ", value['May']
| {
"repo_name": "Panalyzer/Panalyzer",
"path": "src/AliasAnalysis.py",
"copies": "1",
"size": "3222",
"license": "mit",
"hash": 1238581724217521700,
"line_mean": 36.4651162791,
"line_max": 92,
"alpha_frac": 0.5301055245,
"autogenerated": false,
"ratio": 3.900726392251816,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.987005373519344,
"avg_score": 0.01215563631167532,
"num_lines": 86
} |
__author__ = 'CwT'
from ForwardFlowAnalysis import ForwardFlowAnalysis
class SSAliasAnalysis(ForwardFlowAnalysis):
def __init__(self):
self._AliasSet = dict({'Must': [], 'May': []})
ForwardFlowAnalysis.__init__(self)
@property
def AliasSet(self):
return self._AliasSet
def flowTransfer(self, block):
set = self._AliasSet
vars = self._graph.getTypeMap()
len_must, len_may = len(set['Must']), len(set['May'])
for instruction in block.insns:
a, b = instruction.getAlias(vars)
if a:
self.eliminate(a, set)
self.add(a, b, set)
if len(set['Must']) == len_must and \
len(set['May']) == len_may:
return False
return True
def add(self, vara, varb, set):
for entity in ['Must', 'May']:
additems = []
for pair in set[entity]:
if varb in pair:
other = pair[0] if pair[0] != varb else pair[1]
if (vara, other) not in set[entity] or (other, vara) not in set[entity]:
additems.append((vara, other))
for pair in additems:
set[entity].append(pair)
if (vara, varb) not in set['Must'] or (varb, vara) not in set['Must']:
set['Must'].append((vara, varb))
def eliminate(self, var, set):
for entity in ['Must', 'May']:
Godie = []
for pair in set[entity]:
if var in pair:
Godie.append(pair)
for pair in Godie:
set[entity].remove(pair)
Godie[:] = []
# Merge: Intersection
def mergeInto(self, sourceBlock, destBlock):
pass
def copy(self, sourceBlock, destBlock):
pass
def printData(self):
print "Function Alias:"
for key, value in self._AliasSet.items():
print(key)
print(" Must Alias Pair")
print " ", value
print(" May Alias Pair")
print " ", value
| {
"repo_name": "Panalyzer/Panalyzer",
"path": "src/SSAliasAnalysis.py",
"copies": "1",
"size": "2103",
"license": "mit",
"hash": -6958093383351475000,
"line_mean": 31.3538461538,
"line_max": 92,
"alpha_frac": 0.504041845,
"autogenerated": false,
"ratio": 3.938202247191011,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4942244092191011,
"avg_score": null,
"num_lines": null
} |
__author__ = 'CwT'
from ForwardFlowAnalysis import ForwardFlowAnalysis
class SSALiteralAnalysis(ForwardFlowAnalysis):
def __init__(self):
self._LiteralSet = {}
ForwardFlowAnalysis.__init__(self)
@property
def varMaps(self):
return self._LiteralSet
def flowTransfer(self, block):
set = self._LiteralSet
changed = False
for instruction in block.insns:
name, types = instruction.getType(set)
if name:
if name not in set.keys() or \
type(types) != type(set[name]):
changed = True
set[name] = types
return changed
def getType(self, name):
if name in self._LiteralSet:
return self._LiteralSet[name]
return None
def mergeInto(self, sourceBlock, destBlock):
pass
def copy(self, sourceBlock, destBlock):
pass
def printData(self):
print "Function Type:"
for key, value in self._LiteralSet.items():
print key, value.type
| {
"repo_name": "Panalyzer/Panalyzer",
"path": "src/SSALiteralAnalysis.py",
"copies": "1",
"size": "1081",
"license": "mit",
"hash": 6381821593756199000,
"line_mean": 26.7179487179,
"line_max": 55,
"alpha_frac": 0.5689176688,
"autogenerated": false,
"ratio": 4.466942148760331,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.553585981756033,
"avg_score": null,
"num_lines": null
} |
__author__ = 'CwT'
from Model import Node
import Queue
class InterProcTaintAnalysis(object):
    """Inter-procedural taint analysis over a whole-program supergraph.

    Sources are superglobal reads (e.g. <$_GET>); sinks are output
    constructs (echo/print/printf/print_r) and mysql(_i)_query call
    arguments. Taint propagation is recorded as edges between nodes in
    self._graph, and SearchPath reports which sinks are reachable from
    each source along a call-string-balanced path.
    """
    def __init__(self):
        self._graph = None      # SuperGraph, set by doAnalysis
        self._FuncExist = {}    # function name -> return-value nodes
        self._FuncParams = {}   # function name -> parameter nodes, in order
        self._SrcNodes = []     # taint source nodes
        self._TargetNodes = []  # taint sink nodes
        self._Counter = 0       # even edge-id generator for call edges
        # Tainted Src Op and Var
        self.taintedSrcOp = ['Expr_ArrayDimFetch']
        self.taintedSrcVar = ['<$_GET>', '<$_POST>', '<$_REQUEST>','<$_COOKIE>', '<$_SESSION>', '<$_FILES>', '<$_ENV>']
        self.tgtToBeTaintedOp = ['Terminal_Echo','Expr_Print', 'Expr_FuncCall']
        self.tgtToBeTaintedName = ['LITERAL(\'printf\')','LITERAL(\'print_r\')']
        self.taintedSrcMysql = ['LITERAL(\'mysql_query\')', 'LITERAL(\'mysqli_query\')']
        # InterProcAnalysis.__init__(self)
    def doAnalysis(self, SuperGraph):
        """Run the full pipeline: mark sources/sinks, propagate, report."""
        self._graph = SuperGraph
        self.work(self.MarkTaintedSource)
        self.work(self.MarkTaintTarget)
        for value in self._graph.subgraphs.values():
            self.AnalysisFunction(value)
        self.SearchPath()
    def AnalysisFunction(self, func):
        """Worklist fixed point over one function's blocks."""
        workList = Queue.Queue()
        workList.put(func.Entry)
        while not workList.empty():
            block = workList.get()
            changed = self.flowthrough(func, block)
            if changed:
                for item in block.successors:
                    workList.put(item)
    def assignId(self):
        # Fresh even id per call site; -id marks the matching return edge.
        self._Counter += 2
        return self._Counter
    def addNode(self, node):
        self._graph.addNode(node)
    def addEdge(self, srcFunc, srcVar, tgtFunc, tgtVar):
        return self._graph.addEdge(srcFunc, srcVar, tgtFunc, tgtVar)
    def addTargetNodeToEdge(self, srcFunc, srcVar, tgtNode, EdgeId):
        return self._graph.addTargetNodeToEdge(srcFunc, srcVar, tgtNode, EdgeId)
    def addSourceNodeToEdge(self, srcNode, tgtFunc, tgtVar, EdgeId):
        return self._graph.addSourceNodeToEdge(srcNode, tgtFunc, tgtVar, EdgeId)
    def printData(self):
        print str(self._graph)
    def work(self, handler):
        """Apply *handler*(function, block) to every block of every function."""
        for value in self._graph.subgraphs.values():
            for block in value.blocks.values():
                handler(value, block)
    def MarkTaintedSource(self, function, block):
        """Create SOURCE/RESULT/PARAMETER/RETURN nodes found in *block*."""
        for instruction in block.insns:
            line = instruction.Data
            op = line['op']
            if 'var' in line.keys() and \
                    op in self.taintedSrcOp:
                # Array fetch whose base is a superglobal: a taint source.
                for srcVar in self.taintedSrcVar:
                    if srcVar in line['var']:
                        # node = Node(line['var'], function.Name, Node.SOURCE)
                        node = self.FindOrCreateNode(function.Name, line['var'], Node.SOURCE)
                        self._SrcNodes.append(node)  # add src node
                        self.addNode(node)
            elif op == 'Expr_FuncCall':  # and \
                # line['name'] in self.taintedSrcMysql:
                self.addNode(Node(line['result'], function.Name, Node.RESULT))
            elif op == 'Expr_Param':
                param = self.FindOrCreateNode(function.Name, line['result'], Node.PARAMETER)
                # param = Node(line['result'], function.Name, Node.PARAMETER)
                if function.Name not in self._FuncParams.keys():
                    self._FuncParams[function.Name] = []
                self._FuncParams[function.Name].append(param)  # add in order
                self.addNode(param)
            elif op == 'Terminal_Return':
                retValue = line['expr']
                if retValue.startswith('Var#'):
                    # node = Node(retValue, function.Name, Node.RETURN)
                    node = self.FindOrCreateNode(function.Name, retValue, Node.RETURN)
                    if function.Name not in self._FuncExist.keys():
                        self._FuncExist[function.Name] = []
                    self._FuncExist[function.Name].append(node)
                    self.addNode(node)
    def MarkTaintTarget(self, function, block):
        """Mark sink nodes: echo/print operands and sensitive call arguments."""
        for instruction in block.insns:
            line = instruction.Data
            op = line['op']
            if op in self.tgtToBeTaintedOp:
                if 'name' in line.keys():  # Expr_FuncCall
                    if line['name'] in self.tgtToBeTaintedName or \
                            line['name'] in self.taintedSrcMysql:
                        for key in line.keys():
                            if line[key].startswith('args'):
                                node = self.FindOrCreateNode(function.Name, line[key])
                                self._TargetNodes.append(node)  # add target node
                else:  # Terminal_Echo, Expr_Print
                    node = self.FindOrCreateNode(function.Name, line['expr'])
                    self._TargetNodes.append(node)  # add target node
    def FindOrCreateNode(self, funcName, funcVar, type=Node.TARGET):
        return self._graph.FindOrCreateNode(funcName, funcVar, type)
    def flowthrough(self, function, block):
        """Propagate taint within *block*; return True if new edges appeared."""
        changed = False
        for instruction in block.insns:
            # Mark tainted source
            line = instruction.Data
            # handle function call
            if self.handleFunctionCall(function, line):
                continue
            # Taint propagation: every source operand taints every target.
            srcSet, tgtSet = self.taintLine(line)
            for each in srcSet:
                for target in tgtSet:
                    if self.addEdge(function.Name, each, function.Name, target):
                        changed = True
        return changed
    def handleFunctionCall(self, function, line):
        """Wire arg->param and return->result edges for a known callee."""
        if 'op' in line.keys() and line['op'] == 'Expr_FuncCall':
            # Strip the LITERAL('...') wrapper around the callee name.
            funcName = line['name'][9:-2] + "():"
            func = self._graph.getFuncByName(funcName)
            if func:  # function exists
                id = self.assignId()
                for key in line.keys():
                    if key.startswith('args'):  # Note edge: main:var -> test():arg[0]
                        number = int(key[5:-1])
                        self.addTargetNodeToEdge(function.Name, line[key], self._FuncParams[funcName][number], id)
                # TO-DO add backward edge
                if funcName in self._FuncExist.keys():
                    for existNode in self._FuncExist[funcName]:  # positive number for caller, and negative number for callee
                        self.addSourceNodeToEdge(existNode, function.Name, line['result'], -id)
                return True
        return False
    def taintLine(self, line):
        """Return (source operands, target operands) for one instruction.

        Comparison and bitwise binary ops deliberately do NOT propagate
        taint; everything else flows its inputs into its result.
        """
        srcSet = set()
        tgtSet = set()
        if line.has_key('op'):
            op = line['op']
            if op == 'Expr_ArrayDimFetch':
                srcSet.add(line['var'])
                srcSet.add(line['dim'])
                tgtSet.add(line['result'])
            elif op == 'Expr_Assign':
                srcSet.add(line['expr'])
                tgtSet.add(line['var'])
            elif op in ['Expr_BinaryOp_Smaller', 'Expr_BinaryOp_Equal', 'Expr_BinaryOp_Greater', 'Expr_BinaryOp_SmallerOrEqual', 'Expr_BinaryOp_GreaterOrEqual']:
                pass
            elif op in ['Expr_BinaryOp_BitwiseAnd', 'Expr_BinaryOp_BitwiseOr', 'Expr_BinaryOp_Xor']:
                pass
            elif op.startswith('Expr_BinaryOp'):
                srcSet.add(line['left'])
                srcSet.add(line['right'])
                tgtSet.add(line['result'])
            elif op.startswith('Expr_Cast'):
                srcSet.add(line['expr'])
                tgtSet.add(line['result'])
            elif op == 'Expr_FuncCall':
                for key in line.keys():
                    if key.startswith('args'):
                        srcSet.add(line[key])
                tgtSet.add(line['result'])
            elif op == 'Phi':
                for key in line.keys():
                    if key.startswith('chose'):
                        srcSet.add(line[key])
                tgtSet.add(line['result'])
            elif op in ['Stmt_Jump', 'Stmt_JumpIf']:
                pass
            elif op.startswith('Terminal'):
                pass
            elif op == 'Expr_Print':
                srcSet.add(line['expr'])
                tgtSet.add(line['result'])
            else:
                # Default: any Var# operand taints the result if present.
                if line.has_key('result'):
                    for key in line.keys():
                        if key != 'result' and key != 'op' and line[key].startswith('Var#'):
                            srcSet.add(line[key])
                    tgtSet.add(line['result'])
        if None in srcSet:
            srcSet.remove(None)
        if None in tgtSet:
            tgtSet.remove(None)
        return srcSet, tgtSet
    # Calculate reachability by depth-first search
    def SearchPath(self):
        """Print, for each source, the sinks reachable with balanced calls."""
        for src in self._SrcNodes:
            targets = []
            self.DFSearch(src, [], [], targets)
            if len(targets) > 0:
                print src.Name
                for each in targets:
                    print "\t", each.Name
    def DFSearch(self, node, CallString, visited, targets):
        """DFS honouring the call-string: edge id > 0 is a call, < 0 a
        return that must match the most recent call, 0 intra-procedural."""
        if (node.Type & Node.TARGET) and len(CallString) == 0:
            targets.append(node)
            return True
        for successor in node.successors:
            if successor in visited:
                continue
            visited.append(successor)
            subnode = successor[0]
            edgeId = successor[1]
            if edgeId == 0:  # intra-procedure
                self.DFSearch(subnode, CallString, visited, targets)
            elif edgeId > 0:  # from caller to callee
                # call into
                CallString.append(edgeId)
                self.DFSearch(subnode, CallString, visited, targets)
            elif edgeId < 0:  # return from callee to caller
                if CallString[-1] + edgeId == 0:  # legal
                    del CallString[-1]
                    self.DFSearch(subnode, CallString, visited, targets)
            visited.remove(successor)
| {
"repo_name": "Panalyzer/Panalyzer",
"path": "src/cfl/InterProcTaintAnalysis.py",
"copies": "1",
"size": "9996",
"license": "mit",
"hash": -9168881039073787000,
"line_mean": 42.0862068966,
"line_max": 161,
"alpha_frac": 0.5269107643,
"autogenerated": false,
"ratio": 4.166736140058358,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5193646904358358,
"avg_score": null,
"num_lines": null
} |
__author__ = 'CwT'
from type.Type import Literal, Normal
from type.Type import Array as TArray
class Graph(object):
    """Whole-program graph: one Function per parsed function; 'main' is entry."""

    def __init__(self, data):
        self._func = {}
        for key, value in data.items():
            self._func[key] = Function(key, value)

    @property
    def Entry(self):
        # The program entry point is the function named 'main'.
        return self._func['main']

    @property
    def functions(self):
        return self._func

    def Method(self, name):
        return self._func[name]

    def register(self, name, visitor):
        """Register an analysis visitor class (by name) on every function."""
        for func in self._func.values():
            func.register(name, visitor)

    def doAnalyse(self):
        for func in self._func.values():
            func.doAnalyse()

    def getFuncByName(self, name):
        """Return the Function called *name*, or None if absent."""
        # dict.get replaces the 'name in self._func.keys()' + if/else dance.
        return self._func.get(name)
class Function(object):
    """One function's control-flow graph, built from parsed block data."""

    def __init__(self, funcName, data):
        self._name = funcName
        self._blocks = {key: BasicBlock(key, value) for key, value in data.items()}
        self.createGraph()
        self._visitors = {}
        self._vName = []

    @property
    def Name(self):
        return self._name

    @property
    def blocks(self):
        return self._blocks

    @property
    def Entry(self):
        # 'Block#1' is the entry block by convention.
        return self._blocks['Block#1']

    def createGraph(self):
        """Wire predecessor/successor links using each block's Parent ids."""
        for blk in self._blocks.values():
            for parent_id in blk.parents:
                parent = self._blocks[parent_id]
                blk.predecessors.append(parent)
                parent.successors.append(blk)

    def register(self, name, visitor):
        """Instantiate *visitor* and register it under *name* (order kept)."""
        self._vName.append(name)
        self._visitors[name] = visitor()

    def doAnalyse(self):
        """Run every registered visitor in registration order, then print."""
        for name in self._vName:
            current = self._visitors[name]
            current.doAnalysis(self)
            current.printData()

    def getTypeMap(self):
        """Variable->type map from the 'Literal' visitor, or None."""
        if 'Literal' not in self._vName:
            return None
        return self._visitors['Literal'].varMaps

    def getAliasPair(self):
        """Alias set from the 'Alias' visitor, if registered (else None)."""
        if 'Alias' in self._vName:
            return self._visitors['Alias'].AliasSet
class BasicBlock(object):
    """One CFG basic block: an id, its instructions, and its graph links."""

    def __init__(self, key, cont):
        self._id = key
        self._predecessors = []
        self._successors = []
        self._code = []
        # 'Parent' lists predecessor block ids; optional (entry block).
        # dict.has_key() was Python-2-only; 'in' works on 2 and 3.
        self._parents = cont['Parent'] if 'Parent' in cont else []
        for item in cont['code']:
            self._code.append(Instruction(item))

    @property
    def parents(self):
        return self._parents

    @property
    def predecessors(self):
        return self._predecessors

    @property
    def successors(self):
        return self._successors

    @property
    def insns(self):
        return self._code

    @property
    def Id(self):
        return self._id
class Instruction(object):
    """Wraps one parsed instruction dict and answers analysis queries on it."""

    def __init__(self, data):
        self._data = data

    @property
    def Data(self):
        return self._data

    def isLiteral(self, expr, varMaps):
        """True if *expr* is a literal constant or a var inferred as Literal."""
        if expr.startswith('LITERAL'):
            return True
        if expr.startswith('Var#') and expr in varMaps:
            return isinstance(varMaps[expr], Literal)
        return False

    def getType(self, varMaps):
        """Return (var_name, inferred_type) for this instruction, or (None, None)."""
        op = self._data['op']
        if op == 'Expr_Assign':
            rhs = self._data['expr']
            if rhs.startswith('LITERAL'):
                return self._data['var'], Literal(self._data['expr'])
            if rhs.startswith('Var#') and rhs in varMaps:
                return self._data['var'], varMaps[rhs]
        elif op.startswith('Expr_Binary'):
            either_literal = (self.isLiteral(self._data['left'], varMaps)
                              or self.isLiteral(self._data['right'], varMaps))
            return self._data['result'], Literal() if either_literal else Normal()
        elif op.startswith('Expr_ArrayDim'):
            return self._data['var'], TArray()
        elif op == 'Expr_Array':
            return self._data['result'], TArray()
        return None, None

    def getAlias(self, varMaps):
        """Return (lhs, rhs) if this instruction creates an alias, else (None, None)."""
        op = self._data['op']
        if op == 'Expr_AssignRef':
            return self._data['var'], self._data['expr']
        if op == 'Expr_Assign':
            rhs = self._data['expr']
            # Assigning an array value aliases rather than copies it.
            if rhs in varMaps and isinstance(varMaps[rhs], TArray):
                return self._data['var'], rhs
        return None, None

    def isGlobal(self):
        """Classify this instruction's relation to global variables.

        Returns ('register', name) for a global declaration, otherwise
        ('check', ops) where ops are operands containing a '<$...>' marker.
        """
        if self._data['op'] == 'Terminal_GlobalVar':
            # Strips a 9-char prefix and 2-char suffix — presumably the
            # LITERAL('$name') wrapping; confirm against the IR emitter.
            return 'register', self._data['var'][9:-2]
        return 'check', filter(lambda x: x.find('<$') != -1, self.getVars())

    def findVarbyName(self, name):
        """Operands whose '<$...>' payload equals *name*."""
        return filter(lambda x: x.find('<$') != -1 and x[x.find('<$')+2:-1] == name, self.getVars())

    def getVars(self):
        """Collect the 'var', 'result', 'expr' operands present, in that order."""
        found = []
        for field in ('var', 'result', 'expr'):
            if field in self._data:
                found.append(self._data[field])
        return found
| {
"repo_name": "Panalyzer/Panalyzer",
"path": "src/Graph.py",
"copies": "1",
"size": "5159",
"license": "mit",
"hash": 7992706562260699000,
"line_mean": 27.5027624309,
"line_max": 100,
"alpha_frac": 0.5406086451,
"autogenerated": false,
"ratio": 4.021044427123928,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5061653072223928,
"avg_score": null,
"num_lines": null
} |
__author__ = 'CwT'
import struct
import Util
class OATHdr:
    """Parsed OAT file header (only the fields this tool consumes)."""

    # 21 little-endian 32-bit words: 4-byte magic, 4-byte version,
    # then 19 uint32 fields.
    _FMT = "4s4sIIIIIIIIIIIIIIIIIII"

    def __init__(self):
        self.offset = 0
        self.magic = []
        self.version = []
        self.checksum = 0
        self.dexfileCount = 0
        self.size = 0
        self.keyvalueSize = 0

    def readfd(self, file, offset):
        """Read the fixed 84-byte header located at *offset* in *file*."""
        self.offset = offset
        file.seek(offset)
        fields = struct.unpack(self._FMT, file.read(struct.calcsize(self._FMT)))
        self.magic, self.version = fields[0], fields[1]
        self.dexfileCount = fields[5]
        self.keyvalueSize = fields[20]

    def getHeaderSize(self):
        """Total header size: fixed part plus the variable key/value store."""
        return 21 * 4 + self.keyvalueSize

    def getDexListStart(self):
        """Absolute file offset where the dex-file metadata list begins."""
        return self.getHeaderSize() + self.offset
class DexMeta:
    """Metadata record for one dex file embedded in an OAT container."""

    def __init__(self):
        self.name = None     # dex file name, as read from the OAT
        self.dexOffset = 0   # dex data offset, relative to the OAT start
        self.classCount = 0  # number of class_def entries in this dex

    def readfd(self, file, oatFile):
        """Parse one dex metadata entry at *file*'s current position."""
        (name_len,) = struct.unpack("I", file.read(4))
        self.name = struct.unpack("%ds" % name_len, file.read(name_len))[0]
        after_name = struct.unpack("II", file.read(8))
        self.dexOffset = after_name[1]
        # Offset 96 within the dex header is the class_defs_size field.
        file.seek(oatFile.offset + self.dexOffset + 96)
        (self.classCount,) = struct.unpack("I", file.read(4))

    def getMetaSize(self):
        """Byte size of this entry in the OAT dex list (4 trailing words
        per class follow the fixed part)."""
        return 4 + len(self.name) + 8 + 4 * self.classCount
class OATfile:
    """An OAT container: its header plus the list of embedded dex files."""

    def __init__(self):
        self.offset = 0
        self.oatHdr = OATHdr()

    def readfd(self, file, offset):
        """Load the OAT header found at *offset* in *file*."""
        self.offset = offset
        self.oatHdr.readfd(file, offset)

    def getDexFiles(self, file):
        """Parse and return one DexMeta per dex file listed in the header."""
        entries = []
        cursor = self.oatHdr.getDexListStart()
        for _ in range(self.oatHdr.dexfileCount):
            file.seek(cursor)
            meta = DexMeta()
            meta.readfd(file, self)
            # Entries are variable-length: advance by the parsed size.
            cursor += meta.getMetaSize()
            entries.append(meta)
        return entries
| {
"repo_name": "CvvT/ElfParse",
"path": "OatParse.py",
"copies": "1",
"size": "1908",
"license": "apache-2.0",
"hash": -2010881188756968000,
"line_mean": 27.4776119403,
"line_max": 82,
"alpha_frac": 0.5780922432,
"autogenerated": false,
"ratio": 3.359154929577465,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9426042439130724,
"avg_score": 0.002240946729348096,
"num_lines": 67
} |
__author__ = 'CwT'
import struct
def elf_hash(funcname):
    """ELF symbol hash of *funcname* (a str), as used by the .hash section.

    Each round folds the top nibble back into the low bits and clears it,
    keeping h within 28 bits (System V ABI style).
    """
    h = 0
    for x in funcname:
        h = (h << 4) + ord(x)
        g = h & 0xf0000000
        # h ^= g clears the top nibble; g >> 24 mixes it back in lower down.
        h ^= g
        h ^= g >> 24
        # (removed a stray debug print() that emitted a blank line per char)
    return h
class Ehdr:
def __init__(self):
self.ident = []
self.type = 0 # 0: unknow; 1: relocation 2: execute 3: share
self.machine = 0
self.version = 0 # version: 1
self.entry = 0
self.phoff = 0 # program header table offset
self.shoff = 0 # section header table offset
self.flags = 0
self.ehsize = 0 # size of elf header
self.phentsize = 0 # size of each program table entry
self.phnum = 0 # number of program table entries
self.shentsize = 0 # size of each section table entry
self.shnum = 0 # number of section table entries
self.shstrndx = 0 # section header string index
self.size = 52 # bytes
self.shtable = None
self.phtable = None
self.dyntable = None
def readfromfd(self, file):
file.seek(0)
value = struct.unpack("16sHHIIIIIHHHHHH", file.read(self.size))
self.ident = value[0]
self.type = value[1]
self.machine = value[2]
self.version = value[3]
self.entry = value[4]
self.phoff = value[5]
self.shoff = value[6]
self.flags = value[7]
self.ehsize = value[8]
self.phentsize = value[9]
self.phnum = value[10]
self.shentsize = value[11]
self.shnum = value[12]
self.shstrndx = value[13]
def init_table(self):
self.shtable = ShTable()
self.shtable.readfromfd(file, self.shoff, self.shnum, self.shstrndx)
self.phtable = PhTable()
self.phtable.readfromfd(file, self.phoff, self.phnum)
tmp = self.phtable.getSegment("PT_DYNAMIC")
self.dyntable = DynTable(tmp.offset, tmp.filesz)
self.dyntable.readfromfd(file)
def printf(self):
for x in self.ident:
print hex(ord(x)),
print ''
print(self.type)
print(self.machine)
print(self.version)
print(self.entry)
print(self.phoff)
print(self.shoff)
print(self.flags)
print(self.ehsize)
print(self.phentsize)
print(self.phnum)
print(self.shentsize)
print(self.shnum)
print(self.shstrndx)
class Shder:
    """One 32-bit ELF section header (Elf32_Shdr)."""

    # On-disk size of an Elf32_Shdr record.
    STRUCT_SIZE = 40

    def __init__(self):
        self.name = 0  # index into the section-header string table
        self.type = 0
        self.flags = 0
        self.addr = 0
        self.offset = 0  # section offset from the beginning of the file
        self.size = 0    # section size in bytes (filled by readfromfd)
        self.link = 0
        self.info = 0
        self.addralign = 0
        self.entsize = 0

    def readfromfd(self, file):
        """Read one section header at *file*'s current position.

        Bug fix: the record size (40) used to be stored in self.size and
        was then overwritten by the section's own size field, so a second
        read on the same object unpacked the wrong number of bytes. The
        record size is now the STRUCT_SIZE class constant.
        """
        value = struct.unpack("IIIIIIIIII", file.read(self.STRUCT_SIZE))
        (self.name, self.type, self.flags, self.addr, self.offset,
         self.size, self.link, self.info, self.addralign, self.entsize) = value
class ShTable:
    """ELF section-header table plus the auxiliary data reached through
    it: the section-name string table, .dynstr, .dynsym and .hash."""

    def __init__(self):
        self.shtable = []      # list of Shder entries
        self.off = 0           # file offset of the table
        self.num = 0           # number of section headers
        self.shstrtable = []   # raw bytes of the section-name string table
        self.strtab = []       # raw bytes of .dynstr
        self.symtab = []       # Symbol entries parsed from .dynsym
        self.hashtable = None  # Hash object built from .hash

    def getstrtablefromfd(self, file, str_index):
        """Load the section-name string table (section *str_index*)
        one byte at a time into self.shstrtable."""
        sh = self.shtable[str_index]
        file.seek(sh.offset)
        for i in range(0, sh.size):
            self.shstrtable.append(struct.unpack("c", file.read(1))[0])

    def readfromfd(self, file, shoff, shnum, str_index):
        """Read *shnum* section headers starting at file offset *shoff*,
        then pull in the string/symbol/hash tables they reference."""
        file.seek(shoff)
        self.off = shoff
        self.num = shnum
        for i in range(0, shnum):
            shder = Shder()
            shder.readfromfd(file)
            self.shtable.append(shder)
        self.getstrtablefromfd(file, str_index)
        self.getSymstr(file)
        self.getSymtab(file)
        self.gethash(file)

    def getshname(self, index):
        """Return the NUL-terminated section name starting at *index*
        inside the section-name string table."""
        start = index
        while 1:
            if self.shstrtable[index] == '\0':
                break
            index += 1
        return "".join(self.shstrtable[start:index])

    def findsection(self, secName):
        """Return the Shder whose name equals *secName*, or None."""
        for i in range(0, self.num):
            if self.getshname(self.shtable[i].name) == secName:
                return self.shtable[i]
        return None

    def getSymstr(self, file):
        """Load the dynamic-symbol string table (.dynstr) into self.strtab."""
        tmp = self.findsection(".dynstr")
        if tmp is not None:
            file.seek(tmp.offset)
            for i in range(0, tmp.size):
                self.strtab.append(struct.unpack("c", file.read(1))[0])
        else:
            print("error in getSymstr")

    def getSymtab(self, file):
        """Load every Symbol from .dynsym into self.symtab."""
        tmp = self.findsection(".dynsym")
        if tmp is not None:
            # NOTE(review): Python-2 integer division; under Python 3 this
            # yields a float and range() below would raise -- confirm target.
            len = tmp.size / tmp.entsize
            file.seek(tmp.offset)
            for i in range(0, len):
                sym = Symbol()
                sym.readfromfd(file)
                self.symtab.append(sym)
        else:
            print("error in get symtab")

    def gethash(self, file):
        """Build the Hash object from the .hash section."""
        tmp = self.findsection(".hash")
        if tmp is not None:
            self.hashtable = Hash()
            self.hashtable.readfromfd(file, tmp.offset)
        else:
            print("error in gethash")

    def getnamebyindex(self, index):
        """Return the NUL-terminated string at *index* in .dynstr."""
        start = index
        while 1:
            if self.strtab[index] == '\0':
                break
            index += 1
        return "".join(self.strtab[start:index])

    def getfuncinfo(self, funcname):
        """Look up *funcname* via the SysV hash table and return its
        Symbol, or None when the collision chain ends without a match.

        Relies on a module-level elf_hash() implementation.
        """
        index = elf_hash(funcname) % self.hashtable.nbucket
        index = self.hashtable.getbucket(index)
        if index >= len(self.symtab):
            print("error, get symbol table first")
            exit()
        str_index = self.symtab[index].name
        if self.getnamebyindex(str_index) != funcname:
            while 1:
                index = self.hashtable.getchain(index)
                if index == 0:
                    print("did not find the func %s" %funcname)
                    break
                str_index = self.symtab[index].name
                if self.getnamebyindex(str_index) == funcname:
                    break
        if index == 0:
            return None
        return self.symtab[index]

    def printf(self):
        """Print every section name."""
        for i in range(0, self.num):
            print(self.getshname(self.shtable[i].name))

    def printsym(self):
        """Print every dynamic-symbol name."""
        for i in range(0, len(self.symtab)):
            print(self.getnamebyindex(self.symtab[i].name))
class Phdr:
    """ELF32 program header (Elf32_Phdr)."""

    # recognised p_type values
    TYPE = {'PT_NULL': 0, 'PT_LOAD': 1, 'PT_DYNAMIC': 2, 'PT_INTERP': 3,
            'PT_NOTE': 4, 'PT_SHLIB': 5, 'PT_PHDR': 6, }

    def __init__(self):
        self.type = 0
        self.offset = 0
        self.vaddr = 0   # virtual address
        self.paddr = 0   # physical address
        self.filesz = 0  # size in file
        self.memsz = 0   # size in memory
        self.flags = 0
        self.align = 0
        self.size = 8 * 4  # struct size in bytes

    def readfromfd(self, file):
        """Unpack one program header from *file*'s current position."""
        (self.type, self.offset, self.vaddr, self.paddr,
         self.filesz, self.memsz, self.flags, self.align) = struct.unpack(
            "IIIIIIII", file.read(self.size))
class PhTable:
    """The ELF program-header table: a list of Phdr entries."""

    def __init__(self):
        self.phtable = []
        self.num = 0
        self.off = 0

    def readfromfd(self, file, offset, phnum):
        """Read *phnum* program headers starting at file offset *offset*."""
        file.seek(offset)
        self.off = offset
        self.num = phnum
        for _ in range(self.num):
            entry = Phdr()
            entry.readfromfd(file)
            self.phtable.append(entry)

    def getSegment(self, segName):
        """Return the first segment whose type matches *segName* (a
        Phdr.TYPE key); None for unknown names or no match."""
        if segName not in Phdr.TYPE:
            print("unknow segment name")
            return None
        wanted = Phdr.TYPE[segName]
        for i in range(self.num):
            if self.phtable[i].type == wanted:
                print("find %s" % (segName))
                return self.phtable[i]
class Symbol:
    """ELF32 dynamic symbol (Elf32_Sym)."""

    def __init__(self):
        self.name = 0   # index into the symbol string table
        self.value = 0
        self.size = 0
        self.info = 0
        self.other = 0
        self.shndx = 0
        # struct read length (16 bytes); overwritten with st_size by
        # readfromfd()
        self.size = 4 * 3 + 2 * 1 + 2

    def readfromfd(self, file):
        """Unpack one symbol from *file*'s current position (the read
        length is taken from self.size before it is overwritten)."""
        (self.name, self.value, self.size,
         self.info, self.other, self.shndx) = struct.unpack(
            "IIIBBH", file.read(self.size))
class Dynamic:
    """One Elf32_Dyn entry: a signed tag plus a value/pointer union."""

    TAG = {0: 'DT_NULL', 1: 'DT_NEEDED', 2: 'DT_PLTRELSZ', 3: 'DT_PLTGOT',
           4: 'DT_HASH', 5: 'DT_STRTAB', 6: 'DT_SYMTAB', 7: 'DT_RELA',
           8: 'DT_REALSZ', 9: 'DT_RELAENT', 10: 'DT_STRSZ', 11: 'DT_SYMENT',
           12: 'DT_INIT', 13: 'DT_FINI', 14: 'DT_SONAME', 15: 'DT_RPATH',
           16: 'DT_SYMBOLIC', 17: 'DT_REL', 18: 'DT_RELSZ', 19: 'DT_RELENT', 20: 'DT_PLTREL',
           21: 'DT_DEBUG', 22: 'DT_TEXTREL', 23: 'DT_JMPREL',
           25: 'DT_INIT_ARRAY', 26: 'DT_FINI_ARRAY', 27: 'DT_INIT_ARRAYSZ', 28: 'DT_FINI_ARRAYSZ',
           }

    def __init__(self):
        self.tag = 0
        self.union = 0  # d_val / d_ptr union
        self.size = 8   # struct size in bytes

    def readfromfd(self, file):
        """Unpack one dynamic entry (signed tag, unsigned union)."""
        self.tag, self.union = struct.unpack("iI", file.read(self.size))

    def printf(self):
        # print the symbolic tag name when the tag is known
        print("tag is: ", Dynamic.TAG.get(self.tag, self.tag))
        print("union is: ", self.union)
        print("size is: ", self.size)
class DynTable:
    """The PT_DYNAMIC segment: Elf32_Dyn entries plus the string, symbol
    and hash tables they point to."""

    TAG = {'DT_NULL': 0, 'DT_NEEDED': 1, 'DT_PLTRELSZ': 2, 'DT_PLTGOT': 3,
           'DT_HASH': 4, 'DT_STRTAB': 5, 'DT_SYMTAB': 6, 'DT_RELA': 7,
           'DT_REALSZ': 8, 'DT_RELAENT': 9, 'DT_STRSZ': 10, 'DT_SYMENT': 11,
           'DT_INIT': 12, 'DT_FINI': 13, 'DT_SONAME': 14, 'DT_RPATH': 15,
           'DT_SYMBOLIC': 16, 'DT_REL': 17, 'DT_RELSZ': 18, 'DT_RELENT': 19, 'DT_PLTREL': 20,
           'DT_DEBUG': 21, 'DT_TEXTREL': 22, 'DT_JMPREL': 23,
           # BUG FIX: 'DT_FINI_ARRAYSZ' used to carry a leading space, so
           # getsecBytag("DT_FINI_ARRAYSZ") raised KeyError.
           'DT_INIT_ARRAY': 25, 'DT_FINI_ARRAY': 26, 'DT_INIT_ARRAYSZ': 27, 'DT_FINI_ARRAYSZ': 28, }

    def __init__(self, offset, size):
        self.off = offset       # file offset of the dynamic section
        self.size = size        # byte size of the dynamic section
        # BUG FIX: floor division -- each entry is 8 bytes and the count
        # must stay an int under Python 3 (identical result on Python 2).
        self.num = size // 8
        self.dyn = []           # Dynamic entries
        self.strtab = []        # raw bytes of DT_STRTAB
        self.symtab_off = 0     # file offset of DT_SYMTAB
        self.symtab = []        # Symbol entries (filled by getSymboltable)
        self.hashtable = None   # Hash object from DT_HASH

    def readfromfd(self, file):
        """Read self.num dynamic entries, then load the string and hash
        tables and record the symbol-table file offset."""
        file.seek(self.off)
        for i in range(0, self.num):
            dyn = Dynamic()
            dyn.readfromfd(file)
            self.dyn.append(dyn)
        self.getStrtable(file)
        self.getHashtable(file)
        self.symtab_off = self.getsecBytag("DT_SYMTAB").union
        # self.getSymboltable(file)

    def getsecBytag(self, tag):
        """Return the first dynamic entry whose d_tag matches *tag* (a
        DynTable.TAG key), or None when absent."""
        for i in range(0, self.num):
            if self.dyn[i].tag == DynTable.TAG[tag]:
                return self.dyn[i]
        print("did not find %s" % tag)
        return None

    def getStrtable(self, file):
        """Load DT_STRTAB (DT_STRSZ bytes) into self.strtab."""
        offset = self.getsecBytag("DT_STRTAB").union
        size = self.getsecBytag("DT_STRSZ").union
        file.seek(offset)
        for i in range(0, size):
            self.strtab.append(struct.unpack("c", file.read(1))[0])

    def getHashtable(self, file):
        """Build the Hash object from DT_HASH."""
        offset = self.getsecBytag("DT_HASH").union
        self.hashtable = Hash()
        self.hashtable.readfromfd(file, offset)

    def getSymboltable(self, file, filesz):
        """Load *filesz* bytes worth of symbols from DT_SYMTAB."""
        offset = self.getsecBytag("DT_SYMTAB").union
        # BUG FIX: floor division -- the entry count feeds range() below
        num = filesz // self.getsecBytag("DT_SYMENT").union  # size of each symbol entry
        file.seek(offset)
        for i in range(0, num):
            symbol = Symbol()
            symbol.readfromfd(file)
            self.symtab.append(symbol)

    def getnamebyindex(self, index):
        """Return the NUL-terminated string at *index* in self.strtab."""
        start = index
        while 1:
            if self.strtab[index] == '\0':
                break
            index += 1
        return "".join(self.strtab[start:index])

    def getfuncinfo(self, funcname, file):
        """Hash-lookup *funcname* and return its Symbol read straight
        from the file (16 bytes per Elf32_Sym), or None when the
        collision chain ends without a match."""
        index = elf_hash(funcname) % self.hashtable.nbucket
        index = self.hashtable.getbucket(index)
        file.seek(self.symtab_off + index * 16)
        # str_index = self.symtab[index].name
        str_index = struct.unpack("I", file.read(4))[0]
        if self.getnamebyindex(str_index) != funcname:
            while 1:
                index = self.hashtable.getchain(index)
                print(index, "index")
                if index == 0:
                    print("did not find the func %s" %funcname)
                    break
                file.seek(self.symtab_off + index * 16)
                str_index = struct.unpack("I", file.read(4))[0]
                if self.getnamebyindex(str_index) == funcname:
                    break
        if index == 0:
            return None
        func = Symbol()
        file.seek(self.symtab_off + index * 16)
        func.readfromfd(file)
        return func

    def printSymbol(self):
        """Print every symbol name loaded by getSymboltable()."""
        for i in range(0, len(self.symtab)):
            print(self.getnamebyindex(self.symtab[i].name))
class Hash:
    """ELF SysV hash table (.hash): nbucket, nchain, then the bucket and
    chain arrays of symbol indices."""

    def __init__(self):
        self.nbucket = 0
        self.nchain = 0
        self.bucket = []
        self.chain = []

    def readfromfd(self, file, offset):
        """Load the hash table located at file offset *offset*."""
        file.seek(offset)
        self.nbucket, self.nchain = struct.unpack("II", file.read(8))
        for _ in range(self.nbucket):
            self.bucket.append(struct.unpack("I", file.read(4))[0])
        for _ in range(self.nchain):
            self.chain.append(struct.unpack("I", file.read(4))[0])

    def getbucket(self, index):
        """First symbol index for bucket *index*, or -1 when out of range."""
        if index >= self.nbucket:
            print("index %d out of range %d" % (index, self.nbucket))
            return -1
        return self.bucket[index]

    def getchain(self, index):
        """Next symbol index in the collision chain."""
        return self.chain[index]
if __name__ == '__main__':
    # Smoke test: parse and print the ELF header of a local shared object.
    file = open("libtest.so", "rb")
    elfheader = Ehdr()
    elfheader.readfromfd(file)
    elfheader.printf()
    file.close()
| {
"repo_name": "List3nt0/ElfParse",
"path": "ElfParse.py",
"copies": "3",
"size": "14115",
"license": "apache-2.0",
"hash": -185202154555888580,
"line_mean": 31.299771167,
"line_max": 101,
"alpha_frac": 0.5307828551,
"autogenerated": false,
"ratio": 3.3345145287030475,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5365297383803048,
"avg_score": null,
"num_lines": null
} |
__author__ = 'CwT'
class CFLGraph(object):
    """Flow graph of "func:var" nodes layered on top of a super graph."""

    def __init__(self, SuperGraph):
        self._supGraph = SuperGraph
        self._nodes = {}   # "func:var" -> Node

    @property
    def Entry(self):
        return self._supGraph.Entry

    @property
    def subgraphs(self):
        return self._supGraph.functions

    @property
    def AllNodes(self):
        return self._nodes

    def getFuncByName(self, name):
        return self._supGraph.getFuncByName(name)

    def addNode(self, node):
        """Insert *node* if absent, then merge its type flags into the
        stored node (update is a no-op when the node was just added)."""
        existing = self._nodes.get(node.Name)
        if existing is None:
            self._nodes[node.Name] = node
            existing = node
        existing.update(node.Type)

    def addEdge(self, srcFunc, srcVar, tgtFunc, tgtVar):
        """Add an edge from srcFunc:srcVar to tgtFunc:tgtVar.

        The source must already exist; the target is created on demand.
        :return: True if a new edge was actually added.
        """
        source = "%s:%s" % (srcFunc, srcVar)
        target = "%s:%s" % (tgtFunc, tgtVar)
        if source not in self._nodes:
            return False
        if target not in self._nodes:  # target may be added dynamically
            self.addNode(Node(tgtVar, tgtFunc))
        return self._nodes[source].addEdge(self._nodes[target])

    def addTargetNodeToEdge(self, srcFunc, srcVar, tgtNode, EdgeId):
        """Connect an existing source key to an externally-held node."""
        source = "%s:%s" % (srcFunc, srcVar)
        if source not in self._nodes:
            return False
        return self._nodes[source].addEdge(tgtNode, EdgeId)

    def addSourceNodeToEdge(self, srcNode, tgtFunc, tgtVar, EdgeId):
        """Connect an externally-held node to a (possibly new) target."""
        target = "%s:%s" % (tgtFunc, tgtVar)
        if target not in self._nodes:
            self.addNode(Node(tgtVar, tgtFunc))
        return srcNode.addEdge(self._nodes[target], EdgeId)

    def isTainted(self, funcName, varName):
        """True when funcName:varName is already in the graph."""
        return (funcName + ":" + varName) in self._nodes

    def FindOrCreateNode(self, funcName, funcVar, type):
        """Return the node for funcName:funcVar, creating it (or merging
        *type* into an existing node) as needed."""
        key = funcName + ":" + funcVar
        node = self._nodes.get(key)
        if node is None:
            node = Node(funcVar, funcName, type)
            self._nodes[key] = node
        else:
            node.update(type)
        return node

    def __str__(self):
        parts = []
        for key, value in self._nodes.items():
            parts.append(key + ":\n")
            parts.append(str(value))
        return "".join(parts)
class Node(object):
    """A graph node identified by "function:variable", carrying a bitmask
    of role flags and a list of outgoing (node, edge-id) pairs."""

    # role flag bits (may be OR-ed together)
    SOURCE = 1
    TARGET = 2
    MIDDLE = 4
    PARAMETER = 8
    RETURN = 16
    RESULT = 32

    def __init__(self, name, functionname, type=MIDDLE):
        """
        :param name: variable name
        :param functionname: enclosing function name
        :param type: initial role flags (defaults to MIDDLE)
        """
        self._varName = name
        self._func = functionname
        self._edges = []   # outgoing (Node, edge-id) pairs
        self._type = type

    @property
    def Name(self):
        return "%s:%s" % (self._func, self._varName)

    @property
    def FuncName(self):
        return self._func

    @property
    def VarName(self):
        return self._varName

    @property
    def Type(self):
        return self._type

    @property
    def successors(self):
        return self._edges

    def addEdge(self, node, EdgeId=0):
        """Append (node, EdgeId) unless present; True when added."""
        entry = (node, EdgeId)
        if entry in self._edges:
            return False
        self._edges.append(entry)
        return True

    def update(self, type):
        """OR *type*'s flags into this node's role bitmask."""
        self._type |= type

    def __str__(self):
        names = "".join(each[0].Name + ", " for each in self._edges)
        return "\t" + names + "\n"
| {
"repo_name": "Panalyzer/Panalyzer",
"path": "src/cfl/Model.py",
"copies": "1",
"size": "3639",
"license": "mit",
"hash": 5028505321046524000,
"line_mean": 25.3695652174,
"line_max": 87,
"alpha_frac": 0.5504259412,
"autogenerated": false,
"ratio": 3.95114006514658,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9961601140294757,
"avg_score": 0.007992973210364514,
"num_lines": 138
} |
__author__ = 'CwT'
from DexParse import *
if __name__ == '__main__':
    # Inject a new class Lcom/cc/test/DexParse; into classes.dex, give it
    # an empty static main(), then patch MainActivity.onCreate to call it.
    dexfile = DexFile("classes.dex")
    # @Params: classname, acces_flag(public, private...), parent_class_name, Filename
    classitem = dexfile.addclass("Lcom/cc/test/DexParse;", Access_Flag["public"],
                                 "Ljava/lang/Object;", "DexParse.java")
    classdata = ClassdataItem(None, 2)
    dexfile.addclassData(classdata)
    classdata.addstaticfield(dexfile.addfield(classitem.classIdx, "int", "IntFromCwt"),
                             Access_Flag['public'] | Access_Flag["static"])
    classdata.addinstancefield(dexfile.addfield(classitem.classIdx, "boolean", "BoolFromCwt"),
                               Access_Flag["private"])
    param_list = []
    # param_list.append("[Lcom/lang/String;")
    # param_list.append("boolean")  # prepare for parameter list
    code = CodeItem(None, 2)
    # @param register_size, in_size, out_size, tries_size, debug_off,
    # insns_size, insns_list, debugref, tries_list, handler
    insns = []
    tries = []
    insns.append(0xe)  # return void
    debug = DebugInfo(None, 2)
    names_list = []
    debug_list = [0]
    debug.adddebugitem(0, 0, names_list, debug_list)
    dexfile.adddebug(debug)
    code.addcode(1, 1, 0, 0, 0, len(insns), insns, debug, tries, None)
    dexfile.addcode(code)
    method_idx = dexfile.addmethod(classitem.classIdx, param_list, "void", "main")
    classdata.adddirectmethod(method_idx, Access_Flag['public'] | Access_Flag['static'], code)
    classdata.commit()
    classitem.addclassdefref(None, None, classdata, None)
    # Prepend an invoke-static (opcode 0x71) of the new main() to the
    # existing onCreate bytecode and grow its instruction count.
    tmp_method = dexfile.getmethodItem("Lcom/cc/test/MainActivity;", "onCreate")
    coderef = tmp_method['method'].coderef
    coderef.insns.insert(0, 0)
    coderef.insns.insert(0, method_idx)
    coderef.insns.insert(0, 0x71)
    coderef.insns_size += 3
    dexfile.copytofile("classescp.dex")
| {
"repo_name": "peterdocter/DexParse",
"path": "AddClass.py",
"copies": "2",
"size": "1874",
"license": "apache-2.0",
"hash": -1306748393954928400,
"line_mean": 42.5813953488,
"line_max": 94,
"alpha_frac": 0.6531483458,
"autogenerated": false,
"ratio": 3.1077943615257047,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4760942707325705,
"avg_score": null,
"num_lines": null
} |
__author__ = 'CwT'
from ElfParse import *
if __name__ == '__main__':
    # Open the target library and dump its ELF header; the commented-out
    # block below XOR-inverted one function's bytes in place ("encryption").
    file = open("libbaiduprotect.so", 'r+b')
    file.seek(0)
    elfheader = Ehdr()
    elfheader.readfromfd(file)
    elfheader.init_table()
    addr = elfheader.shtable.findsection(".text").offset
    # NOTE(review): findsection() returns None (not -1) on failure, so this
    # check can never fire; a missing .text raises AttributeError above.
    if addr == -1:
        print("find section .text failed")
        exit(0)
    # symbol = elfheader.shtable.getfuncinfo("native_hello")
    # symbol = elfheader.dyntable.getfuncinfo("native_hello", file)
    # print("func addr :", symbol.value - 1) # i don't know why must minus one, it's the beginning of the text
    # file.seek(symbol.value - 1)
    # size = symbol.size
    # content = []
    # for i in range(0, size-1):
    #     content.append(~struct.unpack("B", file.read(1))[0] & 0xff)
    # file.seek(symbol.value - 1)
    # # print(struct.unpack("B", file.read(1))[0])
    # for i in range(0, size-1):
    #     # print(struct.pack("B", content[i]), )
    #     file.write(struct.pack("B", content[i]))
    # elfheader.shtable.printsym()
    elfheader.printf()
    file.close()
| {
"repo_name": "List3nt0/ElfParse",
"path": "Encrypt.py",
"copies": "3",
"size": "1065",
"license": "apache-2.0",
"hash": -5129427311128843000,
"line_mean": 34.5,
"line_max": 111,
"alpha_frac": 0.5962441315,
"autogenerated": false,
"ratio": 2.974860335195531,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5071104466695531,
"avg_score": null,
"num_lines": null
} |
__author__ = 'CwT'
from idaapi import *
from idautils import *
from idc import *
import re
def getDword(addr):
    """Read a 32-bit value at *addr* from the IDA database."""
    return Dword(addr)


def getByte(addr):
    """Read one byte at *addr* from the IDA database."""
    return Byte(addr)


def getWord(addr):
    """Read a 16-bit value at *addr* from the IDA database."""
    return Word(addr)
class DexorJar:
    """Mirror of dalvik's DexOrJar struct; printf() also descends into
    the jar's DvmDex when the entry is not a raw dex."""

    def __init__(self):
        self.pfileName = 0
        self.isDex = 0
        self.okaytoFree = 0   # do not care
        self.pRawDexFile = 0
        self.pJarFile = 0
        self.pDexMemory = 0   # do not care

    def dump(self, addr):
        """Fill the fields from the native struct at *addr*."""
        self.pfileName = getDword(addr)
        self.isDex = getByte(addr + 4)
        self.pRawDexFile = getDword(addr + 8)
        self.pJarFile = getDword(addr + 12)

    def printf(self):
        """Print the file name; for jar entries, dump the JarFile too."""
        name = ""
        baseaddr = self.pfileName
        onebyte = getByte(baseaddr)
        while onebyte != 0:
            name += chr(onebyte)
            baseaddr += 1
            onebyte = getByte(baseaddr)
        print("filename is:", name)
        if self.isDex > 0:
            pass
            # print("it's a dex file, addr: ", hex(self.pRawDexFile))
        else:
            # print("it's a jar file, addr: ", hex(self.pJarFile))
            jarfile = JarFile()
            # BUG FIX: read the jar pointer from *this* object; the old
            # code used the global 'cookie', which only worked by accident
            # for the instance currently bound to that global.
            jarfile.dump(self.pJarFile)
            jarfile.printf()
class JarFile:
    """Mirror of dalvik's JarFile struct (a dex loaded from an apk/jar)."""

    def __init__(self):
        self.archive = None    # ZipArchive; not parsed here
        self.pcacheFileName = 0
        self.pDvmDex = 0

    def dump(self, addr):
        # field offsets inside the native JarFile struct
        self.pcacheFileName = getDword(addr + 36)
        self.pDvmDex = getDword(addr + 40)

    def printf(self):
        """Print the NUL-terminated cache-file name."""
        chars = []
        cursor = self.pcacheFileName
        value = getByte(cursor)
        while value != 0:
            chars.append(chr(value))
            cursor += 1
            value = getByte(cursor)
        print("cache file name is : ", "".join(chars))
        # print("DvmDex addr is :", hex(self.pDvmDex))
class RawDexFile:
    """Mirror of dalvik's RawDexFile struct (a dex loaded directly)."""

    def __init__(self):
        self.pcacheFileName = 0
        self.pDvmDex = 0

    def dump(self, addr):
        self.pcacheFileName = getDword(addr)
        self.pDvmDex = getDword(addr + 4)

    def printf(self):
        """Print the NUL-terminated cache-file name (or a null notice)."""
        if self.pcacheFileName == 0:
            print("cache file name is null")
            return
        chars = []
        cursor = self.pcacheFileName
        value = getByte(cursor)
        while value != 0:
            chars.append(chr(value))
            cursor += 1
            value = getByte(cursor)
        print("cache file name is : ", "".join(chars))
        # print("DvmDex addr is :", hex(self.pDvmDex))
class DvmDex:
    """Mirror of dalvik's DvmDex struct (just the two pointers we need)."""

    def __init__(self):
        self.pDexFile = 0
        self.pHeader = 0   # clone of the dex header
        # just for now

    def dump(self, addr):
        self.pDexFile = getDword(addr)
        self.pHeader = getDword(addr + 4)

    def printf(self):
        # compare the DexFile pointer against the cloned header address
        print("dexfile addr is: ", hex(self.pDexFile))
        # print("header addr is: ", hex(self.pHeader))
experiment = True
if experiment:
    # Scan a few instructions at the cursor for a "dword_XXXX" reference
    # (to locate gDvm) and an immediate offset into that struct.
    gDvm = 0
    offset = 0
    for i in range(10):
        insn = GetDisasm(here()+i*2)
        match = re.search(r'PC[\s]*;[\s]*dword_(?P<addr>[0-9A-Z]+)', insn)
        if match is not None:
            address = "0x" + match.group('addr')
            gDvm = Dword(int(address, 16))
        # NOTE(review): '[R[\d]+,...]' parses as a character class, not a
        # literal "[Rn,#imm]" pattern -- presumably r'\[R\d+,#...\]' was
        # intended; confirm against the disassembly it must match.
        match = re.search(r'[R[\d]+,#(?P<off>0x[\dA-F]+)]', insn)
        if match is not None:
            offset = match.group('off')
            offset = int(offset, 16)
    target = gDvm + offset
else:
    target = int(0x40DDB654) # find it in dvminternalnativeshutdown--->dvmHashTableFree
print "target:", hex(target)
# target points at the userDexFiles hash table inside gDvm
userDex = getDword(target)
size = getDword(userDex)
entry = getDword(userDex+12)
print "Size:", size
print "Entry:", hex(entry)
for i in range(size):
    # each slot is a (hash, item) pair; a live slot has hash == item != 0
    hash = getDword(entry+8*i)
    item = getDword(entry+8*i+4)
    if hash == item and hash != 0:
        cookie = DexorJar()
        cookie.dump(hash)
        cookie.printf()
        if cookie.isDex == 0:
            jarfile = JarFile()
            jarfile.dump(cookie.pJarFile)
            jarfile.printf()
            dvmaddr = jarfile.pDvmDex
        else:
            rawDex = RawDexFile()
            rawDex.dump(cookie.pRawDexFile)
            rawDex.printf()
            dvmaddr = rawDex.pDvmDex
        dvmDex = DvmDex()
        dvmDex.dump(dvmaddr)
        dvmDex.printf()
| {
"repo_name": "CvvT/dumpDex",
"path": "findcookie.py",
"copies": "1",
"size": "4305",
"license": "apache-2.0",
"hash": 5947876427478314000,
"line_mean": 27.3223684211,
"line_max": 88,
"alpha_frac": 0.5479674797,
"autogenerated": false,
"ratio": 3.2055100521221145,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42534775318221146,
"avg_score": null,
"num_lines": null
} |
__author__ = 'CwT'
from idaapi import *
from idautils import *
from idc import *
import struct
import os
import hashlib
# Dex access_flags bit values (dex format specification).
Access_Flag = {'public': 1, 'private': 2, 'protected': 4, 'static': 8, 'final': 0x10,
               'synchronized': 0x20, 'volatile': 0x40, 'bridge': 0x40, 'transient': 0x80,
               'varargs': 0x80, 'native': 0x100, 'interface': 0x200, 'abstract': 0x400,
               'strictfp': 0x800, 'synthetic': 0x1000, 'annotation': 0x2000, 'enum': 0x4000,
               'constructor': 0x10000, 'declared_synchronized': 0x20000}
# Java type name -> dex TypeDescriptor.  Array descriptors are '[' plus the
# element descriptor.  BUG FIX: 'int[]' and 'double[]' were missing the '['.
TypeDescriptor = {'void': 'V', 'boolean': 'Z', 'byte': 'B', 'short': 'S', 'char': 'C',
                  'int': 'I', 'long': 'J', 'float': 'F', 'double': 'D', 'boolean[]': '[Z',
                  'byte[]': '[B', 'short[]': '[S', 'char[]': '[C', 'int[]': '[I',
                  'long[]': '[J', 'float[]': '[F', 'double[]': '[D'}
# Single-character ShortyDescriptor used in proto shorty strings.
ShortyDescriptor = {'void': 'V', 'boolean': 'Z', 'byte': 'B', 'short': 'S', 'char': 'C',
                    'int': 'I', 'long': 'J', 'float': 'F', 'double': 'D'}
# Hex-digit character -> numeric value (used to pack the SHA-1 text digest).
ACSII = {'1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, '0': 0,
         'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15}
def checksum(f, len):
    """Adler-32 over bytes [12, len) of file *f* -- the dex checksum,
    which skips the magic and the checksum word itself."""
    low, high = 1, 0
    f.seek(12)
    print("file size is :", len)
    for _ in range(12, len):
        value = ord(f.read(1))
        low = (low + value) % 65521
        high = (high + low) % 65521
    return (high << 16) | low
def get_file_sha1(f):
    """SHA-1 hex digest of *f* from offset 32 onward (the dex signature
    skips magic, checksum and the signature field itself)."""
    f.seek(32)  # skip magic, checksum, sha
    digest = hashlib.sha1()
    for chunk in iter(lambda: f.read(1024), b''):
        digest.update(chunk)
    return digest.hexdigest()
def getDword(addr):
    """Read a 32-bit value at *addr* from the IDA database."""
    return Dword(addr)


def getByte(addr):
    """Read one byte at *addr* from the IDA database."""
    return Byte(addr)


def getWord(addr):
    """Read a 16-bit value at *addr* from the IDA database."""
    return Word(addr)
def rightshift(value, n):
    """Arithmetic (sign-extending) right shift of a 32-bit *value* by *n*.

    Python's >> on a non-negative int is a logical shift, so when bit 31
    is set the vacated high bits are filled in manually and the result is
    reinterpreted as a signed 32-bit integer via struct.
    """
    if value & 0x80000000 != 0x80000000:
        return value >> n
    fill = 0x80000000
    for k in range(n):
        fill |= 0x80000000 >> k
    packed = struct.pack("I", fill | (value >> n))
    return struct.unpack("i", packed)[0]
def readunsignedleb128(addr):
    """Decode an unsigned LEB128 value at *addr* (bytes via getByte).

    Returns (value, encoded_length).  At most five bytes are consumed and
    the fifth byte contributes all eight of its bits, matching dalvik.
    """
    result = 0
    for i in range(5):
        cur = getByte(addr + i)
        if i == 4:
            result |= cur << 28
        else:
            result |= (cur & 0x7f) << (7 * i)
        if i == 4 or cur <= 0x7f:
            return result, i + 1
def readsignedleb128(addr):
    """Decode a signed LEB128 value at *addr* (bytes via getByte).

    Returns (value, encoded_length).  After the final byte the partial
    value is sign-extended from its top data bit using rightshift(); the
    fifth byte contributes all of its bits.
    """
    res = getByte(addr)
    len = 1
    if res <= 0x7f:
        res = rightshift((res << 25), 25)
    else:
        cur = getByte(addr + 1)
        res = (res & 0x7f) | ((cur & 0x7f) << 7)
        len = 2
        if cur <= 0x7f:
            res = rightshift((res << 18), 18)
        else:
            cur = getByte(addr + 2)
            res |= (cur & 0x7f) << 14
            len = 3
            if cur <= 0x7f:
                res = rightshift((res << 11), 11)
            else:
                cur = getByte(addr + 3)
                res |= (cur & 0x7f) << 21
                len = 4
                if cur <= 0x7f:
                    res = rightshift((res << 4), 4)
                else:
                    cur = getByte(addr + 4)
                    res |= cur << 28
                    len = 5
    return res, len
def writesignedleb128(num, file):
    """Write *num* to *file* as signed LEB128.

    Non-negative values reuse the unsigned writer; negative values emit
    7-bit groups until only sign-extension bits remain (counted from the
    most significant 0 bit of the low 32 bits).
    """
    if num >= 0:
        writeunsignedleb128(num, file)
        return
    probe = 0x80000000
    pos = 0
    for pos in range(0, 32):
        hit = num & probe
        probe >>= 1
        if hit == 0:
            break
    remaining = 32 - pos + 1
    while remaining > 7:
        file.write(struct.pack("B", (num & 0x7f) | 0x80))
        num >>= 7
        remaining -= 7
    file.write(struct.pack("B", num & 0x7f))
def signedleb128forlen(num):
    """Return the encoded byte length of *num* as signed LEB128.

    Non-negative values delegate to unsignedleb128forlen(); negative
    values count significant bits from the most significant 0 bit of the
    low 32 bits and divide into 7-bit groups.
    """
    if num >= 0:
        return unsignedleb128forlen(num)
    mask = 0x80000000
    for i in range(0, 32):
        tmp = num & mask
        mask >>= 1
        if tmp == 0:
            break
    loop = 32 - i + 1
    # BUG FIX: floor division -- plain '/' yields a float under Python 3
    # and callers use the result as a byte count.
    if loop % 7 == 0:
        return loop // 7
    return loop // 7 + 1
def writeunsignedleb128(num, file):
    """Write *num* to *file* as unsigned LEB128 (at most five bytes; the
    fifth byte is written with all remaining bits, as dalvik does)."""
    for _ in range(4):
        if num <= 0x7f:
            file.write(struct.pack("B", num))
            return
        file.write(struct.pack("B", (num & 0x7f) | 0x80))
        num >>= 7
    file.write(struct.pack("B", num))
def unsignedleb128forlen(num):
    """Return the encoded byte length of *num* as unsigned LEB128
    (1..5 for 32-bit inputs); warn if it would exceed five bytes."""
    original = num
    count = 1
    while num > 0x7f:
        count += 1
        num >>= 7
    if count > 5:
        print("error for unsignedleb128forlen", original)
    return count
def readunsignedleb128p1(addr):
    """Decode a ULEB128p1 (value stored as value+1, so -1 encodes as 0).

    Returns (value, encoded_length)."""
    res, len = readunsignedleb128(addr)
    return res - 1, len


def writeunsignedleb128p1(num, file):
    """Write *num* in ULEB128p1 encoding (num+1 as unsigned LEB128)."""
    writeunsignedleb128(num+1, file)


def unsignedleb128p1forlen(num):
    """Encoded byte length of *num* in ULEB128p1."""
    return unsignedleb128forlen(num+1)
def getutf8str(addr):
    """Read a NUL-terminated byte string at *addr* (via getByte) and
    decode it as UTF-8.

    NOTE(review): dex strings are MUTF-8; a strict UTF-8 decode can fail
    on embedded NULs or surrogate pairs -- confirm inputs are plain.
    """
    string = []
    while 1:
        onebyte = getByte(addr)
        addr += 1
        if onebyte == 0:
            break
        string.append(onebyte)
    return bytearray(string).decode("utf-8")
def getstr(bytes):
    """Decode a sequence of byte values as a UTF-8 string."""
    buf = bytearray(bytes)
    return buf.decode("utf-8")
class DexorJar:
    """Mirror of dalvik's DexOrJar struct: a loaded dex or jar/apk."""

    def __init__(self):
        self.pfileName = 0
        self.isDex = 0
        self.okaytoFree = 0   # do not care
        self.pRawDexFile = 0
        self.pJarFile = 0
        self.pDexMemory = 0   # do not care

    def dump(self, addr):
        """Fill the fields from the native struct at *addr*."""
        self.pfileName = getDword(addr)
        self.isDex = getByte(addr + 4)
        self.pRawDexFile = getDword(addr + 8)
        self.pJarFile = getDword(addr + 12)

    def printf(self):
        """Print the file name and which union member is active."""
        pieces = []
        cursor = self.pfileName
        value = getByte(cursor)
        while value != 0:
            pieces.append(chr(value))
            cursor += 1
            value = getByte(cursor)
        print("filename is:", "".join(pieces))
        if self.isDex > 0:
            print("it's a dex file, addr: ", hex(self.pRawDexFile))
        else:
            print("it's a jar file, addr: ", hex(self.pJarFile))
class JarFile:
    """Mirror of dalvik's JarFile struct (a dex loaded from an apk/jar)."""

    def __init__(self):
        self.archive = None   # ZipArchive; not parsed here
        self.pcacheFileName = 0
        self.pDvmDex = 0

    def dump(self, addr):
        # field offsets inside the native JarFile struct
        self.pcacheFileName = getDword(addr + 36)
        self.pDvmDex = getDword(addr + 40)

    def printf(self):
        """Print the cache-file name and the DvmDex address."""
        chars = []
        cursor = self.pcacheFileName
        value = getByte(cursor)
        while value != 0:
            chars.append(chr(value))
            cursor += 1
            value = getByte(cursor)
        print("cache file name is : ", "".join(chars))
        print("DvmDex addr is :", hex(self.pDvmDex))
class DvmDex:
    """Mirror of dalvik's DvmDex struct (just the two pointers we need)."""

    def __init__(self):
        self.pDexFile = 0
        self.pHeader = 0   # clone of the dex header
        # just for now

    def dump(self, addr):
        self.pDexFile = getDword(addr)
        self.pHeader = getDword(addr + 4)

    def printf(self):
        # show both so the DexFile copy and the header clone can be compared
        print("dexfile addr is: ", hex(self.pDexFile))
        print("header addr is: ", hex(self.pHeader))
global baseAddr
class DexFile:
    """In-memory mirror of the native libdex DexFile struct plus helpers
    to rebuild the dex layout and write it back out to disk."""

    def __init__(self):
        self.pOptHeader = 0
        self.pHeader = 0
        self.pStringIds = 0
        self.pTypeIds = 0
        self.pFieldIds = 0
        self.pMethodIds = 0
        self.pProtoIds = 0
        self.pClassDefs = 0
        self.pLinkData = 0
        self.baseAddr = 0
        self.dexheader = DexHeader()
        self.dexmaplist = DexMapList()

    def dump(self, addr):
        """Read the native DexFile struct at *addr*, then parse the
        header and the map list it references (also publishing the
        module-global baseAddr used by the section parsers)."""
        global baseAddr
        self.pOptHeader = getDword(addr)
        self.pHeader = getDword(addr + 4)
        self.pStringIds = getDword(addr + 8)
        self.pTypeIds = getDword(addr + 12)
        self.pFieldIds = getDword(addr + 16)
        self.pMethodIds = getDword(addr + 20)
        self.pProtoIds = getDword(addr + 24)
        # BUG FIX: this word is the class-defs pointer; it used to be
        # (re)assigned to pProtoIds, leaving pClassDefs always 0.
        self.pClassDefs = getDword(addr + 28)
        self.pLinkData = getDword(addr + 32)
        self.baseAddr = getDword(addr + 44)
        baseAddr = self.baseAddr
        self.dexheader.dump(self.pHeader)
        self.dexmaplist.dump(self.dexheader.mapOff)
        self.dexmaplist.dexmapitem[0].item.append(self.dexheader)
        print("end build dex and start to get reference")
        self.dexmaplist.getreference()

    def makeoffset(self):
        """Lay out every section (4-byte aligned) and refresh the header's
        file-size and data-size fields."""
        off = self.dexmaplist.makeoff()
        align = off % 4
        if align != 0:
            off += (4 - align)
        self.dexheader.makeoffset(self.dexmaplist.dexmapitem)
        self.dexheader.fileSize = off
        self.dexheader.dataSize = off - self.dexheader.mapOff

    def copytofile(self, filename):
        """Write the rebuilt dex to *filename*, then patch in the SHA-1
        signature and the adler32 checksum (both depend on the final
        file contents, so they are written last)."""
        if os.path.exists(filename):
            os.remove(filename)
        file = open(filename, 'wb+')
        file.seek(0, 0)
        self.makeoffset()
        self.dexmaplist.copy(file)
        # zero-pad up to the computed total file size
        rest = self.dexheader.fileSize - file.tell()
        for i in range(0, rest):
            file.write(struct.pack("B", 0))
        file_sha = get_file_sha1(file)
        tmp = bytes(file_sha)  # NOTE(review): Python-2 bytes == str here
        i = 0
        file.seek(12)
        while i < 40:
            # pack two hex digits of the digest into each signature byte
            num = (ACSII[tmp[i]] << 4) + ACSII[tmp[i + 1]]
            file.write(struct.pack("B", num))
            i += 2
        csum = checksum(file, self.dexheader.fileSize)
        print("checksum:", hex(csum), "file size:", self.dexheader.fileSize)
        file.seek(8)
        file.write(struct.pack("I", csum))
        file.close()

    def printf(self):
        print("dex head addr: ", hex(self.pHeader))
        print("dex head addr: ", hex(self.baseAddr))
class DexHeader:
    """The 0x70-byte dex header_item (dex format specification)."""

    def __init__(self):
        self.magic = []       # 8 magic bytes ("dex\n035\0")
        self.checksum = 0     # adler32 of everything after this field
        self.signature = []   # 20-byte SHA-1 of everything after it
        self.fileSize = 0
        self.headerSize = 0
        self.endianTag = 0
        self.linkSize = 0
        self.linkOff = 0
        self.mapOff = 0
        self.stringIdsSize = 0
        self.stringIdsOff = 0
        self.typeIdsSize = 0
        self.typeIdsOff = 0
        self.protoIdsSize = 0
        self.protoIdsOff = 0
        self.fieldIdsSize = 0
        self.fieldIdsOff = 0
        self.methodIdsSize = 0
        self.methodIdsOff = 0
        self.classDefsSize = 0
        self.classDefsOff = 0
        self.dataSize = 0
        self.dataOff = 0

    def dump(self, addr):
        """Read every header field from memory at *addr*.

        BUG FIX: field_ids_off and every later field were read 4 bytes
        too far (+88..+112 instead of +84..+108), skipping field_ids_off
        and reading dataOff past the 0x70-byte header.
        """
        len = 0
        while len < 8:
            self.magic.append(getByte(addr + len))
            len += 1
        self.checksum = getDword(addr + 8)
        len = 0
        while len < 20:
            self.signature.append(getByte(addr + 12 + len))
            len += 1
        self.fileSize = getDword(addr + 32)
        self.headerSize = getDword(addr + 36)
        self.endianTag = getDword(addr + 40)
        self.linkSize = getDword(addr + 44)
        self.linkOff = getDword(addr + 48)
        self.mapOff = getDword(addr + 52)
        self.stringIdsSize = getDword(addr + 56)
        self.stringIdsOff = getDword(addr + 60)
        self.typeIdsSize = getDword(addr + 64)
        self.typeIdsOff = getDword(addr + 68)
        self.protoIdsSize = getDword(addr + 72)
        self.protoIdsOff = getDword(addr + 76)
        self.fieldIdsSize = getDword(addr + 80)
        self.fieldIdsOff = getDword(addr + 84)
        self.methodIdsSize = getDword(addr + 88)
        self.methodIdsOff = getDword(addr + 92)
        self.classDefsSize = getDword(addr + 96)
        self.classDefsOff = getDword(addr + 100)
        self.dataSize = getDword(addr + 104)
        self.dataOff = getDword(addr + 108)

    def makeoffset(self, dexmaplist):
        """Refresh the size/offset fields from the laid-out map items;
        fileSize/dataSize are filled in later by the caller."""
        self.stringIdsSize = dexmaplist[1].size
        self.stringIdsOff = dexmaplist[1].offset
        self.typeIdsSize = dexmaplist[2].size
        self.typeIdsOff = dexmaplist[2].offset
        self.protoIdsSize = dexmaplist[3].size
        self.protoIdsOff = dexmaplist[3].offset
        self.fieldIdsSize = dexmaplist[4].size
        self.fieldIdsOff = dexmaplist[4].offset
        self.methodIdsSize = dexmaplist[5].size
        self.methodIdsOff = dexmaplist[5].offset
        self.classDefsSize = dexmaplist[6].size
        self.classDefsOff = dexmaplist[6].offset
        self.dataOff = dexmaplist[0x1000].offset
        self.dataSize = 0
        self.mapOff = dexmaplist[0x1000].offset
        self.fileSize = 0

    def copytofile(self, file):
        """Serialise the full 112-byte header at the start of *file*."""
        file.seek(0, 0)
        len = 0
        while len < 8:
            file.write(struct.pack("B", self.magic[len]))
            len += 1
        file.write(struct.pack("I", self.checksum))
        len = 0
        while len < 20:
            file.write(struct.pack("B", self.signature[len]))
            len += 1
        file.write(struct.pack("I", self.fileSize))
        file.write(struct.pack("I", self.headerSize))
        file.write(struct.pack("I", self.endianTag))
        file.write(struct.pack("I", self.linkSize))
        file.write(struct.pack("I", self.linkOff))
        file.write(struct.pack("I", self.mapOff))
        file.write(struct.pack("I", self.stringIdsSize))
        file.write(struct.pack("I", self.stringIdsOff))
        file.write(struct.pack("I", self.typeIdsSize))
        file.write(struct.pack("I", self.typeIdsOff))
        file.write(struct.pack("I", self.protoIdsSize))
        file.write(struct.pack("I", self.protoIdsOff))
        file.write(struct.pack("I", self.fieldIdsSize))
        file.write(struct.pack("I", self.fieldIdsOff))
        file.write(struct.pack("I", self.methodIdsSize))
        file.write(struct.pack("I", self.methodIdsOff))
        file.write(struct.pack("I", self.classDefsSize))
        file.write(struct.pack("I", self.classDefsOff))
        file.write(struct.pack("I", self.dataSize))
        file.write(struct.pack("I", self.dataOff))
class DexMapList:
    """The dex map_list plus the map items it indexes by type code."""

    # Output order of map-item types when rewriting the file.
    Seq = (0, 1, 2, 3, 4, 5, 6, 0x1000, 0x1001, 0x1002, 0x1003, 0x2001, 0x2000, 0x2002,
           0x2003, 0x2004, 0x2005, 0x2006)

    def __init__(self):
        self.start = 0        # file offset of the map list
        self.size = 0         # number of map items
        self.dexmapitem = {}  # type code -> DexMapItem

    def dump(self, offset):
        """Read the map list at *offset* (relative to the module-global
        baseAddr) and index every DexMapItem by its type code."""
        self.start = offset
        self.size = getDword(offset + baseAddr)
        mapitem = []
        for i in range(0, self.size):
            item = DexMapItem()
            item.dump(offset + baseAddr + 4 + i * 12)
            mapitem.append(item)
            item.printf()
        for i in range(0, self.size):
            mapitem[i].setitem(self.dexmapitem)
            self.dexmapitem[mapitem[i].type] = mapitem[i]

    def copy(self, file):
        """Write every known section to *file* in Seq order; the map list
        itself (type 0x1000) is emitted by copytofile()."""
        for i in range(0, len(DexMapList.Seq)):
            index = DexMapList.Seq[i]
            if index in self.dexmapitem.keys():
                print(index, "start at:", file.tell())
                if index != 0x1000:
                    self.dexmapitem[index].copytofile(file)
                else:
                    self.copytofile(file)

    def copytofile(self, file):
        """Serialise the map_list structure itself."""
        print("output map list", file.tell())
        file.seek(self.start, 0)
        file.write(struct.pack("I", self.size))
        for i in range(0, len(DexMapList.Seq)):
            index = DexMapList.Seq[i]
            if index in self.dexmapitem.keys():
                # print(self.dexmapitem[index].type)
                file.write(struct.pack("H", self.dexmapitem[index].type))
                file.write(struct.pack("H", self.dexmapitem[index].unused))
                file.write(struct.pack("I", self.dexmapitem[index].size))
                file.write(struct.pack("I", self.dexmapitem[index].offset))

    def makeoff(self):
        """Assign a 4-byte-aligned file offset to every section and return
        the running end offset."""
        off = 0
        for i in range(0, len(DexMapList.Seq)):
            index = DexMapList.Seq[i]
            if index in self.dexmapitem.keys():
                align = off % 4
                if align != 0:
                    off += (4 - align)
                if index != 0x1000:
                    off = self.dexmapitem[index].makeoffset(off)
                else:
                    off = self.makeoffset(off)
        return off

    def makeoffset(self, off):
        """Reserve space for the map list itself at *off*."""
        self.start = off
        off += (4 + self.size * 12)
        self.dexmapitem[0x1000].offset = self.start
        return off

    def getreference(self):
        """Resolve cross-section object references after dump()."""
        self.dexmapitem[1].getref(self.dexmapitem)
        print("string id get ref done")
        self.dexmapitem[3].getref(self.dexmapitem)
        print("proto id get ref done")
        self.dexmapitem[6].getref(self.dexmapitem)
        print("class def get ref done")
        if 0x1002 in self.dexmapitem.keys():
            self.dexmapitem[0x1002].getref(self.dexmapitem)
            print("annotation set ref get ref done")
        if 0x1003 in self.dexmapitem.keys():
            self.dexmapitem[0x1003].getref(self.dexmapitem)
            print("annotation set get ref done")
        # self.dexmapitem[0x2000].getref(self.dexmapitem)
        self.dexmapitem[0x2001].getref(self.dexmapitem)
        print("code item get ref done")
        if 0x2006 in self.dexmapitem.keys():
            self.dexmapitem[0x2006].getref(self.dexmapitem)
            print("annotation dir item get ref done")

    def getrefbystr(self, str):
        """Find a string-data item by value (delegates to type 0x2002)."""
        return self.dexmapitem[0x2002].getrefbystr(str)

    def printf(self, index):
        # NOTE(review): DexMapItem.printf() takes no argument; the call
        # below would raise TypeError if this method were exercised.
        print ("DexMapList:")
        print ("size: ", self.size)
        for i in self.dexmapitem:
            self.dexmapitem[i].printf(index)
class DexMapItem:
    """One entry of the dex map_list: the section descriptor (type, size,
    offset) plus the parsed item objects of that section.

    ``Constant`` maps each numeric section type code to its dex name.
    """
    Constant = {0: 'TYPE_HEADER_ITEM', 1: 'TYPE_STRING_ID_ITEM', 2: 'TYPE_TYPE_ID_ITEM',
                3: 'TYPE_PROTO_ID_ITEM', 4: 'TYPE_FIELD_ID_ITEM', 5: 'TYPE_METHOD_ID_ITEM',
                6: 'TYPE_CLASS_DEF_ITEM', 0x1000: 'TYPE_MAP_LIST', 0x1001: 'TYPE_TYPE_LIST',
                0x1002: 'TYPE_ANNOTATION_SET_REF_LIST', 0x1003: 'TYPE_ANNOTATION_SET_ITEM',
                0x2000: 'TYPE_CLASS_DATA_ITEM', 0x2001: 'TYPE_CODE_ITEM', 0x2002: 'TYPE_STRING_DATA_ITEM',
                0x2003: 'TYPE_DEBUG_INFO_ITEM', 0x2004: 'TYPE_ANNOTATION_ITEM', 0x2005: 'TYPE_ENCODED_ARRAY_ITEM',
                0x2006: 'TYPE_ANNOTATIONS_DIRECTORY_ITEM'}
    def __init__(self):
        self.type = 0    # section type code (a key of ``Constant``)
        self.unused = 0  # padding ushort from the raw map_item record
        self.size = 0    # number of items in this section
        self.offset = 0  # file offset of the section
        self.item = []   # parsed item objects, filled by setitem()
        self.len = 0     # serialized byte length, filled by makeoffset()
    def dump(self, addr):
        """Read one raw 12-byte map_item record at *addr*."""
        self.type = getWord(addr)
        self.unused = getWord(addr + 2)
        self.size = getDword(addr + 4)
        self.offset = getDword(addr + 8)
    def copytofile(self, file):
        """Write every item of this section to *file* at self.offset,
        4-byte-aligning the write position first."""
        file.seek(self.offset, 0)
        # All known type codes are <= 0x2006, so this alignment runs for
        # every section (pads with zero bytes up to a 4-byte boundary).
        if self.type <= 0x2006:
            align = file.tell() % 4
            if align != 0:
                for i in range(0, 4-align):
                    file.write(struct.pack("B", 0))
        print("copytofile:", DexMapItem.Constant[self.type], file.tell())
        for i in range(0, self.size):
            if self.type == 0x2000:
                # Extra diagnostics for class_data_item sections only.
                print("index, offset", i, hex(self.item[i].start), self.item[i].static_field_size, self.item[i].instance_fields_size, self.item[i].direct_methods_size, self.item[i].virtual_methods_size)
            self.item[i].copytofile(file)
        # if self.type == 0x2002:
        #     print("for debug", i, getstr(self.item[i].str))
    def printf(self):
        """Print this section's descriptor (type name, size, offsets)."""
        print ("type: ", DexMapItem.Constant[self.type])
        print ("size: ", self.size)
        print ("offset: ", hex(self.offset), hex(self.offset + baseAddr))
        # if self.type == index:
        #     for i in range(0, self.size):
        #         self.item[i].printf()
        #         print ()
    def setitem(self, dexmapitem):
        """Parse this section's raw bytes into item objects.

        *dexmapitem* is the full type->DexMapItem map, used to resolve
        cross-section lookups (e.g. type ids need the string table, so the
        caller must have parsed dependency sections first).  Fixed-size
        records advance by their record width; variable-size items advance
        by each item's decoded length.
        """
        self.printf()
        addr = baseAddr + self.offset
        for i in range(0, self.size):
            if self.type == 1:  # string
                dexstringid = DexStringID()
                dexstringid.dump(addr + 4 * i)
                self.item.append(dexstringid)
            elif self.type == 2:
                dextypeid = DexTypeID()
                dextypeid.dump(addr + 4 * i, dexmapitem[1].item)
                self.item.append(dextypeid)  # make sure has already build string table
            elif self.type == 3:
                dexprotoid = DexProtoId()
                dexprotoid.dump(addr + 12 * i, dexmapitem[1].item, dexmapitem[2].item)
                self.item.append(dexprotoid)
            elif self.type == 4:
                dexfieldid = DexFieldId()
                dexfieldid.dump(addr + 8 * i, dexmapitem[1].item, dexmapitem[2].item)
                self.item.append(dexfieldid)
            elif self.type == 5:
                dexmethodid = DexMethodId()
                dexmethodid.dump(addr + 8 * i, dexmapitem[1].item, dexmapitem[2].item)
                self.item.append(dexmethodid)
            elif self.type == 6:
                dexclassdef = DexClassDef()
                dexclassdef.dump(addr + 32 * i, dexmapitem[1].item, dexmapitem[2].item)
                self.item.append(dexclassdef)
            elif self.type == 0x1001:  # TYPE_TYPE_LIST
                typeitem = TypeItem()
                typeitem.dump(addr, dexmapitem[2].item)
                addr += typeitem.len
                self.item.append(typeitem)
            elif self.type == 0x1002:  # TYPE_ANNOTATION_SET_REF_LIST
                annoitem = AnnotationsetrefList()
                annoitem.dump(addr)
                addr += annoitem.len
                self.item.append(annoitem)
            elif self.type == 0x1003:  # TYPE_ANNOTATION_SET_ITEM
                annoitem = AnnotationsetItem()
                annoitem.dump(addr)
                addr += annoitem.len
                self.item.append(annoitem)
            elif self.type == 0x2000:  # TYPE_CLASS_DATA_ITEM
                classitem = ClassdataItem()
                classitem.dump(addr, dexmapitem[0x2001].item)
                addr += classitem.len
                self.item.append(classitem)
            # elif self.type == 0x2001:  # TYPE_CODE_ITEM
            #     codeitem = CodeItem()
            #     codeitem.dump(addr)
            #     addr += codeitem.len
            #     self.item.append(codeitem)
            elif self.type == 0x2002:  # TYPE_STRING_DATA_ITEM
                stringdata = StringData()
                stringdata.dump(addr)
                addr += stringdata.len
                self.item.append(stringdata)
            elif self.type == 0x2003:  # TYPE_DEBUG_INFO_ITEM
                debuginfo = DebugInfo()
                debuginfo.dump(addr)
                addr += debuginfo.len
                self.item.append(debuginfo)
            elif self.type == 0x2004:  # TYPE_ANNOTATION_ITEM
                item = AnnotationItem()
                item.dump(addr)
                addr += item.len
                self.item.append(item)
            elif self.type == 0x2005:  # TYPE_ENCODED_ARRAY_ITEM
                arrayitem = EncodedArrayItem()
                arrayitem.dump(addr)
                addr += arrayitem.len
                self.item.append(arrayitem)
            elif self.type == 0x2006:  # TYPE_ANNOTATIONS_DIRECTORY_ITEM
                dirItem = AnnotationsDirItem()
                dirItem.dump(addr)
                addr += dirItem.len
                self.item.append(dirItem)
    def makeoffset(self, off):
        """Assign this section's file offset and compute its length.

        Fixed-width sections (types 0-6) are sized arithmetically; variable
        sections (0x1001-0x2006) delegate to each item's makeoffset().
        Returns the offset just past this section.
        """
        # Sections that must start 4-byte aligned.
        if self.type < 0x2000 or self.type == 0x2001 or self.type == 0x2006:
            align = off % 4
            if align != 0:
                off += (4 - align)
        self.offset = off
        if self.type == 0:  # header
            self.len = 112
        elif self.type == 1:  # string id
            self.len = 4 * self.size
        elif self.type == 2:  # type id
            self.len = 4 * self.size
        elif self.type == 3:  # proto id
            self.len = 12 * self.size
        elif self.type == 4:  # field id
            self.len = 8 * self.size
        elif self.type == 5:  # method id
            self.len = 8 * self.size
        elif self.type == 6:  # class def
            self.len = 32 * self.size
        elif self.type == 0x1000:  # map list, resolve specially in dexmaplist class
            pass
        elif 0x1001 <= self.type <= 0x2006:  # type list, annotation ref set list, annotation set item...
            for i in range(0, self.size):
                off = self.item[i].makeoffset(off)
            # if self.type == 0x2002:
            #     print("for debug", i, off)
            self.len = off - self.offset
        if self.type == 0x2000:
            print("the off is:", off)
        # For fixed-width sections ``off`` was not advanced in the loop
        # above, so the table length is added here.
        if self.type <= 6:
            return off + self.len
        else:
            return off
    def getref(self, dexmaplist):
        """Resolve offset fields to object references on every item."""
        for i in range(0, self.size):
            self.item[i].getreference(dexmaplist)
    def getreference(self, offset):  # offset
        """Find the item of this section whose dumped address matches the
        raw file *offset*; returns None for offset 0 or no match."""
        if offset == 0:
            return None
        i = 0
        for i in range(0, self.size):
            if self.item[i].start == offset + baseAddr:
                return self.item[i]
        # if i >= self.size:
        #     os._exit(offset)
        print("failed : don not find the refernce")
        return None
    def getrefbystr(self, str):  # for modify the string data
        """Find the StringData item whose decoded text equals *str*;
        only valid on the string-data section (type 0x2002)."""
        if self.type == 0x2002:
            for i in range(0, self.size):
                if getstr(self.item[i].str) == str:
                    return self.item[i]
        else:
            print("error occur here", self.type)
        return None
    def getindexbyname(self, str):  # search for type id item
        """Return the index of the item whose cached ``str`` equals *str*,
        or -1 when absent."""
        for i in range(0, self.size):
            if self.item[i].str == str:
                print("find index of", DexMapItem.Constant[self.type], str)
                return i
        print("did not find it in", DexMapItem.Constant[self.type])
        return -1
    def getindexbyproto(self, short_idx, return_type_idx, param_list, length):  # called by item, index of 3
        """Return the index of the proto_id whose shorty, return type and
        parameter list all match, or -1 when absent (proto-id section only)."""
        for i in range(0, self.size):
            if short_idx == self.item[i].shortyIdx and return_type_idx == self.item[i].returnTypeIdx:
                if self.item[i].ref is not None:
                    if self.item[i].ref.equal(param_list, length):
                        return i
        return -1
class DexStringID:
    """string_id_item: a single uint offset into the string-data section.

    dump() caches the decoded text; getreference() binds ``ref`` to the
    matching StringData object so copytofile() can emit the relocated
    offset.
    """

    def __init__(self):
        self.stringDataOff = 0  # raw file offset of the string_data_item
        self.size = 0           # decoded length from the leb128 header
        self.str = ""           # decoded string text
        self.ref = None         # StringData object, set by getreference()

    def dump(self, addr):
        """Read the 4-byte offset at *addr* and decode the string it names."""
        self.stringDataOff = getDword(addr)
        data_addr = self.stringDataOff + baseAddr
        # leb_len is the byte length of the uleb128 header (avoid shadowing
        # the builtin len(), as the original local did).
        self.size, leb_len = readunsignedleb128(data_addr)
        self.str = getutf8str(data_addr + leb_len)

    def copytofile(self, file):
        """Write the relocated string-data offset taken from ``ref``."""
        # self.stringDataoff = self.ref.start
        file.write(struct.pack("I", self.ref.start))

    def getreference(self, dexmaplist):
        """Bind ``ref`` to the StringData item at stringDataOff."""
        self.ref = dexmaplist[0x2002].getreference(self.stringDataOff)
        # if self.ref is not None:
        #     self.ref.printf()

    def printf(self):
        print ("size: ", self.size, " str: ", self.str, "dataof: ", self.stringDataOff)
class DexTypeID:
    """type_id_item: a single uint index into the string-id table naming the
    type descriptor."""

    def __init__(self):
        self.descriptorIdx = 0  # string-id index of the descriptor
        self.str = ""           # cached descriptor text

    def dump(self, addr, str_table):
        """Read the descriptor index at *addr* and cache its text from
        *str_table* (the already-parsed string-id items)."""
        self.descriptorIdx = getDword(addr)
        self.str = str_table[self.descriptorIdx].str

    def copytofile(self, file):
        """Write the 4-byte descriptor index unchanged."""
        file.write(struct.pack("I", self.descriptorIdx))

    def printf(self):
        print ("type id: ", self.str)
class DexProtoId:
    """proto_id_item: shorty descriptor, return type, and an optional
    parameter type list (offset 0 when the method takes no parameters)."""

    def __init__(self):
        self.shortyIdx = 0      # string-id index of the shorty descriptor
        self.returnTypeIdx = 0  # type-id index of the return type
        self.parametersOff = 0  # raw offset of the parameter TypeItem (0 = none)
        self.name = ""          # cached shorty text
        self.returnstr = ""     # cached return-type descriptor
        self.ref = None         # parameter TypeItem, set by getreference()

    def dump(self, addr, str_table, type_table):
        """Read the 12-byte record at *addr* and cache the referenced text."""
        self.shortyIdx = getDword(addr)
        self.returnTypeIdx = getDword(addr + 4)
        self.parametersOff = getDword(addr + 8)
        self.name = str_table[self.shortyIdx].str
        self.returnstr = type_table[self.returnTypeIdx].str

    def copytofile(self, file):
        """Write the record, substituting the relocated parameter-list
        offset (0 when there is no parameter list)."""
        parameters_off = self.ref.start if self.ref is not None else 0
        file.write(struct.pack("III", self.shortyIdx, self.returnTypeIdx, parameters_off))

    def getreference(self, dexmaplist):
        """Bind ``ref`` to the parameter TypeItem (None when parametersOff
        is 0)."""
        self.ref = dexmaplist[0x1001].getreference(self.parametersOff)

    def printf(self):
        print ("return Type:", self.returnstr)
        print ("methodname:", self.name)
        if self.ref is not None:
            self.ref.printf()
class DexFieldId:
    """field_id_item (8 bytes): defining class (ushort), field type (ushort)
    and field name (uint)."""

    def __init__(self):
        self.classIdx = 0  # type-id index of the defining class
        self.typeIdx = 0   # type-id index of the field type
        self.nameIdx = 0   # string-id index of the field name
        self.classstr = ""
        self.typestr = ""
        self.name = ""

    def dump(self, addr, str_table, type_table):
        """Read the 8-byte record at *addr* and cache the referenced text."""
        self.classIdx = getWord(addr)
        self.typeIdx = getWord(addr + 2)
        self.nameIdx = getDword(addr + 4)
        self.classstr = type_table[self.classIdx].str
        self.typestr = type_table[self.typeIdx].str
        self.name = str_table[self.nameIdx].str

    def copytofile(self, file):
        """Write the record unchanged (HHI has no internal padding)."""
        file.write(struct.pack("HHI", self.classIdx, self.typeIdx, self.nameIdx))

    def printf(self):
        print ("classstr:", self.classstr)
        print ("typestr:", self.typestr)
        print ("name:", self.name)
        print ()
class DexMethodId:
    """method_id_item (8 bytes): defining class (ushort), prototype
    (ushort), method name (uint)."""

    def __init__(self):
        self.classIdx = 0  # type-id index of the defining class
        self.protoIdx = 0  # proto-id index of the method prototype
        self.nameIdx = 0   # string-id index of the method name
        self.classstr = ""
        self.name = ""

    def dump(self, addr, str_table, type_table):
        """Read the 8-byte record at *addr* and cache the referenced text."""
        self.classIdx = getWord(addr)
        self.protoIdx = getWord(addr + 2)
        # Bug fix: name_idx is a uint (4 bytes) in the dex format -- reading
        # it with getWord() truncated indices above 0xFFFF.  DexFieldId
        # already uses getDword for the same field, and copytofile packs "I".
        self.nameIdx = getDword(addr + 4)
        self.classstr = type_table[self.classIdx].str
        self.name = str_table[self.nameIdx].str

    def copytofile(self, file):
        """Write the record unchanged."""
        file.write(struct.pack("H", self.classIdx))
        file.write(struct.pack("H", self.protoIdx))
        file.write(struct.pack("I", self.nameIdx))

    def printf(self):
        print ("classstr:", self.classstr)
        print ("name:", self.name)
        print ()
class DexClassDef:
    """class_def_item (32 bytes): class type, access flags, superclass,
    interfaces, source file, annotations, class data and static values.

    The four ``*Off`` fields are raw file offsets; the matching ``*Ref``
    fields are bound to parsed objects by getreference() so copytofile()
    can emit relocated offsets.
    """
    def __init__(self):
        self.classIdx = 0        # type-id index of this class
        self.accessFlags = 0
        self.superclassIdx = 0   # type-id index of the superclass
        self.interfacesOff = 0   # raw offset of the interface TypeItem (0 = none)
        self.sourceFileIdx = 0   # string-id index, or 0xFFFFFFFF for NO_INDEX
        self.annotationsOff = 0  # raw offset of the annotations directory
        self.classDataOff = 0    # raw offset of the class_data_item
        self.staticValuesOff = 0 # raw offset of the static-values encoded array
        self.classstr = ""       # cached class descriptor
        self.superclassstr = ""  # cached superclass descriptor
        self.sourceFilestr = ""  # cached source-file name, or "NO_INDEX"
        self.interfacesRef = None
        self.annotationsRef = None
        self.classDataRef = None
        self.staticValuesRef = None
    def dump(self, addr, str_table, type_table):
        """Read the 32-byte record at *addr* and cache the referenced text."""
        self.classIdx = getDword(addr)
        self.accessFlags = getDword(addr + 4)
        self.superclassIdx = getDword(addr + 8)
        self.interfacesOff = getDword(addr + 12)
        self.sourceFileIdx = getDword(addr + 16)
        self.annotationsOff = getDword(addr + 20)
        self.classDataOff = getDword(addr + 24)
        self.staticValuesOff = getDword(addr + 28)
        self.classstr = type_table[self.classIdx].str
        self.superclassstr = type_table[self.superclassIdx].str
        if self.sourceFileIdx == 0xFFFFFFFF:
            self.sourceFilestr = "NO_INDEX"
        else:
            self.sourceFilestr = str_table[self.sourceFileIdx].str
    # get class data reference by its name, e.g. Lcom/cc/test/MainActivity;
    def getclassdefref(self, str):
        """Return this class's ClassdataItem when its descriptor equals
        *str* and it has class data; otherwise None."""
        if self.classstr == str and self.classDataOff > 0:
            return self.classDataRef
        return None
    def copytofile(self, file):
        """Write the record, substituting each relocated offset (0 where the
        corresponding reference is absent)."""
        file.write(struct.pack("I", self.classIdx))
        file.write(struct.pack("I", self.accessFlags))
        file.write(struct.pack("I", self.superclassIdx))
        if self.interfacesRef is not None:
            file.write(struct.pack("I", self.interfacesRef.start))
            # print(self.interfacesRef.start)
        else:
            file.write(struct.pack("I", 0))
        file.write(struct.pack("I", self.sourceFileIdx))
        if self.annotationsRef is not None:
            file.write(struct.pack("I", self.annotationsRef.start))
            # print(self.annotationsRef.start)
        else:
            file.write(struct.pack("I", 0))
        if self.classDataRef is not None:
            file.write(struct.pack("I", self.classDataRef.start))
        else:
            file.write(struct.pack("I", 0))
        if self.staticValuesRef is not None:
            file.write(struct.pack("I", self.staticValuesRef.start))
        else:
            file.write(struct.pack("I", 0))
    def getreference(self, dexmaplist):
        """Resolve the four raw offsets into object references; the
        annotation and static-value sections may be absent from the dex."""
        self.interfacesRef = dexmaplist[0x1001].getreference(self.interfacesOff)
        if 0x2006 in dexmaplist.keys():
            self.annotationsRef = dexmaplist[0x2006].getreference(self.annotationsOff)
        self.classDataRef = dexmaplist[0x2000].getreference(self.classDataOff)
        if 0x2005 in dexmaplist.keys():
            self.staticValuesRef = dexmaplist[0x2005].getreference(self.staticValuesOff)
    def printf(self):
        print ("classtype:", self.classIdx, self.classstr)
        print("access flag:", self.accessFlags)
        print ("superclasstype:", self.superclassIdx, self.superclassstr)
        print ("iterface off", self.interfacesOff)
        print("source file index", self.sourceFilestr)
        print("annotations off", self.annotationsOff)
        print("class data off", self.classDataOff)
        print("static values off", self.staticValuesOff)
        if self.interfacesRef is not None:
            self.interfacesRef.printf()
        if self.annotationsRef is not None:
            self.annotationsRef.printf()
        if self.classDataRef is not None:
            self.classDataRef.printf()
        if self.staticValuesRef is not None:
            self.staticValuesRef.printf()
class TypeItem:  # alignment: 4 bytes
    """type_list: a uint count followed by ``size`` ushort type indices,
    padded with one extra ushort when the count is odd so the item ends on a
    4-byte boundary."""

    def __init__(self):
        self.start = 0  # file address (after dump) / assigned offset
        self.size = 0   # number of type indices
        self.list = []  # raw type-id indices
        self.str = []   # cached descriptor text, parallel to ``list``
        self.len = 0    # byte length; dump() includes the odd padding

    def dump(self, addr, type_table):
        """Parse the list at *addr*, caching descriptor text per entry."""
        self.start = addr
        self.size = getDword(addr)
        self.len = 4 + 2 * self.size
        for i in range(0, self.size):
            type_idx = getWord(addr + 4 + 2 * i)
            self.list.append(type_idx)
            self.str.append(type_table[type_idx].str)
        if self.size % 2 == 1:
            # Odd entry count: one ushort of padding keeps 4-byte alignment.
            getWord(addr + 4 + 2 * self.size)
            self.len += 2

    def copytofile(self, file):
        """Write count, entries, and the odd-count padding ushort."""
        file.write(struct.pack("I", self.size))
        for i in range(0, self.size):
            file.write(struct.pack("H", self.list[i]))
        if self.size % 2 == 1:
            file.write(struct.pack("H", 0))

    def equal(self, param_list, length):
        """True when *param_list* (descriptor strings, *length* long)
        matches this list exactly."""
        if length != self.size:
            return False
        return all(param_list[i] == self.str[i] for i in range(self.size))

    def makeoffset(self, off):
        """Assign a 4-byte-aligned offset.  Note ``len`` here excludes the
        odd-count padding; the next item's own alignment absorbs it."""
        off += (4 - off % 4) % 4
        self.len = 4 + 2 * self.size
        self.start = off
        return off + self.len

    def printf(self):
        for i in range(0, self.size):
            print (self.list[i], self.str[i])
# alignment: 4bytes
class AnnotationsetItem:
    """annotation_set_item: a uint count followed by one uint
    annotation_item offset per entry (4-byte aligned)."""

    def __init__(self):
        self.start = 0     # file address / assigned offset
        self.len = 0       # byte length: 4 + 4 * size
        self.size = 0      # number of annotation entries
        self.entries = []  # raw annotation_item offsets
        self.ref = []      # AnnotationItem objects, set by getreference()

    def dump(self, addr):
        """Parse the set at *addr*."""
        self.start = addr
        self.size = getDword(addr)
        self.len = 4 + 4 * self.size
        for i in range(0, self.size):
            self.entries.append(getDword(addr + 4 + 4 * i))

    def copytofile(self, file):
        """Write count plus each referenced annotation's relocated offset."""
        file.write(struct.pack("I", self.size))
        for i in range(0, self.size):
            file.write(struct.pack("I", self.ref[i].start))

    def makeoffset(self, off):
        """Assign a 4-byte-aligned offset and recompute the length."""
        off += (4 - off % 4) % 4
        self.start = off
        self.len = 4 + 4 * self.size
        return off + self.len

    def getreference(self, dexmaplist):
        """Resolve each raw entry offset to an AnnotationItem."""
        for i in range(0, self.size):
            self.ref.append(dexmaplist[0x2004].getreference(self.entries[i]))

    def printf(self):
        print ("size: ", self.size)
# alignment: 4bytes
class AnnotationsetrefList:
    """annotation_set_ref_list: a uint count followed by one uint
    annotation_set_item offset per entry; 0 marks an absent set."""

    def __init__(self):
        self.start = 0
        self.size = 0
        self.list = []  # raw annotation_set_item offsets (annotations_off)
        self.ref = []   # AnnotationsetItem objects or None, via getreference()
        self.len = 0

    def dump(self, addr):
        """Parse the list at *addr*."""
        self.start = addr
        self.size = getDword(addr)
        self.len = 4 + 4 * self.size
        for i in range(0, self.size):
            self.list.append(getDword(addr + 4 + 4 * i))

    def copytofile(self, file):
        """Write count plus each relocated set offset (0 for absent sets)."""
        file.write(struct.pack("I", self.size))
        for i in range(0, self.size):
            target = self.ref[i]
            file.write(struct.pack("I", target.start if target is not None else 0))

    def makeoffset(self, off):
        """Assign a 4-byte-aligned offset and recompute the length."""
        off += (4 - off % 4) % 4
        self.start = off
        self.len = 4 + 4 * self.size
        return off + self.len

    def getreference(self, dexmaplist):
        """Resolve each raw offset to an AnnotationsetItem (None for 0)."""
        for i in range(0, self.size):
            self.ref.append(dexmaplist[0x1003].getreference(self.list[i]))

    def printf(self):
        print ("size: ", self.size)
# alignment:none
class ClassdataItem:
    """class_data_item: four uleb128 counts followed by the encoded static
    fields, instance fields, direct methods and virtual methods, in that
    order.  Unaligned; ``len`` accumulates the decoded byte length."""
    def __init__(self):
        self.start = 0  # file address (dump) / assigned offset (makeoffset)
        self.len = 0    # total encoded byte length
        self.static_field_size = 0
        self.instance_fields_size = 0
        self.direct_methods_size = 0
        self.virtual_methods_size = 0
        self.static_fields = []    # Encodedfield entries
        self.instance_fields = []  # Encodedfield entries
        self.direct_methods = []   # Encodedmethod entries
        self.virtual_methods = []  # Encodedmethod entries
    def dump(self, addr, code_table):
        """Decode the item at *addr*; dumped code items are appended to
        *code_table* by the Encodedmethod entries."""
        self.start = addr
        self.static_field_size, length = readunsignedleb128(addr)
        self.len += length
        self.instance_fields_size, length = readunsignedleb128(addr + self.len)
        self.len += length
        self.direct_methods_size, length = readunsignedleb128(addr + self.len)
        self.len += length
        self.virtual_methods_size, length = readunsignedleb128(addr + self.len)
        self.len += length
        print("class item", self.static_field_size, self.instance_fields_size, self.direct_methods_size, self.virtual_methods_size)
        for i in range(0, self.static_field_size):
            field = Encodedfield()
            field.dump(addr + self.len)
            self.len += field.len
            self.static_fields.append(field)
        for i in range(0, self.instance_fields_size):
            field = Encodedfield()
            field.dump(addr + self.len)
            self.len += field.len
            self.instance_fields.append(field)
        for i in range(0, self.direct_methods_size):
            method = Encodedmethod()
            method.dump(addr + self.len, code_table)
            self.len += method.len
            self.direct_methods.append(method)
        for i in range(0, self.virtual_methods_size):
            method = Encodedmethod()
            method.dump(addr + self.len, code_table)
            self.len += method.len
            self.virtual_methods.append(method)
    def copytofile(self, file):
        """Re-encode the item at its assigned offset, in the same order it
        was decoded."""
        file.seek(self.start)
        writeunsignedleb128(self.static_field_size, file)
        writeunsignedleb128(self.instance_fields_size, file)
        writeunsignedleb128(self.direct_methods_size, file)
        writeunsignedleb128(self.virtual_methods_size, file)
        for i in range(0, self.static_field_size):
            self.static_fields[i].copytofile(file)
        for i in range(0, self.instance_fields_size):
            self.instance_fields[i].copytofile(file)
        for i in range(0, self.direct_methods_size):
            self.direct_methods[i].copytofile(file)
        for i in range(0, self.virtual_methods_size):
            self.virtual_methods[i].copytofile(file)
    # besides adding references, also reconstructs each member's absolute
    # index from the running sum of the encoded *_idx_diff deltas
    def getreference(self, dexmaplist):
        """Resolve method code references and compute absolute field/method
        indices (each encoded value is a delta against the previous one)."""
        last = 0
        for i in range(0, self.static_field_size):
            self.static_fields[i].field_idx = last + self.static_fields[i].field_idx_diff
            last = self.static_fields[i].field_idx
        last = 0
        for i in range(0, self.instance_fields_size):
            self.instance_fields[i].field_idx = last + self.instance_fields[i].field_idx_diff
            last = self.instance_fields[i].field_idx
        last = 0
        for i in range(0, self.direct_methods_size):
            self.direct_methods[i].getreference(dexmaplist)
            self.direct_methods[i].method_idx = last + self.direct_methods[i].method_idx_diff
            last = self.direct_methods[i].method_idx
        last = 0
        for i in range(0, self.virtual_methods_size):
            self.virtual_methods[i].getreference(dexmaplist)
            self.virtual_methods[i].method_idx = last + self.virtual_methods[i].method_idx_diff
            last = self.virtual_methods[i].method_idx
    def makeoffset(self, off):
        """Assign offsets to this item and all its members; returns the
        offset just past the item."""
        self.start = off
        off += unsignedleb128forlen(self.static_field_size)
        off += unsignedleb128forlen(self.instance_fields_size)
        off += unsignedleb128forlen(self.direct_methods_size)
        off += unsignedleb128forlen(self.virtual_methods_size)
        for i in range(0, self.static_field_size):
            off = self.static_fields[i].makeoffset(off)
        for i in range(0, self.instance_fields_size):
            off = self.instance_fields[i].makeoffset(off)
        for i in range(0, self.direct_methods_size):
            off = self.direct_methods[i].makeoffset(off)
        for i in range(0, self.virtual_methods_size):
            off = self.virtual_methods[i].makeoffset(off)
        self.len = off - self.start
        return off
    def printf(self):
        print ("static field size: ", self.static_field_size)
        print ("instance fields size: ", self.instance_fields_size)
        print ("direct methods size: ", self.direct_methods_size)
        print ("virtual methods size: ", self.virtual_methods_size)
        for i in range(0, self.static_field_size):
            self.static_fields[i].printf()
        for i in range(0, self.instance_fields_size):
            self.instance_fields[i].printf()
        for i in range(0, self.direct_methods_size):
            self.direct_methods[i].printf()
        for i in range(0, self.virtual_methods_size):
            self.virtual_methods[i].printf()
class Encodedfield:
    """encoded_field: two uleb128 values (field_idx_diff, access_flags).

    ``field_idx`` is reconstructed later by ClassdataItem.getreference()
    from the running sum of the diffs.
    """

    def __init__(self):
        self.start = 0
        self.len = 0
        self.field_idx_diff = 0  # delta against the previous field's index
        self.access_flags = 0
        self.field_idx = 0  # absolute index; set later by the owner

    def dump(self, addr):
        """Decode the two uleb128 values at *addr*."""
        self.start = addr
        self.field_idx_diff, length = readunsignedleb128(addr)
        self.len += length
        self.access_flags, length = readunsignedleb128(addr + self.len)
        self.len += length

    def __lt__(self, other):  # for sort
        return self.field_idx_diff < other.field_idx_diff

    def copytofile(self, file):
        writeunsignedleb128(self.field_idx_diff, file)
        writeunsignedleb128(self.access_flags, file)

    def makeoffset(self, off):
        """Assign *off* as this entry's offset and recompute its length.

        Bug fix: this previously used ``self.len +=`` which double-counted
        the length whenever makeoffset() ran after dump() had already set
        ``len`` (every sibling item class assigns the length fresh here),
        corrupting all subsequent offsets.
        """
        self.start = off
        self.len = unsignedleb128forlen(self.field_idx_diff)
        self.len += unsignedleb128forlen(self.access_flags)
        return off + self.len

    def printf(self):
        print ("diff: ", self.field_idx_diff)
        print ("access: ", self.access_flags)
class Encodedmethod:
    """encoded_method: three uleb128 values (method_idx_diff, access_flags,
    code_off) plus the CodeItem the code offset points at.

    ``method_idx`` is reconstructed later by ClassdataItem.getreference()
    from the running sum of the diffs.
    """

    def __init__(self):
        self.start = 0
        self.len = 0
        self.method_idx_diff = 0  # delta against the previous method's index
        self.access_flags = 0
        self.code_off = 0   # raw code_item offset; 0 = no code (abstract/native)
        self.method_idx = 0  # absolute index; set later by the owner
        self.coderef = None  # CodeItem object
        self.modified = 0   # when 1, serialize code_off as zero (detach the code)

    def dump(self, addr, code_table):
        """Decode the entry at *addr*; a non-zero code_off triggers an
        immediate CodeItem dump which is also appended to *code_table*."""
        self.start = addr
        self.method_idx_diff, length = readunsignedleb128(addr)
        self.len += length
        self.access_flags, length = readunsignedleb128(addr + self.len)
        self.len += length
        self.code_off, length = readunsignedleb128(addr + self.len)
        self.len += length
        if self.code_off != 0:
            self.coderef = CodeItem()
            self.coderef.dump(int(self.code_off + baseAddr) & 0xFFFFFFFF)
            code_table.append(self.coderef)

    def copytofile(self, file):
        """Re-encode the entry with the relocated code offset (or 0)."""
        writeunsignedleb128(self.method_idx_diff, file)
        writeunsignedleb128(self.access_flags, file)
        if self.modified == 1:
            writeunsignedleb128(0, file)
        elif self.coderef is not None:
            writeunsignedleb128(self.coderef.start, file)
        else:
            writeunsignedleb128(0, file)

    def makeoffset(self, off):
        """Assign *off* as this entry's offset and recompute its length.

        Bug fix: this previously used ``self.len +=`` which double-counted
        the length whenever makeoffset() ran after dump() had already set
        ``len`` (sibling item classes assign the length fresh here).
        """
        self.start = off
        self.len = unsignedleb128forlen(self.method_idx_diff)
        self.len += unsignedleb128forlen(self.access_flags)
        if self.modified == 1:
            self.len += unsignedleb128forlen(0)
        elif self.coderef is not None:
            self.len += unsignedleb128forlen(self.coderef.start)
        else:
            self.len += unsignedleb128forlen(0)
        return off + self.len

    def getreference(self, dexmaplist):
        """Rebind ``coderef`` via the code-item section's offset lookup."""
        self.coderef = dexmaplist[0x2001].getreference(self.code_off)

    def printf(self):
        print ("method_idx_diff: ", self.method_idx_diff)
        print("method idx:", self.method_idx)
        print ("access: ", self.access_flags)
        print ("code off: ", self.code_off)
# alignment: 4bytes
class CodeItem:
    """code_item (4-byte aligned): a 16-byte header, the bytecode units, and
    -- when tries_size > 0 -- optional 2-byte padding, the try_items and the
    encoded handler list."""
    def __init__(self):
        self.start = 0          # file address (dump) / assigned offset
        self.len = 0            # total byte length including trailing padding
        self.register_size = 0
        self.ins_size = 0
        self.outs_size = 0
        self.tries_size = 0
        self.debug_info_off = 0  # raw offset; relocated via debugRef on write
        self.insns_size = 0      # bytecode length in 16-bit units
        self.insns = []          # raw 16-bit code units
        self.debugRef = None     # DebugInfo object, set by getreference()
        self.padding = 0         # ushort written between insns and tries
        self.tries = []          # TryItem entries
        self.handler = None      # EncodedhandlerList when tries_size > 0
    def dump(self, addr):
        """Decode the code item at *addr*, including tries/handlers and the
        final 4-byte alignment padding."""
        self.start = addr
        self.register_size = getWord(addr)
        self.ins_size = getWord(addr + 2)
        self.outs_size = getWord(addr + 4)
        self.tries_size = getWord(addr + 6)
        self.debug_info_off = getDword(addr + 8)
        self.insns_size = getDword(addr + 12)
        self.len += 16
        print(self.start, self.register_size, self.ins_size, self.outs_size, self.tries_size, self.debug_info_off, self.insns_size)
        for i in range(0, self.insns_size):
            self.insns.append(getWord(addr + self.len + 2 * i))
        self.len += 2 * self.insns_size
        # A 2-byte pad sits between insns and tries when insns_size is odd.
        if self.tries_size != 0 and self.insns_size % 2 == 1:
            self.len += 2
        for i in range(0, self.tries_size):
            tryitem = TryItem()
            tryitem.dump(addr + self.len + 8 * i)
            self.tries.append(tryitem)
        self.len += 8 * self.tries_size
        if self.tries_size != 0:
            self.handler = EncodedhandlerList()
            self.handler.dump(addr + self.len)
            self.len += self.handler.len
        # Round the total length up to the item's 4-byte alignment.
        align = self.len % 4
        if align != 0:
            self.len += (4 - align)
    def copytofile(self, file):
        """Write the item at its assigned offset, substituting the relocated
        debug-info offset and re-padding to 4 bytes."""
        file.seek(self.start, 0)
        file.write(struct.pack("H", self.register_size))
        file.write(struct.pack("H", self.ins_size))
        file.write(struct.pack("H", self.outs_size))
        file.write(struct.pack("H", self.tries_size))
        if self.debugRef is not None:
            file.write(struct.pack("I", self.debugRef.start))
        else:
            file.write(struct.pack("I", 0))
        file.write(struct.pack("I", self.insns_size))
        for i in range(0, self.insns_size):
            file.write(struct.pack("H", self.insns[i]))
        if self.tries_size != 0 and self.insns_size % 2 == 1:
            file.write(struct.pack("H", self.padding))
        for i in range(0, self.tries_size):
            self.tries[i].copytofile(file)
        if self.tries_size != 0:
            self.handler.copytofile(file)
        align = file.tell() % 4  # for alignment
        if align != 0:
            for i in range(0, 4-align):
                file.write(struct.pack("B", 0))
        # print("code item addr:", file.tell())
    def makeoffset(self, off):
        """Assign a 4-byte-aligned offset and recompute the length from the
        header, insns, padding, tries and handler list."""
        align = off % 4
        if align != 0:
            off += (4 - align)
        self.start = off
        off += (4 * 2 + 2 * 4)  # 4 ushort and 2 uint
        off += (2 * self.insns_size)
        if self.tries_size != 0 and self.insns_size % 2 == 1:  # for padding
            off += 2
        for i in range(0, self.tries_size):
            off = self.tries[i].makeoffset(off)
        if self.tries_size != 0:
            off = self.handler.makeoffset(off)
        self.len = off - self.start
        return off
    def getreference(self, dexmaplist):
        """Bind ``debugRef`` to the DebugInfo at debug_info_off."""
        self.debugRef = dexmaplist[0x2003].getreference(self.debug_info_off)
    def printf(self):
        print("registers_size:", self.register_size)
        print("ins_size, outs_size, tries_size:", self.ins_size, self.outs_size, self.tries_size)
        print("debug info of:", self.debug_info_off)
        print("insn_size:", self.insns_size)
        for i in range(0, self.insns_size):
            print(self.insns[i])
        # tmp = Instruction.InstructionSet(self.insns)
        # tmp.printf()
class TryItem:
    """try_item (8 bytes): start_addr uint, insn_count ushort, handler_off
    ushort."""

    def __init__(self):
        self.start = 0
        self.len = 8
        self.start_addr = 0   # first covered instruction, in 16-bit units
        self.insn_count = 0   # number of covered code units
        self.handler_off = 0  # byte offset into the encoded handler list

    def dump(self, addr):
        """Read the 8-byte record at *addr*."""
        self.start = addr
        self.start_addr = getDword(addr)
        self.insn_count = getWord(addr + 4)
        self.handler_off = getWord(addr + 6)

    def copytofile(self, file):
        """Write the record unchanged (IHH has no internal padding)."""
        file.write(struct.pack("IHH", self.start_addr, self.insn_count, self.handler_off))

    def makeoffset(self, off):
        """Assign *off*; the record is always 8 bytes."""
        self.start = off
        self.len = 4 + 2 + 2
        return off + self.len

    def printf(self):
        print ("start_Addr: ", self.start_addr)
        print ("insn_count: ", self.insn_count)
        print ("handler_off: ", self.handler_off)
        print ()
class EncodedhandlerList:
    """encoded_catch_handler_list: a uleb128 count followed by that many
    EncodedhandlerItem entries."""

    def __init__(self):
        self.start = 0
        self.len = 0
        self.size = 0
        self.list = []  # EncodedhandlerItem entries

    def dump(self, addr):
        """Decode the list at *addr*, accumulating the byte length."""
        self.start = addr
        self.size, length = readunsignedleb128(addr)
        self.len += length
        for _ in range(self.size):
            handler = EncodedhandlerItem()
            handler.dump(addr + self.len)
            self.len += handler.len
            self.list.append(handler)

    def copytofile(self, file):
        """Re-encode the list at its assigned offset."""
        file.seek(self.start, 0)
        writeunsignedleb128(self.size, file)
        for i in range(0, self.size):
            self.list[i].copytofile(file)

    def makeoffset(self, off):
        """Assign offsets to the list and every handler entry."""
        self.start = off
        off += unsignedleb128forlen(self.size)
        for i in range(0, self.size):
            off = self.list[i].makeoffset(off)
        return off
class EncodedhandlerItem:
    """encoded_catch_handler: a signed leb128 size, abs(size) typed
    handlers, and -- when size <= 0 -- a catch-all address."""

    def __init__(self):
        self.start = 0
        self.len = 0
        self.size = 0       # signed: <= 0 means a catch-all entry follows
        self.handlers = []  # EncodedTypeAddrPair entries
        self.catch_all_addr = 0

    def dump(self, addr):
        """Decode the handler at *addr*, accumulating the byte length."""
        self.start = addr
        self.size, length = readsignedleb128(addr)
        self.len += length
        for _ in range(abs(self.size)):
            pair = EncodedTypeAddrPair()
            pair.dump(addr + self.len)
            self.len += pair.len
            self.handlers.append(pair)
        if self.size <= 0:
            self.catch_all_addr, length = readunsignedleb128(addr + self.len)
            self.len += length

    def copytofile(self, file):
        """Re-encode the handler in the same layout it was decoded."""
        writesignedleb128(self.size, file)
        for i in range(0, abs(self.size)):
            self.handlers[i].copytofile(file)
        if self.size <= 0:
            writeunsignedleb128(self.catch_all_addr, file)

    def makeoffset(self, off):
        """Assign *off* and recompute the encoded byte length."""
        self.start = off
        off += signedleb128forlen(self.size)
        for i in range(0, abs(self.size)):
            off = self.handlers[i].makeoffset(off)
        if self.size <= 0:
            off += unsignedleb128forlen(self.catch_all_addr)
        self.len = off - self.start
        return off
class EncodedTypeAddrPair:
    """encoded_type_addr_pair: two uleb128 values -- the caught exception's
    type index and the handler's bytecode address."""

    def __init__(self):
        self.type_idx = 0  # type-id index of the caught exception
        self.addr = 0      # bytecode address of the handler
        self.len = 0       # encoded byte length (set by dump)

    def dump(self, addr):
        """Decode both uleb128 values starting at *addr*."""
        self.type_idx, length = readunsignedleb128(addr)
        self.len += length
        self.addr, length = readunsignedleb128(addr + self.len)
        self.len += length

    def copytofile(self, file):
        writeunsignedleb128(self.type_idx, file)
        writeunsignedleb128(self.addr, file)

    def makeoffset(self, off):
        # Only the running offset advances; this pair keeps no start field.
        return off + unsignedleb128forlen(self.type_idx) + unsignedleb128forlen(self.addr)

    def printf(self):
        print ("type idx: ", self.type_idx)
        print ("addr: ", self.addr)
        print ()
class StringData:
    """string_data_item: a uleb128 length header followed by the string's
    bytes and a terminating NUL.

    ``size`` is the decoded length from the header; ``str`` holds the raw
    payload byte values without the terminator.
    """

    def __init__(self):
        self.start = 0
        self.len = 0
        self.size = 0
        self.str = []  # payload byte values, NUL excluded

    def dump(self, addr):
        """Decode the item at *addr*; ``len`` includes the trailing NUL."""
        self.start = addr
        self.size, length = readunsignedleb128(addr)
        self.len += length
        while 1:
            onebyte = getByte(addr + self.len)
            self.len += 1
            if onebyte == 0:
                break
            self.str.append(onebyte)

    def copytofile(self, file):
        """Re-encode header, payload bytes and the terminating NUL."""
        writeunsignedleb128(self.size, file)
        for i in range(0, len(self.str)):
            file.write(struct.pack("B", self.str[i]))
        file.write(struct.pack("B", 0))

    def makeoffset(self, off):
        """Assign *off*; NOTE(review): unlike dump(), ``len`` here excludes
        the trailing NUL -- the return value accounts for it separately."""
        self.start = off
        self.len = len(self.str) + unsignedleb128forlen(self.size)
        return off + self.len + 1  # 1 byte for '\0'

    def modify(self, str):
        """Replace the payload with the text *str*.

        Bug fix: ``bytearray(str)`` raises TypeError on Python 3 for a text
        argument -- an explicit encoding is required.  ``size`` is set to the
        character count, which matches the dex header's UTF-16 length for
        BMP text; supplementary characters would need MUTF-8 handling --
        TODO confirm callers only pass ASCII.
        """
        self.size = len(str)
        self.str = bytearray(str, "utf-8")

    def printf(self):
        print (getstr(self.str))
# alignment: none
class DebugInfo:
def __init__(self):
self.start = 0
self.len = 0
self.line_start = 0
self.parameters_size = 0
self.parameter_names = []
self.debug = []
def dump(self, addr):
self.start = addr
self.line_start, length = readunsignedleb128(addr)
self.len += length
self.parameters_size, length = readunsignedleb128(addr + self.len)
self.len += length
for i in range(0, self.parameters_size):
num, length = readunsignedleb128p1(addr + self.len)
self.len += length
self.parameter_names.append(num)
while 1:
onebyte = getByte(addr + self.len)
self.len += 1
self.debug.append(onebyte)
if onebyte == 0:
break
elif onebyte == 1 or onebyte == 5 or onebyte == 6:
num, length = readunsignedleb128(addr + self.len)
self.len += length
self.debug.append(num)
elif onebyte == 2:
num, length = readsignedleb128(addr + self.len)
self.len += length
self.debug.append(num)
elif onebyte == 3:
num, length = readunsignedleb128(addr + self.len)
self.len += length
self.debug.append(num)
num, length = readunsignedleb128p1(addr + self.len)
self.len += length
self.debug.append(num)
num, length = readunsignedleb128p1(addr + self.len)
self.len += length
self.debug.append(num)
elif onebyte == 4:
num, length = readunsignedleb128(addr + self.len)
self.len += length
self.debug.append(num)
num, length = readunsignedleb128p1(addr + self.len)
self.len += length
self.debug.append(num)
num, length = readunsignedleb128p1(addr + self.len)
self.len += length
self.debug.append(num)
num, length = readunsignedleb128p1(addr + self.len)
self.len += length
self.debug.append(num)
elif onebyte == 9:
num, length = readunsignedleb128p1(addr + self.len)
self.len += length
self.debug.append(num)
def adddebugitem(self, linestart, paramsize, names_list, debug_list):
self.line_start = linestart
self.parameters_size = paramsize
self.parameter_names = names_list
self.debug = debug_list
def copytofile(self, file):
file.seek(self.start, 0)
writeunsignedleb128(self.line_start, file)
writeunsignedleb128(self.parameters_size, file)
for i in range(0, self.parameters_size):
# print(self.parameter_names[i])
# if i == self.parameters_size-1:
# writeunsignedleb128p1alignshort(self.parameter_names[i], file)
# else:
writeunsignedleb128p1(self.parameter_names[i], file)
index = 0
while 1:
onebyte = self.debug[index]
file.write(struct.pack("B", onebyte))
index += 1
if onebyte == 0:
break
elif onebyte == 1:
writeunsignedleb128(self.debug[index], file)
index += 1
elif onebyte == 2:
writesignedleb128(self.debug[index], file)
index += 1
elif onebyte == 3:
writeunsignedleb128(self.debug[index], file)
writeunsignedleb128p1(self.debug[index+1], file)
writeunsignedleb128p1(self.debug[index+2], file)
index += 3
elif onebyte == 4:
writeunsignedleb128(self.debug[index], file)
writeunsignedleb128p1(self.debug[index+1], file)
writeunsignedleb128p1(self.debug[index+2], file)
writeunsignedleb128p1(self.debug[index+3], file)
index += 4
elif onebyte == 5:
writeunsignedleb128(self.debug[index], file)
index += 1
elif onebyte == 6:
writeunsignedleb128(self.debug[index], file)
index += 1
elif onebyte == 9:
writeunsignedleb128p1(self.debug[index], file)
index += 1
def printf(self):
print(self.line_start, self.parameters_size)
def makeoffset(self, off):
    """Recompute this item's file offset and length for relocation.

    Mirrors copytofile(): walks the same fields and the same opcode
    stream, but only accumulates encoded lengths instead of writing
    bytes.  Returns the offset just past the item.
    """
    self.start = off
    off += unsignedleb128forlen(self.line_start)
    off += unsignedleb128forlen(self.parameters_size)
    for i in range(0, self.parameters_size):
        off += unsignedleb128p1forlen(self.parameter_names[i])
    index = 0
    while 1:
        onebyte = self.debug[index]
        off += 1  # the opcode byte itself
        index += 1
        if onebyte == 0:  # end of stream
            break
        elif onebyte == 1:
            off += unsignedleb128forlen(self.debug[index])
            index += 1
        elif onebyte == 2:
            off += signedleb128forlen(self.debug[index])
            index += 1
        elif onebyte == 3:
            off += unsignedleb128forlen(self.debug[index])
            off += unsignedleb128p1forlen(self.debug[index+1])
            off += unsignedleb128p1forlen(self.debug[index+2])
            index += 3
        elif onebyte == 4:
            off += unsignedleb128forlen(self.debug[index])
            off += unsignedleb128p1forlen(self.debug[index+1])
            off += unsignedleb128p1forlen(self.debug[index+2])
            off += unsignedleb128p1forlen(self.debug[index+3])
            index += 4
        elif onebyte == 5:
            off += unsignedleb128forlen(self.debug[index])
            index += 1
        elif onebyte == 6:
            off += unsignedleb128forlen(self.debug[index])
            index += 1
        elif onebyte == 9:
            off += unsignedleb128p1forlen(self.debug[index])
            index += 1
    self.len = off - self.start
    return off
# alignment: none
class AnnotationItem:
    """One annotation_item: a visibility byte followed by an
    encoded_annotation.  Alignment: none."""
    Visibity = {0: 'VISIBITITY_BUILD', 1: 'VISIBILITY_RUNTIME', 2: 'VISIBILITY_SYSTEM'}

    def __init__(self):
        self.start = 0
        self.len = 0
        self.visibility = 0
        self.annotation = EncodedAnnotation()

    def dump(self, addr):
        """Parse the annotation_item at addr."""
        self.start = addr
        self.visibility = getByte(addr)
        self.annotation.dump(addr + 1)
        self.len = 1 + self.annotation.len

    def copytofile(self, file):
        """Append the visibility byte, then the nested annotation."""
        file.write(struct.pack("B", self.visibility))
        self.annotation.copytofile(file)

    def makeoffset(self, off):
        """Record the relocated offset/length; return the end offset."""
        self.start = off
        off = self.annotation.makeoffset(off + 1)
        self.len = off - self.start
        return off
class EncodedAnnotation:
    """encoded_annotation: type_idx and element count (both ULEB128)
    followed by `size` annotation_elements."""

    def __init__(self):
        self.len = 0
        self.type_idx = 0
        self.size = 0
        self.elements = []

    def dump(self, addr):
        """Parse the encoded_annotation at addr, accumulating self.len."""
        self.type_idx, consumed = readunsignedleb128(addr)
        self.len += consumed
        self.size, consumed = readunsignedleb128(addr + self.len)
        self.len += consumed
        self.elements = []
        for _ in range(self.size):
            elem = AnnotationElement()
            elem.dump(addr + self.len)
            self.len += elem.len
            self.elements.append(elem)

    def copytofile(self, file):
        """Append type_idx, size and every element in order."""
        writeunsignedleb128(self.type_idx, file)
        writeunsignedleb128(self.size, file)
        for idx in range(self.size):
            self.elements[idx].copytofile(file)

    def makeoffset(self, off):
        """Return the offset just past this annotation's encoding."""
        off += unsignedleb128forlen(self.type_idx)
        off += unsignedleb128forlen(self.size)
        for idx in range(self.size):
            off = self.elements[idx].makeoffset(off)
        return off
class AnnotationElement:
    """annotation_element: a ULEB128 name_idx followed by an encoded_value."""

    def __init__(self):
        self.len = 0
        self.name_idx = 0
        self.value = EncodedValue()

    def dump(self, addr):
        """Parse the element at addr, accumulating self.len."""
        self.name_idx, consumed = readunsignedleb128(addr)
        self.len += consumed
        self.value.dump(addr + self.len)
        self.len += self.value.len

    def copytofile(self, file):
        """Append name_idx, then the encoded value."""
        writeunsignedleb128(self.name_idx, file)
        self.value.copytofile(file)

    def makeoffset(self, off):
        """Return the offset just past this element's encoding."""
        off += unsignedleb128forlen(self.name_idx)
        return self.value.makeoffset(off)
class EncodedValue:
    """encoded_value: a header byte ((value_arg << 5) | value_type)
    followed by arg+1 payload bytes, or by a nested encoded_array /
    encoded_annotation for the aggregate types."""
    def __init__(self):
        self.len = 0
        self.onebyte = 0   # raw header byte
        self.type = 0      # low 5 bits: value_type
        self.arg = 0       # high 3 bits: value_arg (payload size - 1)
        self.value = []    # raw payload bytes, or one nested object
    def dump(self, addr):
        """Parse the encoded_value at addr; self.len covers header + payload.

        For the scalar types the `if self.arg >= N` checks only warn about
        a payload larger than that type allows; parsing continues anyway.
        """
        self.onebyte = getByte(addr)
        self.type = self.onebyte & 0x1F
        self.arg = (self.onebyte >> 5) & 0x7
        if self.type == 0x00:
            # print 'here 0x00 VALUE_BYTE in class : ' + str(curClass_idx)
            if self.arg != 0:
                print ("[-] Ca ,get error in VALUE_BYTE")
            self.value.append(getByte(addr + 1))
            self.len = 2
        elif self.type == 0x02:
            # print 'here 0x02 VALUE_SHORT in class : ' + str(curClass_idx)
            if self.arg >= 2:
                print ("[-] Ca ,get error in VALUE_SHORT at class : ")
            for i in range(0, self.arg+1):
                self.value.append(getByte(addr + 1 + i))
            self.len = self.arg + 2
        elif self.type == 0x03:
            # print 'here 0x03 VALUE_CHAR in class : ' + str(curClass_idx)
            for i in range(0, self.arg+1):
                self.value.append(getByte(addr + 1 + i))
            self.len = self.arg + 2
        elif self.type == 0x04:
            # print 'here 0x04 VALUE_INT in class : ' + str(curClass_idx)
            if self.arg >= 4:
                print ("[-] Ca ,get error in VALUE_INT at class : ")
            for i in range(0, self.arg+1):
                self.value.append(getByte(addr + 1 + i))
            self.len = self.arg + 2
        elif self.type == 0x06:
            # print 'here 0x06 VALUE_LONG in class : ' + str(curClass_idx)
            if self.arg >= 8:
                print ("[-] Ca ,get error in VALUE_LONG at class : ")
            for i in range(0, self.arg+1):
                self.value.append(getByte(addr + 1 + i))
            self.len = self.arg + 2
        elif self.type == 0x10:
            # print 'here 0x10 VALUE_FLOAT in class : ' + str(curClass_idx)
            if self.arg >= 4:
                print ("[-] Ca ,get error in VALUE_FLOAT at class : ")
            for i in range(0, self.arg+1):
                self.value.append(getByte(addr + 1 + i))
            self.len = self.arg + 2
        elif self.type == 0x11:
            # print 'here 0x11 VALUE_DOUBLE in class : ' + str(curClass_idx)
            if self.arg >= 8:
                print ("[-] Ca ,get error in VALUE_DOUBLE at class : ")
            for i in range(0, self.arg+1):
                self.value.append(getByte(addr + 1 + i))
            self.len = self.arg + 2
        elif self.type == 0x17:
            # print 'here 0x17 VALUE_STRING in class : ' + str(curClass_idx)
            if self.arg >= 4:
                print ("[-] Ca ,get error in VALUE_STRING at class : ", hex(addr))
            for i in range(0, self.arg+1):
                self.value.append(getByte(addr + 1 + i))
            self.len = self.arg + 2
        elif self.type == 0x18:
            # print 'here 0x18 VALUE_TYPE in class : ' + str(curClass_idx)
            if self.arg >= 4:
                print ("[-] Ca ,get error in VALUE_TYPE at class : ")
            for i in range(0, self.arg+1):
                self.value.append(getByte(addr + 1 + i))
            self.len = self.arg + 2
        elif self.type == 0x19:
            # print 'here 0x19 VALUE_FIELD in class : ' + str(curClass_idx)
            if self.arg >= 4:
                print ("[-] Ca ,get error in VALUE_FIELD at class : ")
            for i in range(0, self.arg+1):
                self.value.append(getByte(addr + 1 + i))
            self.len = self.arg + 2
        elif self.type == 0x1a:
            # print 'here 0x1a VALUE_METHOD in class : ' + str(curClass_idx)
            if self.arg >= 4:
                print ("[-] Ca ,get error in VALUE_METHOD at class : ")
            for i in range(0, self.arg+1):
                self.value.append(getByte(addr + 1 + i))
            self.len = self.arg + 2
        elif self.type == 0x1b:
            # print 'here 0x1b VALUE_ENUM in class : ' + str(curClass_idx)
            if self.arg >= 4:
                print ("[-] Ca ,get error in VALUE_ENUM at class : ")
            for i in range(0, self.arg+1):
                self.value.append(getByte(addr + 1 + i))
            self.len = self.arg + 2
        elif self.type == 0x1c:
            # print 'here 0x1c VALUE_ARRAY in class : ' + str(curClass_idx)
            if self.arg != 0x00:
                print ("[-] Ca ,get error in VALUE_ARRAY")
            array = EncodedArray()
            array.dump(addr + 1)
            self.len = array.len + 1
            self.value.append(array)
        elif self.type == 0x1d:
            # print 'here 0x1d VALUE_ANNOTATION in class : ' + str(curClass_idx)
            anno = EncodedAnnotation()
            anno.dump(addr + 1)
            self.len = anno.len + 1
            self.value.append(anno)
        else:
            # VALUE_NULL / VALUE_BOOLEAN: no payload, header byte only
            self.len = 1
            # if case(0x1e):
            #     print 'here 0x1e VALUE_NULL in class : ' + str(curClass_idx)
            #     break
            # if case(0x1f):
            #     print 'here 0x1f VALUE_BOOLEAN in class : ' + str(curClass_idx)
            #     break
    def copytofile(self, file):
        """Append the header byte, then the payload bytes / nested object."""
        file.write(struct.pack("B", self.onebyte))
        if self.type <= 0x1b:
            for i in range(0, self.arg+1):
                file.write(struct.pack("B", self.value[i]))
        elif self.type == 0x1c:
            self.value[0].copytofile(file)
        elif self.type == 0x1d:
            self.value[0].copytofile(file)
    def makeoffset(self, off):
        """Return the offset just past this value's encoding."""
        off += 1
        if self.type <= 0x1b:
            off += self.arg+1
        elif self.type == 0x1c:
            off = self.value[0].makeoffset(off)
        elif self.type == 0x1d:
            off = self.value[0].makeoffset(off)
        return off
    def printf(self):
        """Debug print: the decoded type and arg fields."""
        print("encoded value :", self.type, self.arg)
class EncodedArray:
    """encoded_array: a ULEB128 size followed by `size` encoded_values."""

    def __init__(self):
        self.size = 0
        self.len = 0
        self.values = []

    def dump(self, addr):
        """Parse the encoded_array at addr, accumulating self.len."""
        self.size, consumed = readunsignedleb128(addr)
        self.len += consumed
        for _ in range(self.size):
            val = EncodedValue()
            val.dump(addr + self.len)
            self.len += val.len
            self.values.append(val)

    def copytofile(self, file):
        """Append the size, then every value in order."""
        writeunsignedleb128(self.size, file)
        for idx in range(self.size):
            self.values[idx].copytofile(file)

    def makeoffset(self, off):
        """Return the offset just past this array's encoding."""
        off += unsignedleb128forlen(self.size)
        for idx in range(self.size):
            off = self.values[idx].makeoffset(off)
        return off

    def printf(self):
        print("encoded array size", self.size)
# alignment: none
class EncodedArrayItem:
    """encoded_array_item: a bare encoded_array.  Alignment: none."""

    def __init__(self):
        self.start = 0
        self.len = 0
        self.value = EncodedArray()

    def dump(self, addr):
        """Parse the wrapped array at addr."""
        self.start = addr
        self.len = 0
        self.value.dump(addr)
        self.len = self.value.len

    def copytofile(self, file):
        """Append the wrapped array."""
        self.value.copytofile(file)

    def makeoffset(self, off):
        """Record the relocated offset/length; return the end offset."""
        self.start = off
        off = self.value.makeoffset(off)
        self.len = off - self.start
        return off

    def printf(self):
        print("None for EncodedArrayItem by now")
# alignment: 4 bytes
class AnnotationsDirItem:
    """annotations_directory_item: a 16-byte header (class annotations
    offset and three counts) followed by the field / method / parameter
    annotation records, 8 bytes each.  Alignment: 4 bytes."""

    def __init__(self):
        self.start = 0
        self.len = 0
        self.class_annotations_off = 0
        self.fields_size = 0
        self.annotated_methods_size = 0
        self.annotate_parameters_size = 0
        self.field_annotations = []      # field_annotation[fields_size]
        self.method_annotations = []     # method_annotation[annotated_methods_size]
        self.parameter_annotations = []  # parameter_annotation[annotate_parameters_size]
        self.class_annotations_ref = None  # resolved object, set by getreference()

    def dump(self, addr):
        """Parse the directory at addr, accumulating self.len."""
        self.start = addr
        self.class_annotations_off = getDword(addr)
        self.fields_size = getDword(addr + 4)
        self.annotated_methods_size = getDword(addr + 8)
        self.annotate_parameters_size = getDword(addr + 12)
        self.len = 16
        for i in range(0, self.fields_size):
            field = FieldAnnotation()
            field.dump(addr + self.len + 8 * i)
            self.field_annotations.append(field)
        self.len += 8 * self.fields_size
        for i in range(0, self.annotated_methods_size):
            method = MethodAnnotation()
            method.dump(addr + self.len + 8 * i)
            self.method_annotations.append(method)
        self.len += 8 * self.annotated_methods_size
        for i in range(0, self.annotate_parameters_size):
            param = ParamterAnnotation()
            # BUG FIX: was `param.dump(addr + 8 * i)` — unlike the field and
            # method loops it forgot `self.len`, so it re-parsed the header
            # area instead of the parameter_annotation records.
            param.dump(addr + self.len + 8 * i)
            self.parameter_annotations.append(param)
        self.len += 8 * self.annotate_parameters_size

    def copytofile(self, file):
        """Append the header (with the relocated class-annotation offset)
        and every annotation record."""
        if self.class_annotations_ref is not None:
            file.write(struct.pack("I", self.class_annotations_ref.start))
        else:
            file.write(struct.pack("I", self.class_annotations_off))
        file.write(struct.pack("I", self.fields_size))
        file.write(struct.pack("I", self.annotated_methods_size))
        file.write(struct.pack("I", self.annotate_parameters_size))
        for i in range(0, self.fields_size):
            self.field_annotations[i].copytofile(file)
        for i in range(0, self.annotated_methods_size):
            self.method_annotations[i].copytofile(file)
        for i in range(0, self.annotate_parameters_size):
            self.parameter_annotations[i].copytofile(file)

    def makeoffset(self, off):
        """Record the relocated offset/length; return the end offset."""
        self.start = off
        off += 4 * 4
        for i in range(0, self.fields_size):
            off = self.field_annotations[i].makeoffset(off)
        for i in range(0, self.annotated_methods_size):
            off = self.method_annotations[i].makeoffset(off)
        for i in range(0, self.annotate_parameters_size):
            off = self.parameter_annotations[i].makeoffset(off)
        self.len = off - self.start
        return off

    def getreference(self, dexmaplist):
        """Resolve raw offsets to relocatable item objects via the map list."""
        self.class_annotations_ref = dexmaplist[0x1003].getreference(self.class_annotations_off)
        for i in range(0, self.fields_size):
            self.field_annotations[i].getreference(dexmaplist)
        for i in range(0, self.annotated_methods_size):
            self.method_annotations[i].getreference(dexmaplist)
        for i in range(0, self.annotate_parameters_size):
            self.parameter_annotations[i].getreference(dexmaplist)

    def printf(self):
        print("None for AnnotationDirItem by now")
class FieldAnnotation:
    """field_annotation: field_idx (uint) + offset of an annotation_set_item."""

    def __init__(self):
        self.field_idx = 0
        self.annotations_off = 0
        self.annotations_off_ref = None  # resolved by getreference()

    def dump(self, addr):
        """Read the 8-byte record at addr."""
        self.field_idx = getDword(addr)
        self.annotations_off = getDword(addr + 4)
        self.annotations_off_ref = None

    def copytofile(self, file):
        """Append the record, substituting the relocated annotation offset."""
        file.write(struct.pack("II", self.field_idx, self.annotations_off_ref.start))

    def makeoffset(self, off):
        """Fixed-size record: advance by 8 bytes."""
        return off + 8

    def getreference(self, dexmaplist):
        """Resolve annotations_off against the annotation_set_item map (0x1003)."""
        self.annotations_off_ref = dexmaplist[0x1003].getreference(self.annotations_off)
class MethodAnnotation:
    """method_annotation: method_idx (uint) + offset of an annotation_set_item."""

    def __init__(self):
        self.method_idx = 0
        self.annotations_off = 0
        self.annotations_off_ref = None  # resolved by getreference()

    def dump(self, addr):
        """Read the 8-byte record at addr."""
        self.method_idx = getDword(addr)
        self.annotations_off = getDword(addr + 4)
        self.annotations_off_ref = None

    def copytofile(self, file):
        """Append the record, substituting the relocated annotation offset."""
        file.write(struct.pack("II", self.method_idx, self.annotations_off_ref.start))

    def makeoffset(self, off):
        """Fixed-size record: advance by 8 bytes."""
        return off + 8

    def getreference(self, dexmaplist):
        """Resolve annotations_off against the annotation_set_item map (0x1003)."""
        self.annotations_off_ref = dexmaplist[0x1003].getreference(self.annotations_off)
class ParamterAnnotation:
    """parameter_annotation: method_idx (uint) + offset of an
    annotation_set_ref_list."""

    def __init__(self):
        self.method_idx = 0
        self.annotations_off = 0
        self.annotations_off_ref = None  # resolved by getreference()

    def dump(self, addr):
        """Read the 8-byte record at addr."""
        self.method_idx = getDword(addr)
        self.annotations_off = getDword(addr + 4)
        self.annotations_off_ref = None

    def copytofile(self, file):
        """Append the record; an unresolved reference is written as 0."""
        file.write(struct.pack("I", self.method_idx))
        ref = self.annotations_off_ref
        file.write(struct.pack("I", ref.start if ref is not None else 0))

    def makeoffset(self, off):
        """Fixed-size record: advance by 8 bytes."""
        return off + 8

    def getreference(self, dexmaplist):
        """Resolve annotations_off against the annotation_set_ref_list map (0x1002)."""
        self.annotations_off_ref = dexmaplist[0x1002].getreference(self.annotations_off)
# --- driver: locate the runtime DexorJar cookie and dump the in-memory DEX ---
addr = 1558176632  # address of the DexorJar structure in the debugged process
print(hex(addr))
cookie = DexorJar()
cookie.dump(addr)
cookie.printf()
if cookie.isDex == 0:
    # APK path: walk JarFile -> DvmDex to find the loaded DEX
    jarfile = JarFile()
    jarfile.dump(cookie.pJarFile)
    jarfile.printf()
    dvmaddr = jarfile.pDvmDex
else:
    # BUG FIX: this branch previously fell through with `dvmaddr` unbound,
    # so the script died with a NameError right after this message.
    print("not support yet")
    raise SystemExit(1)
dvmDex = DvmDex()
dvmDex.dump(dvmaddr)
dvmDex.printf()
dexfile = DexFile()
dexfile.dump(dvmDex.pDexFile)
print("begin copy to file:")
dexfile.copytofile("dump.dex")
| {
"repo_name": "keulraesik/dumpDex",
"path": "dump.py",
"copies": "5",
"size": "76543",
"license": "apache-2.0",
"hash": 968704851888276900,
"line_mean": 34.5683085502,
"line_max": 206,
"alpha_frac": 0.551859739,
"autogenerated": false,
"ratio": 3.5134031029101256,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0024213306412503685,
"num_lines": 2152
} |
__author__ = 'CwT'
from idaapi import *
from idautils import *
from idc import *
import struct
global baseAddr
def writefile(file, addr, len):
    """Copy `len` bytes starting at database address `addr` into `file`."""
    for offset in range(len):
        file.write(struct.pack("B", getByte(addr + offset)))
def getDword(addr):
    """Read a 32-bit value from the IDA database (idc.Dword wrapper)."""
    return Dword(addr)
def getByte(addr):
    """Read one byte from the IDA database (idc.Byte wrapper)."""
    return Byte(addr)
def getWord(addr):
    """Read a 16-bit value from the IDA database (idc.Word wrapper)."""
    return Word(addr)
def dexGetStringData(dexfile, offset):
    """Return the character data of the string_data_item at
    dexfile.baseAddr + offset as a Python string.

    Skips the leading ULEB128 length prefix, then collects bytes up to
    (but not including) the NUL terminator.
    """
    addr = dexfile.baseAddr + offset
    # skip the ULEB128 utf16_size prefix (continuation bit = 0x80)
    while getByte(addr) > 0x7f:
        addr += 1
    addr += 1
    chars = []
    cur = getByte(addr)
    while cur != 0:
        chars.append(chr(cur))
        addr += 1
        cur = getByte(addr)
    return "".join(chars)
def dexGetStringId(dexfile, idx):
    """Return the string_data offset stored in string_ids[idx] (4 bytes each)."""
    return getDword(dexfile.pStringIds+4*idx)
def dexStringById(dexfile, idx):
    """Resolve string_ids[idx] to its decoded string."""
    offset = dexGetStringId(dexfile, idx)
    return dexGetStringData(dexfile, offset)
def dexGetTypeId(dexfile, idx):
    """Return the descriptor string index stored in type_ids[idx]."""
    return getDword(dexfile.pTypeIds+4*idx)
def dexStringByTypeIdx(dexfile, idx):
    """Resolve type_ids[idx] to its descriptor string."""
    return dexStringById(dexfile, dexGetTypeId(dexfile, idx))
def dexGetClassDescriptor(dexfile, classdef):
    """Return the type descriptor of a DexClassDef (e.g. 'Lfoo/Bar;')."""
    return dexStringByTypeIdx(dexfile, classdef.classIdx)
def slashtodot(str):
    """Convert a slash-separated type descriptor to dotted form.

    Replaces every '/' with '.' and drops every ';', exactly as the
    original character-by-character loop did, but using the built-in
    string methods.
    """
    return str.replace('/', '.').replace(';', '')
def rightshift(value, n):
    """Arithmetic (sign-extending) right shift of a 32-bit unsigned value.

    Interprets `value` as a signed 32-bit two's-complement number and
    shifts it right by `n`, returning a (possibly negative) Python int —
    the same result the original struct pack/unpack round-trip produced.
    """
    if value & 0x80000000:
        # negative in two's complement: shift the signed interpretation,
        # which keeps the high bits set (sign extension)
        return (value - 0x100000000) >> n
    return value >> n
def readunsignedleb128(addr):
    """Decode a ULEB128 value from the database at addr.

    Returns (value, byte_length).  At most five bytes are consumed; the
    fifth byte is taken whole (all 8 bits, no continuation check), which
    matches the DEX format's 32-bit cap.
    """
    res = getByte(addr)
    len = 1
    if res > 0x7f:  # bit 7 set => another byte follows
        cur = getByte(addr + 1)
        res = (res & 0x7f) | ((cur & 0x7f) << 7)
        len = 2
        if cur > 0x7f:
            cur = getByte(addr + 2)
            res |= (cur & 0x7f) << 14
            len = 3
            if cur > 0x7f:
                cur = getByte(addr + 3)
                res |= (cur & 0x7f) << 21
                len = 4
                if cur > 0x7f:
                    cur = getByte(addr + 4)
                    res |= cur << 28  # final byte: used in full
                    len = 5
    return res, len
def readsignedleb128(addr):
    """Decode an SLEB128 value from the database at addr.

    Returns (value, byte_length).  After assembling the 7-bit groups the
    partial value is sign-extended by shifting it to the top of a 32-bit
    word and arithmetically shifting back (via rightshift): e.g. a 1-byte
    value holds 7 payload bits, so it is shifted left/right by 25.
    """
    res = getByte(addr)
    len = 1
    if res <= 0x7f:
        res = rightshift((res << 25), 25)   # sign-extend 7 bits
    else:
        cur = getByte(addr + 1)
        res = (res & 0x7f) | ((cur & 0x7f) << 7)
        len = 2
        if cur <= 0x7f:
            res = rightshift((res << 18), 18)   # sign-extend 14 bits
        else:
            cur = getByte(addr + 2)
            res |= (cur & 0x7f) << 14
            len = 3
            if cur <= 0x7f:
                res = rightshift((res << 11), 11)   # sign-extend 21 bits
            else:
                cur = getByte(addr + 3)
                res |= (cur & 0x7f) << 21
                len = 4
                if cur <= 0x7f:
                    res = rightshift((res << 4), 4)   # sign-extend 28 bits
                else:
                    cur = getByte(addr + 4)
                    res |= cur << 28   # final byte used in full: 32 bits
                    len = 5
    return res, len
def writesignedleb128(num, file):
    """Emit `num` to `file` as SLEB128.

    Non-negative values are delegated to writeunsignedleb128 (the
    original behavior).  Negative values are measured by scanning from
    the MSB of the 32-bit representation for the first clear bit, then
    emitted 7 bits at a time with the continuation flag.
    """
    if num >= 0:
        writeunsignedleb128(num, file)
        return
    # find the highest clear bit: everything above it is sign filler
    probe = 0x80000000
    for idx in range(0, 32):
        if num & probe == 0:
            break
        probe >>= 1
    remaining = 32 - idx + 1
    while remaining > 7:
        file.write(struct.pack("B", num & 0x7f | 0x80))
        num >>= 7
        remaining -= 7
    file.write(struct.pack("B", num & 0x7f))
def signedleb128forlen(num):
    """Return the number of bytes `num` occupies when SLEB128-encoded.

    Non-negative values are delegated to unsignedleb128forlen.  Negative
    values are measured with the same MSB scan writesignedleb128 uses,
    then the bit count is divided into 7-bit groups, rounding up.
    """
    if num >= 0:
        return unsignedleb128forlen(num)
    mask = 0x80000000
    for i in range(0, 32):
        tmp = num & mask
        mask >>= 1
        if tmp == 0:
            break
    loop = 32 - i + 1
    # FIX: use floor division — plain "/" yields a float on Python 3,
    # which would poison every offset computed from this length.
    if loop % 7 == 0:
        return loop // 7
    return loop // 7 + 1
def writeunsignedleb128(num, file):
    """Emit `num` to `file` as ULEB128 (7 data bits per byte, bit 7 set
    on every byte except the last).

    Rewritten from four levels of copy-pasted nesting into the standard
    loop.  This also removes the hard 5-byte cap: the old final
    struct.pack("B", num) raised for values needing more than five bytes
    instead of encoding them; for all 32-bit inputs the output bytes are
    identical.
    """
    while num > 0x7f:
        file.write(struct.pack("B", num & 0x7f | 0x80))
        num >>= 7
    file.write(struct.pack("B", num))
def unsignedleb128forlen(num):
    """Return the number of bytes `num` occupies when ULEB128-encoded.

    Warns (per iteration past the limit, matching the original) if the
    value would not fit in the 5-byte maximum of a 32-bit ULEB128.
    """
    original = num
    count = 1
    while num > 0x7f:
        count += 1
        num >>= 7
        if count > 5:
            print("error for unsignedleb128forlen", original)
    return count
def readunsignedleb128p1(addr):
    """Decode a ULEB128p1 value (stored as value+1, so 0 decodes to -1).
    Returns (value, byte_length)."""
    res, len = readunsignedleb128(addr)
    return res - 1, len
def writeunsignedleb128p1(num, file):
    """Encode `num` as ULEB128p1 (writes num+1 as plain ULEB128)."""
    writeunsignedleb128(num+1, file)
def unsignedleb128p1forlen(num):
    """Return the encoded byte length of `num` as ULEB128p1."""
    return unsignedleb128forlen(num+1)
class DvmDex:
    """Mirror of the first two pointer fields of the runtime DvmDex struct."""

    def __init__(self):
        self.pDexFile = 0  # pointer to the DexFile struct
        self.pHeader = 0   # pointer to the (cloned) DexHeader

    def dump(self, addr):
        """Read both pointers from the debugged process at addr."""
        self.pDexFile = getDword(addr)
        self.pHeader = getDword(addr + 4)

    def printf(self):
        print("dexfile addr is: ", hex(self.pDexFile))
        print("header addr is: ", hex(self.pHeader))
class DexFile:
    """Mirror of the runtime DexFile struct: cached section pointers plus
    parsed optional and DEX headers."""
    def __init__(self):
        self.pOptHeader = 0     # pointer to the odex OptHeader (may be 0)
        self.pHeader = 0        # pointer to the DexHeader
        self.pStringIds = 0
        self.pTypeIds = 0
        self.pFieldIds = 0
        self.pMethodIds = 0
        self.pProtoIds = 0
        self.pClassDefs = 0
        self.pLinkData = 0
        self.baseAddr = 0       # base address the file-relative offsets use
        self.OptHeader = OptHeader()
        self.dexHeader = DexHeader()
    def dump(self, addr):
        """Read the struct fields at addr, parse both headers, then rebuild
        the header offsets from the live section pointers."""
        global baseAddr
        self.pOptHeader = getDword(addr)
        self.pHeader = getDword(addr + 4)
        self.pStringIds = getDword(addr + 8)
        self.pTypeIds = getDword(addr + 12)
        self.pFieldIds = getDword(addr + 16)
        self.pMethodIds = getDword(addr + 20)
        self.pProtoIds = getDword(addr + 24)
        self.pClassDefs = getDword(addr + 28)
        self.pLinkData = getDword(addr + 32)
        self.baseAddr = getDword(addr + 44)
        baseAddr = self.baseAddr  # published for module-level helpers
        self.OptHeader.dump(self.pOptHeader)
        self.dexHeader.dump(self.pHeader)
        self.fixDexHeader()
    def fixDexHeader(self):
        """Recompute header offsets from the in-memory section pointers;
        the dumped header may have them zeroed/stale."""
        self.dexHeader.stringIdsOff = self.pStringIds - self.pHeader
        self.dexHeader.typeIdsOff = self.pTypeIds - self.pHeader
        self.dexHeader.fieldIdsOff = self.pFieldIds - self.pHeader
        self.dexHeader.methodIdsOff = self.pMethodIds - self.pHeader
        self.dexHeader.protoIdsOff = self.pProtoIds - self.pHeader
        self.dexHeader.classDefsOff = self.pClassDefs - self.pHeader
        if self.dexHeader.dataOff == 0:
            # fall back: data section starts right after the class_defs
            self.dexHeader.dataOff = self.dexHeader.classDefsOff + self.dexHeader.classDefsSize*32
        # We should figure out a new method to fix the data size
        # self.dexHeader.dataSize = 0x5DD28000 - self.baseAddr - self.dexHeader.dataOff
    def lookupClass(self, type):
        """Find the class whose descriptor equals `type` and print its
        direct/virtual method summaries (via Encodedmethod.printf)."""
        num_class_def = self.dexHeader.classDefsSize
        print "num class def:", num_class_def
        for i in range(num_class_def):
            classdef = DexClassDef()
            classdef.dump(self.pClassDefs+32*i)   # class_def_item is 32 bytes
            descriptor = dexGetClassDescriptor(self, classdef)
            if descriptor == type:
                print "Find the class", descriptor
                if classdef.classDataOff == 0:
                    print "classDataOff is 0"
                    return
                classdata = ClassdataItem()
                # mask to 32 bits: the sum may exceed the address width
                classdata.dump(int(self.baseAddr+classdef.classDataOff) & 0xffffffff)
                print "direct methods:", classdata.direct_methods_size
                for j in range(classdata.direct_methods_size):
                    method = classdata.direct_methods[j]
                    method.printf()
                print "virtual methods:", classdata.virtual_methods_size
                for j in range(classdata.virtual_methods_size):
                    method = classdata.virtual_methods[j]
                    method.printf()
    def printf(self):
        """Debug print: header pointer and base address."""
        print("dex head addr: ", hex(self.pHeader))
        print("dex head addr: ", hex(self.baseAddr))
class DexClassDef:
    """One class_def_item: eight consecutive uint32 fields (32 bytes)."""

    def __init__(self):
        for name in ("classIdx", "accessFlags", "superclassIdx",
                     "interfacesOff", "sourceFileIdx", "annotationsOff",
                     "classDataOff", "staticValuesOff"):
            setattr(self, name, 0)

    def dump(self, addr):
        """Read the 32-byte record at database address addr."""
        self.classIdx = getDword(addr)
        self.accessFlags = getDword(addr + 4)
        self.superclassIdx = getDword(addr + 8)
        self.interfacesOff = getDword(addr + 12)
        self.sourceFileIdx = getDword(addr + 16)
        self.annotationsOff = getDword(addr + 20)
        self.classDataOff = getDword(addr + 24)
        self.staticValuesOff = getDword(addr + 28)

    def copytofile(self, file):
        """Append the record as eight uint32 values in file order."""
        file.write(struct.pack("8I", self.classIdx, self.accessFlags,
                               self.superclassIdx, self.interfacesOff,
                               self.sourceFileIdx, self.annotationsOff,
                               self.classDataOff, self.staticValuesOff))
class DexHeader:
    """The 112-byte DEX file header: magic, checksums, and the size/offset
    pair of every top-level section."""
    def __init__(self):
        self.magic = []       # 8 magic bytes ("dex\\n035\\0" family)
        self.checksum = 0     # adler32 of the rest of the file
        self.signature = []   # 20-byte SHA-1
        self.fileSize = 0
        self.headerSize = 0
        self.endianTag = 0
        self.linkSize = 0
        self.linkOff = 0
        self.mapOff = 0
        self.stringIdsSize = 0
        self.stringIdsOff = 0
        self.typeIdsSize = 0
        self.typeIdsOff = 0
        self.protoIdsSize = 0
        self.protoIdsOff = 0
        self.fieldIdsSize = 0
        self.fieldIdsOff = 0
        self.methodIdsSize = 0
        self.methodIdsOff = 0
        self.classDefsSize = 0
        self.classDefsOff = 0
        self.dataSize = 0  # have it
        self.dataOff = 0  # have it
    def dump(self, addr):
        """Read every header field from the database at addr."""
        len = 0
        while len < 8:
            self.magic.append(getByte(addr + len))
            len += 1
        self.checksum = getDword(addr + 8)
        len = 0
        while len < 20:
            self.signature.append(getByte(addr + 12 + len))
            len += 1
        self.fileSize = getDword(addr + 32)
        self.headerSize = getDword(addr + 36)
        self.endianTag = getDword(addr + 40)
        self.linkSize = getDword(addr + 44)
        self.linkOff = getDword(addr + 48)
        self.mapOff = getDword(addr + 52)
        self.stringIdsSize = getDword(addr + 56)
        self.stringIdsOff = getDword(addr + 60)
        self.typeIdsSize = getDword(addr + 64)
        self.typeIdsOff = getDword(addr + 68)
        self.protoIdsSize = getDword(addr + 72)
        self.protoIdsOff = getDword(addr + 76)
        self.fieldIdsSize = getDword(addr + 80)
        self.fieldIdsOff = getDword(addr + 84)
        self.methodIdsSize = getDword(addr + 88)
        self.methodIdsOff = getDword(addr + 92)
        self.classDefsSize = getDword(addr + 96)
        self.classDefsOff = getDword(addr + 100)
        self.dataSize = getDword(addr + 104)
        self.dataOff = getDword(addr + 108)
    def printf(self):
        """Debug print: the main section offsets and the class-def count."""
        print "string off", self.stringIdsOff
        print "type off", self.typeIdsOff
        print "proto off", self.protoIdsOff
        print "field off", self.fieldIdsOff
        print "method off", self.methodIdsOff
        print "classdef off", self.classDefsOff
        print "classdef size:", self.classDefsSize
    def copytofile(self, file):
        """Write the header back to `file` in its on-disk layout."""
        len = 0
        while len < 8:
            file.write(struct.pack("B", self.magic[len]))
            len += 1
        file.write(struct.pack("I", self.checksum))
        len = 0
        while len < 20:
            file.write(struct.pack("B", self.signature[len]))
            len += 1
        file.write(struct.pack("I", self.fileSize))
        file.write(struct.pack("I", self.headerSize))
        file.write(struct.pack("I", self.endianTag))
        file.write(struct.pack("I", self.linkSize))
        file.write(struct.pack("I", self.linkOff))
        file.write(struct.pack("I", self.mapOff))
        file.write(struct.pack("I", self.stringIdsSize))
        file.write(struct.pack("I", self.stringIdsOff))
        file.write(struct.pack("I", self.typeIdsSize))
        file.write(struct.pack("I", self.typeIdsOff))
        file.write(struct.pack("I", self.protoIdsSize))
        file.write(struct.pack("I", self.protoIdsOff))
        file.write(struct.pack("I", self.fieldIdsSize))
        file.write(struct.pack("I", self.fieldIdsOff))
        file.write(struct.pack("I", self.methodIdsSize))
        file.write(struct.pack("I", self.methodIdsOff))
        file.write(struct.pack("I", self.classDefsSize))
        file.write(struct.pack("I", self.classDefsOff))
        file.write(struct.pack("I", self.dataSize))
        file.write(struct.pack("I", self.dataOff))
class OptHeader:
    """Header of an optimized DEX (odex): 8 magic bytes plus eight uint32
    fields describing the embedded sections."""

    def __init__(self):
        self.magic = []   # 8 magic bytes
        self.dexoffset = 0
        self.dexLength = 0
        self.depsOffset = 0
        self.depsLength = 0
        self.optOffset = 0
        self.optLength = 0
        self.flag = 0
        self.checksum = 0

    def dump(self, addr):
        """Read the header at addr; addr == 0 means 'no opt header'."""
        if addr == 0:
            return
        for i in range(8):
            self.magic.append(getByte(addr + i))
        self.dexoffset = getDword(addr + 8)
        self.dexLength = getDword(addr + 12)
        self.depsOffset = getDword(addr + 16)
        self.depsLength = getDword(addr + 20)
        self.optOffset = getDword(addr + 24)
        self.optLength = getDword(addr + 28)
        self.flag = getDword(addr + 32)
        self.checksum = getDword(addr + 36)

    def copytofile(self, file):
        """Write the header back in its on-disk layout (magic + 8 uint32)."""
        for i in range(8):
            file.write(struct.pack("B", self.magic[i]))
        for field in (self.dexoffset, self.dexLength, self.depsOffset,
                      self.depsLength, self.optOffset, self.optLength,
                      self.flag, self.checksum):
            file.write(struct.pack("I", field))
class ClassdataItem:
    """class_data_item: four ULEB128 counts followed by the static/instance
    field lists and the direct/virtual method lists."""

    def __init__(self):
        self.len = 0
        self.static_field_size = 0
        self.instance_fields_size = 0
        self.direct_methods_size = 0
        self.virtual_methods_size = 0
        self.static_fields = []
        self.instance_fields = []
        self.direct_methods = []
        self.virtual_methods = []

    def dump(self, addr):
        """Parse the item at addr, advancing self.len past every component."""
        self.static_field_size, consumed = readunsignedleb128(addr)
        self.len += consumed
        self.instance_fields_size, consumed = readunsignedleb128(addr + self.len)
        self.len += consumed
        self.direct_methods_size, consumed = readunsignedleb128(addr + self.len)
        self.len += consumed
        self.virtual_methods_size, consumed = readunsignedleb128(addr + self.len)
        self.len += consumed
        for _ in range(self.static_field_size):
            item = Encodedfield()
            item.dump(addr + self.len)
            self.len += item.len
            self.static_fields.append(item)
        for _ in range(self.instance_fields_size):
            item = Encodedfield()
            item.dump(addr + self.len)
            self.len += item.len
            self.instance_fields.append(item)
        for _ in range(self.direct_methods_size):
            item = Encodedmethod()
            item.dump(addr + self.len)
            self.len += item.len
            self.direct_methods.append(item)
        for _ in range(self.virtual_methods_size):
            item = Encodedmethod()
            item.dump(addr + self.len)
            self.len += item.len
            self.virtual_methods.append(item)

    def recallLength(self):
        """Recompute self.len after edits: fields use their cached len,
        methods are re-measured via their own recallLength()."""
        total = unsignedleb128forlen(self.static_field_size)
        total += unsignedleb128forlen(self.instance_fields_size)
        total += unsignedleb128forlen(self.direct_methods_size)
        total += unsignedleb128forlen(self.virtual_methods_size)
        for i in range(self.static_field_size):
            total += self.static_fields[i].len
        for i in range(self.instance_fields_size):
            total += self.instance_fields[i].len
        for i in range(self.direct_methods_size):
            total += self.direct_methods[i].recallLength()
        for i in range(self.virtual_methods_size):
            total += self.virtual_methods[i].recallLength()
        self.len = total
        return total

    def copytofile(self, file):
        """Append the four counts, then every field and method in order."""
        writeunsignedleb128(self.static_field_size, file)
        writeunsignedleb128(self.instance_fields_size, file)
        writeunsignedleb128(self.direct_methods_size, file)
        writeunsignedleb128(self.virtual_methods_size, file)
        for i in range(self.static_field_size):
            self.static_fields[i].copytofile(file)
        for i in range(self.instance_fields_size):
            self.instance_fields[i].copytofile(file)
        for i in range(self.direct_methods_size):
            self.direct_methods[i].copytofile(file)
        for i in range(self.virtual_methods_size):
            self.virtual_methods[i].copytofile(file)
class Encodedfield:
    """encoded_field: field_idx_diff and access_flags, both ULEB128."""

    def __init__(self):
        self.len = 0
        self.field_idx_diff = 0
        self.access_flags = 0
        self.field_idx = 0  # absolute index, resolved by the caller later

    def dump(self, addr):
        """Parse the encoded_field at addr, accumulating self.len."""
        self.field_idx_diff, consumed = readunsignedleb128(addr)
        self.len += consumed
        self.access_flags, consumed = readunsignedleb128(addr + self.len)
        self.len += consumed

    def copytofile(self, file):
        """Append both ULEB128 fields."""
        writeunsignedleb128(self.field_idx_diff, file)
        writeunsignedleb128(self.access_flags, file)
class Encodedmethod:
    """encoded_method: method_idx_diff, access_flags and code_off,
    all ULEB128."""
    def __init__(self):
        self.len = 0
        self.method_idx_diff = 0
        self.access_flags = 0
        self.code_off = 0           # offset of the method's code_item (0 = none)
        self.method_idx = 0         # absolute index, resolved by the caller later
    def dump(self, addr):
        """Parse the encoded_method at addr, accumulating self.len."""
        self.method_idx_diff, length = readunsignedleb128(addr)
        self.len += length
        self.access_flags, length = readunsignedleb128(addr + self.len)
        self.len += length
        self.code_off, length = readunsignedleb128(addr + self.len)
        self.len += length
    def recallLength(self):
        """Recompute and return self.len from the current field values."""
        self.len = 0
        self.len += unsignedleb128forlen(self.method_idx_diff)
        self.len += unsignedleb128forlen(self.access_flags)
        self.len += unsignedleb128forlen(self.code_off)
        return self.len
    def copytofile(self, file):
        """Append all three ULEB128 fields."""
        writeunsignedleb128(self.method_idx_diff, file)
        writeunsignedleb128(self.access_flags, file)
        writeunsignedleb128(self.code_off, file)
    def printf(self):
        """Debug print: the code offset and access flags."""
        print "code offset:", self.code_off
        print "access flag:", self.access_flags
# alignment: 4bytes
class CodeItem:
    """code_item: register/argument counts, the 16-bit instruction array,
    optional alignment padding, the try_items and the catch-handler list.
    Alignment: 4 bytes."""
    def __init__(self):
        self.len = 0
        self.register_size = 0
        self.ins_size = 0
        self.outs_size = 0
        self.tries_size = 0
        self.debug_info_off = 0
        self.insns_size = 0      # number of 16-bit code units
        self.insns = []
        self.debugRef = None     # resolved debug_info reference (set elsewhere)
        self.padding = 0
        self.tries = []
        self.handler = None      # EncodedhandlerList when tries_size > 0
    def dump(self, addr):
        """Parse the code_item at addr, accumulating self.len."""
        self.register_size = getWord(addr)
        self.ins_size = getWord(addr + 2)
        self.outs_size = getWord(addr + 4)
        self.tries_size = getWord(addr + 6)
        self.debug_info_off = getDword(addr + 8)
        self.insns_size = getDword(addr + 12)
        self.len += 16
        for i in range(0, self.insns_size):
            self.insns.append(getWord(addr + self.len + 2 * i))
        self.len += 2 * self.insns_size
        if self.tries_size != 0 and self.insns_size % 2 == 1:
            # 2-byte pad so the try_items start on a 4-byte boundary
            self.len += 2
        for i in range(0, self.tries_size):
            tryitem = TryItem()
            tryitem.dump(addr + self.len + 8 * i)   # try_item is 8 bytes
            self.tries.append(tryitem)
        self.len += 8 * self.tries_size
        if self.tries_size != 0:
            self.handler = EncodedhandlerList()
            self.handler.dump(addr + self.len)
            self.len += self.handler.len
        # align = self.len % 4
        # if align != 0:
        #     self.len += (4 - align)
class TryItem:
    """try_item: start_addr (uint), insn_count (ushort), handler_off
    (ushort) — a fixed 8-byte record."""

    def __init__(self):
        self.start = 0
        self.len = 8  # fixed-size record
        self.start_addr = 0
        self.insn_count = 0
        self.handler_off = 0

    def dump(self, addr):
        """Read the 8-byte record at addr."""
        self.start = addr
        self.start_addr = getDword(addr)
        self.insn_count = getWord(addr + 4)
        self.handler_off = getWord(addr + 6)
class EncodedhandlerList:
    """encoded_catch_handler_list: a ULEB128 size followed by `size`
    encoded_catch_handler entries."""

    def __init__(self):
        self.start = 0
        self.len = 0
        self.size = 0
        self.list = []

    def dump(self, addr):
        """Parse the list at addr, accumulating self.len."""
        self.start = addr
        self.size, consumed = readunsignedleb128(addr)
        self.len += consumed
        for _ in range(self.size):
            handler = EncodedhandlerItem()
            handler.dump(addr + self.len)
            self.len += handler.len
            self.list.append(handler)
class EncodedhandlerItem:
    """encoded_catch_handler: a *signed* size, |size| type/addr pairs, and
    a trailing catch_all_addr when size <= 0 (negative size signals a
    catch-all clause)."""

    def __init__(self):
        self.start = 0
        self.len = 0
        self.size = 0
        self.handlers = []
        self.catch_all_addr = 0

    def dump(self, addr):
        """Parse the handler at addr, accumulating self.len."""
        self.start = addr
        self.size, consumed = readsignedleb128(addr)
        self.len += consumed
        for _ in range(abs(self.size)):
            pair = EncodedTypeAddrPair()
            pair.dump(addr + self.len)
            self.len += pair.len
            self.handlers.append(pair)
        if self.size <= 0:
            self.catch_all_addr, consumed = readunsignedleb128(addr + self.len)
            self.len += consumed
class EncodedTypeAddrPair:
    """encoded_type_addr_pair: type_idx and handler address, both ULEB128."""

    def __init__(self):
        self.type_idx = 0
        self.addr = 0
        self.len = 0

    def dump(self, addr):
        """Parse the pair at addr, accumulating self.len."""
        self.type_idx, consumed = readunsignedleb128(addr)
        self.addr, extra = readunsignedleb128(addr + consumed)
        self.len += consumed + extra
# --- lookup.py driver: parse the in-memory DexFile and inspect one class ---
address = int(0x5d4e8020)  # DexFile address
dexfile = DexFile()
dexfile.dump(address)
dexfile.dexHeader.printf()
# print the direct/virtual method summaries of the named class
dexfile.lookupClass("Lcom/baidu/lbsapi/auth/LBSAuthManagerListener;")
# dexfile.copytofile()
| {
"repo_name": "CvvT/dumpDex",
"path": "lookup.py",
"copies": "1",
"size": "23228",
"license": "apache-2.0",
"hash": 9203349476013620000,
"line_mean": 32.3256814921,
"line_max": 98,
"alpha_frac": 0.562166351,
"autogenerated": false,
"ratio": 3.4824587706146928,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45446251216146927,
"avg_score": null,
"num_lines": null
} |
__author__ = 'CwT'
import Queue
class FlowAnalysis(object):
    """Worklist-based data-flow analysis skeleton over a basic-block graph.

    Subclasses provide the direction (isForward), the meet operations
    (copy / mergeInto) and the transfer function (flowTransfer).
    """
    def __init__(self):
        self.GenSet = {}
        self.KillSet = {}
        self._graph = None

    def doAnalysis(self, directGraph):
        """Iterate to a fixed point: meet each block's inputs, apply the
        transfer function, and requeue successors when the facts changed.

        NOTE(review): 'Queue' is the Python 2 module name ('queue' on Py3).
        """
        self._graph = directGraph
        numofcomputation = 0  # iteration counter, kept for diagnostics
        queue = Queue.Queue()
        # Seed the worklist with every block in the graph.
        for key, value in directGraph.blocks.items():
            queue.put(value)
        while not queue.empty():
            numofcomputation += 1
            block = queue.get()
            # meet
            self.meetFlow(block)
            # transfer function
            changed = self.flowthrough(block)
            if changed:
                for item in block.successors:
                    queue.put(item)

    def meetFlow(self, block):
        """Combine the facts of all input blocks into *block*:
        copy the first input, merge every further one."""
        IN = self.getInFlow(block)
        copied = False
        for item in IN:
            if not copied:
                self.copy(item, block)
                copied = True
            else:
                self.mergeInto(item, block)

    def flowthrough(self, block):
        """Apply the subclass transfer function; True means facts changed."""
        return self.flowTransfer(block)

    def getInFlow(self, block):
        # BUG FIX: isForward must be *called* — the bound-method object
        # itself is always truthy, so every analysis behaved as forward
        # regardless of what a subclass's isForward() returned.
        return block.predecessors if self.isForward() else block.successors

    def getOutFlow(self, block):
        # Same fix as getInFlow: call isForward() instead of testing the
        # method object.
        return block.successors if self.isForward() else block.predecessors

    def isForward(self):
        """Abstract: return True for a forward analysis (base returns None)."""
        return
    def mergeInto(self, sourceBlock, destBlock):
        # Abstract hook: merge sourceBlock's facts into destBlock (subclass).
        return
    def copy(self, sourceBlock, destBlock):
        # Abstract hook: overwrite destBlock's facts with sourceBlock's.
        return | {
"repo_name": "Panalyzer/Panalyzer",
"path": "src/FlowAnalysis.py",
"copies": "1",
"size": "1554",
"license": "mit",
"hash": -274345365934147400,
"line_mean": 25.8103448276,
"line_max": 73,
"alpha_frac": 0.5566280566,
"autogenerated": false,
"ratio": 4.304709141274238,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5361337197874239,
"avg_score": null,
"num_lines": null
} |
__author__ = 'CwT'
# Dalvik opcode number -> mnemonic, for the opcodes this parser supports.
# FIX: 0xe1 was listed as "shl-int/lit8" (a duplicate of 0xe0); per the
# Dalvik bytecode specification 0xe1 is "shr-int/lit8".
OPCODE = {0: "nop", 1: "move", 2: "move/from16", 5: "move-wide/from16", 7: "move-object", 8: "move-object/from16",
          0xa: "move-result", 0xb: "move-result-wide", 0xc: "move-result-object", 0xd: "move-exception",
          0xe: "return-void", 0xf: "return", 0x10: "return-wide", 0x11: "return-object",
          0x12: "const/4", 0x13: "const/16", 0x1a: "const-string", 0x1c: "const-class",
          0x1f: "check-cast", 0x22: "new-instance", 0x23: "new-array", 0x28: "goto",
          0x32: "if-eq", 0x33: "if-ne", 0x34: "if-lt", 0x35: "if-ge", 0x36: "if-gt", 0x37: "if-le",
          0x38: "if-eqz", 0x39: "if-nez", 0x3a: "if-ltz", 0x3b: "if-gez", 0x3c: "if-gtz", 0x3d: "if-lez",
          0x44: "aget", 0x45: "aget-wide", 0x46: "aget-object", 0x47: "aget-boolean",
          0x48: "aget-byte", 0x49: "aget-char", 0x4a: "aget-short",
          0x4b: "aput", 0x4c: "aput-wide", 0x4d: "aput-object", 0x4e: "aput-boolean",
          0x4f: "aput-byte", 0x50: "aput-char", 0x51: "aput-short",
          0x52: "iget", 0x53: "iget-wide", 0x54: "iget-object", 0x55: "iget-boolean",
          0x56: "iget-byte", 0x57: "iget-char", 0x58: "iget-short",
          0x59: "iput", 0x5a: "iput-wide", 0x5b: "iput-object", 0x5c: "iput-boolean",
          0x5d: "iput-byte", 0x5e: "iput-char", 0x5f: "iput-short",
          0x60: "sget", 0x61: "sget-wide", 0x62: "sget-object", 0x63: "sget-boolean",
          0x64: "sget-byte", 0x65: "sget-char", 0x66: "sget-short", 0x67: "sput", 0x68: "sput-wide",
          0x69: "sput-object", 0x6a: "sput-boolean", 0x6b: "sput-byte", 0x6c: "sput-char", 0x6d: "sput-short",
          0x6e: "invoke-virtual", 0x6f: "invoke-super", 0x70: "invoke-direct", 0x71: "invoke-static", 0x72: "invoke-interface",
          0x74: "invoke-virtual/range", 0x75: "invoke-super/range", 0x76: "invoke-direct/range",
          0x77: "invoke-static/range", 0x78: "invoke-interface/range",
          0x7b: "neg-int", 0x7c: "not-int", 0x7d: "neg-long", 0x7e: "not-long", 0x7f: "neg-float", 0x80: "neg-double",
          0x81: "int-to-long", 0x82: "int-to-float", 0x83: "int-to-double", 0x84: "long-to-int", 0x85: "long-to-float",
          0x86: "long-to-double", 0x87: "float-to-int", 0x88: "float-to-long", 0x89: "float-to-double", 0x8a: "double-to-int",
          0x8b: "double-to-long", 0x8c: "double-to-float", 0x8d: "int-to-byte", 0x8e: "int-to-char", 0x8f: "int-to-short",
          0x90: "add-int", 0x91: "sub-int", 0x92: "mul-int", 0x93: "div-int", 0x94: "rem-int", 0x95: "and-int",
          0x96: "or-int", 0x97: "xor-int", 0x98: "shl-int", 0x99: "shr-int", 0x9a: "ushr-int", 0x9b: "add-long",
          0x9c: "sub-long", 0x9d: "mul-long", 0x9e: "div-long", 0x9f: "rem-long", 0xa0: "and-long", 0xa1: "or-long",
          0xa2: "xor-long", 0xa3: "shl-long", 0xa4: "shr-long", 0xa5: "ushr-long", 0xa6: "add-float", 0xa7: "sub-float",
          0xa8: "mul-float", 0xa9: "div-float", 0xaa: "rem-float", 0xab: "add-double", 0xac: "sub-double", 0xad: "mul-double",
          0xae: "div-double", 0xaf: "rem-double",
          0xb0: "add-int/2addr", 0xb1: "sub-int/2addr", 0xb2: "mul-int/2addr", 0xb3: "div-int/2addr", 0xb4: "rem-int/2addr",
          0xb5: "and-int/2addr", 0xb6: "or-int/2addr", 0xb7: "xor-int/2addr", 0xb8: "shl-int/2addr", 0xb9: "shr-int/2addr",
          0xba: "ushr-int/2addr", 0xbb: "add-long/2addr", 0xbc: "sub-long/2addr", 0xbd: "mul-long/2addr",
          0xbe: "div-long/2addr", 0xbf: "rem-long/2addr", 0xc0: "and-long/2addr", 0xc1: "or-long/2addr", 0xc2: "xor-long/2addr",
          0xc3: "shl-long/2addr", 0xc4: "shr-long/2addr", 0xc5: "ushr-long/2addr", 0xc6: "add-float/2addr", 0xc7: "sub-float/2addr",
          0xc8: "mul-float/2addr", 0xc9: "div-float/2addr", 0xca: "rem-float/2addr", 0xcb: "add-double/2addr",
          0xcc: "sub-double/2addr", 0xcd: "mul-double/2addr", 0xce: "div-double/2addr", 0xcf: "rem-double/2addr",
          0xd0: "add-int/lit16", 0xd1: "rsub-int", 0xd2: "mul-int/lit16", 0xd3: "div-int/lit16", 0xd4: "rem-int/lit16",
          0xd5: "and-int/lit16", 0xd6: "or-int/lit16", 0xd7: "xor-int/lit16",
          0xd8: "add-int/lit8", 0xd9: "rsub-int/lit8", 0xda: "mul-int/lit8", 0xdb: "div-int/lit8", 0xdc: "rem-int/lit8",
          0xdd: "and-int/lit8", 0xde: "or-int/lit8", 0xdf: "xor-int/lit8", 0xe0: "shl-int/lit8", 0xe1: "shr-int/lit8",
          0xe2: "ushr-int/lit8",
          }
class Instruction:
    """One decoded Dalvik instruction.

    insns holds the raw 16-bit code units the instruction consumed,
    str_ins the human-readable disassembly, and dst/src/target the
    decoded operands (-1 when unused).
    """
    def __init__(self):
        self.insns = []
        self.str_ins = ""
        self.dst = -1
        self.src = -1
        self.target = -1

    def init(self, param_insns, index):
        """Decode one instruction starting at param_insns[index].

        Returns the index of the first code unit after this instruction.
        """
        start = index
        one = param_insns[index]
        index += 1
        opcode = one & 0xff
        one >>= 8
        if opcode in (0, 0xe):
            # nop / return-void carry no operands.
            self.str_ins = OPCODE[opcode]
        elif opcode == 1:
            index = self.parse(param_insns, index, one, opcode, 0)
        elif opcode == 2:
            index = self.parse(param_insns, index, one, opcode, 1)
        elif opcode == 5:
            index = self.parse(param_insns, index, one, opcode, 1)
        elif opcode == 7:
            index = self.parse(param_insns, index, one, opcode, 0)
        elif opcode == 8:
            index = self.parse(param_insns, index, one, opcode, 1)
        elif opcode in range(0xa, 0xe):
            index = self.parse(param_insns, index, one, opcode, 2)
        elif opcode in range(0xf, 0x12):
            index = self.parse(param_insns, index, one, opcode, 8)
        elif opcode == 0x12:
            index = self.parse(param_insns, index, one, opcode, 0)
        elif opcode == 0x13:
            index = self.parse(param_insns, index, one, opcode, 1)
        elif opcode == 0x1a:
            index = self.parse(param_insns, index, one, opcode, 1)
        elif opcode == 0x1c:
            index = self.parse(param_insns, index, one, opcode, 1)
        elif opcode == 0x1f:
            index = self.parse(param_insns, index, one, opcode, 1)
        elif opcode == 0x22:
            index = self.parse(param_insns, index, one, opcode, 1)
        elif opcode == 0x23:
            index = self.parse(param_insns, index, one, opcode, 4)
        elif opcode == 0x28:
            index = self.parse(param_insns, index, one, opcode, 2)
        elif opcode in range(0x32, 0x38):
            index = self.parse(param_insns, index, one, opcode, 4)
        elif opcode in range(0x38, 0x3e):
            index = self.parse(param_insns, index, one, opcode, 1)
        elif opcode in range(0x44, 0x52):
            index = self.parse(param_insns, index, one, opcode, 3)
        elif opcode in range(0x52, 0x60):
            index = self.parse(param_insns, index, one, opcode, 4)
        elif opcode in range(0x60, 0x6e):
            index = self.parse(param_insns, index, one, opcode, 1)
        elif opcode in range(0x6e, 0x73):
            index = self.parse(param_insns, index, one, opcode, 5)
        # FIX: the invoke-*/range opcodes (0x74..0x78) are listed in OPCODE
        # and parse() implements their format (kind 6), but init() never
        # dispatched them, so they always fell into the error branch below.
        elif opcode in range(0x74, 0x79):
            index = self.parse(param_insns, index, one, opcode, 6)
        elif opcode in range(0x7b, 0x90):
            index = self.parse(param_insns, index, one, opcode, 0)
        elif opcode in range(0x90, 0xb0):
            index = self.parse(param_insns, index, one, opcode, 7)
        elif opcode in range(0xb0, 0xd0):
            index = self.parse(param_insns, index, one, opcode, 0)
        elif opcode in range(0xd0, 0xd8):
            index = self.parse(param_insns, index, one, opcode, 4)
        elif opcode in range(0xd8, 0xe3):
            index = self.parse(param_insns, index, one, opcode, 7)
        else:
            print("ERROR: didn't classified the opcode", opcode)
        # Record the raw code units this instruction consumed.
        for i in range(start, index):
            self.insns.append(param_insns[i])
        return index

    def parse(self, param_insns, index, one, opcode, kind):
        """Decode the operands of one format class (*kind*), build str_ins,
        and return the updated code-unit index."""
        if kind == 0:  # opcode vx, vy
            self.src = (one >> 4) & 0xf
            self.dst = one & 0xf
            self.str_ins = OPCODE[opcode] + " " + str(self.dst) + ", " + str(self.src)
        elif kind == 1:  # opcode vx/vxx, vyy(id)
            self.dst = one
            self.src = int(param_insns[index])
            index += 1
            self.str_ins = OPCODE[opcode] + " " + str(self.dst) + ", " + str(self.src)
        elif kind == 2:  # opcode vx/vxx
            self.dst = one
            self.str_ins = OPCODE[opcode] + " " + str(self.dst)
        elif kind == 3:  # opcode vx, vy, vz
            self.dst = one
            one = int(param_insns[index])
            self.src = one & 0xff
            self.target = (one >> 8) & 0xff
            self.str_ins = OPCODE[opcode] + " " + str(self.dst) + ", " + str(self.src) + ", " + str(self.target)
            index += 1
        elif kind == 4:  # opcode vx, vy, id(lit)
            self.dst = one & 0xf
            self.src = (one >> 4) & 0xf
            self.target = int(param_insns[index])
            index += 1
            self.str_ins = OPCODE[opcode] + " " + str(self.dst) + ", " + str(self.src) + ", " + str(self.target)
        elif kind == 5:  # invoke-kind {vC, vD, vE, vF, vG}, meth@BBBB
            self.src = (one >> 4) & 0xf  # argument count
            self.target = int(param_insns[index])
            param = int(param_insns[index+1])
            index += 2
            self.str_ins = OPCODE[opcode] + " {"
            if self.src > 0:
                self.str_ins += str(param & 0xf)
            if self.src > 1:
                self.str_ins += ", " + str((param >> 4) & 0xf)
            if self.src > 2:
                self.str_ins += ", " + str((param >> 8) & 0xf)
            if self.src > 3:
                self.str_ins += ", " + str((param >> 12) & 0xf)
            if self.src > 4:
                # fifth argument register lives in the high byte of unit 0
                self.str_ins += ", " + str(one & 0xf)
            self.str_ins += "}, " + str(self.target)
        elif kind == 6:  # invoke-kind/range {vCCCC .. vNNNN}, meth@BBBB
            self.src = one  # register count
            self.target = int(param_insns[index])
            param = int(param_insns[index+1])
            index += 2
            self.str_ins = OPCODE[opcode] + " {"
            if self.src > 0:
                self.str_ins += str(param) + " ... " + str(param + self.src - 1)
            self.str_ins += "}, " + str(self.target)
        elif kind == 7:  # opcode vXX, vYY, vZZ
            self.dst = one
            self.src = param_insns[index] & 0xff
            self.target = (param_insns[index] >> 8) & 0xff
            self.str_ins = OPCODE[opcode] + " " + str(self.dst) + ", " + str(self.src) + ", " + str(self.target)
            index += 1
        elif kind == 8:  # opcode vxx
            self.dst = one
            self.str_ins = OPCODE[opcode] + " " + str(self.dst)
        else:
            print("ERROR: didn't classified this kind")
        return index
# notice that: each instruction store as short(little)
# Note: each code unit is stored as a little-endian 16-bit value.
class InstructionSet:
    """Decode a flat list of 16-bit code units into Instruction objects."""

    def __init__(self, insns):
        self.set = []
        cursor, total = 0, len(insns)
        while cursor < total:
            decoded = Instruction()
            cursor = decoded.init(insns, cursor)
            self.set.append(decoded)
            print(decoded.str_ins, hex(decoded.insns[0] & 0xff), len(decoded.insns))

    def printf(self):
        """Print the disassembly text of every decoded instruction."""
        for decoded in self.set:
            print(decoded.str_ins)
| {
"repo_name": "CvvT/DexParse",
"path": "Instruction.py",
"copies": "2",
"size": "11175",
"license": "apache-2.0",
"hash": -876353120084651600,
"line_mean": 54.5970149254,
"line_max": 132,
"alpha_frac": 0.5356599553,
"autogenerated": false,
"ratio": 2.753141167775314,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9276713716786559,
"avg_score": 0.002417481257750962,
"num_lines": 201
} |
__author__ = 'cyankw'
import re
import urllib.request
from multiprocessing import Pool
import time
strtime = time.clock() #计时器启动
def getHtml(url):
    """Fetch *url* and return the raw response body (bytes).

    FIX: the response object was never closed, leaking the underlying
    connection; the response is a context manager, so close it
    deterministically with ``with``.
    """
    with urllib.request.urlopen(url) as page:
        return page.read()
# NOTE(review): 'global' at module level is a no-op — these declarations have
# no effect; `year` and `keywd` are simply bound in the __main__ block below.
global year
global keywd
def getCVE(html):
    """Extract 'CVE-<year>-NNNN' ids from *html*, dropping the leading '/'.

    Relies on the module-level ``year`` set by the __main__ block.
    """
    pattern = re.compile(r'/\bCVE-%s-\b\d{1,4}' % (year))
    raw_matches = re.findall(pattern, html)
    # Every match begins with the '/' from the link path; strip it off.
    return [match[1:] for match in raw_matches]
def getKWD(html):
    """Collect every highlighted occurrence of the search keyword
    (``<em>keywd</em>``) in *html*.

    Relies on the module-level ``keywd`` set by the __main__ block.
    """
    pattern = re.compile(r'/\b<em>\b%s\b</em>\b' % (keywd))
    return re.findall(pattern, html)
if __name__=='__main__':
    i=1
    getCVE2=[]
    print('Vulnerabilities Detector Started')
    year = input('Year ')
    times = int(input('Result Number '))
    poolnum = int(input('Pool Numbers '))
    print('clawler started...')
    p = Pool(poolnum) # process-pool size
    # NOTE(review): the pool is created and closed/joined but no work is ever
    # submitted to it — confirm whether it is still needed.
    rangmax = times
    m=0
    gd=0
    numb = 0
    while numb<times:
        html = getHtml("http://cve.scap.org.cn/cve_list.php?action=cvss&floor=9.5&ceil=10&p=%u"%(i)).decode('utf-8') # fetch one listing page
        i = i + 1
        getCVE2 = list(set(getCVE(html))) # extract CVE ids, dropping duplicates
        #print(getCVE2)
        f=0
        ln=len(getCVE2)
        while f<ln :
            if numb == times:
                break
            getKY = []
            keywd = getCVE2[f] # current CVE id (read by getKWD via the module global)
            html = getHtml("http://www.baidu.com/baidu?wd=%s" % (keywd)).decode('utf-8') # search the keyword on Baidu
            getKY.append((getKWD(html))) # keyword frequency via getKWD()
            # NOTE(review): append() adds ONE list element, so len(getKY) is
            # always 1 and the >=5 / >=3 branches below can never trigger —
            # len(getKWD(html)) (or extend) was probably intended.
            url = "http://www.baidu.com/baidu?wd=%s" %(keywd)  # fixed URL for the report lines
            #print(getKY)
            if len(getKY) >= 5: # grade the hit count
                GG='EXCELLENT!!!! %s'%(url)
                gd = gd + 1
            elif len(getKY) >= 3:
                GG='GOOD!!! %s'%(url)
            elif len(getKY) > 0:
                GG='simple'
            elif len(getKY) == 0:
                GG='-----'
            print(numb,keywd,len(getKY),GG),
            f=f+1
            numb=numb+1
        m = m + 1
    p.close()
    p.join()
    print('------------------------------------')
    print('Vulnerabilities Detect Completed')
    fintime = time.clock() # stop the timer (see NOTE at strtime: removed in Py3.8)
    print('Program running time %fs'%(fintime-strtime))
    print('We detected %u targets through %u websites, %u of them is valuable'%(numb,m,gd)) | {
"repo_name": "cyankw/Python-Crawler-Practice-01-Vulnerabilities-Detector",
"path": "Vulnerabilities Detector.py",
"copies": "1",
"size": "3089",
"license": "apache-2.0",
"hash": 4426454802812552000,
"line_mean": 27.8988764045,
"line_max": 128,
"alpha_frac": 0.5302745393,
"autogenerated": false,
"ratio": 2.3468667255075024,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3377141264807503,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cyhis'
from suds.client import Client
from suds import WebFault
from model.project import Project
class SoapHelper:
    """Thin wrapper around the MantisBT SOAP (MantisConnect) API for tests."""
    def __init__(self, app):
        self.app = app
        # Administrative credentials read from the fixture's config.
        self.username = self.app.config['webadmin']['username']
        self.password = self.app.config['webadmin']['password']
    def can_login(self, username, password):
        """Return True when Mantis accepts *username*/*password*."""
        client = Client("http://localhost/mantisbt-1.2.19/api/soap/mantisconnect.php?wsdl")
        try:
            client.service.mc_login(username, password)
            return True
        except WebFault:
            return False
    def create(self, project):
        """Create *project* on the server; True on success, False on fault."""
        client = Client("http://localhost/mantisbt-1.2.19/api/soap/mantisconnect.php?wsdl")
        try:
            pData = client.factory.create('ProjectData')
            pData.name = project.name
            client.service.mc_project_add(self.username, self.password, pData)
            return True
        except WebFault:
            return False
    def get_projects(self):
        """Return the accessible projects as Project models.

        NOTE(review): returns False (not an empty list) on a SOAP fault —
        callers must handle both shapes.
        """
        client = Client("http://localhost/mantisbt-1.2.19/api/soap/mantisconnect.php?wsdl")
        projects = []
        try:
            projectsData = client.service.mc_projects_get_user_accessible(self.username, self.password)
            for projectData in projectsData:
                project = Project(name=projectData['name'])
                projects.append(project)
            return projects
        except WebFault:
            return False
    def delete(self, project):
        """Delete the first project whose name equals *project*.name.

        NOTE(review): returns None when no name matches — only the matched
        branch returns True.
        """
        client = Client("http://localhost/mantisbt-1.2.19/api/soap/mantisconnect.php?wsdl")
        try:
            projectsData = client.service.mc_projects_get_user_accessible(self.username, self.password)
            for projectData in projectsData:
                if projectData['name'] == project.name:
                    client.service.mc_project_delete(self.username, self.password, projectData['id'])
                    return True
        except WebFault:
            return False | {
"repo_name": "galaktika81/python_training_mantis",
"path": "fixture/soap.py",
"copies": "1",
"size": "1991",
"license": "apache-2.0",
"hash": -2530711195864215000,
"line_mean": 34.5714285714,
"line_max": 103,
"alpha_frac": 0.611752888,
"autogenerated": false,
"ratio": 4.08829568788501,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.520004857588501,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cyrbuzz'
from base import (ScrollArea, QLabel, QFrame, QVBoxLayout, QPushButton, QHBoxLayout, QTableWidget,
QAbstractItemView)
class DownloadFrame(ScrollArea):
    """"My downloads" page: a header with the storage-folder picker and a
    three-column table (title / artist / length) of downloaded songs."""
    def __init__(self, parent=None):
        super().__init__()
        self.parent = parent
        self.setObjectName('downloadMusic')
        # Page-specific stylesheet lives next to the other QSS assets.
        with open('QSS/downloadFrame.qss', 'r', encoding="utf-8") as f:
            self.setStyleSheet(f.read())
        self.mainLayout = QVBoxLayout(self)
        self.setHeader()
        self.setMusicTable()
    def setHeader(self):
        """Build the top bar: current storage folder label + select button."""
        # self.titleLabel = QLabel("我的下载")
        self.spaceLine = QFrame(self)
        self.spaceLine.setObjectName("spaceLine")
        self.spaceLine.setFrameShape(QFrame.HLine)
        self.spaceLine.setFrameShadow(QFrame.Plain)
        self.spaceLine.setLineWidth(2)
        self.currentStorageFolderLabel = QLabel("当前存储目录: ")
        self.currentStorageFolder = QLabel()
        self.selectButton = QPushButton("选择目录")
        self.selectButton.setObjectName('selectButton')
        self.topShowLayout = QHBoxLayout()
        self.topShowLayout.addSpacing(20)
        # self.topShowLayout.addWidget(self.titleLabel)
        self.topShowLayout.addWidget(self.currentStorageFolderLabel)
        self.topShowLayout.addWidget(self.currentStorageFolder)
        self.topShowLayout.addWidget(self.selectButton)
        self.topShowLayout.addStretch(1)
        self.mainLayout.addLayout(self.topShowLayout)
        self.mainLayout.addWidget(self.spaceLine)
    def setMusicTable(self):
        """Build the read-only, row-select song table."""
        self.singsTable = QTableWidget()
        self.singsTable.setObjectName('singsTable')
        self.singsTable.setMinimumWidth(self.width())
        self.singsTable.setColumnCount(3)
        self.singsTable.setHorizontalHeaderLabels(['音乐标题', '歌手', '时长'])
        # NOTE(review): width()/3*1.25 is a float — confirm the Qt binding in
        # use accepts non-int values for setColumnWidth.
        self.singsTable.setColumnWidth(0, self.width()/3*1.25)
        self.singsTable.setColumnWidth(1, self.width()/3*1.25)
        self.singsTable.setColumnWidth(2, self.width()/3*0.5)
        self.singsTable.horizontalHeader().setStretchLastSection(True)
        self.singsTable.verticalHeader().setVisible(False)
        self.singsTable.setShowGrid(False)
        self.singsTable.setAlternatingRowColors(True)
        self.singsTable.setEditTriggers(QAbstractItemView.NoEditTriggers)
        self.singsTable.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.mainLayout.addWidget(self.singsTable) | {
"repo_name": "HuberTRoy/MusicPlayer",
"path": "MusicPlayer/widgets/downloadFrame.py",
"copies": "1",
"size": "2503",
"license": "mit",
"hash": 7190364056942079000,
"line_mean": 36.2727272727,
"line_max": 98,
"alpha_frac": 0.6823912159,
"autogenerated": false,
"ratio": 3.548340548340548,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9711616367852425,
"avg_score": 0.0038230792776247323,
"num_lines": 66
} |
__author__ = 'cyrbuzz'
import addition
from base import QObject, QTableWidgetItem
transTime = addition.itv2time
class ConfigRecommendFrame(QObject):
    """Controller wiring data and events for the recommend-songs table."""

    def __init__(self, parent):
        super().__init__()
        self.recommendFrame = parent
        self.musicList = []
        self.bindConnect()

    def setSongs(self, musicInfo):
        """Fill the table from namedtuple-like song records and cache each
        record (as a dict) in self.musicList for playback."""
        table = self.recommendFrame.singsTable
        table.setRowCount(len(musicInfo))
        for row, song in enumerate(musicInfo):
            self.musicList.append(song._asdict())
            table.setItem(row, 0, QTableWidgetItem(song.name))
            table.setItem(row, 1, QTableWidgetItem(song.author))
            table.setItem(row, 2, QTableWidgetItem(transTime(song.time/1000)))

    def bindConnect(self):
        """Hook the double-click signal up to playback."""
        self.recommendFrame.singsTable.itemDoubleClicked.connect(self.itemDoubleClickedEvent)

    def itemDoubleClickedEvent(self):
        """Play the song of the row the user double-clicked."""
        row = self.recommendFrame.singsTable.currentRow()
        self.recommendFrame.parent.playWidgets.setPlayerAndPlayList(self.musicList[row])
| {
"repo_name": "HuberTRoy/MusicPlayer",
"path": "MusicPlayer/features/configRecommendFrameFeatures.py",
"copies": "1",
"size": "1143",
"license": "mit",
"hash": 2782863357357006300,
"line_mean": 29.8918918919,
"line_max": 105,
"alpha_frac": 0.7034120735,
"autogenerated": false,
"ratio": 3.7475409836065574,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49509530571065574,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cyrbuzz'
import os
import glob
import pickle
import os.path
try:
import eyed3
except ImportError:
print('eyed3没有成功加载或安装,请不要使用本地音乐功能!')
from base import QFileDialog, QObject, QTableWidgetItem, checkFolder
from addition import itv2time
def getAllFolder(topFolder):
    """Recursively collect every sub-directory under *topFolder*.

    The top folder itself is not included; children of one level are
    appended before their own sub-trees are visited.
    """
    collected = []

    def _descend(current):
        children = []
        for entry in os.listdir(current):
            full = os.path.join(current, entry)
            if not os.path.isfile(full):
                children.append(full)
        if not children:
            return
        collected.extend(children)
        for child in children:
            _descend(child)

    _descend(topFolder)
    return collected
class ConfigNative(QObject):
    """Controller for the "local music" page: folder selection, scanning
    .mp3 files with eyed3, and populating the song table."""

    loadLocalFolder = 'cookies/native/local.cks'
    allCookiesFolder = [loadLocalFolder]

    def __init__(self, native):
        super(ConfigNative, self).__init__()
        self.native = native
        self.musicList = []   # dicts the player consumes on double-click
        self.folder = []      # user-selected root folders
        self.bindConnect()
        self.loadCookies()

    def bindConnect(self):
        """Hook up the UI signals."""
        self.native.selectButton.clicked.connect(self.selectFolder)
        self.native.singsTable.itemDoubleClicked.connect(self.itemDoubleClickedEvent)

    def selectFolder(self):
        """Let the user add one root folder, then rescan."""
        dialog = QFileDialog()
        chosen = dialog.getExistingDirectory()
        if chosen:
            self.folder.append(chosen)
            self.loadMusic()

    def loadMusic(self):
        """Scan every selected folder (recursively) for .mp3 files and fill
        the table with title / artist / length.

        NOTE(review): the table and self.musicList are reset inside the
        folder loop, so with several folders only the last one stays
        visible — confirm whether accumulating was intended.
        """
        for folder in self.folder:
            mediaFiles = glob.glob(folder + '/*.mp3')
            for sub in getAllFolder(folder):
                mediaFiles.extend(glob.glob(sub + '/*.mp3'))
            length = len(mediaFiles)
            self.native.singsTable.clearContents()
            self.native.singsTable.setRowCount(length)
            self.musicList = []
            for row, path in enumerate(mediaFiles):
                music = eyed3.load(path)
                if not music:
                    # FIX: was self.singsTable — this controller has no such
                    # attribute (AttributeError); the table lives on native.
                    self.native.singsTable.removeRow(row)
                    continue
                try:
                    name = music.tag.title
                    author = music.tag.artist
                    if not name:
                        # fall back to the file name relative to the folder
                        name = path.replace(folder, '')[1:][:-4]
                    if not author:
                        author = ''
                except Exception:
                    try:
                        # TODO: handle several root folders properly.
                        name = path.replace(folder, '')[1:][:-4]
                    except Exception:
                        name = path
                    # FIX: author was only assigned in the inner except, so a
                    # failure in the outer try could leave it unbound below.
                    author = ''
                try:
                    time = itv2time(music.info.time_secs)
                except Exception:
                    time = '00:00'
                self.musicList.append({'name': name, 'author': author, 'time': time, 'url': path, 'music_img': 'None'})
                self.native.singsTable.setItem(row, 0, QTableWidgetItem(name))
                self.native.singsTable.setItem(row, 1, QTableWidgetItem(author))
                self.native.singsTable.setItem(row, 2, QTableWidgetItem(time))

    # Event handlers.
    def itemDoubleClickedEvent(self):
        """Play the song of the double-clicked row."""
        currentRow = self.native.singsTable.currentRow()
        data = self.musicList[currentRow]
        self.native.parent.playWidgets.setPlayerAndPlayList(data)

    @checkFolder(allCookiesFolder)
    def saveCookies(self):
        """Persist the selected folders."""
        with open(self.loadLocalFolder, 'wb') as f:
            pickle.dump(self.folder, f)

    @checkFolder(allCookiesFolder)
    def loadCookies(self):
        """Restore the selected folders and rescan."""
        with open(self.loadLocalFolder, 'rb') as f:
            self.folder = pickle.load(f)
            self.loadMusic()
| {
"repo_name": "HuberTRoy/MusicPlayer",
"path": "MusicPlayer/features/configNativeFeatures.py",
"copies": "1",
"size": "3832",
"license": "mit",
"hash": 8029725500049695000,
"line_mean": 29.0158730159,
"line_max": 127,
"alpha_frac": 0.5446853517,
"autogenerated": false,
"ratio": 4.002116402116402,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5046801753816402,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cyrbuzz'
import os
import re
import glob
import pickle
import os.path
import logging
try:
import eyed3
except ImportError:
print('eyed3没有成功加载或安装,当再次打开时下载的音乐会加载不到!')
from apiRequestsBase import HttpRequest
from asyncBase import aAsync, toTask
from base import QFileDialog, QObject, QTableWidgetItem, checkFolder
from addition import itv2time
import logger
# NOTE(review): the module imported above is immediately shadowed by the
# Logger instance below — confirm the bare `import logger` is still needed.
logger = logging.getLogger(__name__)
myRequests = HttpRequest()  # shared HTTP client used by downloadSong()
def getAllFolder(topFolder):
    """Recursively collect every sub-directory under *topFolder*.

    The top folder itself is excluded; each level's children are appended
    before their own sub-trees are walked.
    """
    collected = []

    def _descend(current):
        children = []
        for entry in os.listdir(current):
            full = os.path.join(current, entry)
            if not os.path.isfile(full):
                children.append(full)
        if not children:
            return
        collected.extend(children)
        for child in children:
            _descend(child)

    _descend(topFolder)
    return collected
def replace_forbidden_sym(string):
    """Replace each filename-forbidden character (\\ / : * ? " < > |)
    with a single space so the result is a safe file name."""
    forbidden = r'[\\/:*?"<>|]{1}'
    return re.sub(forbidden, ' ', string)
class ConfigDownloadFrame(QObject):
    """Controller for the "my downloads" page: manages the download folder,
    performs asynchronous downloads, and mirrors finished songs into the
    table widget."""

    myDownloadFrameCookiesFolder = 'cookies/downloadInfo/downloadFolder.cks'
    allCookiesFolder = [myDownloadFrameCookiesFolder]

    def __init__(self, downloadFrame):
        super(ConfigDownloadFrame, self).__init__()
        self.downloadFrame = downloadFrame
        self.showTable = self.downloadFrame.singsTable
        self.musicList = []   # dicts the player consumes on double-click
        self.folder = []      # folders the user has picked so far
        self.myDownloadFolder = os.path.join(os.getcwd(), 'downloads')
        self._setDownloadFolder(self.myDownloadFolder)
        self.bindConnect()
        self.loadCookies()

    def bindConnect(self):
        """Hook up the UI signals."""
        self.downloadFrame.selectButton.clicked.connect(self.selectFolder)
        self.downloadFrame.singsTable.itemDoubleClicked.connect(self.itemDoubleClickedEvent)

    def getDownloadSignal(self):
        # Connect the download requests of the search/detail pages to us.
        window = self.downloadFrame.parent
        try:
            window.searchArea.config.download.connect(self.downloadSong)
            window.detailSings.config.download.connect(self.downloadSong)
        except Exception as e:
            logger.error("下载时遇到未知错误", exc_info=True)

    def _setDownloadFolder(self, folderName):
        """Switch the active download directory and refresh the table."""
        logger.info("下载目标变更{}".format(folderName))
        self.fromPathLoadSong(folderName)
        self.myDownloadFolder = folderName
        self.downloadFrame.currentStorageFolder.setText(folderName)

    @toTask
    def downloadSong(self, musicInfo):
        """Asynchronously download the song described by *musicInfo* and
        add it to the table when finished."""
        logger.info("正在下载的音乐的信息: {}".format(musicInfo))
        url = musicInfo.get('url')
        # FIX: guard the match before .group(0) — the original called
        # .group(0) unconditionally, so a URL without a file extension raised
        # AttributeError instead of taking the fallback-name branch.
        match = re.search(r'.*\.[a-zA-Z0-9]+', url[url.rfind('/')+1:])
        if match:
            allMusicName = match.group(0)
            musicSuffix = allMusicName[allMusicName.rfind('.')+1:]
            musicName = '{name}.{suf}'.format(name=musicInfo.get('name') + ' - ' + musicInfo.get('author'), suf=musicSuffix)
        else:
            # TODO: derive a unique name (e.g. an MD5 of the URL).
            musicName = "random_name.mp3"
        musicName = replace_forbidden_sym(musicName)
        self.downloadFrame.parent.systemTray.showMessage("~~~", '{musicName} 加入下载队列'.format(musicName=musicName))
        # TODO: stream the body instead of buffering it all in memory.
        future = aAsync(myRequests.httpRequest, url, 'GET')
        data = yield from future
        localPath = '{myDownloadFolder}/{musicName}'.format(myDownloadFolder=self.myDownloadFolder, musicName=musicName)
        with open(localPath, 'wb') as f:
            f.write(data.content)
        musicInfo['url'] = localPath
        # Notify from the system tray.
        self.downloadFrame.parent.systemTray.showMessage("~~~", '{musicName} 下载完成'.format(musicName=musicName))
        # Record and display the finished download.
        self.musicList.append(musicInfo)
        self.updateDownloadShowTable(musicInfo)

    def updateDownloadShowTable(self, musicInfo):
        """Append one row (name / author / time) for *musicInfo*."""
        showInfo = [musicInfo.get("name"), musicInfo.get("author"), musicInfo.get("time")]
        rowCount = self.showTable.rowCount()
        self.showTable.setRowCount(rowCount + 1)
        for col in range(3):
            self.showTable.setItem(rowCount, col, QTableWidgetItem(showInfo[col]))

    def fromPathLoadSong(self, selectFolder):
        """(Re)populate the table with every .mp3 under *selectFolder*;
        create the folder when it does not exist yet."""
        if not os.path.isdir(selectFolder):
            os.mkdir(selectFolder)
            return
        mediaFiles = glob.glob(selectFolder + '/*.mp3')
        for sub in getAllFolder(selectFolder):
            mediaFiles.extend(glob.glob(sub + '/*.mp3'))
        length = len(mediaFiles)
        self.downloadFrame.singsTable.clearContents()
        self.downloadFrame.singsTable.setRowCount(length)
        self.musicList = []
        for row, path in enumerate(mediaFiles):
            music = eyed3.load(path)
            if not music:
                # FIX: was self.singsTable — this controller has no such
                # attribute; the table lives on downloadFrame.
                self.downloadFrame.singsTable.removeRow(row)
                continue
            try:
                name = music.tag.title
                author = music.tag.artist
                if not name:
                    name = path.replace(selectFolder, '')[1:][:-4]
                if not author:
                    author = ''
            except Exception:
                try:
                    # TODO: handle several root folders properly.
                    name = path.replace(selectFolder, '')[1:][:-4]
                except Exception:
                    name = path
                # FIX: author was only assigned in the inner except, so a
                # failure in the outer try could leave it unbound below.
                author = ''
            try:
                time = itv2time(music.info.time_secs)
            except Exception:
                time = '00:00'
            self.musicList.append({'name': name, 'author': author, 'time': time, 'url': path, 'music_img': 'None'})
            self.downloadFrame.singsTable.setItem(row, 0, QTableWidgetItem(name))
            self.downloadFrame.singsTable.setItem(row, 1, QTableWidgetItem(author))
            self.downloadFrame.singsTable.setItem(row, 2, QTableWidgetItem(time))

    def selectFolder(self):
        """Let the user pick a new download directory."""
        dialog = QFileDialog()
        chosen = dialog.getExistingDirectory()
        if chosen:
            self.folder.append(chosen)
            self._setDownloadFolder(chosen)
            self.fromPathLoadSong(chosen)

    @checkFolder(allCookiesFolder)
    def saveCookies(self):
        """Persist the current download folder."""
        with open(self.myDownloadFrameCookiesFolder, 'wb') as f:
            pickle.dump(self.myDownloadFolder, f)

    @checkFolder(allCookiesFolder)
    def loadCookies(self):
        """Restore the download folder and refresh the table."""
        with open(self.myDownloadFrameCookiesFolder, 'rb') as f:
            self.myDownloadFolder = pickle.load(f)
        self._setDownloadFolder(self.myDownloadFolder)

    # Event handlers.
    def itemDoubleClickedEvent(self):
        """Play the song of the double-clicked row."""
        currentRow = self.downloadFrame.singsTable.currentRow()
        data = self.musicList[currentRow]
self.downloadFrame.parent.playWidgets.setPlayerAndPlayList(data) | {
"repo_name": "HuberTRoy/MusicPlayer",
"path": "MusicPlayer/features/configDownloadFrameFeatures.py",
"copies": "1",
"size": "7055",
"license": "mit",
"hash": -2431072981149056500,
"line_mean": 31.75,
"line_max": 127,
"alpha_frac": 0.6148876817,
"autogenerated": false,
"ratio": 3.5979926043317487,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9698294776333933,
"avg_score": 0.0029171019395631613,
"num_lines": 208
} |
__author__ = 'cyrbuzz'
"""
提供用于将函数包装成异步的基本功能。
"""
import asyncio
def aAsync(func, *args, **kwargs):
    """Wrap a blocking call as an awaitable future.

    run_in_executor() does not forward **kwargs, so the call is packed
    into a shim that receives (args, kwargs) as two positional values
    and unpacks them itself.

    Example:
        future = aAsync(requests.get, 'http://www.xxx.com', headers=headers)
        data = yield from future
    """
    def _invoke(packed_args, packed_kwargs):
        return func(*packed_args, **packed_kwargs)

    loop = asyncio.get_event_loop()
    return loop.run_in_executor(None, _invoke, args, kwargs)
def toTask(func):
    """Decorator: schedule *func* (a coroutine/generator function) as an
    asyncio Task on the current event loop whenever it is called.

    The wrapped call returns the Task immediately; the event loop runs
    the I/O parts concurrently.
    """
    def scheduled(*args, **kwargs):
        loop = asyncio.get_event_loop()
        return loop.create_task(func(*args, **kwargs))
    return scheduled
def toTaskWCb(func):
    """Like toTask, but parameterised by a done-callback first:
    ``toTaskWCb(f)(cb)(*args)`` returns a Task with *cb* attached."""
    def withCallback(callback):
        def scheduled(*args, **kwargs):
            loop = asyncio.get_event_loop()
            task = loop.create_task(func(*args, **kwargs))
            task.add_done_callback(callback)
            return task
        return scheduled
    return withCallback
if __name__ == '__main__':
    # Self-documentation when the module is run directly.
    help(aAsync)
    print('\n')
    help(toTask)
"repo_name": "HuberTRoy/MusicPlayer",
"path": "MusicPlayer/features/asyncBase.py",
"copies": "1",
"size": "2150",
"license": "mit",
"hash": 4915872713588908000,
"line_mean": 19.6333333333,
"line_max": 80,
"alpha_frac": 0.5350215517,
"autogenerated": false,
"ratio": 2.662840746054519,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3697862297754519,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cyrbuzz'
"""制作登陆区盒子。"""
from base import QDialog, QFrame, HBoxLayout, HStretchBox, QLabel, QLineEdit, QPushButton, Qt, VBoxLayout, RequestThread
class InputLine(QLineEdit):
    """Single-line input with an optional fixed size and placeholder.

    width/height of 0 mean "leave unconstrained"; an empty/None
    placeholderText sets none.
    """
    def __init__(self, parent=None, width=0, height=0, placeholderText=None):
        # FIX: super(QLineEdit, self) skipped QLineEdit itself in the MRO;
        # the sibling widgets in this file all use super(OwnClass, self).
        super(InputLine, self).__init__()
        self.parent = parent
        if width:
            # setFixedWidth == setMaximumWidth + setMinimumWidth
            self.setFixedWidth(width)
        if height:
            self.setFixedHeight(height)
        if placeholderText:
            self.setPlaceholderText(placeholderText)
class Header(QFrame):
    """Dialog title bar: a bold title on the left, a close ('×') button on
    the right."""
    myStyle = """
    QFrame {background: #2D2D2D;}
    QLabel {
        margin-left: 8px;
        color: white;
        font-weight: bold;
        font-size: 15px;
    }
    QPushButton {
        border: none;
        font: bold;
        font-size: 13px;
        color: #7C7C7C;
        margin-right: 8px;
    }
    QPushButton:hover{
        color: #DCDDE4;
    }
    """
    def __init__(self, title:str, parent=None):
        super(Header, self).__init__()
        # FIX: the parent argument was discarded (self.parent = None);
        # store it like every sibling widget in this file does.
        self.parent = parent
        self.setStyleSheet(self.myStyle)
        self.mainLayout = HBoxLayout(self)
        self.title = QLabel(title)
        self.mainLayout.addWidget(self.title)
        self.mainLayout.addStretch(1)
        self.closeButton = QPushButton('×')
        self.mainLayout.addWidget(self.closeButton)

    def connectCloseButton(self, functionName):
        """Bind *functionName* to the close button."""
        self.closeButton.clicked.connect(functionName)
class LoginBox(QDialog):
    """Frameless login dialog; delegates to the currently active login
    channel (today only the phone/e-mail frame exists)."""

    def __init__(self, parent=None):
        super(LoginBox, self).__init__()
        self.parent = parent
        self.setWindowFlags(Qt.FramelessWindowHint)
        self.setWindowTitle('登陆')
        self.setObjectName('LoginBox')
        self.resize(520, 300)
        # Index of the active channel; 0 == phone/e-mail. Kept as an int so
        # more channels can be added later.
        self.currentFrame = 0
        self.mainLayout = VBoxLayout(self)
        self.phoneAndEMailFrame = PhoneAndEMailFrame(self)
        self.mainLayout.addWidget(self.phoneAndEMailFrame)

    def _activeFrame(self):
        # Only channel 0 is implemented; any other index has no frame yet.
        return self.phoneAndEMailFrame if not self.currentFrame else None

    def setWarningAndShowIt(self, warningStr):
        """Show *warningStr* in the active frame's warning area."""
        frame = self._activeFrame()
        if frame is not None:
            frame.setWarningAndShowIt(warningStr)

    def connectLogin(self, functionName):
        """Bind the active frame's login button to *functionName*."""
        frame = self._activeFrame()
        if frame is not None:
            frame.connectLogin(functionName)

    def checkAndGetLoginInformation(self):
        """Validate and return the active frame's credentials."""
        frame = self._activeFrame()
        if frame is not None:
            return frame.checkAndGetLoginInformation()
class PhoneAndEMailFrame(QFrame):
    """Username/password login form; the default channel shown by LoginBox."""

    def __init__(self, parent=None):
        super(PhoneAndEMailFrame, self).__init__()
        # parent is expected to be the owning LoginBox (a QDialog) — its
        # accept() is wired to the header's close button below.
        self.parent = parent
        self.resize(520, 300)
        with open('QSS/phoneAndEMailFrame.qss', 'r') as f:
            self.setStyleSheet(f.read())
        self.mainLayout = VBoxLayout(self)
        self.header = Header("用户名", self)
        self.header.setMinimumHeight(40)
        self.header.connectCloseButton(self.parent.accept)
        self.mainLayout.addWidget(self.header)
        self.mainLayout.addStretch(1)
        # Username and password inputs, each centered via HStretchBox.
        self.usernameLine = InputLine(self, 220, 32, '请输入用户名')
        self.usernameLine.setObjectName('usernameLine')
        self.usernameCenterBox = HStretchBox(self.mainLayout, self.usernameLine)
        self.mainLayout.addSpacing(10)
        self.passwordLine = InputLine(self, 220, 32, '请输入密码')
        self.passwordLine.setObjectName('passwordLine')
        self.passwordCenterBox = HStretchBox(self.mainLayout, self.passwordLine)
        self.passwordLine.setEchoMode(QLineEdit.Password)
        # Warning row (icon + text); hidden until validation fails.
        self.warningIconLabel = QLabel()
        self.warningIconLabel.setObjectName('warningIconLabel')
        self.warningIconLabel.setMaximumSize(14, 14)
        self.warningIconLabel.setMinimumSize(14, 14)
        self.warningIconLabel.hide()
        self.warningLabel = QLabel("请输入用户名")
        self.warningLabel.hide()
        self.warningLabel.setObjectName('warningLabel')
        self.warningCenterBox = HStretchBox(self.mainLayout, self.warningIconLabel, self.warningLabel,
                                            behindStretch=2)
        self.mainLayout.addSpacing(30)
        self.enterLoginButton = QPushButton("登 录")
        self.enterLoginButton.setObjectName("enterButton")
        self.enterLoginButton.setMaximumSize(217, 27)
        self.enterLoginButton.setMinimumSize(217, 27)
        self.enterLoginCenterBox = HStretchBox(self.mainLayout, self.enterLoginButton)
        self.mainLayout.addSpacing(30)
        self.mainLayout.addStretch(1)

    def checkAndGetLoginInformation(self):
        """Return ``(username, password)`` when both fields are filled.

        Otherwise show the warning row (with an appropriate message) and
        return False.
        """
        username = self.usernameLine.text()
        password = self.passwordLine.text()
        if not username or not password:
            self.warningIconLabel.show()
            if not username:
                self.warningLabel.setText('请输入用户名')
                self.warningLabel.show()
                return False
            if not password:
                self.warningLabel.setText('请输入密码')
                self.warningLabel.show()
                return False
        # Both fields present: clear any previous warning and hand back the pair.
        self.warningIconLabel.hide()
        self.warningLabel.hide()
        return username, password

    def setWarningAndShowIt(self, warningStr):
        """Display *warningStr* in the warning row (text + icon)."""
        self.warningLabel.setText(warningStr)
        self.warningLabel.show()
        self.warningIconLabel.show()

    def connectLogin(self, functionName):
        """Connect the login button's click to *functionName*."""
        self.enterLoginButton.clicked.connect(functionName)
if __name__ == '__main__':
    # Manual smoke test: run this module directly to preview the login dialog.
    import os
    # Resources (QSS/...) are addressed relative to the package root.
    os.chdir('..')
    # NOTE(review): QApplication is not imported anywhere in this module (the
    # ``base`` import list above does not include it), so running this block
    # raises NameError — confirm which module should provide QApplication.
    app = QApplication([])
    main = LoginBox()
    main.show()
    app.exec_()
"repo_name": "HuberTRoy/MusicPlayer",
"path": "MusicPlayer/widgets/loginFrames.py",
"copies": "1",
"size": "5711",
"license": "mit",
"hash": 4464577960630639600,
"line_mean": 27.1055276382,
"line_max": 120,
"alpha_frac": 0.6348354793,
"autogenerated": false,
"ratio": 3.840659340659341,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9937374588101966,
"avg_score": 0.007624046371475085,
"num_lines": 199
} |
__author__ = 'cyrbuzz'
# 方便扩展,抽象成基类。
from base import (QCursor, QFrame, Qt, QTabWidget, QTextEdit, QLabel, QIcon, QPushButton, QHBoxLayout, QVBoxLayout,
QGridLayout, QTableWidgetItem, PicLabel, ScrollArea, TableWidget, VBoxLayout, HBoxLayout, pyqtSignal)
import addition
class SingsFrameBase(ScrollArea):
    """Scrollable grid that holds the full set of playlist tiles."""

    def __init__(self, parent=None):
        super(SingsFrameBase, self).__init__()
        self.parent = parent
        # Shortcut for addition.itv2time (presumably formats an interval as a
        # time string — confirm against callers).
        self.transTime = addition.itv2time
        self.setObjectName("allSingsArea")
        # Stylesheet contains non-ASCII text, hence the explicit encoding.
        with open('QSS/singsFrameBase.qss', 'r', encoding='utf-8') as f:
            style = f.read()
        self.setStyleSheet(style)
        # Tight grid for the tiles: no vertical gap, 10px horizontal gap.
        layout = QGridLayout(self.frame)
        layout.setSpacing(0)
        layout.setHorizontalSpacing(10)
        layout.setContentsMargins(0, 0, 0, 0)
        self.mainLayout = layout
class SingsSearchResultFrameBase(QFrame):
    """Search-result frame: a 3-column song table plus a hidden 'no results' label."""

    def __init__(self, parent):
        super(SingsSearchResultFrameBase, self).__init__()
        self.parent = parent
        self.singsFrameLayout = VBoxLayout(self)
        # Shown (instead of the table) when a search returns nothing.
        self.noSingsContentsLabel = QLabel(self)
        self.noSingsContentsLabel.setMaximumHeight(60)
        self.noSingsContentsLabel.setObjectName("noSingsLable")
        self.noSingsContentsLabel.hide()
        self.singsResultTable = TableWidget(3, ['音乐标题', '歌手', '时长'])
        self.singsResultTable.setObjectName('singsTable')
        width = self.parent.width()
        self.singsResultTable.setMinimumWidth(width)
        # Title/artist each take ~42% of the width; duration takes the rest.
        columnWidths = [width / 3 * 1.25, width / 3 * 1.25, width / 3 * 0.5]
        self.singsResultTable.setColumnWidths(dict(enumerate(columnWidths)))
        self.singsFrameLayout.addWidget(self.singsResultTable, Qt.AlignTop | Qt.AlignCenter)
        # Center the empty-result label horizontally.
        self.centerLabelLayout = HBoxLayout()
        self.centerLabelLayout.addStretch(1)
        self.centerLabelLayout.addWidget(self.noSingsContentsLabel)
        self.centerLabelLayout.addStretch(1)
        self.singsFrameLayout.addLayout(self.centerLabelLayout)
# 歌单详情页。
class DetailSings(ScrollArea):
    """Playlist detail page: cover, title, author, description and a song table."""

    def __init__(self, parent=None):
        # NOTE(review): the original called ``super(...).__init__(self)``,
        # passing the widget as its own parent; sibling ScrollArea subclasses
        # (e.g. SingsFrameBase) call the base initializer with no arguments.
        super(DetailSings, self).__init__()
        self.parent = parent
        self.setObjectName('detailSings')
        with open('QSS/detailSings.qss', 'r', encoding='utf-8') as f:
            self.setStyleSheet(f.read())
        self.setLabels()
        self.setButtons()
        self.setTabs()
        self.setLayouts()

    # -- widget construction -------------------------------------------------
    def setLabels(self):
        """Create the cover picture, title, author and description widgets."""
        self.picLabel = PicLabel(width=200, height=200)
        self.picLabel.setObjectName('picLabel')
        self.titleLabel = QLabel(self.frame)
        self.titleLabel.setObjectName('titleLabel')
        self.titleLabel.setWordWrap(True)
        self.titleLabel.setMaximumHeight(40)
        self.authorPic = QLabel(self.frame)
        self.authorName = QLabel(self.frame)
        self.authorName.setObjectName('authorName')
        self.authorName.setMaximumHeight(28)
        # Read-only description box with a fixed 100px height.
        self.descriptionText = QTextEdit(self.frame)
        self.descriptionText.setReadOnly(True)
        self.descriptionText.setObjectName('descriptionText')
        self.descriptionText.setMaximumWidth(450)
        self.descriptionText.setMaximumHeight(100)
        self.descriptionText.setMinimumHeight(100)

    def setButtons(self):
        """Create the badge, description and play-all buttons."""
        self.showButton = QPushButton("歌单")
        self.showButton.setObjectName('showButton')
        self.showButton.setMaximumSize(36, 20)
        self.descriptionButton = QPushButton(" 简介 :")
        self.descriptionButton.setObjectName('descriptionButton')
        self.descriptionButton.setMaximumSize(36, 36)
        self.playAllButton = QPushButton("全部播放")
        self.playAllButton.setIcon(QIcon('resource/playAll.png'))
        self.playAllButton.setObjectName('playAllButton')
        self.playAllButton.setMaximumSize(90, 24)

    def setTabs(self):
        """Create the tab widget holding the 3-column song table."""
        self.contentsTab = QTabWidget(self.frame)
        self.singsTable = TableWidget(3, ['音乐标题', '歌手', '时长'])
        self.singsTable.setObjectName('singsTable')
        self.singsTable.setMinimumWidth(self.width())
        self.singsTable.setColumnWidths({i: j for i, j in zip(range(3),
                                         [self.width() / 3 * 1.25, self.width() / 3 * 1.25, self.width() / 3 * 0.5])})
        self.contentsTab.addTab(self.singsTable, "歌曲列表")

    def setLayouts(self):
        """Assemble cover + metadata on top and the song tabs beneath."""
        self.mainLayout = VBoxLayout()
        self.topLayout = HBoxLayout()
        self.descriptionLayout = VBoxLayout()
        # Badge + title on one row.
        self.titleLayout = HBoxLayout()
        self.titleLayout.addWidget(self.showButton)
        self.titleLayout.addSpacing(5)
        self.titleLayout.addWidget(self.titleLabel)
        # Author avatar + name on the next row.
        self.authorLayout = HBoxLayout()
        self.authorLayout.addWidget(self.authorPic)
        self.authorLayout.addWidget(self.authorName)
        self.authorLayout.addStretch(1)
        # "简介" button + description text.
        self.descriptLayout = HBoxLayout()
        self.descriptLayout.addWidget(self.descriptionButton)
        self.descriptLayout.addWidget(self.descriptionText)
        self.descriptionLayout.addSpacing(5)
        self.descriptionLayout.addLayout(self.titleLayout)
        self.descriptionLayout.addLayout(self.authorLayout)
        self.descriptionLayout.addSpacing(5)
        self.descriptionLayout.addWidget(self.playAllButton)
        self.descriptionLayout.addSpacing(10)
        self.descriptionLayout.addLayout(self.descriptLayout)
        self.topLayout.addWidget(self.picLabel)
        self.topLayout.addSpacing(18)
        self.topLayout.addLayout(self.descriptionLayout)
        self.mainLayout.addLayout(self.topLayout)
        self.mainLayout.addWidget(self.contentsTab)
        self.frame.setLayout(self.mainLayout)
class OneSing(QFrame):
    """Playlist cover tile; emits ``clicked(ids, picName)`` on a completed click.

    Created in large numbers inside the playlist grid.
    """
    # NOTE(review): the original declared ``__solts__`` (a typo for
    # ``__slots__``), which is just an inert class attribute with no memory
    # effect. It is removed rather than corrected: a real ``__slots__`` here
    # would create a 'parent' slot descriptor shadowing QFrame.parent().

    clicked = pyqtSignal(str, str)

    def __init__(self, row, column, ids=None, parent=None, picName=None):
        super(OneSing, self).__init__()
        self.setObjectName('oneSing')
        # Position of this tile inside the grid.
        self.row = row
        self.column = column
        # Playlist id, always stringified (note: str(None) == 'None').
        self.ids = str(ids)
        # Cache name for the large cover image; may be None.
        self.picName = picName
        self.setMinimumSize(180, 235)
        self.picLabel = QLabel()
        self.picLabel.setObjectName('picLabel')
        self.picLabel.setMinimumSize(180, 180)
        self.picLabel.setMaximumSize(180, 180)
        self.nameLabel = QLabel()
        self.nameLabel.setMaximumWidth(180)
        self.nameLabel.setWordWrap(True)
        self.mainLayout = QVBoxLayout(self)
        self.mainLayout.addWidget(self.picLabel)
        self.mainLayout.addWidget(self.nameLabel)

    def setStyleSheets(self, styleSheet=None):
        """Apply *styleSheet* when one is given; no-op otherwise."""
        if styleSheet:
            self.setStyleSheet(styleSheet)

    def mousePressEvent(self, event):
        # Remember where the press happened so release can tell a real click
        # from a press-drag-release.
        self.mousePos = QCursor.pos()

    def mouseReleaseEvent(self, event):
        # Only a release at the same point as the press counts as a click.
        if QCursor.pos() != self.mousePos:
            return
        # NOTE(review): picName may be None while the signal is declared
        # pyqtSignal(str, str) — confirm emitters always have a cache name.
        self.clicked.emit(self.ids, self.picName)
class PlaylistButton(QPushButton):
    """Checkable playlist button; re-emits clicks with playlist context.

    Emits ``hasClicked(ids, coverImgUrl)`` when clicked.

    Args:
        parent: owning widget (must itself expose a ``parent`` attribute).
        ids: playlist id.
        coverImgUrl: cover image url for this playlist -> None or url.
        *args: forwarded unchanged to the QPushButton constructor.

    ``singsIds`` and ``singsUrls`` are placeholders with no setters yet.
    """
    # NOTE(review): the original declared ``__solts__`` (a typo for
    # ``__slots__``) — an inert class attribute. It is removed rather than
    # corrected, because a real ``__slots__`` containing 'parent' would shadow
    # QPushButton.parent() with a slot descriptor.

    hasClicked = pyqtSignal(int, str)

    def __init__(self, parent, ids, coverImgUrl, *args):
        super(PlaylistButton, self).__init__(*args)
        self.parent = parent
        self.grandparent = self.parent.parent
        # Checkable + auto-exclusive: only one playlist button stays pressed.
        self.setCheckable(True)
        self.setAutoExclusive(True)
        self.ids = ids
        self.coverImgUrl = coverImgUrl
        self.catch = None
        self.result = None
        self.singsIds = None
        self.singsUrls = None
        self.clicked.connect(self.clickedEvent)

    def clickedEvent(self):
        """Re-emit the click together with this playlist's id and cover url."""
        self.hasClicked.emit(self.ids, self.coverImgUrl)
| {
"repo_name": "HuberTRoy/MusicPlayer",
"path": "MusicPlayer/widgets/singsFrameBase.py",
"copies": "1",
"size": "8770",
"license": "mit",
"hash": 4592259841820868600,
"line_mean": 31.6117647059,
"line_max": 138,
"alpha_frac": 0.6465848966,
"autogenerated": false,
"ratio": 3.2895569620253164,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4436141858625316,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cyrfer'
# epic pyparsing help provided by Paul McGuire
# http://pyparsing.wikispaces.com/share/view/68890534
from ogre_parse.basereader import *
from ogre_parse.submodel import *
class ReadTextureUnit(ReadBase):
    """Parses a 'texture_unit' block of an OGRE material pass.

    The grammar accepts one resource form (texture / anim_texture /
    cubic_texture) plus the optional texture-unit attributes, in any order
    (the ``&`` operator), and wraps the result in an MTextureUnit model.
    """

    def __init__(self):
        # TODO: need to separate the required member options
        # --------------------- cubic_texture resource
        cubicResourceDecl = Keyword('cubic_texture')('resource_type')
        cubicIdentifier = identspec ^ Group(identspec + identspec + identspec + identspec + identspec + identspec)
        # cubicPropList = Group(\
        #     cubicIdentifier('name') + \
        #     oneOf('combinedUVW separateUV')('batch_mode') \
        # )
        cubicPropList = propList
        cubicResource = Group( cubicResourceDecl + cubicPropList('cubic_properties') )('required')
        # --------------------- anim_texture resource
        animResourceDecl = Keyword('anim_texture')('resource_type')
        animResource = Group( animResourceDecl + propList('keyframes') )('required')
        # --------------------- plain texture resource
        textureResourceDecl = Keyword('texture')('resource_type')
        texPropList = Group(identspec('name') + \
            Optional( oneOf('1d 2d 3d cubic') )('type') + \
            Optional(integer)('numMipMaps') + \
            Optional(Literal('alpha'))('alpha') + \
            Optional(oneOf(imageFormats)('format')) + \
            Optional(Literal('gamma')))
        textureResource = Group( textureResourceDecl + texPropList('resource_properties') )('required')
        # Value specs shared by several attributes below.
        addr_mode_val = oneOf('wrap clamp mirror border')
        addr_mode_spec = addr_mode_val + Optional(addr_mode_val + Optional(addr_mode_val))
        c_op_src_spec = oneOf('src_current src_texture src_diffuse src_specular src_manual')
        c_op_spec = oneOf('source1 source2 modulate modulate_x2 modulate_x4 add add_signed add_smooth subtract blend_diffuse_alpha blend_texture_alpha blend_current_alpha blend_manual dotproduct blend_diffuse_colour')
        colour_op_ex_spec = Group(c_op_spec('operation') + c_op_src_spec('source1') + c_op_src_spec('source2') + Optional(real('manual_factor')) + Optional(coloraction('manual_colour1')) + Optional(coloraction('manual_colour2')))
        fallback_spec = Group(scene_blend_long_spec('src_factor') + scene_blend_long_spec('dest_factor'))
        # define the optional members
        alias = Group(Keyword('texture_alias').suppress() + identspec)('texture_alias')
        coord_set = Group(Keyword('tex_coord_set').suppress() + integer)('tex_coord_set')
        address_mode = Group(Keyword('tex_address_mode').suppress() + addr_mode_spec)('tex_address_mode')
        border_colour = Group(Keyword('tex_border_colour').suppress() + coloraction)('tex_border_colour')
        filtering = Group(Keyword('filtering').suppress() + propList)('filtering')
        scale = Group(Keyword('scale').suppress() + (real('x') + real('y')))('scale')
        colour_op = Group(Keyword('colour_op').suppress() + oneOf('replace add modulate alpha_blend'))('colour_op')
        binding_type = Group(Keyword('binding_type').suppress() + oneOf('vertex fragment'))('binding_type')
        colour_op_ex = Group(Keyword('colour_op_ex').suppress() + colour_op_ex_spec)('colour_op_ex')
        colour_op_multipass_fallback = Group(Keyword('colour_op_multipass_fallback').suppress() + fallback_spec)('colour_op_multipass_fallback')
        env_map = Group(Keyword('env_map').suppress() + oneOf('off spherical planar cubic_reflection cubic_normal'))('env_map')
        content_type = Group(Keyword('content_type').suppress() + oneOf('named shadow compositor') + Optional(identspec('compositor') + identspec('texture') + Optional(integer('MRT'))))('content_type')
        # --- define the parser (attributes may appear in any order via '&')
        textureDecl = Keyword('texture_unit').suppress() + Optional(ident)('name') + \
            lbrace + \
            ( \
            Optional(textureResource | animResource | cubicResource) & \
            Optional(alias) & \
            Optional(coord_set) & \
            Optional(address_mode) & \
            Optional(border_colour) & \
            Optional(filtering) & \
            Optional(scale) & \
            Optional(colour_op) & \
            Optional(binding_type) & \
            Optional(colour_op_ex) & \
            Optional(colour_op_multipass_fallback) & \
            Optional(env_map) & \
            Optional(content_type) \
            ) + \
            rbrace
        texture_ = Group(textureDecl).setParseAction(MTextureUnit)
        super(ReadTextureUnit, self).__init__(texture_('texture_unit'))
class ReadShaderReference(ReadBase):
    """Parses vertex_program_ref / fragment_program_ref blocks inside a pass.

    Collects param_named_auto and param_named entries (in any order) and wraps
    the result in an MShaderRef model.
    """

    def __init__(self):
        # --- define the shader_ref parser
        # shaderRefPropName = oneOf('param_indexed param_indexed_auto param_named param_named_auto shared_params_ref')
        param_named_auto_spec = Keyword('param_named_auto').suppress() + ident
        param_named_spec = Keyword('param_named').suppress() + ident
        shaderRefSpec = oneOf('vertex_program_ref fragment_program_ref')
        shaderRefDecl = shaderRefSpec('stage') + ident('resource_name') + \
            lbrace + \
            ( \
            dictOf( param_named_auto_spec, propList )('param_named_auto') & \
            dictOf( param_named_spec, propList )('param_named') \
            ) + \
            rbrace
        # (dictOf(param_named_auto, propList('system_params'))('param_named_auto')) + \
        shader_ref_ = Group(shaderRefDecl)
        shader_ref_.setParseAction(MShaderRef)
        super(ReadShaderReference, self).__init__(shader_ref_('shader_ref'))
# successful parsing produces a subreader.MPass in parsed.mpass
# format documented here:
# http://www.ogre3d.org/docs/manual/manual_16.html#Passes
class ReadPass(ReadBase):
    """Parses a material 'pass' block (see the OGRE manual, manual_16, Passes).

    All attributes are optional and order-independent; texture_units and
    shader references may repeat. The result is wrapped in an MPass model.
    """

    def __init__(self):
        # define named parsers
        color_ambient = Group(Keyword('ambient').suppress() + colorspec)('ambient')
        color_diffuse = Group(Keyword('diffuse').suppress() + colorspec)('diffuse')
        color_emissive = Group(Keyword('emissive').suppress() + colorspec)('emissive')
        color_specular = Group(Keyword('specular').suppress() + specular_spec)('specular')
        # scene_blend
        # TODO: add action to turn short format into long format
        scene_blend_short = oneOf('add modulate colour_blend alpha_blend')
        scene_blend_long = scene_blend_long_spec + scene_blend_long_spec
        scene_blend = Group(Keyword('scene_blend').suppress() + (scene_blend_short | scene_blend_long))('scene_blend')
        # TODO: add action to turn short format into long format
        separate_blend_short = scene_blend_short + scene_blend_short
        separate_blend_long = scene_blend_long_spec + scene_blend_long_spec + scene_blend_long_spec + scene_blend_long_spec
        separate_scene_blend = Group(Keyword('separate_scene_blend').suppress() + (separate_blend_short | separate_blend_long))('separate_scene_blend')
        scene_blend_op_spec = oneOf('add subtract reverse_subtract min max')
        scene_blend_op = Group(Keyword('scene_blend_op').suppress() + scene_blend_op_spec)('scene_blend_op')
        separate_scene_blend_op = Group(Keyword('separate_scene_blend_op').suppress() + (scene_blend_op_spec+scene_blend_op_spec))('separate_scene_blend_op')
        # depth stuff
        depth_check = Group(Keyword('depth_check').suppress() + onoff_val_spec)('depth_check')
        depth_write = Group(Keyword('depth_write').suppress() + onoff_val_spec)('depth_write')
        depth_func_val_spec = oneOf('always_fail always_pass less less_equal equal not_equal greater_equal greater')
        depth_func = Group(Keyword('depth_func').suppress() + depth_func_val_spec)('depth_func')
        depth_bias = Group(Keyword('depth_bias').suppress() + real('constant') + Optional(real('slopescale')))('depth_bias')
        iter_depth_bias = Group(Keyword('iteration_depth_bias').suppress() + real('bias'))('iteration_depth_bias')
        # alpha stuff (alpha_rejection reuses the depth comparison functions)
        alpha_rejection_func = depth_func_val_spec('function')
        alpha_rejection = Group(Keyword('alpha_rejection').suppress() + alpha_rejection_func + real('threshold'))('alpha_rejection')
        alpha_to_coverage = Group(Keyword('alpha_to_coverage').suppress() + onoff_val_spec)('alpha_to_coverage')
        # light_scissor
        light_scissor = Group(Keyword('light_scissor').suppress() + onoff_val_spec)('light_scissor')
        light_clip_planes = Group(Keyword('light_clip_planes').suppress() + onoff_val_spec)('light_clip_planes')
        # other
        illum_stage_val = oneOf('ambient per_light decal')
        illumination_stage = Group(Keyword('illumination_stage').suppress() + illum_stage_val)('illumination_stage')
        onoffforce_val_spec = oneOf('on off force')
        transparent_sorting = Group(Keyword('transparent_sorting').suppress() + onoffforce_val_spec)('transparent_sorting')
        normalise_normals = Group(Keyword('normalise_normals').suppress() + onoff_val_spec)('normalise_normals')
        # cull
        cull_hardware = Group(Keyword('cull_hardware').suppress() + oneOf('clockwise anticlockwise none'))('cull_hardware')
        cull_software = Group(Keyword('cull_software').suppress() + oneOf('back front none'))('cull_software')
        # other
        lighting = Group(Keyword('lighting').suppress() + onoff_val_spec)('lighting')
        shading = Group(Keyword('shading').suppress() + oneOf('flat gouraud phong'))('shading')
        polygon_mode = Group(Keyword('polygon_mode').suppress() + oneOf('solid wireframe points'))('polygon_mode')
        polygon_mode_overrideable = Group(Keyword('polygon_mode_overrideable').suppress() + truefalse_spec)('polygon_mode_overrideable')
        fog_override_type = oneOf('none linear exp exp2')
        # must define number spec here because
        # something is wrong with the specs for number types at this point (real, integer)
        # and they are returning a number and not a string.
        fog_int = Word(nums)
        fog_real = Regex(r"\d+\.\d*")
        fog_num = fog_int ^ fog_real
        fog_override_colour = fog_num + fog_num + fog_num
        fog_override_args = fog_override_type('type') + fog_override_colour('colour') + fog_num('density') + fog_num('start') + fog_num('end')
        fog_override = Group(Keyword('fog_override').suppress() + truefalse_spec('enabled') + Optional(fog_override_args))('fog_override')
        colour_write = Group(Keyword('colour_write').suppress() + onoff_val_spec)('colour_write')
        start_light = Group(Keyword('start_light').suppress() + integer)('start_light')
        max_lights = Group(Keyword('max_lights').suppress() + integer)('max_lights')
        light_type_spec = oneOf('point directional spot')
        # 'iteration' has three alternative argument shapes.
        iteration_format1 = oneOf('once once_per_light') + Optional(light_type_spec)
        iteration_format2 = integerspec + Optional(Keyword('per_light') + light_type_spec)
        iteration_format3 = integerspec + Optional(Keyword('per_n_lights') + integerspec + Optional(light_type_spec))
        iteration_args = (iteration_format1 ^ iteration_format2 ^ iteration_format3)
        iteration = Group(Keyword('iteration').suppress() + iteration_args)('iteration')
        point_size = Group(Keyword('point_size').suppress() + real)('point_size')
        point_sprites = Group(Keyword('point_sprites').suppress() + onoff_val_spec)('point_sprites')
        attenuation_spec = oneOf('constant linear quadratic')
        point_size_attenuation = Group(Keyword('point_size_attenuation').suppress() + onoff_val_spec('enabled') + Optional(attenuation_spec)('model'))('point_size_attenuation')
        point_size_min = Group(Keyword('point_size_min').suppress() + real)('point_size_min')
        point_size_max = Group(Keyword('point_size_max').suppress() + real)('point_size_max')
        # Nested readers for repeated sub-blocks.
        tu = ReadTextureUnit()
        shader = ReadShaderReference()
        passBody = ( \
            # color
            Optional(color_ambient) & \
            Optional(color_diffuse) & \
            Optional(color_emissive) & \
            Optional(color_specular) & \
            # blend
            Optional(scene_blend) & \
            Optional(separate_scene_blend) & \
            Optional(scene_blend_op) & \
            Optional(separate_scene_blend_op) & \
            # depth
            Optional(depth_check) & \
            Optional(depth_write) & \
            Optional(depth_func) & \
            Optional(depth_bias) & \
            Optional(iter_depth_bias) & \
            # alpha
            Optional(alpha_rejection) & \
            Optional(alpha_to_coverage) & \
            # light scissor
            Optional(light_scissor) & \
            Optional(light_clip_planes) & \
            # other
            Optional(illumination_stage) & \
            Optional(transparent_sorting) & \
            Optional(normalise_normals) & \
            # cull
            Optional(cull_hardware) & \
            Optional(cull_software) & \
            # lighting
            Optional(lighting) & \
            Optional(shading) & \
            # polygon
            Optional(polygon_mode) & \
            Optional(polygon_mode_overrideable) & \
            # other
            Optional(fog_override) & \
            Optional(colour_write) & \
            Optional(start_light) & \
            Optional(max_lights) & \
            Optional(iteration) & \
            # point
            Optional(point_size) & \
            Optional(point_sprites) & \
            Optional(point_size_attenuation) & \
            Optional(point_size_min) & \
            Optional(point_size_max) & \
            # texture
            ZeroOrMore(tu.getGrammar())('texture_units') & \
            # shaders
            ZeroOrMore(shader.getGrammar())('shaders') \
            )
        # total parser
        parser = Group( Keyword('pass').suppress() + Optional(identspec('name')) + LBRACE + passBody + RBRACE)('mpass')
        parser.setParseAction(MPass)
        super(ReadPass, self).__init__(parser)
# -------------- END NEW STUFF SINCE PAUL HELPED ------------ #
class ReadTechnique(ReadBase):
    """Parses a 'technique' block: scheme/lod/shadow options, GPU rules and passes.

    Requires at least one pass; wraps the result in an MTechnique model.
    """

    def __init__(self):
        pass_ = ReadPass()
        # --- define the technique parser
        scheme = Group(Keyword('scheme').suppress() + identspec)('scheme')
        lod_index = Group(Keyword('lod_index').suppress() + integer)('lod_index')
        shadow_caster_material = Group(Keyword('shadow_caster_material').suppress() + identspec)('shadow_caster_material')
        shadow_receiver_material = Group(Keyword('shadow_receiver_material').suppress() + identspec)('shadow_receiver_material')
        # GPU rules share the include/exclude qualifier.
        inex = oneOf('include exclude')
        gpu_vendor_rule = Group(Keyword('gpu_vendor_rule').suppress() + inex + identspec)('gpu_vendor_rule')
        gpu_device_rule = Group(Keyword('gpu_device_rule').suppress() + inex + propList)('gpu_device_rule')
        techDecl = Keyword('technique').suppress() + Optional(ident)('name') + \
            lbrace + \
            Optional( scheme ) + \
            Optional( lod_index ) + \
            Optional( shadow_caster_material ) + \
            Optional( shadow_receiver_material ) + \
            ZeroOrMore( gpu_vendor_rule )('gpu_vendor_rules') + \
            ZeroOrMore( gpu_device_rule )('gpu_device_rules') + \
            OneOrMore( pass_.getGrammar() )('passes') + \
            rbrace
        technique_ = Group(techDecl)
        technique_.setParseAction(MTechnique)
        super(ReadTechnique, self).__init__(technique_('technique'))
| {
"repo_name": "cyrfer/ogre_parse",
"path": "ogre_parse/subreader.py",
"copies": "1",
"size": "16636",
"license": "mit",
"hash": 5522130532067399000,
"line_mean": 52.8381877023,
"line_max": 229,
"alpha_frac": 0.5770016831,
"autogenerated": false,
"ratio": 4.018357487922706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009911498537122367,
"num_lines": 309
} |
__author__ = 'cyrfer'
import array
import math
import copy
def float_eq(a, b, epsilon=1e-7):
    """Return True when *a* and *b* differ by less than *epsilon*."""
    return math.fabs(a - b) < epsilon
# should be hooked up to a 'basereader.colorspec' instance
# should be hooked up to a 'basereader.colorspec' instance
class Color(object):
    """RGBA colour stored as a float array; defaults to opaque black (0, 0, 0, 1).

    Accepts either explicit *vals* or a pyparsing token group in *tokens*.
    """

    def __init__(self, tokens=None, vals=None):
        self.vector = array.array('f', [0, 0, 0, 1])
        if vals:
            for i, v in enumerate(vals[:4]):
                self.vector[i] = v
        if tokens and len(tokens) > 0:
            first = tokens[0]
            for i in range(min(len(self.vector), len(first))):
                self.vector[i] = first[i]

    def __str__(self):
        # Fixed-point with trailing zeros (and a bare '.') stripped,
        # e.g. 0.5 -> "0.5", 0.0 -> "0", 1.0 -> "1".
        fmt = '{0:.6f}'
        return ' '.join(fmt.format(c).rstrip('0').rstrip('.') for c in self.vector)

    __repr__ = __str__

    def __getitem__(self, item):
        return self.vector[item]

    def __setitem__(self, key, value):
        self.vector[key] = value

    def __eq__(self, other):
        if not isinstance(other, Color):
            return False
        if not other:
            return False
        if len(other) < 4:
            return False
        return all(self.vector[i] == other[i] for i in range(4))

    def __len__(self):
        return 4

    # support RHS multiplication by a scalar
    def __mul__(self, scalar):
        if not isinstance(scalar, (float, int)):
            raise ValueError('argument to multiply with Color should be a scalar.')
        scaled = copy.deepcopy(self)
        for i in range(4):
            scaled[i] = scalar * scaled[i]
        return scaled

    # support LHS multiplication
    __rmul__ = __mul__
| {
"repo_name": "cyrfer/ogre_parse",
"path": "ogre_parse/basemodel.py",
"copies": "1",
"size": "2448",
"license": "mit",
"hash": -4869784341842968000,
"line_mean": 28.1428571429,
"line_max": 90,
"alpha_frac": 0.5310457516,
"autogenerated": false,
"ratio": 3.4094707520891365,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9374034898460286,
"avg_score": 0.013296321045770054,
"num_lines": 84
} |
__author__ = 'cyrfer'
# from pyparsing import Optional, Word, Literal, Keyword, Forward, alphas, nums, alphanums, \
# Group, ZeroOrMore, OneOrMore, oneOf, delimitedList, cStyleComment, restOfLine, LineEnd, \
# cppStyleComment, Combine, Dict, dictOf, Regex, Suppress
from pyparsing import *
# import ogre_parse.model
import ogre_parse.basemodel
def printAll(s, l, toks):
    """Debug parse action: print the matched tokens framed by separator lines."""
    separator = '-----------'
    print(separator)
    print(toks)
    print(separator)
# convenient definitions
# TODO: find a way that does not pollute the global namespace
EOL = LineEnd().suppress()
# Identifier forms: 'ident' also allows '/' and parentheses (resource names).
ident = Word( alphanums+"_", alphanums+"_-/$@#.()" )
identspec = Word( alphanums+"_", alphanums+"_-$@#." )
lbrace = Literal("{").suppress()
rbrace = Literal("}").suppress()
# NOTE(review): 'integerspec' stays a raw string token while 'integer'
# converts to int via its parse action — both match the same text.
integerspec = Word(nums)
integer = Word(nums)
integer.setParseAction(lambda t: int(t[0]))
# propList.setParseAction(printAll)
# reusable definitions
LBRACE, RBRACE = map(Suppress,'{}')
# NOTE(review): EOL is redefined here with an identical value (see above).
EOL = LineEnd().suppress()
# another option for floating point parsing:
# http://pyparsing.wikispaces.com/share/view/33656348
# Regex(r'\d+(\.\d*)?([eE]\d+)?')
# realspec = Combine(Optional('-') + Regex(r"\d+(\.\d*)?"))
realspec_frac_only = Literal('.') + Word(nums) #Regex(r".\d")
realspec_whole_only = Word(nums) #Regex(r"\d")
realspace_whole_and_frac = Regex(r"\d+(\.\d*)?")
realspec = Combine(Optional('-') + (realspec_frac_only ^ realspec_whole_only ^ realspace_whole_and_frac))
int_or_real_spec = integerspec ^ realspec
# 'real' yields Python floats; 'realspec' yields the matched text.
real = (int_or_real_spec).setParseAction(lambda t: float(t[0]))
propVal = realspec | integerspec | ident
propList = Group(OneOrMore(~EOL + propVal))
# colorspec = Group(~EOL + OneOrMore(realspec))('vector').setParseAction(Color)
# Colour parsers build basemodel.Color instances (RGB with optional alpha).
color3spec = Group(real('r') + real('g') + real('b')).setParseAction(ogre_parse.basemodel.Color)
color4spec = Group(real('r') + real('g') + real('b') + real('a')).setParseAction(ogre_parse.basemodel.Color)
coloraction = (color3spec ^ color4spec)
colorspec = ( color3spec ^ color4spec )('args')
specular_spec = Group( (Group(color3spec)('color') + Group(real)('shininess')) ^ (Group(color4spec)('color') + Group(real)('shininess')) )
scene_blend_long_spec = oneOf('one zero dest_colour src_colour one_minus_dest_colour one_minus_src_colour dest_alpha src_alpha one_minus_dest_alpha one_minus_src_alpha')
truefalse_spec = oneOf('true false')
onoff_val_spec = oneOf('on off')
# on 8/8/2014, taken from:
# http://www.ogre3d.org/docs/manual/manual_17.html#texture
imageFormats = '''
PF_L8 PF_L16 PF_A8 PF_A4L4 PF_BYTE_LA
PF_R5G6B5 PF_B5G6R5 PF_R3G3B2 PF_A4R4G4B4 PF_A1R5G5B5
PF_R8G8B8 PF_B8G8R8
PF_A8R8G8B8 PF_A8B8G8R8 PF_B8G8R8A8 PF_R8G8B8A8
PF_X8R8G8B8 PF_X8B8G8R8
PF_A2R10G10B10 PF_A2B10G10R10
PF_DXT1 PF_DXT2 PF_DXT3 PF_DXT4 PF_DXT5
PF_FLOAT16_R PF_FLOAT16_RGB PF_FLOAT16_RGBA
PF_FLOAT32_R PF_FLOAT32_RGB PF_FLOAT32_RGBA
PF_SHORT_RGBA
PF_FLOAT16_GR PF_FLOAT32_GR
PF_DEPTH
PF_SHORT_GR PF_SHORT_RGB
PF_PVRTC_RGB2 PF_PVRTC_RGBA2 PF_PVRTC_RGB4 PF_PVRTC_RGBA4
PF_R8 PF_RG8
'''
# base class for all parsers
class ReadBase(object):
    """Common base for all grammar readers: wraps a pyparsing grammar."""

    def __init__(self, grammar):
        self.grammar_ = grammar
        # Every OGRE script may contain C++-style comments; strip them.
        self.grammar_.ignore(cppStyleComment)
        self.debug_flag_ = False
        self.grammar_.setDebug(False)

    def getGrammar(self):
        """Expose the wrapped grammar so readers can be nested in one another."""
        return self.grammar_

    def parseString(self, txt):
        """Parse *txt*; echo input and result when debug_flag_ is set."""
        if self.debug_flag_:
            print('parsing: [[\n' + txt + '\n]]\n')
        parsed = self.grammar_.parseString(txt)
        if self.debug_flag_:
            print('result = %s' % parsed)
        return parsed
| {
"repo_name": "cyrfer/ogre_parse",
"path": "ogre_parse/basereader.py",
"copies": "1",
"size": "3633",
"license": "mit",
"hash": 4363126430530298000,
"line_mean": 32.6388888889,
"line_max": 169,
"alpha_frac": 0.6710707404,
"autogenerated": false,
"ratio": 2.8316445830085737,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40027153234085733,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cyrfer'
# This file defines 'readers' for the major classes of OGRE objects, including:
# - materials
# - shader declarations
# - compositors
# OGRE permits putting any of the major classes into any of the OGRE resource file types, including:
# - .material
# - .program
# - .compositor
# Because any script may define any of the definitions, a catch-all "script reader" is defined here.
from ogre_parse.basereader import *
import ogre_parse.subreader
from ogre_parse.model import *
# grammar to parse materials
# grammar to parse materials
class ReadMaterial(ReadBase):
    """Parses a 'material' block: global options, texture aliases, techniques.

    Requires at least one technique; wraps the result in a Material model.
    """

    def __init__(self):
        technique_ = ogre_parse.subreader.ReadTechnique()
        # --- define the material parser
        lod_strategy = Group(Keyword('lod_strategy').suppress() + ident)('lod_strategy')
        lod_values = Group(Keyword('lod_values').suppress() + OneOrMore(integer))('lod_values')
        receive_shadows = Group(Keyword('receive_shadows').suppress() + onoff_val_spec)('receive_shadows')
        transparency_casts_shadows = Group(Keyword('transparency_casts_shadows').suppress() + onoff_val_spec)('transparency_casts_shadows')
        # set_texture_alias entries are collected into a dict (alias -> value).
        set_texture_alias_key = Keyword('set_texture_alias').suppress() + identspec
        set_texture_alias_val = identspec
        matDecl = Keyword('material').suppress() + ident('name') + \
            lbrace + \
            Optional(lod_strategy) + \
            Optional(lod_values) + \
            Optional(receive_shadows) + \
            Optional(transparency_casts_shadows) + \
            dictOf(set_texture_alias_key, set_texture_alias_val)('texture_alias') + \
            OneOrMore( technique_.getGrammar() )('techniques') + \
            rbrace
        material_ = Group(matDecl)('material')
        material_.setParseAction(Material)
        super(ReadMaterial, self).__init__(material_)
# grammar to parse shader declarations
class ReadShaderDeclaration(ReadBase):
    """pyparsing grammar for OGRE shader declarations.

    Handles `vertex_program` / `geometry_program` / `fragment_program`
    blocks in any of the supported languages, plus an optional
    `default_params` section.
    """
    def __init__(self):
        # `param_named_auto <name> <props...>` and `param_named <name> <props...>`.
        param_named_auto_spec = Keyword('param_named_auto').suppress() + ident
        param_named_spec = Keyword('param_named').suppress() + ident
        default_params = Group( Keyword('default_params').suppress() + \
            lbrace + \
            dictOf( param_named_auto_spec, propList )('param_named_auto') + \
            dictOf( param_named_spec, propList )('param_named') + \
            rbrace \
            )('default_params')
        # shaderDeclPropName = oneOf('source entry_point target delegate')
        # shaderDeclProp = Group(shaderDeclPropName + propList)
        # Declaration header: stage keyword, program name, language.
        shaderStage = oneOf('vertex_program geometry_program fragment_program')('stage')
        shaderName = ident('name')
        shaderLang = oneOf('glsl hlsl cg asm')('language')
        source = Group(Keyword('source').suppress() + identspec)('source')
        entry_point = Group(Keyword('entry_point').suppress() + identspec)('entry_point')
        target = Group(Keyword('target').suppress() + identspec)('target')
        # HLSL needs source + entry_point + target (any order, pyparsing Each);
        # GLSL only needs a source file.
        hlsl = ( \
            source & \
            entry_point & \
            target \
            )
        glsl = ( \
            source \
            )
        shaderDeclDecl = shaderStage + shaderName + shaderLang + \
            lbrace + \
            (\
                (hlsl | glsl) & \
                Optional(default_params) \
            ) + \
            rbrace
        # (shaderDeclProps_unified | shaderDeclProps_hlsl | shaderDeclProps_glsl | shaderDeclProps_cg | shaderDeclProps_asm)
        shader_declaration = Group(shaderDeclDecl)('shader')
        # Convert the parse result into a ShaderDeclaration model instance.
        shader_declaration.setParseAction(ShaderDeclaration)
        super(ReadShaderDeclaration, self).__init__(shader_declaration)
# grammar to parse compositors
class ReadCompositor(ReadBase):
    """pyparsing grammar for OGRE `compositor` blocks.

    NOTE(review): unlike the other readers, this grammar does not attach a
    parse action, and `compTargetProp` matches only the property *name*
    without its values — presumably a stub; verify against real
    .compositor scripts before relying on it.
    """
    def __init__(self):
        compTexture = Group(Keyword('texture') + OneOrMore(ident))
        # Target-section property names (values are not parsed here).
        compTargetPropName = oneOf('input only_initial visibility_mask load_bias material_scheme shadows pass')
        compTargetProp = Group(compTargetPropName)
        # `target <name> { ... }` and the final `target_output { ... }`.
        compTarget = Group(Keyword('target') + ident + lbrace + ZeroOrMore(compTargetProp) + rbrace)
        compOutput = Group(Keyword('target_output') + lbrace + ZeroOrMore(compTargetProp) + rbrace)
        # A technique: textures, intermediate targets, then the output target.
        compTech = Group(Keyword('technique').suppress() + \
            lbrace + \
            ZeroOrMore(compTexture) + \
            ZeroOrMore(compTarget) + \
            compOutput + \
            rbrace)
        compDecl = Keyword('compositor').suppress() + ident + \
            lbrace + \
            OneOrMore(compTech) + \
            rbrace
        compositor = Group(compDecl)
        super(ReadCompositor, self).__init__(compositor)
# grammar to parse anything in an ogre script (.material, .compositor, .program)
class ReadScript(ReadBase):
    """Catch-all grammar for a whole OGRE script file.

    Any of the major resource types (material, compositor, shader
    declaration) may appear, in any order, any number of times.
    """
    def __init__(self):
        material_ = ReadMaterial().getGrammar()
        shader_declaration_ = ReadShaderDeclaration().getGrammar()
        compositor_ = ReadCompositor().getGrammar()
        resourceType = material_ | compositor_ | shader_declaration_
        # StringEnd() forces the whole input to be consumed.
        scriptDecl = ZeroOrMore(resourceType) + StringEnd()
        scriptDecl.setParseAction(Script)
        super(ReadScript, self).__init__(scriptDecl('script'))

    def getGrammar(self):
        # Returns the stored grammar; `grammar_` is presumably set by
        # ReadBase.__init__ — confirm against basereader.
        return self.grammar_
| {
"repo_name": "cyrfer/ogre_parse",
"path": "ogre_parse/reader.py",
"copies": "1",
"size": "5640",
"license": "mit",
"hash": -7213373833509549000,
"line_mean": 41.0895522388,
"line_max": 148,
"alpha_frac": 0.5764184397,
"autogenerated": false,
"ratio": 4.28246013667426,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.535887857637426,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Cyril de Vogelaere : 2814-11-00 & Thuin florian : 0656-11-00'
import time
import sys
from copy import deepcopy
from os import listdir, system
from search import *
# LEFT RIGHT UP DOWN
directions = [[0, -1], [0, 1], [-1, 0], [1, 0]]
global listOfGoalPos
###############
# My function #
###############
def readGridFromFile(Texte):
    """Read a Sokoban grid file and strip its surrounding wall border.

    The file ends with a newline, so splitting yields a trailing empty
    string; that, the bottom wall row and the top wall row are dropped,
    then the first and last column of every remaining row are removed.

    Returns the inner rows as a list of strings.
    """
    with open(Texte, "r") as fin:
        raw_rows = fin.read().split("\n")
    # Drop top wall row (index 0) plus the bottom wall row and the
    # trailing empty string (last two entries).
    inner_rows = raw_rows[1:-2]
    # Strip the left and right wall columns.
    return [row[1:-1] for row in inner_rows]
# Read goal file and return a grid containing the data
def readStateFromGoal(goal):
grid = readGridFromFile(goal)
listOfGoalPos = [] #Orginal position of boxes
i = 0
for line in grid:
for j in range(0, len(line)):
if line[j] == ".":
#Avatar
listOfGoalPos.append((i, j))
i+=1
return listOfGoalPos
# Read init file and return a state containing the data
def readStateFromInit(init):
with open(init, "r") as file:
# Lecture du fichier
grid = readGridFromFile(init)
# Creation d'un tableau equivalent au probleme
avatarPos = (0,0) #Original position of the avatar
listOfBoxesPos = [] #Orginal position of boxes
#Read grid for important elem
i = 0
for line in grid:
for j in range(0, len(line)):
if line[j] == "@":
#Avatar
avatarPos = (i, j)
elif line[j] == "$":
#Box
listOfBoxesPos.append((i, j))
i+=1
return State(grid, listOfBoxesPos, avatarPos)
#Add a line of wall
def addALineOfWall(string, length):
for i in range (0, length+2):
string += "#"
string += "\n"
return string
#Check if position is inbound
def inBounds(grid, pos):
return 0 <= pos[0] and pos[0] < len(grid) and 0 <= pos[1] and pos[1] < len(grid[0])
#Check if the state is a KO state
def isKOState(state, box):
#Check direction in which i can push
if box in listOfGoalPos :
# If box on goal state, it's never a KO state
return False
#Test LEFT AND RIGHT
freedom = 0
for x in range(0, 2):
i = box[0] + directions[x][0]
j = box[1] + directions[x][1]
if inBounds(state.grid, (i, j)) and (state.grid[i][j] == " " or state.grid[i][j] == "@"):
freedom += 1;
if freedom == 2:
return False
#Test LEFT AND RIGHT
freedom = 0
for x in range(2, 4):
i = box[0] + directions[x][0]
j = box[1] + directions[x][1]
if inBounds(state.grid, (i, j)) and (state.grid[i][j] == " " or state.grid[i][j] == "@"):
freedom += 1;
if freedom == 2:
return False
return False
# Check if pushing box will lead to a KO state
# Pre : box is pushable
def isPushingOK(state, dir, x, y):
result = False
state.grid[x] = state.grid[x][:y] + " " + state.grid[x][y+1:]
newBoxX = x + dir[0]
newBoxY = y + dir[1]
state.grid[newBoxX] = state.grid[newBoxX][:newBoxY] + "$" + state.grid[newBoxX][newBoxY+1:]
result = not isKOState(state, (newBoxX, newBoxY))
state.grid[newBoxX] = state.grid[newBoxX][:newBoxY] + " " + state.grid[newBoxX][newBoxY+1:]
state.grid[x] = state.grid[x][:y] + "$" + state.grid[x][y+1:]
return result
#Check if two position are adjacent
def arePosAdjacent(posA, posB):
distI = abs(posA[0] - posB[0])
distJ = abs(posA[1] - posB[1])
return (distI + distJ) < 2
#Check if char can push the box from this position
def canPushBox(grid, char, box):
if arePosAdjacent(char, box):
i = 2*box[0] - char[0]
j = 2*box[1] - char[1]
if inBounds(grid, (i, j)) and grid[i][j] == " ":
return True
return False
#Generate successor from state
#Pre : Successor can be generated => if box it can be pushed
#Generate successor from state
#Pre : Successor can be generated => if box it can be pushed
def generateSuccessor(state, dir):
    """Return a deep copy of `state` after moving the avatar one step in
    `dir`, pushing a box along if one occupies the destination square.

    Grid rows are strings, so every square edit rebuilds the row via
    slicing.
    """
    newState = deepcopy(state)
    # Calculate the avatar's new position.
    newState.avatarPos = (state.avatarPos[0] + dir[0], state.avatarPos[1] + dir[1])
    # Clear the avatar's old square in the grid.
    newState.grid[state.avatarPos[0]] = newState.grid[state.avatarPos[0]][:state.avatarPos[1]] + " " + newState.grid[state.avatarPos[0]][state.avatarPos[1]+1:]
    if(newState.grid[newState.avatarPos[0]][newState.avatarPos[1]] == "$"):
        # Destination holds a box: move it one step further before
        # writing the avatar into the grid.
        for index in range(0, len(newState.listOfBoxesPos)):
            if (newState.avatarPos[0], newState.avatarPos[1]) == newState.listOfBoxesPos[index]:
                # New box coordinates, one step beyond the avatar.
                newX = dir[0] + newState.avatarPos[0]
                newY = dir[1] + newState.avatarPos[1]
                newState.listOfBoxesPos[index] = (newX, newY)
                newState.grid[newX] = newState.grid[newX][:newY] + "$" + newState.grid[newX][newY+1:]
    # Write the avatar onto its new square.
    newState.grid[newState.avatarPos[0]] = newState.grid[newState.avatarPos[0]][:newState.avatarPos[1]] + "@" + newState.grid[newState.avatarPos[0]][newState.avatarPos[1]+1:]
    return newState
#Calculate the minimum position from the avatar to a box
def calculateDistFromBoxes(state):
best = len(state.grid) + len(state.grid[0])
for box in state.listOfBoxesPos:
best = min(best, (abs(box[0] - state.avatarPos[0]) + abs(box[1] - state.avatarPos[1])))
return best
# Return the minimum hamilton distance to reach a goal
def minDistOfBoxToGoal(state, box):
best = len(state.grid) + len(state.grid[0])
for goal in listOfGoalPos:
best = min(best, (abs(goal[0] - box[0]) + abs(goal[1] - box[1])))
return best
# Heuristic function
# Minimal value will be explored first !!!
# Heuristic function for A*.
# Minimal value will be explored first !!!
def heuristicFunction(node):
    """A* heuristic: weighted sum of box-to-goal distances plus the
    avatar-to-nearest-box distance.

    The box term is scaled by the grid height so that placing boxes
    dominates moving the avatar.  NOTE(review): this scaling can
    overestimate, so the heuristic is not admissible — A* may return a
    non-optimal path.
    """
    score = 0
    for box in node.state.listOfBoxesPos:
        score += minDistOfBoxToGoal(node.state, box) * len(node.state.grid)
    score += calculateDistFromBoxes(node.state)
    return score
#################
# My classes #
#################
class Sokoban(Problem):
    """Search-problem wrapper for a Sokoban puzzle.

    Loads `<init>.goal` into the module global `listOfGoalPos` and
    `<init>.init` into the initial State.
    """
    def __init__(self, init):
        # Extract the goal squares and the initial state from the files.
        global listOfGoalPos
        listOfGoalPos = readStateFromGoal(init + ".goal")
        initState = readStateFromInit(init + ".init")
        # Extend super init
        super().__init__(initState)

    def goal_test(self, state):
        # Solved when every goal square holds a box.
        for elem in listOfGoalPos:
            if not elem in state.listOfBoxesPos:
                return False
        return True

    def successor(self, state):
        """Yield (action_index, new_state) for every legal move.

        A move is legal when the destination is free floor, or holds a
        box that can be pushed without creating a dead-lock.
        """
        #print(state)
        for i in range(0, len(directions)):
            x = state.avatarPos[0] + directions[i][0]
            y = state.avatarPos[1] + directions[i][1]
            if inBounds(state.grid, (x, y)) and (state.grid[x][y] == ' ' or (state.grid[x][y] == '$' and canPushBox(state.grid, state.avatarPos, (x,y)) and isPushingOK(state, directions[i], x, y))):
                # Yield the direction index and the resulting state.
                yield (i, generateSuccessor(state, directions[i]))
class State:
    """Snapshot of a Sokoban position.

    Holds the trimmed grid (list of row strings), the box coordinates and
    the avatar coordinate.  Equality and hashing depend on the grid only.
    """

    def __init__(self, gridInit, listOfBoxesPos, avatarPos):
        self.listOfBoxesPos = listOfBoxesPos
        self.avatarPos = avatarPos
        self.grid = gridInit

    def __str__(self):
        """Pretty print: the grid surrounded by a '#' wall border."""
        wall = "#" * (len(self.grid[0]) + 2) + "\n"
        body = "".join("#" + row + "#\n" for row in self.grid)
        return wall + body + wall

    def __repr__(self):
        """Full machine-readable representation."""
        return str((self.avatarPos, self.listOfBoxesPos, self.grid))

    def __eq__(self, other):
        # Two states are equal when their grids match.
        return other.grid == self.grid

    def __hash__(self):
        # Hash the printable form, consistent with __eq__.
        return hash(str(self))
#####################
# Launch the search #
#####################
# Init: build the problem from the puzzle name given on the command line.
now = time.time()
problem = Sokoban(sys.argv[1])
# Solve with A* graph search (a depth-first alternative is kept below).
#node = depth_first_graph_search(problem)
node = astar_graph_search(problem, heuristicFunction)
# Print the solution path from the initial state to the goal.
path = node.path()
path.reverse()
#print(len(path))
for n in path:
    print(n.state) # assuming that the __str__ function of states output the correct format
# Report elapsed wall-clock time.
later = time.time()
print(later - now) | {
"repo_name": "fthuin/artificial-intelligence",
"path": "assignment2/Code/Sokoban.py",
"copies": "1",
"size": "8588",
"license": "mit",
"hash": 7897304180310724000,
"line_mean": 32.8149606299,
"line_max": 198,
"alpha_frac": 0.5895435491,
"autogenerated": false,
"ratio": 3.286643704554152,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4376187253654152,
"avg_score": null,
"num_lines": null
} |
__author__ = 'czervenka'
"""
Simple database app settings
BASIC USE
---------
from models.config import settings
version = settings['version'] # read settings
settings['version'] = '1' # stores settings
INITIALIZATION
--------------
from models.config import settings
settings['version'] = '1'
# use Datastore Viewer to change your own settings
"""
from google.appengine.ext import ndb
from google.appengine.api import namespace_manager
class AppSettings(ndb.Model):
    """Datastore entity holding the application settings.

    A single row with key id 'app_settings' is used per namespace (see
    LazyAppSettings below).
    """
    # Application version string; unindexed since it is never queried.
    version = ndb.StringProperty(default='0.1', indexed=False)
class LazyAppSettings(object):
    """
    Settings row property descriptor used in SettingsDict.

    Loads the single AppSettings row on first access and caches it on the
    owning instance as `_cached_settings`.
    """
    def __get__(self, instance, owner=None, domain=None):
        # NOTE(review): `domain` is not part of the standard descriptor
        # protocol and is never passed — presumably leftover; confirm.
        if not hasattr(instance, '_cached_settings'):
            key = ndb.Key(AppSettings, 'app_settings')
            row = key.get()
            if row is None:
                # First access ever: create an unsaved row with defaults.
                row = AppSettings(key=key)
            instance._cached_settings = row
        return instance._cached_settings
class SettingsDict(object):
    """
    Dictionary which reads settings from database.
    To list all settings simply call settings.items()
    """
    # Descriptor providing the lazily-loaded AppSettings row.
    _row = LazyAppSettings()

    def __getitem__(self, key):
        if hasattr(self._row, key):
            return getattr(self._row, key)
        else:
            raise KeyError('Key %s not found.' % key)

    def __setitem__(self, key, value):
        # Every assignment writes the row back to the datastore.
        setattr(self._row, key, value)
        self._row.put()

    def __contains__(self, key):
        return hasattr(self._row, key)

    def keys(self):
        # The model's declared properties are the available setting names.
        return self._row._properties.keys()

    def items(self):
        return [(key, self[key]) for key in self.keys()]

    def values(self):
        return [self[key] for key in self.keys()]

    def __iter__(self):
        for key in self.keys():
            yield key
class SettingsCache(object):
    """Per-namespace cache of SettingsDict instances.

    Keeps one SettingsDict per App Engine namespace so every namespace
    reads and writes its own settings row.
    """

    def __init__(self):
        # Maps namespace -> SettingsDict.
        self.storage = {}

    def __getitem__(self, item):
        return self.get()[item]

    def __setitem__(self, key, value):
        self.get()[key] = value

    def get(self):
        """Return the SettingsDict for the current namespace."""
        return self.__for_namespace(namespace_manager.get_namespace())

    def __for_namespace(self, namespace):
        # Fix: the original `setdefault(namespace, SettingsDict())`
        # constructed a throwaway SettingsDict on every lookup, even when
        # the namespace was already cached.
        if namespace not in self.storage:
            self.storage[namespace] = SettingsDict()
        return self.storage[namespace]
settings = SettingsCache()
| {
"repo_name": "xaralis/gap-sandbox",
"path": "src/app/settings.py",
"copies": "1",
"size": "2302",
"license": "apache-2.0",
"hash": 2730642240078861000,
"line_mean": 24.0217391304,
"line_max": 70,
"alpha_frac": 0.6068635969,
"autogenerated": false,
"ratio": 4.074336283185841,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 92
} |
__author__ = 'czhuang'
import os
import cPickle as pickle
import numpy as np
def is_valid_latin_square(grid):
    """Check the column-balance invariant of a (stacked) latin square.

    `grid` has shape (n_row, n_col) with n_row a multiple of n_col.  Each
    column must sum to (n_row / n_col) * sum(range(n_col)).  Rows are
    assumed to be permutations of range(n_col) by the caller.
    """
    n_row, n_col = grid.shape
    assert n_row % n_col == 0
    # Expected per-column total when the columns are balanced.
    expected = (n_row / n_col) * np.sum(np.arange(n_col))
    column_sums = grid.sum(axis=0)
    return bool(np.allclose(column_sums, expected))
def gen_latin_squares(num_conditions, num_participants):
    """Randomly search for a num_participants x num_conditions grid of
    row permutations whose columns are balanced.

    Returns the grid on success, or None after 100 failed attempts.
    """
    max_tries = 100
    square = np.zeros((num_participants, num_conditions))
    for _ in range(max_tries):
        # Fill every row with a fresh random permutation, then test.
        for row in range(num_participants):
            square[row, :] = np.random.permutation(num_conditions)
        if is_valid_latin_square(square):
            return square
    return None
def stack_condition_ordering(num_conditions, num_participants):
quotient = num_participants / num_conditions
ordering = None
for i in range(quotient):
local_ordering = gen_latin_squares(3, 3)
if ordering is None:
ordering = local_ordering
else:
ordering = np.vstack((ordering, local_ordering))
duplicated_ordering = np.zeros_like(ordering)
duplicated_ordering = np.vstack((duplicated_ordering, duplicated_ordering))
for i in range(ordering.shape[0]):
duplicated_ordering[i*2, :] = ordering[i, :]
duplicated_ordering[i*2+1, :] = ordering[i, :]
print duplicated_ordering
assert np.allclose(np.sum(duplicated_ordering, axis=0),
quotient * 2 * np.sum(np.arange(num_conditions)))
fpath = os.path.join('pkls', 'condition_ordering.pkl')
print 'fpath', fpath
with open(fpath, 'wb') as p:
pickle.dump(duplicated_ordering, p)
def get_condition_ordering():
    """Load and return the pickled ordering written by
    stack_condition_ordering."""
    pkl_path = os.path.join('pkls', 'condition_ordering.pkl')
    with open(pkl_path, 'rb') as fin:
        return pickle.load(fin)
if __name__ == '__main__':
    print os.getcwd()
    # NOTE(review): stack_condition_ordering returns None (it only writes
    # the pickle); the first assignment below is immediately overwritten
    # by the reload from disk.
    condition_orderings = stack_condition_ordering(3, 3*10)
    condition_orderings = get_condition_ordering()
    print condition_orderings
    print condition_orderings.shape
| {
"repo_name": "czhuang/ChordRipple",
"path": "app/latin_squares_experiment_tools.py",
"copies": "1",
"size": "2219",
"license": "mit",
"hash": -5078612588845626000,
"line_mean": 27.0886075949,
"line_max": 79,
"alpha_frac": 0.6349707075,
"autogenerated": false,
"ratio": 3.5110759493670884,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46460466568670883,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cz'
import os
import math
from vmb_db.conn import get_one
from flask import request, redirect, url_for, flash, current_app, abort, render_template
from flask.ext.login import login_required, current_user
from werkzeug import secure_filename
from vmb_db.contact_info import get_contact, get_contact_by_casillero, set_contact_by_casillero, iter_pages
from vmb_db.invoice_info import get_invoice_list, set_inv_panama_by_guia, INVOICES_PER_PAGE
from vmb_db.upLoadZoom import accounts
from vmb_db.conf import getModule
configVMB = getModule('config')
def allowed_file(filename):
    """Return True when `filename` has an extension listed in
    configVMB.ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in configVMB.ALLOWED_EXTENSIONS
@login_required
def viewInvoice():
    """Render the paginated invoice list; on POST, import an uploaded
    spreadsheet of invoices first.

    POST: saves the uploaded file into UPLOAD_FOLDER and feeds it to
    accounts(), flashing how many rows were processed.
    """
    page = int(request.args.get('page', '1'))
    if request.method.lower() == 'post':
        file = request.files['file']
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(configVMB.UPLOAD_FOLDER, filename))
            fileFullName = '%s/%s' % (configVMB.UPLOAD_FOLDER, filename)
            # accounts() imports the spreadsheet rows into the database.
            uploaded = accounts(fileFullName=fileFullName)
            if uploaded > 0:
                uploadedMes = 'The file was uploaded. %s rows were processed' % (uploaded)
                flash(uploadedMes)
            else:
                flash('There was a problem with uploading the file')
    # Placeholder client so the shared template renders without a client.
    client = {'casillero' : 0}
    sort = 'fecha_proceso DESC, hora_proceso DESC'
    invoices = get_invoice_list(where=None, sort=sort, limit=INVOICES_PER_PAGE, skip=(page - 1) * INVOICES_PER_PAGE)
    n = get_one(query='SELECT COUNT(*) AS count FROM VMB.INVOICES')
    count = int(n['count'])
    numpages = int(math.ceil(count / float(INVOICES_PER_PAGE)))
    return render_template('invoices/view.html', client=client, ilist=invoices,
                           tcount=count, tnumpages=numpages, tpage=page,
                           pagination=iter_pages)
@login_required
def packageArrived():
    """Mark the invoice identified by the posted 'update' guia as arrived
    in Panama, then redirect back to the invoice list."""
    print 'packageArrived'
    if request.method.lower() == 'post':
        guia = request.form.get('update', '').strip()
        set_inv_panama_by_guia(guia)
    return redirect(url_for('viewInvoice'))
| {
"repo_name": "cindy-zimmerman/vmb-mrest",
"path": "flask_mrest/actions/invoice.py",
"copies": "1",
"size": "2217",
"license": "mit",
"hash": 6363787196036341000,
"line_mean": 40.0555555556,
"line_max": 116,
"alpha_frac": 0.6580965268,
"autogenerated": false,
"ratio": 3.4803767660910516,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46384732928910516,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cz'
from flask import request, redirect, url_for, flash, current_app, abort, render_template
from flask.ext.login import login_required, current_user
from vmb_db.contact_info import get_contact, get_contact_by_casillero, set_contact_by_casillero, iter_pages
from vmb_db.invoice_info import get_invoice_list_by_casillero, set_inv_by_guia, INVOICES_PER_PAGE
from vmb_db.accounts import insert_account
@login_required
def viewClient(cid):
    """Show one client (by casillero `cid`) and their invoices.

    POST records a payment for the invoice whose guia is in the 'update'
    field: paid is set when the amount covers the subtotal.
    """
    if request.method.lower() == 'post':
        guia = int(request.form.get('update', '').strip())
        amt = request.form.get('amt', '').strip()
        subtotal = request.form.get('subtotal', '').strip()
        try:
            amt = float(amt)
            subtotal = float(subtotal)
            paid = 0
            if amt >= subtotal:
                paid = 1
            set_inv_by_guia(guia, paid, amt)
        # NOTE(review): bare except also swallows database errors from
        # set_inv_by_guia, not just bad numbers — consider ValueError.
        except:
            flash('El numero de paga que ha introducido no es valido')
    client = get_contact_by_casillero(casillero=cid)
    invoices = get_invoice_list_by_casillero(casillero=cid)
    return render_template('clients/view.html', client=client, ilist=invoices)
@login_required
def editClient(cid):
    """Create or edit a client record.

    cid == '0' means "new client" (an empty template dict is shown);
    otherwise the client is loaded by casillero.  On POST the form fields
    are validated, phone numbers are normalised to a 507/506 country
    prefix, and the record is inserted or updated.  `actItems` flags
    which fields failed validation for the template.
    """
    if cid == '0':
        # Empty template for the "new client" form.
        client = {'contacto_nombre_1': '', 'casillero': '0', 'contacto_nombre_2': '',
                  'direccion_area': '', 'direccion_torre': '', 'direccion_calle': '',
                  'telefonocel': '', 'correo': '', 'ciudad': 'Panama',
                  'contacto_apellido_2': '', 'VMB_ACCOUNTS_id': 0, 'contacto_apellido_1': '', 'telefonofij': '',
                  'tarifa': 0,
                  'direccion_apt': ''}
    else:
        client = get_contact_by_casillero(casillero=cid)
    # Validation flags for the template; 'FT' is cleared once a POST
    # arrives and set again when any field fails.
    actItems = {'FT': True, 'telefonofij': False, 'telefonocel': False, 'tarifa': False}
    if not client:
        flash('User %s not found' % cid, 'error')
        return redirect(url_for('/'))
    if request.method.lower() == 'post':
        actItems['FT'] = False
        # Copy the text fields straight from the form.
        client['contacto_nombre_1'] = request.form.get('contacto_nombre_1', '').strip()
        client['contacto_nombre_2'] = request.form.get('contacto_nombre_2', '').strip()
        client['contacto_apellido_1'] = request.form.get('contacto_apellido_1', '').strip()
        client['contacto_apellido_2'] = request.form.get('contacto_apellido_2', '').strip()
        client['correo'] = request.form.get('correo', '').strip()
        client['direccion_calle'] = request.form.get('direccion_calle', '').strip()
        client['direccion_torre'] = request.form.get('direccion_torre', '').strip()
        client['direccion_apt'] = request.form.get('direccion_apt', '').strip()
        client['direccion_area'] = request.form.get('direccion_area', '').strip()
        client['ciudad'] = request.form.get('ciudad', '').strip()
        client['telefonofij'] = request.form.get('telefonofij', '').strip()
        client['telefonocel'] = request.form.get('telefonocel', '').strip()
        # Optional tariff: must parse as a number.
        if request.form.get('tarifa', None):
            try:
                client['tarifa'] = float(request.form.get('tarifa', 0))
            except:
                flash('El numero de tarifa que ha introducido no es valido')
                actItems['tarifa'] = True
                actItems['FT'] = True
        # Normalise phone numbers to a Panama (507) / Costa Rica (506)
        # prefix and require them to be numeric.
        try:
            if client['telefonofij'][:3] not in ('507', '506'):
                client['telefonofij'] = '507%s' % (client['telefonofij'])
            if client['telefonocel'][:3] not in ('507', '506'):
                client['telefonocel'] = '507%s' % (client['telefonocel'])
            client['telefonofij'] = int(client['telefonofij'])
            client['telefonocel'] = int(client['telefonocel'])
        except:
            flash('El numero de telefono que ha introducido no es valido')
            actItems['telefonofij'] = True
            actItems['FT'] = True
        # NOTE(review): telefonocel is re-converted here even though the
        # block above already did it — presumably redundant; confirm.
        try:
            client['telefonocel'] = int(client['telefonocel'])
        except:
            flash('El numero de telefono que ha introducido no es valido')
            actItems['telefonocel'] = True
            actItems['FT'] = True
        # 'update' > 0 means "save"; new clients are inserted, existing
        # ones updated in place.
        update = request.form.get('update', '').strip()
        if int(update) > 0:
            if cid == '0':
                ncid = insert_account(contacto_nombre_1=client['contacto_nombre_1'],
                                      contacto_nombre_2=client['contacto_nombre_2'],
                                      contacto_apellido_1=client['contacto_apellido_1'],
                                      contacto_apellido_2=client['contacto_apellido_2'],
                                      telefonofij=client['telefonofij'],
                                      telefonocel=client['telefonocel'],
                                      correo=client['correo'],
                                      direccion_calle=client['direccion_calle'],
                                      direccion_torre=client['direccion_torre'],
                                      direccion_apt=client['direccion_apt'],
                                      direccion_area=client['direccion_area'],
                                      ciudad=client['ciudad'],
                                      tarifa=client['tarifa'])
                # Re-read the freshly inserted row to learn its casillero.
                where = 'VMB_ACCOUNTS_id = %s' % (ncid)
                client = get_contact(where=where)
                cid = client['casillero']
            else:
                set_contact_by_casillero(newClient=client, casillero=cid, updatedUser=current_user.username)
            return redirect(url_for('viewClient', cid=cid))
    return render_template('clients/edit.html', client=client, actItems=actItems)
| {
"repo_name": "cindy-zimmerman/vmb-mrest",
"path": "flask_mrest/actions/client.py",
"copies": "1",
"size": "5538",
"license": "mit",
"hash": -1712777589072555300,
"line_mean": 45.15,
"line_max": 112,
"alpha_frac": 0.5545323221,
"autogenerated": false,
"ratio": 3.496212121212121,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9520321808392898,
"avg_score": 0.0060845269838449186,
"num_lines": 120
} |
from ps2_1 import evaluate_poly
from ps2_2 import compute_deriv
def compute_root(poly, x_0, epsilon):
    """Use Newton's method to find a root of a polynomial function.

    Returns a tuple (root, iterations).

    Example:
        >>> poly = (-13.39, 0.0, 17.5, 3.0, 1.0)  # x^4 + 3.0x^3 + 17.5x^2 - 13.39
        >>> print compute_root(poly, 0.1, 0.0001)
        (0.80679075379635201, 8)

    poly: tuple of numbers, length > 1, with at least one real root; the
        derivative at x_0 must not be 0.
    x_0: float, the initial guess.
    epsilon: float > 0, accepted |f(root)| tolerance.
    returns: tuple (float, int)
    """
    count = 0
    while True:
        count += 1
        result = evaluate_poly(poly, x_0)
        print 'x_0 =', x_0
        if abs(result) <= epsilon:
            # Close enough to a root.
            break
        else:
            # Newton step: x := x - f(x) / f'(x).
            x_0 = x_0 - (evaluate_poly(poly, x_0))/(evaluate_poly((compute_deriv(poly)), x_0))
    return (x_0, count)
## testing..
if __name__ == '__main__':
poly = (-13.39, 0.0, 17.5, 3.0, 1.0)
x_0 = 0.1
epsilon = 0.0001
print 'The root and number of itearaion is\n', compute_root(poly, x_0, epsilon)
| {
"repo_name": "MarcusHolloway/6.00SC",
"path": "ps2_3.py",
"copies": "1",
"size": "1241",
"license": "mit",
"hash": -3367153847151883000,
"line_mean": 30.025,
"line_max": 85,
"alpha_frac": 0.6518936342,
"autogenerated": false,
"ratio": 2.7334801762114536,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8680802342936749,
"avg_score": 0.04091429349494095,
"num_lines": 40
} |
import random
import string
WORD_LIST = "words.txt"
def load_words():
    """Return a list of valid words (lowercase strings) read from WORD_LIST.

    The whole list is expected on the first line of the file, separated
    by whitespace.  May take a while for large word lists.
    """
    print "Loading word list from file..."
    # Unbuffered read of the single-line word file.
    fin = open(WORD_LIST, 'r', 0)
    line = fin.readline()
    wordlist = string.split(line)
    ##print wordlist
    print ' ', len(wordlist), 'words loaded.'
    return wordlist
def choose_word(wordlist):
    """Return one word picked uniformly at random from `wordlist`."""
    picked = random.choice(wordlist)
    return picked
# Build the global word list once at import time (reads WORD_LIST from disk).
wordlist = load_words()
def part_of_word(mysterious_word, guessed_letters):
    """Render `mysterious_word` with every letter not yet in
    `guessed_letters` masked as '_'."""
    masked = [ch if ch in guessed_letters else '_' for ch in mysterious_word]
    return ''.join(masked)
def hangman():
    """Play one interactive game of Hangman on stdin/stdout.

    The player has 8 wrong guesses; already-used letters do not cost an
    attempt.  The game ends when the word is fully revealed or attempts
    run out.
    """
    attemps = 8               # wrong guesses remaining
    isGuessed = False
    guessed_letters = ''      # all correctly guessed letters so far
    mysterious_word = choose_word(wordlist)
    available_letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',
                         'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x',
                         'y', 'z']
    print 'Welcome to the game, Hangman!'
    print 'I am thinking of a word that is %d letters long.' %len(mysterious_word)
    while attemps > 0 and not isGuessed:
        print '--------------'
        print 'You have %d guesses left.' %attemps
        print 'Available letters: %s' %''.join(available_letters)
        guessed_char = raw_input('Please guess a letter:')
        if guessed_char not in available_letters:
            # Repeated letter: no penalty, just re-show progress.
            print 'Opps! You already enter that letter: ' + part_of_word(mysterious_word, guessed_letters)
        elif guessed_char not in mysterious_word:
            # Wrong letter: costs one attempt.
            attemps -= 1
            available_letters.remove(guessed_char)
            print 'Oops! That letter is not in my word: ' + part_of_word(mysterious_word, guessed_letters)
        else:
            # Correct letter: reveal it.
            available_letters.remove(guessed_char)
            guessed_letters += guessed_char
            print 'Good guess: ' + part_of_word(mysterious_word, guessed_letters)
        if mysterious_word == part_of_word(mysterious_word, guessed_letters):
            isGuessed = True
    if isGuessed:
        print 'Congratulations, you won!'
    else:
        print 'Oops! You lost!'
| {
"repo_name": "MarcusHolloway/6.00SC",
"path": "ps2_4.py",
"copies": "1",
"size": "2507",
"license": "mit",
"hash": 7463818048138955000,
"line_mean": 33.3098591549,
"line_max": 106,
"alpha_frac": 0.5791783008,
"autogenerated": false,
"ratio": 3.5916905444126073,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9623952940217677,
"avg_score": 0.009383180998986215,
"num_lines": 71
} |
def cal_credit_balance():
    """Simulate 12 months of paying only the minimum on a credit card.

    Reads the starting balance, annual interest rate and minimum payment
    rate from stdin, prints the month-by-month breakdown, then the total
    paid and the remaining balance.
    """
    outstanding_balance = float(raw_input('Enter the outstanding balance on your credit card:'))
    annual_interest_rate = float(raw_input('Enter the annual credit card interest rate as decimal:'))
    min_monthly_payment_rate = float(raw_input('Enter the minimum monthly payment rate as decimal:'))
    monthly_interest_rate = annual_interest_rate / 12.0
    months = 0
    total_amount = 0
    balance = outstanding_balance
    while(months < 12):
        # Each month: pay the minimum, which covers interest first.
        min_monthly_payment = min_monthly_payment_rate * balance
        interest_paid = monthly_interest_rate * balance
        principal_paid = min_monthly_payment - interest_paid
        remaining_balance = balance - principal_paid
        months += 1
        print 'Month:', months
        print 'Minimum monthly payment: $' + str(round(min_monthly_payment, 2))
        print 'Principal paid: $' + str(round(principal_paid, 2))
        print 'Remaining balance: $' + str(round(remaining_balance, 2))
        total_amount += min_monthly_payment
        balance = remaining_balance
    print 'RESULT'
    print 'Total amount paid: $' + str(round(total_amount, 2))
    print 'Remaining balance: $' + str(round(remaining_balance, 2))
## testing...
if __name__ == '__main__':
    cal_credit_balance()
    # NOTE(review): Python 2 input() evaluates what is typed; raw_input
    # would be the safe choice for a "press Enter" pause.
    input('Press Enter to Exit!!')
| {
"repo_name": "MarcusHolloway/6.00SC",
"path": "ps_1a.py",
"copies": "1",
"size": "1386",
"license": "mit",
"hash": 2886143291776234500,
"line_mean": 38.7647058824,
"line_max": 101,
"alpha_frac": 0.63997114,
"autogenerated": false,
"ratio": 3.860724233983287,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5000695373983287,
"avg_score": null,
"num_lines": null
} |
def cal_min_fixed_monthly_payment():
    """Find the smallest fixed monthly payment (a multiple of $10) that
    pays off a credit-card balance within 12 months.

    Reads the balance and annual rate from stdin, then brute-forces
    payments in $10 steps, replaying the 12-month schedule each time.
    """
    outstanding_balance = float(raw_input('Enter the outstanding balance on your credit card:'))
    annual_interest_rate = float(raw_input('Enter the annual credit card interest rate as decimal:'))
    monthly_interest_rate = annual_interest_rate / 12.0
    updated_balance = outstanding_balance
    min_month_payment = 0.0
    # Try $10, $20, ... until a full year's schedule drives the balance
    # to zero or below.
    while updated_balance > 0.0:
        months = 0
        min_month_payment += 10.0
        previous_balance = outstanding_balance
        while months < 12 and updated_balance > 0.0:
            months += 1
            # Interest accrues, then the fixed payment is applied.
            updated_balance = previous_balance *(1 + monthly_interest_rate) - min_month_payment
            previous_balance = updated_balance
    print 'RESULT'
    print 'Monthly payment to pay off debt in 1 year:$' + str(round(min_month_payment, 2))
    print 'Number of months needed:' + str(months)
    print 'Balance: $' + str(round(updated_balance, 2))
## testing..
# Run the interactive calculator when executed as a script.
if __name__ == '__main__':
    cal_min_fixed_monthly_payment()
| {
"repo_name": "MarcusHolloway/6.00SC",
"path": "ps_1b.py",
"copies": "1",
"size": "1089",
"license": "mit",
"hash": 35078077625912436,
"line_mean": 35.5517241379,
"line_max": 101,
"alpha_frac": 0.6271808999,
"autogenerated": false,
"ratio": 3.7294520547945207,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4856632954694521,
"avg_score": null,
"num_lines": null
} |
def cal_min_payment():
    """Find the monthly payment that pays a balance off in 12 months,
    using bisection search.

    The payment is bracketed between balance/12 (no interest) and
    balance*(1+r)^12/12 (all interest up front) and bisected until the
    final balance is 0 or the bracket is narrower than epsilon.
    """
    outstanding_balance = float(raw_input('Enter the outstanding balance on your credit card:'))
    annual_interest_rate = float(raw_input('Enter the annual credit card interest rate as decimal:'))
    monthly_interest_rate = annual_interest_rate / 12.0
    # Bisection bounds and tolerance.
    epsilon = 0.001
    lower_payment_bound = outstanding_balance / 12.0
    upper_payment_bound = (outstanding_balance * (1 + monthly_interest_rate)**12) / 12.0
    min_monthly_payment = (upper_payment_bound + lower_payment_bound) / 2.0
    month = 0
    while True:
        previous_balance = outstanding_balance
        min_monthly_payment = (upper_payment_bound + lower_payment_bound) / 2.0
        # Replay up to 12 months with the candidate payment.
        for month in range(1, 13):
            updated_balance = previous_balance * (1 + monthly_interest_rate) - min_monthly_payment
            previous_balance = updated_balance
            if updated_balance < 0:
                break
        if updated_balance == 0 or(upper_payment_bound - lower_payment_bound) / 2.0 < epsilon:
            break
        elif updated_balance < 0:
            # Overshot: the payment can be smaller.
            upper_payment_bound = min_monthly_payment
        else:
            # Not paid off: the payment must be larger.
            lower_payment_bound = min_monthly_payment
    print 'RESULT'
    print 'Monthly payment to pay off debt in 1 year:$' + str(round(min_monthly_payment, 2))
    print 'Number of months needed:' + str(month)
    print 'Balance: $' + str(round(updated_balance, 2))
## testing..
# Run the interactive calculator when executed as a script.
if __name__ == '__main__':
    cal_min_payment()
| {
"repo_name": "MarcusHolloway/6.00SC",
"path": "ps_1c.py",
"copies": "1",
"size": "1650",
"license": "mit",
"hash": 9053954728276165000,
"line_mean": 37.2857142857,
"line_max": 101,
"alpha_frac": 0.6212121212,
"autogenerated": false,
"ratio": 3.793103448275862,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9819221466411576,
"avg_score": 0.019018820612857108,
"num_lines": 42
} |
__author__ = 'Daan Debie'
from abc import ABCMeta
from abc import abstractmethod
from datetime import datetime
def datetime_from_http_datestring(datestring):
    """
    Converts a datestring as present in HTTP headers (e.g.
    'Mon, 01 Jan 2018 00:00:00 GMT') to a naive python datetime object.
    """
    http_date_format = '%a, %d %b %Y %H:%M:%S %Z'
    return datetime.strptime(datestring, http_date_format)
def http_datestring_from_datetime(dt):
    """Format `dt` as an HTTP date header string.

    HTTP datetimes are always expressed in GMT; `dt` is assumed to
    already be in GMT/UTC.
    """
    http_date_format = '%a, %d %b %Y %H:%M:%S GMT'
    return dt.strftime(http_date_format)
class PlaylistCache(object):
    """
    Abstract Base Class representing the cache for PlaylistItems

    This must be subclassed to implement a caching method. The subclass
    is responsible for serializing and deserializing PlaylistItem objects.
    """
    # Python 2 style ABC declaration.
    __metaclass__ = ABCMeta

    @abstractmethod
    def get(self, key):
        """ Get a PlaylistItem from the cache """
        pass

    @abstractmethod
    def put(self, key, value):
        """ Put a PlaylistItem in the cache """
        pass

    @abstractmethod
    def remove(self, key):
        """ Remove a PlaylistItem from cache """
        pass
class PlaylistItem:
    """A single playlist entry plus the HTTP freshness metadata needed for caching."""

    def __init__(self, name, uri, last_modified, expires):
        # Identity of the entry.
        self.name = name
        self.uri = uri
        # Freshness window, presumably taken from the Last-Modified /
        # Expires HTTP headers (see the datestring helpers above).
        self.last_modified = last_modified
        self.expires = expires

    def is_expired(self):
        """Return True once the current UTC time has passed the expiry timestamp."""
        now = datetime.utcnow()
        return now > self.expires

    def __str__(self):
        return self.name
class MemPlaylistCache(PlaylistCache):
    """In-memory PlaylistCache backed by a plain dict.

    Naive on purpose: no eviction, no expiry enforcement, no locking.
    """

    def __init__(self):
        self.cache = {}

    def get(self, key):
        # dict.get already yields None on a miss, matching the original
        # explicit membership-test implementation.
        return self.cache.get(key)

    def put(self, key, value):
        self.cache[key] = value

    def remove(self, key):
        # Raises KeyError when *key* is absent, same as the original.
        del self.cache[key]

    def __str__(self):
        lines = []
        for key in self.cache.iterkeys():
            lines.append("* {} / {} \n".format(key, self.cache[key].uri))
        return "".join(lines)
"repo_name": "davidsogn/PyPlaylist",
"path": "playlist/cache.py",
"copies": "1",
"size": "2047",
"license": "apache-2.0",
"hash": 2729936508778319000,
"line_mean": 23.6746987952,
"line_max": 107,
"alpha_frac": 0.6145578896,
"autogenerated": false,
"ratio": 4.0216110019646365,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5136168891564636,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daan Debie'
from threading import Thread
from Queue import Queue
from generator import PlaylistGenerator
class PLGeneratorThread(Thread):
"""
Class that provides threaded playlist generation. It takes one sentence off a queue and
processes them. Results are stored in the Thread object itself.
One of these Threads should be created for each message to be processed. Because of the average
long running time of querying the API, race conditions or deadlocks should be rare to non-existant. Should one of
the threads fail in some way, however, not all messages will be processed.
"""
def __init__(self, queue, generator):
Thread.__init__(self)
self.queue = queue
self.generator = generator
self.payload = None
self.incomplete = True # If the Thread fails somehow, it should be considered incomplete
self.playlist = None
self.position = None
def run(self):
self.payload = self.queue.get()
self.position = self.payload[1]
print "running thread {}".format(str(self.position))
# We're processing multiple sentences, almost guaranteeing multiple playlist entries,
# se we can use max_chunk_length
self.playlist, self.incomplete = self.generator.generate_playlist(self.payload[0], True)
self.queue.task_done()
def generate_multiple_playlists_threaded(list_of_messages, cache):
    """Generate one playlist per message, using one worker thread per message.

    Returns a list of (playlist, incomplete) tuples in the same order as
    *list_of_messages*.
    """
    work_queue = Queue()
    generator = PlaylistGenerator(cache)
    # Spawn one daemon worker per message; all share the queue and generator.
    workers = [PLGeneratorThread(work_queue, generator) for _ in list_of_messages]
    for worker in workers:
        worker.setDaemon(True)
        worker.start()
    # Enqueue (message, index) pairs so results can be re-ordered afterwards.
    for position, message in enumerate(list_of_messages):
        work_queue.put((message, position))
    # Block until every queued item has been marked done.
    # NOTE: if a worker dies before calling task_done(), this join deadlocks,
    # because part of the queue won't be processed.
    work_queue.join()
    # Restore submission order before collecting results.
    workers.sort(key=lambda worker: worker.position)
    return [(worker.playlist, worker.incomplete) for worker in workers]
def generate_multiple_playlists_naive(list_of_messages, cache):
    """Sequentially generate a playlist for each message (no threading).

    Returns the generator results in the same order as *list_of_messages*.
    """
    generator = PlaylistGenerator(cache)
    return [generator.generate_playlist(message) for message in list_of_messages]
| {
"repo_name": "davidsogn/PyPlaylist",
"path": "playlist/plthreading.py",
"copies": "1",
"size": "2454",
"license": "apache-2.0",
"hash": 4595702250171069400,
"line_mean": 39.2295081967,
"line_max": 118,
"alpha_frac": 0.7033414833,
"autogenerated": false,
"ratio": 4.312829525483304,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5516171008783304,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Daan Wierstra and Tom Schaul'
from itertools import chain
from scipy import zeros
from pybrain.structure.networks.feedforward import FeedForwardNetwork
from pybrain.structure.networks.recurrent import RecurrentNetwork
from pybrain.structure.modules.neuronlayer import NeuronLayer
from pybrain.structure.connections import FullConnection
# CHECKME: allow modules that do not inherit from NeuronLayer? and treat them as single neurons?
class NeuronDecomposableNetwork(object):
    """ A Network, that allows accessing parameters decomposed by their
    corresponding individual neuron. """

    # ESP style treatment: weights of connections feeding an output module are
    # attributed to the *source* neuron rather than the output neuron.
    espStyleDecomposition = True

    def addModule(self, m):
        # Only NeuronLayer modules are supported, because the decomposition
        # addresses parameters per (layer, neuron-index) pair.
        assert isinstance(m, NeuronLayer)
        super(NeuronDecomposableNetwork, self).addModule(m)

    def sortModules(self):
        # Finalize the topology as usual, then build the neuron-wise index map.
        super(NeuronDecomposableNetwork, self).sortModules()
        self._constructParameterInfo()
        # contains a list of lists of indices: maps each neuron
        # (module, index) tuple to the positions in self.params owned by it.
        self.decompositionIndices = {}
        for neuron in self._neuronIterator():
            self.decompositionIndices[neuron] = []
        for w in range(self.paramdim):
            inneuron, outneuron = self.paramInfo[w]
            if self.espStyleDecomposition and outneuron[0] in self.outmodules:
                # ESP style: weights into an output module belong to the
                # neuron the connection originates from.
                self.decompositionIndices[inneuron].append(w)
            else:
                self.decompositionIndices[outneuron].append(w)

    def _neuronIterator(self):
        # Yield every neuron as a (module, index-within-module) tuple.
        for m in self.modules:
            for n in range(m.dim):
                yield (m, n)

    def _constructParameterInfo(self):
        """ construct a dictionary with information about each parameter:
        The key is the index in self.params, and the value is a tuple containing
        (inneuron, outneuron), where a neuron is a tuple of its module and an index.
        """
        self.paramInfo = {}
        # Running offset of the current container within the flat params array.
        index = 0
        for x in self._containerIterator():
            if isinstance(x, FullConnection):
                for w in range(x.paramdim):
                    # Map the flat weight index to the buffer slots it connects.
                    inbuf, outbuf = x.whichBuffers(w)
                    self.paramInfo[index + w] = ((x.inmod, x.inmod.whichNeuron(outputIndex=inbuf)),
                                                 (x.outmod, x.outmod.whichNeuron(inputIndex=outbuf)))
            elif isinstance(x, NeuronLayer):
                # A layer's own parameters belong to the neuron itself.
                for n in range(x.paramdim):
                    self.paramInfo[index + n] = ((x, n), (x, n))
            else:
                # NOTE(review): bare `raise` outside an except block produces
                # "No active exception to re-raise" at runtime; an explicit
                # exception type would be clearer — confirm intent.
                raise
            index += x.paramdim

    def getDecomposition(self):
        """ return a list of arrays, each corresponding to one neuron's relevant parameters """
        res = []
        for neuron in self._neuronIterator():
            nIndices = self.decompositionIndices[neuron]
            # Neurons with no attributed parameters are skipped entirely, so
            # the result can be shorter than the total neuron count.
            if len(nIndices) > 0:
                tmp = zeros(len(nIndices))
                for i, ni in enumerate(nIndices):
                    tmp[i] = self.params[ni]
                res.append(tmp)
        return res

    def setDecomposition(self, decomposedParams):
        """ set parameters by neuron decomposition,
        each corresponding to one neuron's relevant parameters """
        # nindex advances only for neurons that own parameters, mirroring the
        # ordering produced by getDecomposition().
        nindex = 0
        for neuron in self._neuronIterator():
            nIndices = self.decompositionIndices[neuron]
            if len(nIndices) > 0:
                for i, ni in enumerate(nIndices):
                    self.params[ni] = decomposedParams[nindex][i]
                nindex += 1

    @staticmethod
    def convertNormalNetwork(n):
        """ convert a normal network into a decomposable one """
        # Pick the matching decomposable subclass; recurrent connections are
        # carried over only for recurrent networks.
        if isinstance(n, RecurrentNetwork):
            res = RecurrentDecomposableNetwork()
            for c in n.recurrentConns:
                res.addRecurrentConnection(c)
        else:
            res = FeedForwardDecomposableNetwork()
        for m in n.inmodules:
            res.addInputModule(m)
        for m in n.outmodules:
            res.addOutputModule(m)
        for m in n.modules:
            res.addModule(m)
        # n.connections maps modules to connection lists; chain flattens them.
        for c in chain(*list(n.connections.values())):
            res.addConnection(c)
        res.name = n.name
        res.sortModules()
        return res
class FeedForwardDecomposableNetwork(NeuronDecomposableNetwork, FeedForwardNetwork):
    """Feed-forward network with neuron-wise parameter decomposition.

    All behavior comes from the two base classes; no overrides needed.
    """
    pass
class RecurrentDecomposableNetwork(NeuronDecomposableNetwork, RecurrentNetwork):
    """Recurrent network with neuron-wise parameter decomposition.

    All behavior comes from the two base classes; no overrides needed.
    """
    pass
| {
"repo_name": "jackru/pybrain",
"path": "pybrain/structure/networks/neurondecomposable.py",
"copies": "25",
"size": "4341",
"license": "bsd-3-clause",
"hash": -1961687884108220700,
"line_mean": 36.4224137931,
"line_max": 99,
"alpha_frac": 0.6178299931,
"autogenerated": false,
"ratio": 4.2558823529411764,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008518096198359535,
"num_lines": 116
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.