function stringlengths 11 56k | repo_name stringlengths 5 60 | features list |
|---|---|---|
def get(
self,
location: str,
gallery_unique_name: str,
**kwargs: Any | Azure/azure-sdk-for-python | [
3526,
2256,
3526,
986,
1335285972
] |
def cleanUpName(aName):
    """Normalize a pathway name into an uppercase, underscore-separated token.

    Steps:
      * strip a trailing " - Homo sapiens (human)" suffix (matched after
        upper-casing),
      * delete any "(...)" and "<...>" spans,
      * map separator characters (space, '-', '/', ';', '(', ')') to '_',
        drop ',', '&' and '#', upper-case everything else,
      * collapse runs of consecutive underscores.

    Fixes vs. the original: an unmatched '(' or '<' no longer causes an
    infinite loop (the unterminated tail is dropped instead), the prints
    use Python 3 syntax, and the noisy debug prints in the underscore
    collapse loop were removed.
    """
    aName = aName.upper()
    ## ii = aName.find(" - Homo sapiens (human)")
    ii = aName.find(" - HOMO SAPIENS (HUMAN)")
    if ii >= 0:
        aName = aName[:ii]
    aName = aName.strip()

    # remove bracketed spans; guard against an unmatched opening delimiter,
    # which made the original while-loop grow the string forever
    for openCh, closeCh in (("(", ")"), ("<", ">")):
        ii = aName.find(openCh)
        while ii >= 0:
            jj = aName.find(closeCh, ii)
            if jj < 0:
                # no closing delimiter: drop the rest of the string
                aName = aName[:ii]
                break
            aName = aName[:ii] + aName[jj + 1:]
            ii = aName.find(openCh)
        aName = aName.strip()

    bName = ''
    for ch in aName:
        if ch in ",&#":
            # dropped outright
            continue
        elif ch in "()-/; ":
            # separators become underscores
            bName += '_'
        else:
            bName += ch.upper()

    # collapse double underscores left by adjacent separators
    ii = bName.find("__")
    while ii >= 0:
        bName = bName[:ii] + bName[ii + 1:]
        ii = bName.find("__")
    return bName
6,
3,
6,
11,
1383861509
] |
def readPathways():
    """Read the NCI-PID pathway table and return {cleanedName: sortedGeneList}.

    The tab-separated input has rows of (pathway, shortName, geneCSV).
    Duplicate cleaned names keep the larger gene list; pathways whose gene
    lists exactly duplicate another source's list are pruned using a fixed
    source-preference order.  Python 2 code (print statements, file()).
    """
    fh = file(
        gidgetConfigVars['TCGAFMP_BIOINFORMATICS_REFERENCES'] + "/nci_pid/only_NCI_Nature_ver4.tab", 'r')
    pwDict = {}
    for aLine in fh:
        aLine = aLine.strip()
        aLine = aLine.upper()
        tokenList = aLine.split('\t')
        # expect exactly three columns: long name, short name, gene list
        if (len(tokenList) != 3):
            continue
        # NOTE(review): aLine was upper()'d above, so this header test can
        # never match lowercase "pathway" -- confirm whether "PATHWAY" was meant
        if (tokenList[0] == "pathway"):
            continue
        longPathwayName = tokenList[0]
        shortPathwayName = tokenList[1]
        geneTokens = tokenList[2].strip()
        geneList = geneTokens.split(',')
        geneList.sort()
        # drop leading empty strings left by stray commas
        if (len(geneList) > 0):
            while (geneList[0] == ''):
                geneList = geneList[1:]
                # NOTE(review): 'continue' resumes the while, whose test will
                # IndexError on an empty list; 'break' looks intended -- verify
                if (len(geneList) == 0):
                    continue
        if (len(geneList) == 0):
            continue
        pathwayName = cleanUpName(shortPathwayName)
        if (pathwayName not in pwDict.keys()):
            # print " adding pathway %s (%d) " % ( pathwayName, len(geneList) )
            pwDict[pathwayName] = geneList
        else:
            # same cleaned name seen before: keep the longer gene list
            if (len(pwDict[pathwayName]) < len(geneList)):
                # print " substituting shorter list of genes for %s (%d) " % (
                # pathwayName, len(geneList) )
                pwDict[pathwayName] = geneList
            # else:
            # print " NOT substituing list for %s " % pathwayName
    fh.close()
    print " "
    print " have pathway dictionary with %d pathways " % len(pwDict)
    print " --> now looking for duplicate pathways ... "
    pwList = pwDict.keys()
    pwList.sort()
    delList = []
    pairDict = {}
    # pairwise scan for pathways with identical gene lists (same length first,
    # then full list equality)
    for ii in range(len(pwList) - 1):
        iiName = pwList[ii]
        iiLen = len(pwDict[iiName])
        for jj in range(ii + 1, len(pwList)):
            jjName = pwList[jj]
            jjLen = len(pwDict[jjName])
            if (jjLen != iiLen):
                continue
            if (pwDict[iiName] == pwDict[jjName]):
                print "\n\n SAME !!! "
                print iiName, iiLen
                print pwDict[iiName]
                print jjName, jjLen
                print pwDict[jjName]
                # names look like <name>__<source>; tally duplicate pairs per
                # (source, source) combination
                iiSplit = iiName.split('__')
                jjSplit = jjName.split('__')
                if (iiSplit[1] <= jjSplit[1]):
                    pairNames = (iiSplit[1], jjSplit[1])
                else:
                    pairNames = (jjSplit[1], iiSplit[1])
                if (pairNames in pairDict.keys()):
                    pairDict[pairNames] += 1
                else:
                    pairDict[pairNames] = 1
                # same source: keep the shorter name; different sources: drop
                # the one from the less-preferred source (fixed order below)
                if (iiSplit[1] == jjSplit[1]):
                    if (len(iiName) <= len(jjName)):
                        delList += [jjName]
                    else:
                        delList += [iiName]
                else:
                    if (iiSplit[1] == "NCI-NATURE"):
                        delList += [jjName]
                    elif (jjSplit[1] == "NCI-NATURE"):
                        delList += [iiName]
                    elif (iiSplit[1] == "PID"):
                        delList += [jjName]
                    elif (jjSplit[1] == "PID"):
                        delList += [iiName]
                    elif (iiSplit[1] == "KEGG"):
                        delList += [jjName]
                    elif (jjSplit[1] == "KEGG"):
                        delList += [iiName]
                    elif (iiSplit[1] == "PWCOMMONS"):
                        delList += [jjName]
                    elif (jjSplit[1] == "PWCOMMONS"):
                        delList += [iiName]
                    elif (iiSplit[1] == "REACTOME"):
                        delList += [jjName]
                    elif (jjSplit[1] == "REACTOME"):
                        delList += [iiName]
                    elif (iiSplit[1] == "WIKIPATHWAYS"):
                        delList += [jjName]
                    elif (jjSplit[1] == "WIKIPATHWAYS"):
                        delList += [iiName]
                    elif (iiSplit[1] == "WIKIPW"):
                        delList += [jjName]
                    elif (jjSplit[1] == "WIKIPW"):
                        delList += [iiName]
                    elif (iiSplit[1] == "SMPDB"):
                        delList += [jjName]
                    elif (jjSplit[1] == "SMPDB"):
                        delList += [iiName]
                    elif (iiSplit[1] == "HUMANCYC"):
                        delList += [jjName]
                    elif (jjSplit[1] == "HUMANCYC"):
                        delList += [iiName]
                    else:
                        # unrecognized source pair: bail out hard
                        sys.exit(-1)
    # a name can be queued for deletion twice; the bare except swallows the
    # second (already-deleted) attempt
    for aName in delList:
        try:
            del pwDict[aName]
        except:
            doNothing = 1
    print " "
    print " returning pathway dictionary with %d pathways " % len(pwDict)
    print " "
    for aKey in pairDict.keys():
        print aKey, pairDict[aKey]
    print " "
    print " "
    return (pwDict) | cancerregulome/gidget | [
6,
3,
6,
11,
1383861509
] |
def setFeatBits(rowLabels, featPrefix, doesContainList, notContainList):
    """Return a boolean vector flagging row labels that pass the filters.

    A row is flagged when it starts with featPrefix (if non-empty), contains
    at least one substring from doesContainList (if non-empty), and contains
    no substring from notContainList (if non-empty).

    Fixes vs. the original: skipFlag is now initialized, so passing two empty
    filter lists no longer raises NameError; numpy.bool (removed in NumPy
    1.24) was replaced with the builtin bool; prints use Python 3 syntax.
    """
    numSet = 0
    numRow = len(rowLabels)
    bitVec = numpy.zeros(numRow, dtype=bool)
    for iR in range(numRow):
        label = rowLabels[iR]
        if featPrefix != "" and not label.startswith(featPrefix):
            continue
        skipFlag = 0  # bug fix: was undefined when both filter lists were empty
        if len(doesContainList) > 0:
            # skip unless at least one required substring is present
            skipFlag = 1
            for aStr in doesContainList:
                if label.find(aStr) >= 0:
                    skipFlag = 0
        if len(notContainList) > 0:
            # skip when any forbidden substring is present
            # (note: preserves the original reset of skipFlag)
            skipFlag = 0
            for aStr in notContainList:
                if label.find(aStr) >= 0:
                    skipFlag = 1
        if skipFlag:
            continue
        # set bit if we get here ...
        bitVec[iR] = 1
        numSet += 1
    print(featPrefix, doesContainList, notContainList, numRow, numSet)
    if numSet == 0:
        print(" numSet=0 ... this is probably a problem ... ")
    return bitVec
6,
3,
6,
11,
1383861509
] |
def makeNewFeatureName(curFeatName, oldStringList, newStringList):
    """Return curFeatName with the first matching old substring swapped out.

    oldStringList and newStringList are parallel lists; the first old
    substring found in curFeatName is replaced (first occurrence only) by
    its paired new substring.  If nothing matches, an error is printed and
    the process exits, as in the original.
    """
    for oldStr, newStr in zip(oldStringList, newStringList):
        if oldStr in curFeatName:
            # replace only the first occurrence, as the original slicing did
            return curFeatName.replace(oldStr, newStr, 1)
    print(" ERROR in makeNewFeatureName ???? ", curFeatName, oldStringList, newStringList)
    sys.exit(-1)
6,
3,
6,
11,
1383861509
] |
def chooseCountThreshold(dataD):
    """Derive a count threshold from the TP53 mutation feature.

    Counts how many entries in the first "B:GNAB:TP53:" row are exactly 0 or
    1 (i.e. non-NA mutation calls) and returns int(count / 11) - 1.  Returns
    -1 when no such row (or no countable bits) is found.

    Fixes vs. the original: the odd "if numBits > 0: continue" scan guard is
    replaced with an explicit early exit, and the print uses Python 3 syntax.
    """
    rowLabels = dataD['rowLabels']
    dMat = dataD['dataMatrix']
    numBits = 0
    for ii in range(len(rowLabels)):
        if rowLabels[ii].find("B:GNAB:TP53:") >= 0:
            for value in dMat[ii]:
                if value == 0 or value == 1:
                    numBits += 1
            # as in the original, stop scanning once a TP53 row yielded bits
            if numBits > 0:
                break
    print(" number of bits found for TP53 mutation feature: ", numBits)
    countThreshold = int(numBits / 11) - 1
    return countThreshold
6,
3,
6,
11,
1383861509
] |
def findFeature(rowLabels, s1, s2):
    """Return the index of the first label containing both s1 and s2, else -1."""
    for idx, label in enumerate(rowLabels):
        if s1 in label and s2 in label:
            return idx
    return -1
6,
3,
6,
11,
1383861509
] |
def pathwayGnab(dataD, pathways={}):
print " "
print " ************************************************************* "
print " ************************************************************* "
print " "
print " in pathwayGnab ... "
# check that the input feature matrix looks ok ...
try:
numRow = len(dataD['rowLabels'])
numCol = len(dataD['colLabels'])
rowLabels = dataD['rowLabels']
print " %d rows x %d columns " % (numRow, numCol)
# print rowLabels[:5]
# print rowLabels[-5:]
except:
print " ERROR in pathwayGnab ??? bad data ??? "
return (dataD)
if (len(pathways) == 0):
print " "
print " WARNING: no pathway information found ... using a few hard-coded pathways for now "
print " "
pathways = {}
pathways[
"TP53_pathway"] = ["E2F1", "TP53", "RB1", "CDK4", "TIMP3", "CDK2", "ATM",
"CCNE1", "CCND1", "CDKN1A", "BCL2", "BAX", "PCNA", "MDM2",
"APAF1", "GADD45A"]
pathways[
"PI3K_AKT_pathway"] = ["FRAP1", "LST8", "PDPK1", "NGF", "NR4A1", "FOXO1", "CHUK",
"THEM4", "PTEN", "CREB1", "BAD", "RHOA", "TRIB3", "PHLPP",
"CASP9", "AKT1S1", "MDM2", "RPS6KB2"]
pathways[
"Wnt_pathway"] = ["PPP2R5B", "PPP2R5A", "PPP2R5D", "BTRC", "WNT3A",
"PPP2R5C", "MMP7", "PRKX", "CTNNB1", "WNT2", "CSNK2A2", "MAP3K7", "PRKACG",
"WNT1", "WNT4", "WNT3", "CSNK2A1", "PRKACA", "PRKACB", "WNT6", "CUL1",
"WNT10A", "WNT10B", "VANGL1", "ROCK1", "ROCK2", "VANGL2", "CHP2", "SKP1",
"EP300", "JUN", "MAPK9", "PPP2R5E", "MAPK8", "LOC728622", "WNT5A", "WNT5B",
"CXXC4", "DAAM1", "DAAM2", "RBX1", "RAC2", "RAC3", "RAC1", "CACYBP",
"AXIN2", "AXIN1", "DVL2", "DVL3", "TCF7", "CREBBP", "SMAD4", "SMAD3",
"SMAD2", "PORCN", "DVL1", "SFRP5", "SFRP1", "PRICKLE1", "SFRP2", "SFRP4",
"PRICKLE2", "WIF1", "PPARD", "PLCB3", "PLCB4", "FRAT1", "RHOA", "FRAT2",
"SOX17", "PLCB1", "FOSL1", "MYC", "PLCB2", "PPP2R1B", "PRKCA", "PPP2R1A",
"TBL1XR1", "CTBP1", "CTBP2", "TP53", "LEF1", "PRKCG", "PRKCB", "CTNNBIP1",
"SENP2", "CCND1", "PSEN1", "CCND3", "CCND2", "WNT9B", "WNT11", "SIAH1",
"RUVBL1", "WNT9A", "CER1", "NKD1", "WNT16", "NKD2", "APC2", "CAMK2G",
"PPP3R1", "PPP3R2", "TCF7L2", "TCF7L1", "CHD8", "PPP2CA", "PPP2CB",
"PPP3CB", "NFAT5", "CAMK2D", "PPP3CC", "NFATC4", "CAMK2B", "CHP",
"PPP3CA", "NFATC2", "NFATC3", "FBXW11", "CAMK2A", "WNT8A", "WNT8B",
"APC", "NFATC1", "CSNK1A1", "FZD9", "FZD8", "NLK", "FZD1", "CSNK2B",
"CSNK1A1L", "FZD3", "FZD2", "MAPK10", "FZD5", "FZD4", "FZD7", "DKK4",
"WNT2B", "FZD6", "DKK2", "FZD10", "WNT7B", "DKK1", "CSNK1E", "GSK3B",
"LRP6", "TBL1X", "WNT7A", "LRP5", "TBL1Y"]
print " "
print " total number of pathways : ", len(pathways)
print " "
mutationTypes = [":y_n_somatic", ":code_potential_somatic",
":missense_somatic",
":y_del_somatic", ":y_amp_somatic"]
numTypes = len(mutationTypes)
pathwayList = pathways.keys()
pathwayList.sort()
numPW = len(pathways)
newNameVec = [0] * (numPW * numTypes)
newDataMat = [0] * (numPW * numTypes)
dMat = dataD['dataMatrix']
min_numON = chooseCountThreshold(dataD)
if (min_numON < (numCol / 100)):
min_numON = int(numCol / 100)
print " minimum count threshold : ", min_numON
kFeat = 0
max_numON = 0
max_fracON = 0.
## outer loop is over pathways ...
for aPathway in pathwayList:
print " "
print " outer loop over pathways ... ", aPathway
## next loop is over mutation types
for aMutType in mutationTypes:
numON = 0
newFeatName = "B:GNAB:" + aPathway + "::::" + aMutType
print " new feature name : ", newFeatName
# first make sure we don't already have a feature with this name
stopNow = 0
for iRow in range(numRow):
if (newFeatName == rowLabels[iRow]):
print " STOPPING ... this feature already exists ??? ", newFeatName
stopNow = 1
if (stopNow): continue
print " tentative new feature #%d ... <%s> " % (kFeat, newFeatName)
newNameVec[kFeat] = newFeatName
newDataMat[kFeat] = numpy.zeros(numCol)
if (0):
print " "
print " "
print aPathway, newFeatName
print len(pathways[aPathway]), pathways[aPathway]
## and now we can loop over the genes in the pathway
for gnabGene in pathways[aPathway]:
print " looking for pathway gene ", gnabGene
## and look for the desired feature
iR = findFeature ( rowLabels, "B:GNAB:"+gnabGene+":", aMutType )
## if we don't find anything, and we are working on y_del or y_amp
## then we can use y_n instead
if ( iR < 0 ):
print " --> failed to find desired feature ", gnabGene, aMutType
if ( (aMutType==":y_del_somatic") or (aMutType==":y_amp_somatic") ):
iR = findFeature ( rowLabels, "B:GNAB:"+gnabGene+":", ":y_n_somatic" )
if ( iR >= 0 ):
print " --> will use this feature instead ", iR, rowLabels[iR]
else:
print " --> failed to find even a backup feature "
else:
print " --> FOUND desired feature ", gnabGene, aMutType, iR, rowLabels[iR] | cancerregulome/gidget | [
6,
3,
6,
11,
1383861509
] |
def driverGnab(dataD, driverList):
    """Add aggregate "driver gene mutated" features to the feature matrix.

    For each mutation type, builds one new binary row "B:GNAB:driverMut::::<type>"
    that is the OR of that mutation type's bits across every gene in
    driverList, then appends the new rows to dataD['rowLabels'] /
    dataD['dataMatrix'] and returns dataD.  Python 2 code (print statements).
    """
    print " "
    print " ************************************************************* "
    print " ************************************************************* "
    print " "
    print " in driverGnab ... "
    # check that the input feature matrix looks ok ...
    try:
        numRow = len(dataD['rowLabels'])
        numCol = len(dataD['colLabels'])
        rowLabels = dataD['rowLabels']
        print " %d rows x %d columns " % (numRow, numCol)
        # print rowLabels[:5]
        # print rowLabels[-5:]
    except:
        print " ERROR in driverGnab ??? bad data ??? "
        return (dataD)
    mutationTypes = [":y_n_somatic", ":code_potential_somatic",
                     ":missense_somatic",
                     ":y_del_somatic", ":y_amp_somatic"]
    numTypes = len(mutationTypes)
    numK = 1
    # pre-sized slots for the (at most) one new feature per mutation type
    newNameVec = [0] * (numK * numTypes)
    newDataMat = [0] * (numK * numTypes)
    dMat = dataD['dataMatrix']
    kFeat = 0
    if (1):
        for aMutType in mutationTypes:
            newFeatName = "B:GNAB:driverMut" + "::::" + aMutType
            # first make sure we don't already have a feature with this name
            # ...
            stopNow = 0
            for iRow in range(numRow):
                if (newFeatName == rowLabels[iRow]):
                    stopNow = 1
            if (stopNow):
                continue
            print " tentative new feature #%d ... <%s> " % (kFeat, newFeatName)
            newNameVec[kFeat] = newFeatName
            newDataMat[kFeat] = numpy.zeros(numCol)
            # OR together the mutation bits of every driver gene of this type
            for iR in range(numRow):
                if (iR % 1000 == 0):
                    print iR, numRow
                if (1):
                    gnabLabel = rowLabels[iR]
                    if (not gnabLabel.startswith("B:GNAB:")):
                        continue
                    if (gnabLabel.find(aMutType) < 0):
                        continue
                    try:
                        gnabTokens = gnabLabel.split(':')
                        gnabGene = gnabTokens[2].upper()
                    except:
                        print " FAILED to parse GNAB feature name ??? ", gnabLabel
                        continue
                    print " considering ", iR, gnabTokens, gnabGene
                    if (gnabGene in driverList):
                        for iCol in range(numCol):
                            if (dMat[iR][iCol] == 1):
                                print " yes! setting bit at ", kFeat, iCol
                                newDataMat[kFeat][iCol] = 1
            if (1):
                print " --> keeping this feature ... ", kFeat, newFeatName
                kFeat += 1
    numNewFeat = kFeat
    print " "
    print " --> number of new features : ", numNewFeat
    print len(newDataMat), len(newDataMat[0])
    # now we need to append these new features to the input data matrix
    newRowLabels = [0] * (numRow + numNewFeat)
    newMatrix = [0] * (numRow + numNewFeat)
    for iR in range(numRow):
        newRowLabels[iR] = rowLabels[iR]
        newMatrix[iR] = dMat[iR]
    for iR in range(numNewFeat):
        newRowLabels[iR + numRow] = newNameVec[iR]
        newMatrix[iR + numRow] = newDataMat[iR]
    dataD['rowLabels'] = newRowLabels
    dataD['dataMatrix'] = newMatrix
    print " "
    print " --> finished with driverGnab ... "
    print " "
    return (dataD) | cancerregulome/gidget | [
6,
3,
6,
11,
1383861509
] |
def combineGnabCnvr(dataD):
    """Create combined mutation-or-copy-number features.

    For each GNAB (mutation) row, finds positionally overlapping CNVR rows on
    the same chromosome; where "mutated or deleted" / "mutated or amplified"
    looks significant, appends a new y_del / y_amp binary row to the matrix.
    Returns dataD with 'rowLabels' and 'dataMatrix' extended.
    Python 2 code (print statements).
    """
    print " "
    print " ************************************************************* "
    print " ************************************************************* "
    print " "
    print " in combineGnabCnvr ... "
    # check that the input feature matrix looks ok ...
    try:
        numRow = len(dataD['rowLabels'])
        numCol = len(dataD['colLabels'])
        rowLabels = dataD['rowLabels']
        colLabels = dataD['colLabels']
        print " %d rows x %d columns " % (numRow, numCol)
        # print rowLabels[:5]
        # print rowLabels[-5:]
    except:
        print " ERROR in combineGnabCnvr ??? bad data ??? "
        return (dataD)
    # next, we need to find all of the GNAB features and all of the CNVR
    # features
    print " --> assigning gnab / cnvr flags ... "
    gnabFeatIncSubstrings = [ ":y_n", ":code_potential" ]
    gnabFeatAmpSubstrings = [ ":y_amp", ":cp_amp" ]
    gnabFeatDelSubstrings = [ ":y_del", ":cp_del" ]
    cnvrFeatExcSubstrings = [ "Gistic" ]
    isGnab = setFeatBits(rowLabels, "B:GNAB:", gnabFeatIncSubstrings, [])
    isCnvr = setFeatBits(rowLabels, "N:CNVR:", [], cnvrFeatExcSubstrings)
    print len(isGnab), max(isGnab)
    print len(isCnvr), max(isCnvr)
    # NOTE(review): message says "either" but this condition only fires when
    # BOTH vectors are empty -- 'or' may have been intended; verify
    if (not max(isGnab) and not max(isCnvr)):
        print " missing either GNAB or CNVR features ... "
        return (dataD)
    # now we need to map each of the GNAB features to one or more CNVR features
    # mapVec[iR] is a list of CNVR row indices overlapping GNAB row iR
    mapVec = [0] * numRow
    for iR in range(numRow):
        if (iR % 1000 == 0):
            print iR, numRow
        if (isGnab[iR]):
            mapVec[iR] = []
            gnabLabel = rowLabels[iR]
            try:
                # label format: B:GNAB:<gene>:<chr>:<start>:<stop>:...
                gnabTokens = gnabLabel.split(':')
                gnabChrName = gnabTokens[3].upper()
                gnabStart = int(gnabTokens[4])
                gnabStop = int(gnabTokens[5])
            except:
                print " FAILED to parse GNAB feature name ??? ", gnabLabel
                continue
            # avoid X and Y chromosome genes ...
            if (gnabChrName.endswith("X")):
                continue
            if (gnabChrName.endswith("Y")):
                continue
            for jR in range(numRow):
                if (isCnvr[jR]):
                    cnvrLabel = rowLabels[jR]
                    cnvrTokens = cnvrLabel.split(':')
                    cnvrChrName = cnvrTokens[3].upper()
                    if (gnabChrName != cnvrChrName):
                        continue
                    # print " comparing ... ", gnabLabel, cnvrLabel
                    # interval-overlap test on (start, stop)
                    cnvrStart = int(cnvrTokens[4])
                    if (gnabStop < cnvrStart):
                        continue
                    cnvrStop = int(cnvrTokens[5])
                    if (gnabStart > cnvrStop):
                        continue
                    mapVec[iR] += [jR]
                    # print " found match! ", gnabLabel, cnvrLabel, iR, jR,
                    # mapVec[iR]
            if (0):
                if (len(mapVec[iR]) > 5):
                    print iR, gnabLabel, len(mapVec[iR])
                    for kR in mapVec[iR]:
                        print " ", kR, rowLabels[kR]
                # sys.exit(-1)
    # now we need to actually loop over the data ...
    dMat = dataD['dataMatrix']
    # -------------------------------------------------------------------------
    if (0):
        # FIRST we want to check for any "adjacent normal" samples and set those to 0 ...
        # --> ACTUALLY, deciding NOT to do this anymore ( 31 oct 2012 ) NEW CHANGE
        numGNABfeat = 0
        numCNVRfeat = 0
        for iRow in range(numRow):
            curFeature = rowLabels[iRow]
            if (curFeature.startswith("B:GNAB:")):
                numGNABfeat += 1
            elif (curFeature.startswith("N:CNVR:")):
                numCNVRfeat += 1
        print " number of GNAB features : %d " % (numGNABfeat)
        print " number of CNVR features : %d " % (numCNVRfeat)
        print " "
        numGNABset = 0
        numCNVRset = 0
        numGNABfeat = 0
        numCNVRfeat = 0
        numNormalCol = 0
        for iCol in range(numCol):
            curLabel = colLabels[iCol]
            # TCGA barcode positions 13:15 encode sample type; '11' = normal
            if (curLabel.startswith("TCGA-")):
                if (len(curLabel) >= 15):
                    sampleType = curLabel[13:15]
                    if (sampleType == '11'):
                        numNormalCol += 1
                        # print iCol, curLabel
            for iRow in range(numRow):
                curFeature = rowLabels[iRow]
                if (curFeature.startswith("B:GNAB:")):
                    # print curFeature, dMat[iRow][iCol]
                    if (dMat[iRow][iCol] == "NA" or dMat[iRow][iCol] == NA_VALUE):
                        dMat[iRow][iCol] = 0
                        numGNABset += 1
                elif (curFeature.startswith("N:CNVR:")):
                    if (curFeature.find(":chrX:") > 0):
                        continue
                    if (curFeature.find(":chrY:") > 0):
                        continue
                    # print curFeature, dMat[iRow][iCol]
                    if (dMat[iRow][iCol] == "NA" or dMat[iRow][iCol] == NA_VALUE):
                        dMat[iRow][iCol] = 0.
                        numCNVRset += 1
    # -------------------------------------------------------------------------
    ## cnvrThreshold = 2.
    ## cnvrThreshold = 1.
    # log-ratio thresholds for calling an amplification / deletion
    cnvrAmpThresh = 0.30
    cnvrDelThresh = -0.46
    print " --> now checking for deletions and amplifications ... ", cnvrAmpThresh, cnvrDelThresh
    print " and creating new y_del and y_amp features "
    numNewFeat = 0
    newNameVec = []
    newDataMat = []
    for iR in range(numRow):
        if (iR % 1000 == 0):
            print iR, numRow
        if (isGnab[iR]):
            print " "
            print " having a look at this feature: "
            print iR, rowLabels[iR], len(mapVec[iR])
            print mapVec[iR]
            # how often is this gene mutated?
            numYes = 0
            numDel = 0
            numAmp = 0
            numYesDel = 0
            numYesAmp = 0
            maxCN = -999.
            minCN = 999.
            # per-sample tallies of mutation / deletion / amplification calls
            for iCol in range(numCol):
                mutFlag = 0
                ampFlag = 0
                delFlag = 0
                for jR in mapVec[iR]:
                    if (dMat[iR][iCol] == 1):
                        mutFlag = 1
                    if (dMat[jR][iCol] == NA_VALUE):
                        continue
                    if (dMat[jR][iCol] > maxCN):
                        maxCN = dMat[jR][iCol]
                    if (dMat[jR][iCol] < minCN):
                        minCN = dMat[jR][iCol]
                    if (dMat[jR][iCol] < cnvrDelThresh):
                        delFlag = 1
                    if (dMat[jR][iCol] > cnvrAmpThresh):
                        ampFlag = 1
                numYes += mutFlag
                numDel += delFlag
                numAmp += ampFlag
                if (mutFlag or delFlag): numYesDel += 1
                if (mutFlag or ampFlag): numYesAmp += 1
            addDelFeat = 0
            addAmpFeat = 0
            # minimum fraction of samples that must be added by the combination
            fThresh = 0.025
            if (numYes + numAmp + numDel > 0):
                print " --> %3d mutations (%3d mut or del, %3d mut or amp) " % \
                    ( numYes, numYesDel, numYesAmp )
                print " %3d deletions " % numDel, minCN
                print " %3d amplifications " % numAmp, maxCN
            if (numYesDel > 0):
                delFrac1 = float(numYesDel-numYes)/float(numCol)
                delFrac2 = float(numYesDel-numDel)/float(numCol)
                delFrac3 = 0
                # NOTE(review): in py2 numYesDel/numYes is INTEGER division
                # before float() is applied -- fractions truncate; verify intent
                if ( numYes > 0 ): delFrac3 += float(numYesDel/numYes)
                if ( numDel > 0 ): delFrac3 += float(numYesDel/numDel)
                if ( delFrac1>fThresh and delFrac2>fThresh ):
                    print " deletion looks significant ", numYesDel, numYes, numDel, numCol, delFrac1, delFrac2, delFrac3
                    addDelFeat = 1
                else:
                    print " deletion does not seem significant (?) ", numYesDel, numYes, numDel, numCol, delFrac1, delFrac2, delFrac3
            if (numYesAmp > 0):
                ampFrac1 = float(numYesAmp-numYes)/float(numCol)
                ampFrac2 = float(numYesAmp-numAmp)/float(numCol)
                ampFrac3 = 0
                # NOTE(review): same py2 integer-division concern as above
                if ( numYes > 0 ): ampFrac3 += float(numYesAmp/numYes)
                if ( numAmp > 0 ): ampFrac3 += float(numYesAmp/numAmp)
                if ( ampFrac1>fThresh and ampFrac2>fThresh ):
                    print " amplification looks significant ", numYesAmp, numYes, numAmp, numCol, ampFrac1, ampFrac2, ampFrac3
                    addAmpFeat = 1
                else:
                    print " amplification does not seem significant (?) ", numYesAmp, numYes, numAmp, numCol, ampFrac1, ampFrac2, ampFrac3
            ## add the "DEL" feature if appropriate ...
            if ( addDelFeat ):
                numNewFeat += 1
                curFeatName = rowLabels[iR]
                newFeatName = makeNewFeatureName(curFeatName, gnabFeatIncSubstrings, gnabFeatDelSubstrings)
                print " newFeatName <%s> " % newFeatName
                # make sure that there is not already a feature by this name!!!
                addFeat = 1
                for aLabel in rowLabels:
                    if (aLabel == newFeatName):
                        addFeat = 0
                        print " oops ??? <%s> already exists ??? " % aLabel
                if (addFeat):
                    print " --> adding this new feature: ", newFeatName
                    newNameVec += [newFeatName]
                    newDataMat += [numpy.zeros(numCol)]
                    numBitsOn = 0
                    for iCol in range(numCol):
                        # we need to start with NA
                        newDataMat[-1][iCol] = NA_VALUE
                        # if we already have a 'yes' for the mutation, that's
                        # all we need ...
                        if (dMat[iR][iCol] == 1):
                            newDataMat[-1][iCol] = 1
                            numBitsOn += 1
                            continue
                        # if not, then check for deletions ...
                        for jR in mapVec[iR]:
                            if (dMat[jR][iCol] == NA_VALUE): continue
                            if (newDataMat[-1][iCol] == 1): continue
                            if (dMat[jR][iCol] < cnvrDelThresh):
                                newDataMat[-1][iCol] = 1
                                numBitsOn += 1
                        # if we have set this bit we are done ...
                        if (newDataMat[-1][iCol] == 1): continue
                        # and otherwise if we have no mutation, set it to 0
                        if (dMat[iR][iCol] == 0): newDataMat[-1][iCol] = 0
                    print " number of bits set: ", numBitsOn
            ## add the "AMP" feature if appropriate ...
            if ( addAmpFeat ):
                numNewFeat += 1
                curFeatName = rowLabels[iR]
                newFeatName = makeNewFeatureName(curFeatName, gnabFeatIncSubstrings, gnabFeatAmpSubstrings)
                print " newFeatName <%s> " % newFeatName
                # make sure that there is not already a feature by this name!!!
                addFeat = 1
                for aLabel in rowLabels:
                    if (aLabel == newFeatName):
                        addFeat = 0
                        print " oops ??? <%s> already exists ??? " % aLabel
                if (addFeat):
                    print " --> adding this new feature: ", newFeatName
                    newNameVec += [newFeatName]
                    newDataMat += [numpy.zeros(numCol)]
                    numBitsOn = 0
                    for iCol in range(numCol):
                        # we need to start with NA
                        newDataMat[-1][iCol] = NA_VALUE
                        # if we already have a 'yes' for the mutation, that's
                        # all we need ...
                        if (dMat[iR][iCol] == 1):
                            newDataMat[-1][iCol] = 1
                            numBitsOn += 1
                            continue
                        # if not, then check for amplifications ...
                        for jR in mapVec[iR]:
                            if (dMat[jR][iCol] == NA_VALUE): continue
                            if (newDataMat[-1][iCol] == 1): continue
                            if (dMat[jR][iCol] > cnvrAmpThresh):
                                newDataMat[-1][iCol] = 1
                                numBitsOn += 1
                        # if we have set this bit we are done ...
                        if (newDataMat[-1][iCol] == 1): continue
                        # and otherwise if we have no mutation, set it to 0
                        if (dMat[iR][iCol] == 0): newDataMat[-1][iCol] = 0
                    print " number of bits set: ", numBitsOn
    # if ( numNewFeat == 0 ):
    # print " --> NO new features "
    # print " --> finished with combineGnabCnvr ... "
    # return ( dataD )
    print " "
    print " --> number of new features : ", numNewFeat
    if ( 0 ):
        if (numNewFeat > 0):
            print len(newNameVec)
            print len(newDataMat), len(newDataMat[0])
            for ii in range(numNewFeat):
                if (newNameVec[ii].find("CSMD1") > 0):
                    print newNameVec[ii]
                    print newDataMat[ii]
            print " "
    # now we need to append these new features to the input data matrix
    newRowLabels = [0] * (numRow + numNewFeat)
    newMatrix = [0] * (numRow + numNewFeat)
    for iR in range(numRow):
        newRowLabels[iR] = rowLabels[iR]
        newMatrix[iR] = dMat[iR]
    for iR in range(numNewFeat):
        newRowLabels[iR + numRow] = newNameVec[iR]
        newMatrix[iR + numRow] = newDataMat[iR]
    dataD['rowLabels'] = newRowLabels
    dataD['dataMatrix'] = newMatrix
    print " "
    print " --> finished with combineGnabCnvr ... "
    print " "
    return (dataD) | cancerregulome/gidget | [
6,
3,
6,
11,
1383861509
] |
def to_json(self):
    """
    Encodes a Collection as a json representation so it can be sent through the bitmessage network
    :return: the json representation of the given Collection, or None if it fails schema validation
    """
    date_format = "%A, %d. %B %Y %I:%M%p"
    json_docs = [{"address": doc.collection_address, "description": doc.description, "title": doc.title,
                  "hash": doc.hash, "filename": doc.filename, "accesses": doc.accesses}
                 for doc in self.documents]
    json_keywords = [{"id": key.id, "name": key.name} for key in self.keywords]
    json_representation = {"type_id": 1,
                           "title": self.title,
                           "description": self.description,
                           "keywords": json_keywords,
                           "address": self.address,
                           "documents": json_docs,
                           "btc": self.btc,
                           "latest_broadcast_date": self.latest_broadcast_date.strftime(date_format),
                           "creation_date": self.creation_date.strftime(date_format),
                           "oldest_date": self.oldest_date.strftime(date_format),
                           "latest_btc_tx": self.latest_btc_tx,
                           "oldest_btc_tx": self.oldest_btc_tx,
                           "accesses": self.accesses,
                           "votes": self.votes,
                           "votes_last_checked": self.votes_last_checked.strftime(date_format)}
    try:
        validate(json_representation, coll_schema)
        return json.dumps(json_representation, sort_keys=True)
    except ValidationError:
        return None
8,
2,
8,
4,
1423190010
] |
def loop_call(delta=60 * 1000):
    """Decorator factory: run the wrapped function, then re-schedule it on the
    Tornado IOLoop every *delta* milliseconds.

    Bug fix: the original called datetime.timeelta (a typo), which raised
    AttributeError the first time the wrapped function ran.
    """
    def wrap_loop(func):
        @wraps(func)
        def wrap_func(*args, **kwargs):
            func(*args, **kwargs)
            tornado.ioloop.IOLoop.instance().add_timeout(
                datetime.timedelta(milliseconds=delta),
                wrap_func)
        return wrap_func
    return wrap_loop
93,
31,
93,
4,
1368439478
] |
def wrap_loop(func):
    """Wrap *func* as a logged Tornado coroutine that re-schedules itself.

    Logs start/end times, swallows (and logs) any exception from one run,
    then queues the next run on the IOLoop.  NOTE(review): *delta* is a free
    variable here -- this def appears to belong inside an enclosing factory
    that supplies it; confirm against the full source.

    Bug fix: "except Exception, e" is Python 2-only syntax; replaced with
    the version-portable "except Exception as e".
    """
    @wraps(func)
    @gen.coroutine
    def wrap_func(*args, **kwargs):
        options.logger.info("function %r start at %d" %
                            (func.__name__, int(time.time())))
        try:
            yield func(*args, **kwargs)
        except Exception as e:
            options.logger.error("function %r error: %s" %
                                 (func.__name__, e))
        options.logger.info("function %r end at %d" %
                            (func.__name__, int(time.time())))
        tornado.ioloop.IOLoop.instance().add_timeout(
            datetime.timedelta(milliseconds=delta),
            wrap_func)
    return wrap_func
93,
31,
93,
4,
1368439478
] |
def __init__(self, url, **kwargs):
    """Build an HTTP request pre-filled with the configured credentials
    and the Tornado-data user agent."""
    super(TornadoDataRequest, self).__init__(url, **kwargs)
    self.user_agent = "Tornado-data"
    self.auth_username = options.username
    self.auth_password = options.password
93,
31,
93,
4,
1368439478
] |
def GetPage(url):
    """Coroutine body: GET *url* with an authenticated TornadoDataRequest.

    Yields the HTTP response; on HTTPError the error object itself is
    returned so callers can inspect .code uniformly.

    Bug fix: "except HTTPError, e" is Python 2-only syntax; replaced with
    the version-portable "except HTTPError as e".
    """
    client = AsyncHTTPClient()
    request = TornadoDataRequest(url, method='GET')
    try:
        response = yield client.fetch(request)
    except HTTPError as e:
        response = e
    raise gen.Return(response)
93,
31,
93,
4,
1368439478
] |
def PutPage(url, body):
    """Coroutine body: PUT *body* to *url* with an authenticated request.

    Yields the HTTP response; on HTTPError the error object is returned
    instead, matching GetPage.

    Bug fix: "except HTTPError, e" is Python 2-only syntax; replaced with
    the version-portable "except HTTPError as e".
    """
    client = AsyncHTTPClient()
    request = TornadoDataRequest(url, method='PUT', body=body)
    try:
        response = yield client.fetch(request)
    except HTTPError as e:
        response = e
    raise gen.Return(response)
93,
31,
93,
4,
1368439478
] |
def PatchPage(url, body):
    """Coroutine body: PATCH *body* to *url* with an authenticated request.

    Yields the HTTP response; on HTTPError the error object is returned
    instead, matching GetPage/PutPage.

    Bug fix: "except HTTPError, e" is Python 2-only syntax; replaced with
    the version-portable "except HTTPError as e".
    """
    # NOTE(review): siblings use AsyncHTTPClient() directly;
    # configurable_default()() bypasses any configured implementation --
    # confirm whether that difference is intentional
    client = AsyncHTTPClient.configurable_default()()
    request = TornadoDataRequest(url, method="PATCH", body=body)
    try:
        response = yield client.fetch(request)
    except HTTPError as e:
        response = e
    raise gen.Return(response)
93,
31,
93,
4,
1368439478
] |
def commit(url, message, data):
    """Coroutine body: fetch the existing blob at *url*, then PUT an updated
    commit (GitHub contents API style) carrying *message* and *data*.
    Returns the PUT response, or the failed GET response unchanged."""
    resp = yield GetPage(url)
    if resp.code != 200:
        raise gen.Return(resp)
    sha = escape.json_decode(resp.body)["sha"]
    body = json.dumps({
        "message": message,
        "content": base64.b64encode(json.dumps(data)),
        "committer": {"name": "cloudaice", "email": "cloudaice@163.com"},
        "sha": sha
    })
    resp = yield PutPage(url, body)
    raise gen.Return(resp)
93,
31,
93,
4,
1368439478
] |
def hide():
    """Hide the terminal cursor by emitting the ANSI DECTCEM reset sequence."""
    stream = sys.stdout
    stream.write("\033[?25l")
    stream.flush()
4,
2,
4,
1,
1401601700
] |
def show():
    """Show the terminal cursor by emitting the ANSI DECTCEM set sequence."""
    stream = sys.stdout
    stream.write("\033[?25h")
    stream.flush()
4,
2,
4,
1,
1401601700
] |
def shown():
    """Show the cursor within a context.

    Robustness fix: the hide() cleanup now runs in a finally block, so the
    cursor state is restored even when the context body raises.
    """
    Cursor.show()
    try:
        yield
    finally:
        Cursor.hide()
4,
2,
4,
1,
1401601700
] |
def __init__(self):
    """Progress tracker: last_fraction stays None until the first report."""
    self.last_fraction = None
4,
2,
4,
1,
1401601700
] |
def __init__(self, ubername, password, threads, ratelimit):
    """
    Initialize the patcher with UberNet credentials. They will be used to
    login, check for and retrieve patches.
    """
    login_payload = {"TitleId": 4,
                     "AuthMethod": "UberCredentials",
                     "UberName": ubername,
                     "Password": password}
    self.credentials = dumps(login_payload)
    # one TLS connection to UberNet, reused for every request
    self.connection = HTTPSConnection(UBERNET_HOST,
                                      context=create_default_context())
    self.threads = threads
    self.ratelimit = ratelimit
4,
2,
4,
1,
1401601700
] |
def get_streams(self):
    """
    Request and return a list of streams we can download from UberNet.

    Returns a dict mapping StreamName -> stream record (also cached on
    self._streams), or None when there is no session ticket or the
    request fails.

    Bug fix: the HTTP status was compared with "is not", which relies on
    CPython's small-int caching; replaced with "!=".
    """
    # we can't continue without a session ticket
    if not hasattr(self, "_session"):
        return None
    headers = {"X-Authorization": self._session}
    # we no longer need the session ticket
    del self._session
    self.connection.request("GET", "/Launcher/ListStreams?Platform=Linux",
                            headers=headers)
    response = self.connection.getresponse()
    if response.status != HTTP_OK:
        print("! Encountered an error: {0} {1}.".format(response.status,
                                                        response.reason))
        return None
    # get and parse response data
    raw_data = response.read()
    result = loads(raw_data.decode("utf-8"))
    self._streams = {stream["StreamName"]: stream
                     for stream in result["Streams"]}
    return self._streams
4,
2,
4,
1,
1401601700
] |
def _verify_manifest(self, full):
    """Purge stale cache entries and verify manifest bundles in parallel.

    When *full* is true every cached bundle is discarded; otherwise only
    files whose names are not listed as bundle checksums are removed.
    Populates self._bundles with bundles that still need downloading.
    Returns False (cancelling outstanding checks) on the first failed
    bundle verification, True when all bundles verified.
    """
    if not hasattr(self, "_stream") or not hasattr(self, "_manifest"):
        return False
    # clean up cache in the process
    cache_dir = CACHE_DIR / self._stream["StreamName"]
    print("* Verifying contents of cache folder {0}.".format(
        str(cache_dir)))
    if cache_dir.exists():
        # cached files are named by bundle checksum
        bundle_names = [bundle["checksum"]
                        for bundle in self._manifest["bundles"]]
        old_bundles = 0
        for cache_file in cache_dir.iterdir():
            if full or cache_file.name not in bundle_names:
                cache_file.unlink()
                old_bundles += 1
        if old_bundles:
            print("* Purged {0} old bundle(s).".format(old_bundles))
    # verify bundles in parallel
    with futures.ThreadPoolExecutor(max_workers=self.threads) as executor:
        # this list will contain the bundles we actually need to download
        self._bundles = list()
        bundle_futures = [executor.submit(self._verify_bundle, bundle)
                          for bundle in self._manifest["bundles"]]
        for completed in futures.as_completed(bundle_futures):
            if not completed.result():
                # cancel waiting futures
                for future in bundle_futures:
                    future.cancel()
                return False
    print("* Need to get {0} bundle(s).".format(len(self._bundles)))
    # if we get here there, all bundles were verified
    # we no longer need the manifest
    del self._manifest
    return True | pa-pyrus/papatcher | [
4,
2,
4,
1,
1401601700
] |
def patch(self):
    """Download every needed bundle and extract them concurrently.

    Bundles (collected by _verify_manifest into self._bundles) are
    downloaded sequentially, largest first, while extraction of completed
    downloads runs in a thread pool.  Returns False on the first failed
    download or extraction (cancelling pending extractions), True when
    everything is on disk.
    """
    if not hasattr(self, "_bundles"):
        return False
    with futures.ThreadPoolExecutor(max_workers=self.threads) as executor:
        bundle_futures = list()
        # download bundles sorted by size
        self._bundles.sort(key=lambda bundle: int(bundle["size"]),
                           reverse=True)
        for bundle in self._bundles:
            bundle_checksum = bundle["checksum"]
            print("* Downloading bundle {0}.".format(bundle_checksum))
            if not self._download_bundle(bundle):
                return False
            # bundle was downloaded, start extraction in parallel
            print("* Extracting bundle {0}.".format(bundle_checksum))
            bundle_future = executor.submit(self._extract_bundle, bundle)
            bundle_futures.append(bundle_future)
        for completed in futures.as_completed(bundle_futures):
            if not completed.result():
                # cancel waiting futures
                for future in bundle_futures:
                    future.cancel()
                return False
    # if we're here everything has been downloaded and extracted
    return True | pa-pyrus/papatcher | [
4,
2,
4,
1,
1401601700
] |
def _extract_bundle(self, bundle):
    """Unpack one downloaded bundle from the cache into the game directory.

    Entries are read at their recorded offsets from the cached bundle file;
    entries with a non-zero "sizeZ" are zlib-decompressed, others copied
    verbatim.  Existing files are replaced, parent directories created, and
    the executable bit applied where the manifest says so.  Returns True on
    success (False only when stream state is missing).
    """
    if not hasattr(self, "_stream"):
        return False
    bundle_checksum = bundle["checksum"]
    cache_file = CACHE_DIR / self._stream["StreamName"] / bundle_checksum
    # open the cached bundle file (plain binary read; individual entries
    # may be zlib-compressed -- see the sizeZ check below)
    with cache_file.open("rb") as cache_fp:
        game_base = GAME_ROOT / self._stream["StreamName"]
        # get entries sorted by offset
        entries = sorted(bundle["entries"], key=itemgetter("offset"))
        for entry in entries:
            # entry["filename"] starts with '/'; strip it so the join is relative
            entry_file = game_base / entry["filename"][1:]
            # make sure that path exists
            if not entry_file.parent.exists():
                entry_file.parent.mkdir(parents=True)
            entry_offset = int(entry["offset"])
            cache_fp.seek(entry_offset)
            # remove the file first if it already exists
            if entry_file.exists():
                entry_file.unlink()
            with entry_file.open("xb") as entry_fp:
                # data might be compressed further, check sizeZ for that
                if entry["sizeZ"] != "0":
                    entry_size = int(entry["sizeZ"])
                    raw_data = cache_fp.read(entry_size)
                    entry_fp.write(decompress(raw_data))
                else:
                    entry_size = int(entry["size"])
                    entry_fp.write(cache_fp.read(entry_size))
            # set executable
            if "executable" in entry:
                entry_file.chmod(entry_file.stat().st_mode | S_IEXEC)
    return True | pa-pyrus/papatcher | [
4,
2,
4,
1,
1401601700
] |
def __init__(self, CP, CarController, CarState):
    """Wire up car params, vehicle model, CAN parsers and controller."""
    self.CP = CP
    self.VM = VehicleModel(CP)
    self.frame = 0
    self.steering_unpressed = 0
    self.low_speed_alert = False
    self.silent_steer_warning = True
    if CarState is not None:
        self.CS = CarState(CP)
        # one parser per CAN bus the car state is read from
        self.cp = self.CS.get_can_parser(CP)
        self.cp_cam = self.CS.get_cam_can_parser(CP)
        self.cp_body = self.CS.get_body_can_parser(CP)
        self.cp_loopback = self.CS.get_loopback_can_parser(CP)
    self.CC = None
    if CarController is not None:
        self.CC = CarController(self.cp.dbc_name, CP, self.VM)
38913,
7077,
38913,
364,
1479951210
] |
def get_pid_accel_limits(CP, current_speed, cruise_speed):
    """Return the fixed (min, max) longitudinal acceleration limits."""
    return (ACCEL_MIN, ACCEL_MAX)
38913,
7077,
38913,
364,
1479951210
] |
def get_params(candidate, fingerprint=gen_empty_fingerprint(), car_fw=None):
    """Stub: car ports override this to build a CarParams message."""
    return None
38913,
7077,
38913,
364,
1479951210
] |
def init(CP, logcan, sendcan):
    """Stub: no extra initialization needed for this interface."""
    return None
38913,
7077,
38913,
364,
1479951210
] |
def get_steer_feedforward_default(desired_angle, v_ego):
    """Default steering feedforward.

    Proportional to realigning tire momentum, i.e. lateral acceleration.
    TODO: something with lateralPlan.curvatureRates
    """
    return desired_angle * v_ego * v_ego
38913,
7077,
38913,
364,
1479951210
] |
def get_steer_feedforward_function(cls):
    """Return the feedforward callable used by the lateral controller."""
    return cls.get_steer_feedforward_default
38913,
7077,
38913,
364,
1479951210
] |
def get_std_params(candidate, fingerprint):
# Build a CarParams message pre-filled with conservative defaults that
# individual car ports override as needed.
ret = car.CarParams.new_message()
ret.carFingerprint = candidate
ret.unsafeMode = 0  # see panda/board/safety_declarations.h for allowed values
# standard ALC params
ret.steerControlType = car.CarParams.SteerControlType.torque
ret.steerMaxBP = [0.]
ret.steerMaxV = [1.]
ret.minSteerSpeed = 0.
ret.wheelSpeedFactor = 1.0
ret.pcmCruise = True  # openpilot's state is tied to the PCM's cruise state on most cars
ret.minEnableSpeed = -1.  # enable is done by stock ACC, so ignore this
ret.steerRatioRear = 0.  # no rear steering, at least on the listed cars aboveA
ret.openpilotLongitudinalControl = False
# longitudinal stopping/starting behavior defaults
ret.stopAccel = -2.0
ret.stoppingDecelRate = 0.8  # brake_travel/s while trying to stop
ret.vEgoStopping = 0.5
ret.vEgoStarting = 0.5
ret.stoppingControl = True
# unity longitudinal tuning: pass-through gains, no deadzone
ret.longitudinalTuning.deadzoneBP = [0.]
ret.longitudinalTuning.deadzoneV = [0.]
ret.longitudinalTuning.kf = 1.
ret.longitudinalTuning.kpBP = [0.]
ret.longitudinalTuning.kpV = [1.]
ret.longitudinalTuning.kiBP = [0.]
ret.longitudinalTuning.kiV = [1.]
# TODO estimate car specific lag, use .15s for now
ret.longitudinalActuatorDelayLowerBound = 0.15
ret.longitudinalActuatorDelayUpperBound = 0.15
ret.steerLimitTimer = 1.0
return ret
38913,
7077,
38913,
364,
1479951210
] |
def update(self, c: car.CarControl, can_strings: List[bytes]) -> car.CarState:
    """Stub: parse CAN strings and return the current CarState."""
    return None
38913,
7077,
38913,
364,
1479951210
] |
def apply(self, c: car.CarControl) -> Tuple[car.CarControl.Actuators, List[bytes]]:
    """Stub: turn a CarControl into actuator commands and CAN messages."""
    return None
38913,
7077,
38913,
364,
1479951210
] |
def __init__(self, CP):
    """Initialize an empty radar interface."""
    self.delay = 0
    self.pts = {}
    # NO_RADAR_SLEEP skips the per-cycle radar sleep (useful for replays)
    self.no_radar_sleep = "NO_RADAR_SLEEP" in os.environ
    self.radar_ts = CP.radarTimeStep
38913,
7077,
38913,
364,
1479951210
] |
def __init__(self, CP):
    """Initialize shared car-state bookkeeping and the speed Kalman filter."""
    self.CP = CP
    self.car_fingerprint = CP.carFingerprint
    self.out = car.CarState.new_message()
    self.cruise_buttons = 0
    # blinker debounce counters and previous stalk state
    self.left_blinker_cnt = 0
    self.right_blinker_cnt = 0
    self.left_blinker_prev = False
    self.right_blinker_prev = False
    # gains precomputed offline from:
    # Q = np.matrix([[10.0, 0.0], [0.0, 100.0]]), R = 1e3
    self.v_ego_kf = KF1D(x0=[[0.0], [0.0]],
                         A=[[1.0, DT_CTRL], [0.0, 1.0]],
                         C=[1.0, 0.0],
                         K=[[0.12287673], [0.29666309]])
38913,
7077,
38913,
364,
1479951210
] |
def get_wheel_speeds(self, fl, fr, rl, rr, unit=CV.KPH_TO_MS):
    """Convert raw per-wheel speeds into a WheelSpeeds message (m/s)."""
    scale = unit * self.CP.wheelSpeedFactor
    msg = car.CarState.WheelSpeeds.new_message()
    msg.fl = fl * scale
    msg.fr = fr * scale
    msg.rl = rl * scale
    msg.rr = rr * scale
    return msg
38913,
7077,
38913,
364,
1479951210
] |
def update_blinker_from_stalk(self, blinker_time: int, left_blinker_stalk: bool, right_blinker_stalk: bool):
    """Track blinker state from stalk position.

    A seen stalk keeps the blinker on for at least ``blinker_time`` frames or
    until the stalk is released, whichever is longer. The opposite stalk
    direction cancels it, and a rising edge restarts the timeout.
    """
    if left_blinker_stalk:
        self.right_blinker_cnt = 0
        if not self.left_blinker_prev:
            self.left_blinker_cnt = blinker_time
    if right_blinker_stalk:
        self.left_blinker_cnt = 0
        if not self.right_blinker_prev:
            self.right_blinker_cnt = blinker_time
    # count down the timeouts, clamping at zero
    self.left_blinker_cnt = max(self.left_blinker_cnt - 1, 0)
    self.right_blinker_cnt = max(self.right_blinker_cnt - 1, 0)
    self.left_blinker_prev = left_blinker_stalk
    self.right_blinker_prev = right_blinker_stalk
    left_on = left_blinker_stalk or self.left_blinker_cnt > 0
    right_on = right_blinker_stalk or self.right_blinker_cnt > 0
    return bool(left_on), bool(right_on)
38913,
7077,
38913,
364,
1479951210
] |
def parse_gear_shifter(gear: str) -> car.CarState.GearShifter:
    """Map a gear letter reported by the car to a GearShifter enum value."""
    mapping: Dict[str, car.CarState.GearShifter] = {
        'P': GearShifter.park,
        'R': GearShifter.reverse,
        'N': GearShifter.neutral,
        'E': GearShifter.eco,
        'T': GearShifter.manumatic,
        'D': GearShifter.drive,
        'S': GearShifter.sport,
        'L': GearShifter.low,
        'B': GearShifter.brake,
    }
    return mapping.get(gear, GearShifter.unknown)
38913,
7077,
38913,
364,
1479951210
] |
def get_cam_can_parser(CP):
    """No camera CAN parser for this platform."""
    return None
38913,
7077,
38913,
364,
1479951210
] |
def get_body_can_parser(CP):
    """No body CAN parser for this platform."""
    return None
38913,
7077,
38913,
364,
1479951210
] |
def blocked_re(self):
    """Compiled case-insensitive pattern matching any blocked token or word.

    Tokens match anywhere in the text; words only at word boundaries.
    """
    # Build the alternatives as lists: on Python 3, map() returns an
    # iterator and the original `map(...) + map(...)` raises TypeError.
    parts = [re.escape(token) for token in self.blocked_tokens]
    parts += [u'\\b{}\\b'.format(re.escape(word)) for word in self.blocked_words]
    # Combine flags with | (bitwise or); `re.I + re.U` only worked by
    # accident because the flag values share no bits.
    return re.compile(u'({})'.format(u'|'.join(parts)), re.I | re.U)
15,
9,
15,
22,
1496860309
] |
def __init__(self, reason, event, ctx):
    """Capture why, where and on what content a message was censored."""
    self.reason = reason
    self.event = event
    self.ctx = ctx
    # escape code blocks so the content can be echoed safely in mod logs
    self.content = S(event.content, escape_codeblocks=True)
15,
9,
15,
22,
1496860309
] |
def details(self):
    """Human-readable description of the censor hit, for the mod log."""
    reason = self.reason
    if reason is CensorReason.INVITE:
        guild = self.ctx['guild']
        if guild:
            return u'invite `{}` to {}'.format(
                self.ctx['invite'],
                S(guild['name'], escape_codeblocks=True))
        return u'invite `{}`'.format(self.ctx['invite'])
    if reason is CensorReason.DOMAIN:
        domain = S(self.ctx['domain'], escape_codeblocks=True)
        if self.ctx['hit'] == 'whitelist':
            return u'domain `{}` is not in whitelist'.format(domain)
        return u'domain `{}` is in blacklist'.format(domain)
    if reason is CensorReason.WORD:
        words = u', '.join(S(word, escape_codeblocks=True) for word in self.ctx['words'])
        return u'found blacklisted words `{}`'.format(words)
    if reason is CensorReason.ZALGO:
        return u'found zalgo at position `{}` in text'.format(self.ctx['position'])
15,
9,
15,
22,
1496860309
] |
def compute_relevant_configs(self, event, author):
    """Yield censor configs that apply to this channel and author level."""
    channels = event.config.channels
    if event.channel_id in channels:
        # NOTE(review): the membership test uses event.channel_id but the
        # lookup uses event.channel.id — presumably always equal; confirm
        yield channels[event.channel.id]
    if event.config.levels:
        user_level = int(self.bot.plugins.get('CorePlugin').get_level(event.guild, author))
        # a level config applies to everyone at or below that level
        for level, config in event.config.levels.items():
            if user_level <= level:
                yield config
15,
9,
15,
22,
1496860309
] |
def on_message_update(self, event):
    """Re-run censorship when a message is edited.

    The original author is looked up from the stored message, since edit
    events don't carry full author information.
    """
    try:
        stored = Message.get(id=event.id)
    except Message.DoesNotExist:
        self.log.warning('Not censoring MessageUpdate for id %s, %s, no stored message', event.channel_id, event.id)
        return
    if not event.content:
        return
    author = event.guild.get_member(stored.author_id)
    return self.on_message_create(event, author=author)
15,
9,
15,
22,
1496860309
] |
def on_message_create(self, event, author=None):
# Apply every relevant censor config to a message; on a Censorship hit,
# delete the message and log the action. `author` is overridden for edits.
author = author or event.author
# never act on our own messages or on webhooks
if author.id == self.state.me.id:
return
if event.webhook_id:
return
configs = list(self.compute_relevant_configs(event, author))
if not configs:
return
# time the filtering for metrics, tagged by guild and channel
tags = {'guild_id': event.guild.id, 'channel_id': event.channel.id}
with timed('rowboat.plugin.censor.duration', tags=tags):
try:
# TODO: perhaps imap here? how to raise exception then?
for config in configs:
# per-config channel restriction and bypass channel
if config.channel:
if event.channel_id != config.channel:
continue
if config.bypass_channel:
if event.channel_id == config.bypass_channel:
continue
# each filter raises Censorship on a hit
if config.filter_zalgo:
self.filter_zalgo(event, config)
if config.filter_invites:
self.filter_invites(event, config)
if config.filter_domains:
self.filter_domains(event, config)
if config.blocked_words or config.blocked_tokens:
self.filter_blocked_words(event, config)
except Censorship as c:
# debounce so our own deletion isn't double-logged as MessageDelete
self.call(
'ModLogPlugin.create_debounce',
event,
['MessageDelete'],
message_id=event.message.id,
)
try:
event.delete()
self.call(
'ModLogPlugin.log_action_ext',
Actions.CENSORED,
event.guild.id,
e=event,
c=c)
except APIException:
self.log.exception('Failed to delete censored message: ')
15,
9,
15,
22,
1496860309
] |
def filter_invites(self, event, config):
# Scan message content for Discord invites; raise Censorship on a hit.
invites = INVITE_LINK_RE.findall(event.content)
for _, invite in invites:
# resolve the invite code to guild metadata (may be None)
invite_info = self.get_invite_info(invite)
# whitelist mode applies when any whitelist is configured, or when
# no blacklist exists at all (default-deny)
need_whitelist = (
config.invites_guild_whitelist or
(config.invites_whitelist or not config.invites_blacklist)
)
whitelisted = False
if invite_info and invite_info.get('id') in config.invites_guild_whitelist:
whitelisted = True
if invite.lower() in config.invites_whitelist:
whitelisted = True
if need_whitelist and not whitelisted:
raise Censorship(CensorReason.INVITE, event, ctx={
'hit': 'whitelist',
'invite': invite,
'guild': invite_info,
})
# otherwise only censor invites that are explicitly blacklisted
elif config.invites_blacklist and invite.lower() in config.invites_blacklist:
raise Censorship(CensorReason.INVITE, event, ctx={
'hit': 'blacklist',
'invite': invite,
'guild': invite_info,
})
15,
9,
15,
22,
1496860309
] |
def get_trace_name(self, source_entities, brief):
    """Label shown for this builder in build traces."""
    return "Preprocess file"
5,
3,
5,
10,
1414322110
] |
def get_trace_targets(self, target_entities, brief):
    """No target trace output for this builder."""
    return None
5,
3,
5,
10,
1414322110
] |
def build(self, source_entities, targets):
# Strip a source file down to bare code and record its imports in-memory.
src_file = source_entities[0].get()
# patterns removed from the source: blank lines, line continuations,
# comment lines and the module's __all__ declaration
empty_re = re.compile(r'^\s*\r*\n', re.MULTILINE)
slash_re = re.compile(r'\\\r*\n', re.MULTILINE)
comments_re = re.compile(r"^\s*#.*$", re.MULTILINE)
all_stmt_re = re.compile(
r"^__all__\s*=\s*\(.+?\)", re.MULTILINE | re.DOTALL)
content = aql.read_text_file(src_file)
content = slash_re.sub("", content)
content = comments_re.sub("", content)
content = all_stmt_re.sub("", content)
# -----------------------------------------------------------
# collect plain "import X" statements (standard-library modules)
import_re = re.compile(r"^import\s+(.+)$", re.MULTILINE)
std_imports = set()
def import_handler(match, _std_imports=std_imports):
module_name = match.group(1)
_std_imports.add(module_name)
return ""
content = import_re.sub(import_handler, content)
# -----------------------------------------------------------
# collect "from [.]aql... import ..." statements (internal deps),
# normalized to path fragments used later for dependency matching
aql_import_re = re.compile(r"^\s*from\s+(\.?aql.+)\s+import\s+.+$",
re.MULTILINE)
aql_imports = set()
def aql_import_handler(match, _aql_imports=aql_imports):
module_name = match.group(1)
if module_name.startswith('.'):
module_name = os.sep + module_name[1:] + '.py'
else:
module_name = os.sep + \
module_name.replace('.', os.sep) + os.sep
_aql_imports.add(module_name)
return ""
content = aql_import_re.sub(aql_import_handler, content)
# -----------------------------------------------------------
content = empty_re.sub("", content)
# the target entity carries (std imports, aql deps, stripped content)
target = aql.SimpleEntity(name=src_file,
data=(std_imports, aql_imports, content))
targets.add_target_entity(target)
5,
3,
5,
10,
1414322110
] |
def __init__(self, options, target):
    """Resolve the .py build target path."""
    self.target = self.get_target_path(target, ext='.py')
5,
3,
5,
10,
1414322110
] |
def get_target_entities(self, source_entities):
    """This builder has a single, known target."""
    return self.target
5,
3,
5,
10,
1414322110
] |
def get_trace_sources(self, source_entities, brief):
    """Show sources in traces as bare file names."""
    return (os.path.basename(entity.name) for entity in source_entities)
5,
3,
5,
10,
1414322110
] |
def replace(self, options, source_entities):
    """Replace sources with preprocessed core .py files (minus __init__.py)."""
    finder = aql.FindFilesBuilder(options,
                                  mask='*.py',
                                  exclude_mask="__init__.py")
    core_files = aql.Node(finder, source_entities)
    return aql.Node(AqlPreprocess(options), core_files)
5,
3,
5,
10,
1414322110
] |
def _mod_to_files(file2deps, modules):
    """Map each module path fragment to the set of file paths containing it."""
    mod2files = {}
    for mod in modules:
        mod2files[mod] = {path for path in file2deps if mod in path}
    return mod2files
5,
3,
5,
10,
1414322110
] |
def _get_dep_to_files(file2deps, mod2files):
    """Resolve module-level dependencies to file-level ones.

    Returns (dep2files, new_file2deps): dep2files maps a file to the files
    that depend on it; new_file2deps maps a file to the files it depends on.
    """
    dep2files = {}
    new_file2deps = {}
    for path, mods in file2deps.items():
        for mod in mods:
            dep_files = mod2files[mod]
            new_file2deps.setdefault(path, set()).update(dep_files)
            for dep in dep_files:
                dep2files.setdefault(dep, set()).add(path)
    return dep2files, new_file2deps
5,
3,
5,
10,
1414322110
] |
def _get_content(files_content, dep2files, file2deps, tails):
    """Concatenate file contents in topological order, seeded by `tails`."""
    content = ""
    while tails:
        current = tails.pop(0)
        content += files_content[current]
        # every file depending on `current` loses that dependency; files
        # left with no remaining dependencies become new tails
        for dependant in dep2files.pop(current, []):
            deps = file2deps[dependant]
            deps.remove(current)
            if not deps:
                tails.append(dependant)
                del file2deps[dependant]
    return content
5,
3,
5,
10,
1414322110
] |
def build(self, source_entities, targets):
# Link all preprocessed sources into a single file, ordered so that every
# module appears after the modules it depends on.
file2deps = {}
files_content = {}
modules = set()
tails = []
std_modules = set()
for entity in source_entities:
file_name = entity.name
# entity.data = (std imports, aql deps, stripped content)
mod_std_imports, mod_deps, mod_content = entity.data
if not mod_content:
continue
# files without internal deps seed the topological order
if not mod_deps:
tails.append(file_name)
files_content[file_name] = mod_content
file2deps[file_name] = mod_deps
std_modules.update(mod_std_imports)
modules.update(mod_deps)
mod2files = self._mod_to_files(file2deps, modules)
dep2files, file2deps = self._get_dep_to_files(file2deps, mod2files)
content = self._get_content(files_content, dep2files, file2deps, tails)
# one sorted import block for all collected standard modules
imports_content = '\n'.join(
"import %s" % module for module in sorted(std_modules))
content = '\n'.join([HEADER, imports_content, content, AQL_DATE])
aql.write_text_file(self.target, data=content)
targets.add_target_files(self.target)
5,
3,
5,
10,
1414322110
] |
def __init__(self, options, target):
    """Remember the final target and the intermediate .b64 build target."""
    self.target = target
    self.build_target = self.get_target_path(target, ext='.b64')
5,
3,
5,
10,
1414322110
] |
def get_trace_name(self, source_entities, brief):
    """Trace label for the tool-packing step."""
    return "Pack Tools"
5,
3,
5,
10,
1414322110
] |
def get_target_entities(self, source_values):
    """The intermediate base64-encoded target."""
    return self.build_target
5,
3,
5,
10,
1414322110
] |
def replace(self, options, source_entities):
    """Replace tool directories with a zip of their .py files."""
    tools_path = [source.get() for source in source_entities]
    if not tools_path:
        return None
    finder = aql.FindFilesBuilder(options, '*.py')
    zipper = aql.ZipFilesBuilder(options,
                                 target=self.target,
                                 basedir=tools_path)
    tool_files = aql.Node(finder, source_entities)
    return aql.Node(zipper, tool_files)
5,
3,
5,
10,
1414322110
] |
def build(self, source_entities, targets):
    """Concatenate the source zip files into one base64-encoded target.

    The result is tagged "embedded_tools" so the link step recognizes it.
    """
    target = self.build_target
    with aql.open_file(target, write=True,
                       binary=True, truncate=True) as output:
        for source in source_entities:
            zip_file = source.get()
            with aql.open_file(zip_file, read=True, binary=True) as src:
                # base64.encode() was removed in Python 3.9;
                # encodebytes() emits the same 76-column, newline-wrapped
                # output for the whole payload
                output.write(base64.encodebytes(src.read()))
    targets.add_target_files(target, tags="embedded_tools")
5,
3,
5,
10,
1414322110
] |
def __init__(self, options, target):
    """Resolve the standalone script target path."""
    self.target = self.get_target_path(target)
5,
3,
5,
10,
1414322110
] |
def get_trace_name(self, source_entities, brief):
    """Trace label for the standalone-script link step."""
    return "Link AQL standalone script"
5,
3,
5,
10,
1414322110
] |
def get_target_entities(self, source_values):
    """The standalone script target."""
    return self.target
5,
3,
5,
10,
1414322110
] |
def build(self, source_entities, targets):
    """Assemble the standalone script from preprocessed source entities."""
    chunks = []
    embedded_tools = ""
    for source in source_entities:
        data = aql.read_text_file(source.get())
        if not data:
            continue
        # the base64 tool archive is injected via the EMBEDDED_TOOLS template
        if "embedded_tools" in source.tags:
            embedded_tools = EMBEDDED_TOOLS % data
        else:
            chunks.append(data)
    chunks.append(MAIN.format(embedded_tools=embedded_tools))
    aql.write_text_file(self.target, '\n'.join(chunks))
    targets.add_target_files(self.target)
5,
3,
5,
10,
1414322110
] |
def pack_tools(self, options, target):
    """Builder that packs tool sources into an embedded base64 archive."""
    return AqlPackTools(options, target)
5,
3,
5,
10,
1414322110
] |
def link_standalone(self, options, target):
    """Builder that links the standalone aql script."""
    return AqlLinkStandalone(options, target)
5,
3,
5,
10,
1414322110
] |
def __init__(self, instrument):
    """Record output limits for this signal generator."""
    super().__init__(instrument)
    # output limits — amplitude presumably in dBm ("DM" suffix in the
    # setter) and frequency in Hz; TODO confirm against the manual
    self.amps = [-140, 17]
    self.freqs = [10e6, 40e9]
7,
3,
7,
7,
1484501899
] |
def frequency(self):
    """Query the instrument's output frequency."""
    return self.query("OF0")
7,
3,
7,
7,
1484501899
] |
def frequency(self, frequency):
    """Program the output frequency, in GHz."""
    self.write(f"F0{frequency:.2f}GH")
7,
3,
7,
7,
1484501899
] |
def amplitude(self):
    """Query the instrument's output amplitude."""
    return self.query("OL0")
7,
3,
7,
7,
1484501899
] |
def amplitude(self, amplitude):
    """Program the output amplitude, in dBm."""
    self.write(f"L0{amplitude:.2f}DM")
7,
3,
7,
7,
1484501899
] |
def output(self):
    """Whether the RF output is currently enabled."""
    return self.query("OUTPut:STATe?") == "1"
7,
3,
7,
7,
1484501899
] |
def output(self, boolean=False):
    """Enable or disable the RF output."""
    self.write("OUTPut:STATe {:d}".format(boolean))
7,
3,
7,
7,
1484501899
] |
def test_open_graph_all_properties(self):
# Fetch a fixture page carrying a full set of Open Graph tags and verify
# every parsed field, including image and video dimensions.
url = 'http://lassie.it/open_graph/all_properties.html'
data = lassie.fetch(url)
self.assertEqual(data['url'], url)
self.assertEqual(data['title'], 'Lassie Open Graph All Properies Test')
self.assertEqual(data['description'], 'Just a test template with OG data!')
self.assertEqual(data['locale'], 'en_US')
self.assertEqual(data['site_name'], 'Lassie')
# exactly one og:image, with dimensions parsed as ints
self.assertEqual(len(data['images']), 1)
image = data['images'][0]
self.assertEqual(image['src'], 'http://i.imgur.com/cvoR7zv.jpg')
self.assertEqual(image['width'], 550)
self.assertEqual(image['height'], 365)
self.assertEqual(image['type'], 'og:image')
# exactly one og:video, typed by its MIME type
self.assertEqual(len(data['videos']), 1)
video = data['videos'][0]
self.assertEqual(video['src'], 'http://www.youtube.com/v/dQw4w9WgXcQ?version=3&autohide=1')
self.assertEqual(video['width'], 640)
self.assertEqual(video['height'], 480)
self.assertEqual(video['type'], 'application/x-shockwave-flash')
579,
49,
579,
12,
1375216899
] |
def test_open_graph_og_image_plus_two_body_images(self):
# A page with one og:image and two <img> tags: only the og:image is
# returned by default; all_images=True also includes body images.
url = 'http://lassie.it/open_graph/og_image_plus_two_body_images.html'
data = lassie.fetch(url)
# Try without passing "all_images", then pass it
self.assertEqual(len(data['images']), 1)
data = lassie.fetch(url, all_images=True)
self.assertEqual(len(data['images']), 3)
image_0 = data['images'][0]
image_1 = data['images'][1]
image_2 = data['images'][2]
# og:image is listed first, body images after it
self.assertEqual(image_0['type'], 'og:image')
self.assertEqual(image_1['type'], 'body_image')
self.assertEqual(image_2['type'], 'body_image')
579,
49,
579,
12,
1375216899
] |
def i2repr(self, pkt, x):
    """Render the value using XShortField's hex representation."""
    render = XShortField.i2repr
    return render(self, pkt, x)
151,
34,
151,
2,
1413990514
] |
def i2repr(self, pkt, x):
    """Render the value using XIntField's hex representation."""
    render = XIntField.i2repr
    return render(self, pkt, x)
151,
34,
151,
2,
1413990514
] |
def Reset(self):
    """Hide any live dialog objects and drop all RPC object references."""
    for obj_id in self._objects:
        try:
            self._objects[obj_id].hide()
        except Exception:
            # best effort: not every stored object supports hide();
            # a bare except here would also swallow SystemExit/KeyboardInterrupt
            pass
    log.info("Resetting RPC objects...")
    self._objects = {}
2,
6,
2,
1,
1474110890
] |
def UpdateAddonRepos(self):
    """Trigger Kodi's addon repository update."""
    return xbmc.executebuiltin("UpdateAddonRepos")
2,
6,
2,
1,
1474110890
] |
def Notify(self, header, message, image):
    """Show a 3-second notification with a localized message."""
    return notify(getLocalizedLabel(message), header, 3000, image)
2,
6,
2,
1,
1474110890
] |
def Dialog(self, title, message):
    """Show a localized OK dialog."""
    box = xbmcgui.Dialog()
    return box.ok(getLocalizedLabel(title), getLocalizedLabel(message))
2,
6,
2,
1,
1474110890
] |
def Dialog_Select(self, title, items):
    """Show a localized selection dialog; returns the chosen index."""
    chooser = xbmcgui.Dialog()
    return chooser.select(getLocalizedLabel(title), items)
2,
6,
2,
1,
1474110890
] |
def Player_GetPlayingFile(self, *args, **kwargs):
    """Path or URL of the file currently being played."""
    return XBMC_PLAYER.getPlayingFile()
2,
6,
2,
1,
1474110890
] |
def Player_IsPaused(self):
    """Whether the player is currently paused."""
    return xbmc.getCondVisibility("Player.Paused")
2,
6,
2,
1,
1474110890
] |
def ConvertLanguage(self, *args, **kwargs):
    """Proxy for xbmc.convertLanguage."""
    return xbmc.convertLanguage(*args, **kwargs)
2,
6,
2,
1,
1474110890
] |
def GetAddonInfo(self):
    """Collect the standard addon metadata fields into a dict."""
    keys = ("author", "changelog", "description", "disclaimer",
            "fanart", "icon", "id", "name", "path", "profile", "stars",
            "summary", "type", "version")
    return {key: ADDON.getAddonInfo(key) for key in keys}
2,
6,
2,
1,
1474110890
] |
def AddonCheck(self, addonId):
    """Return the recorded failure count for an addon (0 if none)."""
    return self._failures.get(addonId, 0)
2,
6,
2,
1,
1474110890
] |
def GetLanguage(self, *args, **kwargs):
    """Proxy for xbmc.getLanguage."""
    return xbmc.getLanguage(*args, **kwargs)
2,
6,
2,
1,
1474110890
] |
def GetSetting(self, *args, **kwargs):
    """Read an addon setting."""
    return ADDON.getSetting(*args, **kwargs)
2,
6,
2,
1,
1474110890
] |
def SetSetting(self, *args, **kwargs):
    """Write an addon setting."""
    return ADDON.setSetting(*args, **kwargs)
2,
6,
2,
1,
1474110890
] |
def TranslatePath(self, *args, **kwargs):
    """Proxy for xbmc.translatePath."""
    return xbmc.translatePath(*args, **kwargs)
2,
6,
2,
1,
1474110890
] |
def Dialog_CloseAll(self, *args, **kwargs):
    """Force-close all open Kodi dialogs."""
    return xbmc.executebuiltin("Dialog.Close(all, true)")
2,
6,
2,
1,
1474110890
] |
def DialogProgress_Create(self, *args, **kwargs):
    """Create a progress dialog, register it, and return its handle."""
    dialog = xbmcgui.DialogProgress()
    handle = id(dialog)
    self._objects[handle] = dialog
    dialog.create(*args, **kwargs)
    return handle
2,
6,
2,
1,
1474110890
] |
def DialogProgress_Update(self, hwnd, *args, **kwargs):
    """Forward an update to the progress dialog identified by `hwnd`."""
    dialog = self._objects[hwnd]
    return dialog.update(*args, **kwargs)
2,
6,
2,
1,
1474110890
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.