text stringlengths 0 1.05M | meta dict |
|---|---|
# Upper-case alphabet used by the caesar cipher for letter-position lookups.
alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# Registry of available cipher objects; each is wrapped in a `var` holder.
encryptions = []
class var:
    """Minimal mutable holder; wraps a value behind get/set accessors."""

    def __init__(self, obj):
        # The wrapped value.
        self.obj = obj

    def get(self):
        """Return the wrapped value."""
        return self.obj

    def set(self, obj):
        """Replace the wrapped value."""
        self.obj = obj
class reverse:
    """Reversal cipher: encryption and decryption both reverse the string."""
    name = "reverse"

    def start(self, encrypt, sentance):
        """Dispatch to encrypt/decrypt and return the transformed text.

        BUG FIXES: ``encrypt``/``decrypt`` previously lacked ``self``, so
        calling them via ``self.encrypt(...)`` raised TypeError; the result
        was also discarded instead of returned (callers printed None).
        """
        if encrypt == False:
            return self.decrypt(sentance)
        return self.encrypt(sentance)

    def encrypt(self, sentence):
        """Return the sentence reversed."""
        print("Encrypting")
        return sentence[::-1]

    def decrypt(self, sentence):
        """Return the sentence reversed (reversal is its own inverse)."""
        print("Decrypting")
        return sentence[::-1]
class caesar:
    """Caesar shift cipher over the module-level upper-case ``alphabet``."""
    name = "caesar"

    def start(self, encrypt, sentance):
        """Prompt for an integer key, then run encryption or decryption.

        BUG FIXES: ``int()`` raises ValueError, not TypeError, so invalid
        input previously escaped the retry loop and crashed; the result is
        now returned instead of being discarded.
        """
        while True:
            try:
                key = int(input("Whats the key? "))
                break
            except ValueError:
                print("ERROR, NON-INT INPUTED")
        if encrypt == False:
            return self.decrypt(key, sentance)
        return self.encrypt(key, sentance)

    def encrypt(self, key, sentance):
        """Shift each alphabet letter forward by ``key`` positions (mod 26)."""
        print("Encrypting " + sentance)
        returnMessage = ""
        for char in sentance:
            if char not in alphabet:
                # Non-letters (spaces, punctuation) pass through unchanged.
                returnMessage += char
            else:
                # ``% 26`` replaces the old +/- 26 fixup, which raised
                # IndexError for keys >= 52; behavior for 0..25 is identical.
                returnMessage += alphabet[(alphabet.find(char) + key) % 26]
        print(returnMessage)
        return returnMessage

    def decrypt(self, key, sentance):
        """Shift each alphabet letter backward by ``key`` positions (mod 26)."""
        print("Decrypting")
        returnMessage = ""
        for char in sentance:
            if char not in alphabet:
                returnMessage += char
            else:
                returnMessage += alphabet[(alphabet.find(char) - key) % 26]
        return returnMessage
encryptions.append(var(caesar()))
encryptions.append(var(reverse()))
encryptionType = input("What cipher do you want to use? Reverse/Caesar: ").lower()
sentance = input("What sentance do yo want to encrypt/decrypt? " ).upper()
# Keep asking until the user types exactly ENCRYPT or DECRYPT.
# BUG FIX: the original test `endy == "ENCRYPT" or "DECRYPT"` was always
# truthy (a non-empty string literal), so any answer was accepted.
while True:
    endy = input("Encrypt or decrypt? ").upper()
    if endy in ("ENCRYPT", "DECRYPT"):
        break
    print("ANSWER NOT ENCRYPT OR DECRYPT")
# Locate the requested cipher.  Fall back to the last registered cipher when
# the name is unknown, mirroring the original's behavior (its loop counter
# was left at the final element).
# BUG FIX: the original assigned `position` but then indexed with `i`.
position = len(encryptions) - 1
for idx, encryption in enumerate(encryptions):
    print("Currently Checking: " + encryption.get().name)
    if encryption.get().name == encryptionType:
        position = idx
        break
if endy == "ENCRYPT":
    print(encryptions[position].get().start(True, sentance))
elif endy == "DECRYPT":
    print(encryptions[position].get().start(False, sentance))
else:
print("FATAL ERROR, NON ENCRYPTION TYPE INPUTED") | {
"repo_name": "il8677/ComputationalThinking",
"path": "Encryption.py",
"copies": "1",
"size": "3060",
"license": "mit",
"hash": -8876327648043881000,
"line_mean": 29.0098039216,
"line_max": 82,
"alpha_frac": 0.5310457516,
"autogenerated": false,
"ratio": 4.08,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.013493990544240789,
"num_lines": 102
} |
# Upper-case alphabet used for all rotor/reflector index arithmetic.
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
class Rotor():
    """One Enigma rotor: a rotating permutation with a turnover notch."""
    # Class-level defaults; `position` is read in set_position before the
    # first instance assignment, so it must stay a class attribute.
    perms = []
    turnover_position = ''
    position = 'A'

    def __init__(self, perms, turnover_position, ring_setting):
        """Store the wiring rotated by the ring setting, plus the notch letter."""
        offset = alphabet.index(ring_setting)
        rotated = perms[offset:] + perms[:offset]
        self.perms = list(rotated)
        self.turnover_position = turnover_position

    def set_position(self, position):
        """Rotate the wiring so the given letter shows in the window."""
        shift = alphabet.index(position) - alphabet.index(self.position)
        self.position = position
        self.perms = self.perms[shift:] + self.perms[:shift]

    def turnover(self):
        """True when the rotor sits on its turnover (notch) letter."""
        return self.turnover_position == self.position

    def step(self):
        """Advance one position; return True if the notch was passed."""
        at_notch = self.turnover()
        self.perms = self.perms[1:] + self.perms[:1]
        self.position = alphabet[(alphabet.index(self.position) + 1) % 26]
        return at_notch

    def encrypt_forward(self, c):
        """Map a letter right-to-left through the wiring."""
        return self.perms[alphabet.index(c)]

    def encrypt_backward(self, c):
        """Map a letter left-to-right (inverse wiring)."""
        return alphabet[self.perms.index(c)]
class Reflector():
    """Fixed letter-pair mapping applied at the far end of the rotor stack."""

    def __init__(self, pairs):
        """Map each alphabet letter to the wiring letter at the same index."""
        self.pairs = {letter: pairs[i] for i, letter in enumerate(alphabet)}

    def reflect(self, c):
        """Return the letter wired to ``c``."""
        return self.pairs[c]
class Machine():
rotors = []
reflector = None
plug_board = {}
double_step = False
def __init__(self, rotors, reflector, plug_board):
self.rotors = [Rotor(rotor[0], rotor[1], rotor[2]) for rotor in rotors]
self.reflector = Reflector(reflector)
for pair in plug_board:
self.plug_board[pair[0]], self.plug_board[pair[1]] = pair[1], pair[0]
def set_rotors(self, positions):
if len(positions) != len(self.rotors):
print 'Error: rotor settings do not match with number of rotors'
else:
[rotor.set_position(positions[i]) for i, rotor in enumerate(self.rotors)]
return
def encrypt_char(self, c):
c = self.plug_board[c] if c in self.plug_board else c
for i, rotor in enumerate(self.rotors[::-1]):
if i is 0:
c = rotor.encrypt_forward(c)
else:
difference = (alphabet.index(self.rotors[::-1][i-1].position) - alphabet.index(self.rotors[::-1][i].position)) % 26
c = rotor.encrypt_forward(alphabet[alphabet.index(c) - difference])
print c
difference = alphabet.index(self.rotors[0].position)
c = self.reflector.reflect(c)
print '\n'
print c
print '\n'
for i, rotor in enumerate(self.rotors):
if i is 0:
c = rotor.encrypt_backward(c)
else:
difference = (alphabet.index(self.rotors[i-1].position) - alphabet.index(self.rotors[i].position)) % 26
print difference
c = rotor.encrypt_backward(alphabet[alphabet.index(c) - difference])
print c
c = self.plug_board[c] if c in self.plug_board else c
return c
def status(self):
return self.rotors[0].position + self.rotors[1].position + self.rotors[2].position
def step(self):
if self.double_step:
self.rotors[1].step()
self.rotors[0].step()
self.double_step = False
if self.rotors[2].step():
self.rotors[1].step()
if self.rotors[1].turnover():
self.double_step = True
def encrypt(self, s):
out = ''
for c in s:
self.step()
out += self.encrypt_char(c)
return out
def test():
    # Build a sample Enigma configuration.  Each rotor is given as a
    # (wiring permutation, turnover letter, ring setting) triple.
    rotors=[('ESOVPZJAYQUIRHXLNFTGKDCMWB', 'J', 'G'),
            ('AJDKSIRUXBLHWTMCQGZNPYFVOE', 'E', 'M'),
            ('VZBRGITYUPSDNHLXAWMJQOFECK', 'Z', 'Y')]
    # Reflector wiring: alphabet[i] maps to reflector[i].
    reflector = 'YRUHQSLDPXNGOKMIEBFZCWVJAT'
    # Ten plug-board letter swaps.
    plug_board = [('D', 'N'), ('G', 'R'), ('I', 'S'), ('K', 'C'), ('Q', 'X'), ('T', 'M'), ('P', 'V'), ('H', 'Y'), ('F', 'W'), ('B', 'J')]
    machine = Machine(rotors, reflector, plug_board)
return machine | {
"repo_name": "DT9/programming-problems",
"path": "other/engima.py",
"copies": "1",
"size": "4222",
"license": "apache-2.0",
"hash": 9022073693378002000,
"line_mean": 33.3333333333,
"line_max": 137,
"alpha_frac": 0.5438180957,
"autogenerated": false,
"ratio": 3.5658783783783785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9453924924533967,
"avg_score": 0.031154309908882367,
"num_lines": 123
} |
# Upper-case alphabet; index arithmetic on it drives the Caesar shift.
alphabet = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
def encrypt(text, shift):
    """Caesar-encrypt ``text`` (upper-cased) by ``shift``; non-letters pass through.

    :raises ValueError: if shift is outside 1..25.

    BUG FIX: the validation used ``1 < shift < 25``, rejecting shifts of 1
    and 25 even though the error message says "between 1 and 25"; the bounds
    are now inclusive, matching the message.
    """
    if not (1 <= shift <= 25):
        raise ValueError("Shift key must be between 1 and 25.")
    letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    enc = ""
    text = text.upper()
    for ch in text:
        if ch in letters:
            enc += letters[(letters.index(ch) + shift) % 26]
        else:
            enc += ch
    return enc
def decrypt(text, shift):
    """Caesar-decrypt ``text`` by ``shift``; non-letters pass through.

    Letters are always emitted upper-case (the lookup upper-cases each
    character); non-letters keep their original form.

    :raises ValueError: if shift is outside 1..25.

    BUG FIX: as in ``encrypt``, the validation was exclusive
    (``1 < shift < 25``) while the message promises inclusive bounds.
    """
    if not (1 <= shift <= 25):
        raise ValueError("Shift key must be between 1 and 25.")
    letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    dec = ""
    for ch in text:
        if ch.upper() in letters:
            dec += letters[(letters.index(ch.upper()) - shift) % 26]
        else:
            dec += ch
    return dec
def main():
    """Interactive loop: repeatedly ask for a mode, then text and key.

    Any answer other than E/e or D/d simply re-prompts; the loop never exits.
    """
    while True:
        print("Encryption (E) or Decryption (D)?")
        choice = input("> ")
        if choice in ("E", "e"):
            print(encrypt(input("Enter text to encrypt: "), int(input("Enter key: "))))
        elif choice in ("D", "d"):
            print(decrypt(input("Enter text to decrypt: "), int(input("Enter key: "))))
if __name__ == "__main__":
main() | {
"repo_name": "idunnowhy9000/Projects",
"path": "SOURCE/Python/Security/Caesar Cipher.py",
"copies": "1",
"size": "1112",
"license": "mit",
"hash": 4253319265747669500,
"line_mean": 24.2954545455,
"line_max": 141,
"alpha_frac": 0.529676259,
"autogenerated": false,
"ratio": 2.6164705882352943,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7780798863258145,
"avg_score": 0.17306959679542994,
"num_lines": 44
} |
# Lower-case alphabet; three-character slices of it are the "straights"
# that rule1 looks for.
alphabet = "abcdefghijklmnopqrstuvwxyz"
def rule1(password):
    """True if the password contains an increasing straight of >= 3 letters
    (e.g. 'abc' or 'hij'), detected as a 3-char substring of the alphabet.

    CLEANUP: the original wrapped the slice in ``try/except IndexError``,
    but string slicing never raises IndexError — the handler was dead code.
    """
    straight = "abcdefghijklmnopqrstuvwxyz"
    return any(password[i:i + 3] in straight for i in range(len(password) - 2))
def rule2(password):
    """True if the password contains none of the confusing letters i, o, l."""
    for forbidden in ("i", "o", "l"):
        if forbidden in password:
            return False
    return True
def rule3(password):
    """True if the password contains at least two non-overlapping letter
    pairs (the pairs may repeat the same letter, e.g. 'aaaa' qualifies).

    CLEANUP/PERF: the original kept a ``usedIndices`` list (O(n) membership
    test per position) and a dead ``try/except IndexError``; a greedy
    left-to-right scan counts the same non-overlapping pairs in O(n).
    """
    count = 0
    i = 0
    while i < len(password) - 1:
        if password[i] == password[i + 1]:
            count += 1
            i += 2  # skip past the pair so it cannot overlap the next one
        else:
            i += 1
    return count > 1
def validPassword(password):
    """A password is valid exactly when it satisfies all three rules."""
    return rule1(password) and rule2(password) and rule3(password)
# test cases — the worked examples from the Advent of Code day 11 statement
assert rule1('hijklmmn')
assert not rule2('hijklmmn')
assert rule3('abbceffg')
assert not rule1('abbceffg')
assert not rule3('abbcegjk')
assert validPassword('abcdffaa')
assert validPassword('ghjaabcc')
def incrementString(string):
    """Return the next string in base-26 'a'..'z' counting order
    ('a' -> 'b', 'az' -> 'ba', 'zz' -> 'aaa').

    BUG FIX: the original relied on IndexError to detect both the 'z' carry
    and the empty string; on an all-'z' input the empty-string base case
    re-raised inside its own handler, recursing forever.  Carrying past the
    leftmost character now prepends an 'a' instead.
    """
    letters = "abcdefghijklmnopqrstuvwxyz"
    if not string:
        return "a"
    nxt = letters.index(string[-1]) + 1
    if nxt == len(letters):
        # Last character was 'z': wrap to 'a' and carry into the prefix.
        return incrementString(string[:-1]) + "a"
    return string[:-1] + letters[nxt]
# Advance from the puzzle input to the next valid password (part 1),
# then step once more and search again for the one after that (part 2).
password = "vzbxkghb"
while (not validPassword(password)):
    password = incrementString(password)
print("Part 1:", password)
password = incrementString(password)
while (not validPassword(password)):
    password = incrementString(password)
print("Part 2:", password)
| {
"repo_name": "protocol114/AdventOfCode",
"path": "day11/day11.py",
"copies": "1",
"size": "1608",
"license": "mit",
"hash": -1771149653789528600,
"line_mean": 24.125,
"line_max": 97,
"alpha_frac": 0.6231343284,
"autogenerated": false,
"ratio": 3.7746478873239435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48977822157239437,
"avg_score": null,
"num_lines": null
} |
# Pangram check: report whether the input contains every alphabet letter.
alphabet = 'abcdefghijklmnopqrstuvwxyz'

# VERSION 1: nested loops
text = input('your input: ')
for c_alphabet in alphabet:  # test each character in the alphabet...
    for c_text in text:  # ...comparing it to each character in the text
        found_char = False  # when we start, we haven't found the current character yet
        if c_text == c_alphabet:
            found_char = True  # found it! set found_char to True and exit the inner loop
            break
    if not found_char:
        break  # we didn't find the current character, so we break out of the outer loop and report failure
# NOTE(review): if the input text is empty the inner loop never runs, so
# found_char is unbound here and this raises NameError — confirm intended.
# finally, print a message depending on the value of found_char:
if not found_char:
    print('at least one character is missing')
else:
    print('all characters are present')

# VERSION 2: using the 'in' operator
text = input('your input: ')
for c_alphabet in alphabet:
    found_char = True  # assume the character is present
    if c_alphabet not in text:  # but if not, set the flag to False and break out of the loop
        found_char = False
        break
if not found_char:
    print('at least one character is missing')
else:
    print('all characters are present')
| {
"repo_name": "sgolitsynskiy/sergey.cs.uni.edu",
"path": "www/courses/cs1510/fall2017/sessions/100517_alphabet.py",
"copies": "1",
"size": "1164",
"license": "mit",
"hash": -110182112147892100,
"line_mean": 30.4594594595,
"line_max": 104,
"alpha_frac": 0.6735395189,
"autogenerated": false,
"ratio": 4.055749128919861,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5229288647819861,
"avg_score": null,
"num_lines": null
} |
# Substitution cipher over a keyed alphabet (prompts and messages are French).
alphabet=['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
print ("\n\n\nQue voulez-vous faire?")
print (" Option 1:crypter un document")
print (" Option 2:decrypter un document")
choix=input("faite un choix (tapez 1 ou 2): ")
cle=input("Clé utilisée ? (en majuscules) ")
code=[0]*len(cle)  # create a list of zeros as long as the key
code[:]=cle[:]  # the key letters become the first substitutions; [:] selects the whole list
for loop in range(len(alphabet)):  # loop counts from 0 up to 25
    if not(alphabet[loop] in code):  # if an alphabet letter is not yet in the code...
        code.append(alphabet[loop])  # ...append it, completing the substitution alphabet with the unused letters
print("Entrez le nom du fichier , ou le chemin: ")
nom=input()+".txt"
fichier=open(nom,"r")  # open the file to encrypt or decrypt in read mode
texte=fichier.read()  # copy the file contents into the variable 'texte'
texte=texte.upper()  # convert the whole text to upper case
fichier.close()  # close the file; the content is now stored in 'texte'
if choix=="1":
    # Option 1: encrypt the text with the keyed substitution alphabet.
    print("Choisir nom de sauvegarde : ")
    nom=input()+".txt"
    newtexte=open(nom,"w")  # open a new .txt file in write mode
    new=[]  # list used to store the whole message
    mot=[]  # temporary list used to build each word
    for i in range(len(texte)):  # counter over the text
        if texte[i]!=" ":  # if the character is not a space
            if texte[i] in alphabet:  # if the character is an alphabet letter
                pos=alphabet.index(texte[i])  # look up its coded letter
                mot.append(code[pos])  # append the coded letter to the current word
            if texte[i] in ["0","1","2","3","4","5","6","7","8","9",'"',":",";",".",",","(",")","'"]:  # digits and punctuation
                mot.append(texte[i])  # are left unchanged
        if texte[i]==" " or i==(len(texte)-1):  # on a space, or at the end of the text
            mot=''.join(x for x in mot if x)  # join the list elements into a single word string
            new.append(mot)  # append the word to the message list
            mot=[]  # start a new word
    new=' '.join(x for x in new if x)  # join all the words into the encoded text
    newtexte.write(new)  # write the message to the file opened above
    newtexte.close()  # close the file
    print("Cryptage terminé !!")
if choix=="2":
    # Option 2: decrypt the text by inverting the keyed substitution.
    print("Choisir nom de sauvegarde : ")
    nom=input()+".txt"
    newtexte=open(nom,"w")  # open a new .txt file in write mode
    new=[]  # list used to store the whole message
    mot=[]  # temporary list used to build each word
    for i in range(len(texte)):
        if texte[i]!=" ":  # if the character is not a space
            if texte[i] in code:  # if the character is a letter of the coded alphabet
                pos=code.index(texte[i])  # look up its decoded letter
                mot.append(alphabet[pos])  # append the decoded letter to the current word
            if texte[i] in ["0","1","2","3","4","5","6","7","8","9",'"',":",";",".",",","(",")","'"]:  # digits and punctuation
                mot.append(texte[i])  # are left unchanged
        if texte[i]==" " or i==(len(texte)-1):  # on a space, or at the end of the text
            mot=''.join(x for x in mot if x)  # join the list elements into a single word string
            new.append(mot)  # append the word to the message list
            mot=[]  # start a new word
    new=' '.join(x for x in new if x)  # join all the words into the decoded text
    newtexte.write(new)  # write the decoded message to the file
    newtexte.close()  # close the file
    print("Décryptage terminé !!")
| {
"repo_name": "alexandre-o/XOR-isn-projet-python-",
"path": "cryptage-decryptage final.py",
"copies": "1",
"size": "4464",
"license": "cc0-1.0",
"hash": 4121966733490300000,
"line_mean": 65.7692307692,
"line_max": 163,
"alpha_frac": 0.6356413167,
"autogenerated": false,
"ratio": 2.786211258697027,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39218525753970274,
"avg_score": null,
"num_lines": null
} |
# 16-letter residue alphabet — presumably a reduced amino-acid code (TODO confirm).
ALPHABET = ["A", "C", "D", "E", "F", "G", "H", "I", "K", "L", "M", "N", "P", "Q", "R", "S"]
def makeKMerList(k):
    """Return every length-k string over ALPHABET, in nested-loop
    (lexicographic by ALPHABET order) sequence.  For k == 1 the ALPHABET
    list itself is returned, matching the original behavior."""
    if k == 1:
        return ALPHABET
    shorter = makeKMerList(k - 1)
    # Extend each (k-1)-mer with every alphabet letter.
    return [prefix + letter for prefix in shorter for letter in ALPHABET]
def initializeKMerDict(k):
    """Return a dict mapping every k-mer over ALPHABET to a zero count."""
    return dict.fromkeys(makeKMerList(k), 0)
def makeSampleImageKMerCounts(sequence, k, pngFileNamePrefix, sampleCount):
    # Count k-mers in `sequence` and save the scaled count vector as a
    # one-column PNG named "<prefix>_<sampleCount>.png"; return the file name.
    kMerDict = initializeKMerDict(k)
    for i in range(len(sequence) - k + 1):
        # Iterate through the k-mers and increment the appropriate counts
        if "X" not in sequence[i:i+k]:
            # The sequence does not have a wild card, so count it
            kMerDict[sequence[i:i+k]] = kMerDict[sequence[i:i+k]] + 1
    sequenceArray = numpy.zeros((len(kMerDict), 1))
    numSeqCount = 0
    for val in kMerDict.values():
        # Iterate through the values in the dictionary and put each into the sequence array.
        # Counts are scaled by 1/255 (8-bit image range); counts above 255
        # are only warned about, not clamped.
        sequenceArray[numSeqCount] = float(val)/float(255)
        if val > 255:
            print "PROBLEM!"
        numSeqCount = numSeqCount + 1
    pngFileName = pngFileNamePrefix + "_" + str(sampleCount) + ".png"
    misc.imsave(pngFileName, sequenceArray)
    return pngFileName
def makeSequenceInputsKMerCountsBaseline(sequenceAlphabetFileName, peakHeightFileName, pngFileNamePrefix, valueFileNamePrefix, outputFileName, k):
    # Convert each alphabet sequence to k-mer counts, and record them all as png examples.
    # ASSUMES THAT 16-LETTER ALPHABET AND ALL 4 COMBINATIONS FOR EACH SEQUENCE ARE IN sequenceAlphabetFile
    # ASSUMES THAT EACH PEAK HEIGHT CORRESPONDS TO THE ALPHABET ENTRIES WITH THE SAME INDEX
    sequenceAlphabetFile = open(sequenceAlphabetFileName)
    peakHeightFile = open(peakHeightFileName)
    outputFile = open(outputFileName, 'w+')
    sampleCount = 0
    valueFileName = ""
    for line in sequenceAlphabetFile:
        # Iterate through the alphabet sequences and count the kmers in each
        peakHeight = peakHeightFile.readline().strip()
        sampleCount = sampleCount + 1
        if sampleCount % 4 == 1:
            # Start a new value file for each group of 4 sequence variants.
            valueFileName = valueFileNamePrefix + "_" + str(sampleCount) + "-" + str(sampleCount + 3) + ".txt"
            valueFile = open(valueFileName, 'w+')
            valueFile.write(peakHeight + "\n")
            valueFile.close()
        pngFileName = makeSampleImageKMerCounts(line.strip(), k, pngFileNamePrefix, sampleCount)
        outputFile.write(pngFileName + "\t" + valueFileName + "\t" + "0" + "\n")
    # BUG FIX: the input sequence file was never closed (resource leak).
    sequenceAlphabetFile.close()
    peakHeightFile.close()
    outputFile.close()
if __name__=="__main__":
    # CLI arguments: sequence-alphabet file, peak-height file, png prefix,
    # value-file prefix, output list file, and k (k-mer length).
    import sys
    import numpy
    from scipy import misc
    sequenceAlphabetFileName = sys.argv[1]
    peakHeightFileName = sys.argv[2]
    pngFileNamePrefix = sys.argv[3]
    valueFileNamePrefix = sys.argv[4]
    outputFileName = sys.argv[5]
    k = int(sys.argv[6])
    makeSequenceInputsKMerCountsBaseline(sequenceAlphabetFileName, peakHeightFileName, pngFileNamePrefix, valueFileNamePrefix, outputFileName, k)
| {
"repo_name": "imk1/IMKTFBindingCode",
"path": "makeSequenceInputsKMerCountsBaseline.py",
"copies": "1",
"size": "3338",
"license": "mit",
"hash": 5476083632124738000,
"line_mean": 39.725,
"line_max": 146,
"alpha_frac": 0.7171959257,
"autogenerated": false,
"ratio": 3.1490566037735848,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43662525294735843,
"avg_score": null,
"num_lines": null
} |
"""Alphabet and text normalization for Latin.
- *Principles of Text Cleaning gleaned from*
- http://udallasclassics.org/wp-content/uploads/maurer_files/APPARATUSABBREVIATIONS.pdf
Guidelines:
- [...] Square brackets, or in recent editions wavy brackets ʺ{...}ʺ, enclose words etc. that an editor thinks should be deleted (see ʺdel.ʺ) or marked as out of place (see ʺsecl.ʺ).
- [...] Square brackets in a papyrus text, or in an inscription, enclose places where words have been lost through physical damage.
- If this happens in mid-line, editors use ʺ[...]ʺ.
- If only the end of the line is missing, they use a single bracket ʺ[...ʺ
- If the lineʹs beginning is missing, they use ʺ...]ʺ
- Within the brackets, often each dot represents one missing letter.
- [[...]] Double brackets enclose letters or words deleted by the medieval copyist himself.
- (...) Round brackets are used to supplement words abbreviated by the original copyist; e.g. in an inscription: ʺtrib(unus) mil(itum) leg(ionis) IIIʺ
- <...> diamond ( = elbow = angular) brackets enclose words etc. that an editor has added (see ʺsuppl.ʺ)
† An obelus (pl. obeli) means that the word(s etc.) is very plainly corrupt, but the editor cannot see how to emend.
- If only one word is corrupt, there is only one obelus, which precedes the word; if two or more words are corrupt, two obeli enclose them. (Such at least is the rule--but that rule is often broken, especially in older editions, which sometimes dagger several words using only one obelus.) To dagger words in this way is to ʺobelizeʺ them.
"""
__author__ = [
"Todd Cook <todd.g.cook@gmail.com>",
"Kyle P. Johnson <kyle@kyle-p-johnson.com>",
]
__license__ = "MIT License"
import re
from typing import Dict
from cltk.alphabet.text_normalization import cltk_normalize
# Editorial text enclosed in braces, e.g. "{PRO ...}".
BRACE_STRIP = re.compile(r"{[^}]+}")
# Runs of Arabic digits.
NUMERALS = re.compile(r"[0-9]+")
# Common Latin-text punctuation (hyphen deliberately excluded; see drop_latin_punctuation).
LATIN_PUNCT = re.compile(r"[\\/':;,!\?\._『@#\$%^&\*]+")
QUOTES = re.compile(r'["”“]+')
# we will extract content between brackets, it is editorial
ANGLE_BRACKETS = re.compile(r"([a-zA-Z]+)?<[a-zA-Z\s]+>([,\?\.a-zA-Z]+)?")
SQUARE_BRACKETS = re.compile(r"\[[^\]]+\]")
# Words flagged as corrupt, either enclosed in or prefixed by daggers (†) or plus signs.
OBELIZED_WORDS = re.compile(r"†[^†]+†")
OBELIZED_WORD = re.compile(r"†[^\s]+\s")
OBELIZED_PLUS_WORDS = re.compile(r"[\+][^\+]+[\+]")
OBELIZED_PLUS_WORD = re.compile(r"[\+][^\s]+\s")
# A hyphenated word surrounded by spaces (line-break hyphenation artifact).
HYPHENATED = re.compile(r"\s[^-]+-[^-]+\s")
class JVReplacer:  # pylint: disable=too-few-public-methods
    """Replace J/V with I/U.

    The classical Latin alphabet did not distinguish i/j or u/v; many texts
    bear the influence of later editors.  Applying the substitution during
    preprocessing collapses the search space.

    >>> replacer = JVReplacer()
    >>> replacer.replace("Julius Caesar")
    'Iulius Caesar'
    >>> replacer.replace("In vino veritas.")
    'In uino ueritas.'
    """

    def __init__(self):
        """Compile the four single-letter substitution patterns."""
        replacements = [("j", "i"), ("v", "u"), ("J", "I"), ("V", "U")]
        self.patterns = [(re.compile(src), dst) for src, dst in replacements]

    def replace(self, text):
        """Apply every j/v substitution to *text* and return the result."""
        for pattern, substitute in self.patterns:
            text = pattern.sub(substitute, text)
        return text


JV_REPLACER = JVReplacer()
class LigatureReplacer:  # pylint: disable=too-few-public-methods
    """Expand the ligatures œ/æ to oe/ae (and Œ/Æ to OE/AE).

    Classical Latin wrote o and e separately; medieval and early modern
    texts used the ligatures, reflecting the merged Late Latin diphthong.
    See https://en.wikipedia.org/wiki/%C5%92 and
    https://en.wikipedia.org/wiki/%C3%86

    >>> replacer = LigatureReplacer()
    >>> replacer.replace("mæd")
    'maed'
    >>> replacer.replace("prœil")
    'proeil'
    """

    def __init__(self):
        """Compile the four ligature substitution patterns."""
        replacements = [("œ", "oe"), ("æ", "ae"), ("Œ", "OE"), ("Æ", "AE")]
        self.patterns = [(re.compile(src), dst) for src, dst in replacements]

    def replace(self, text):
        """Expand every ligature in *text* and return the result."""
        for pattern, substitute in self.patterns:
            text = pattern.sub(substitute, text)
        return text


LIGATURE_REPLACER = LigatureReplacer()
def dehyphenate(text: str) -> str:
    """
    Remove hyphens from hyphenated words (typically line-break artifacts).
    Caution using this elsewhere: Latin's enclitic '-ne' is meaningful.
    :param text:
    :return:

    >>> dehyphenate('quid re-tundo hier')
    'quid retundo hier'
    """
    spans = [match.span() for match in HYPHENATED.finditer(text)]
    # Splice right-to-left so earlier spans keep their offsets.
    for start, end in reversed(spans):
        text = text[:start] + text[start:end].replace("-", "") + text[end:]
    return text
def swallow(text: str, pattern_matcher: re.Pattern) -> str:
    """
    Utility function internal to this module: delete every match of
    *pattern_matcher* from *text* (no space substituted), then strip
    leading/trailing whitespace.
    :param text: text to clean
    :param pattern_matcher: compiled pattern to remove
    :return: the text with all matches removed
    """
    spans = [match.span() for match in pattern_matcher.finditer(text)]
    # Remove right-to-left so earlier spans keep their offsets.
    for start, end in reversed(spans):
        text = text[:start] + text[end:]
    return text.strip()
def swallow_braces(text: str) -> str:
    """
    Remove brace-enclosed spans (editorial titles etc.), braces included.
    :param text: Text with braces
    :return: Text with the braces and any text inside removed

    >>> swallow_braces("{PRO P. QVINCTIO ORATIO} Quae res in civitate {etc}... ")
    'Quae res in civitate ...'
    """
    cleaned = swallow(text, BRACE_STRIP)
    return cleaned
def drop_latin_punctuation(text: str) -> str:
    """
    Replace all Latin punctuation with spaces, except the hyphen and the
    obelization markers.  Collapse hyphenated words and remove obelization
    marks separately beforehand.  The hyphen matters for Latin tokenization:
    the enclitic particle `-ne` differs from the interjection `ne`.
    :param text: Text to clean
    :return: cleaned text

    >>> drop_latin_punctuation('quid est ueritas?')
    'quid est ueritas '
    >>> drop_latin_punctuation("vides -ne , quod , planus est ")
    'vides -ne  quod  planus est '
    >>> drop_latin_punctuation("here is some trash, punct \/':;,!\?\._『@#\$%^&\*okay").replace("  ", " ")
    'here is some trash punct okay'
    """
    for matcher in (NUMERALS, LATIN_PUNCT, QUOTES):
        text = matcher.sub(" ", text)
    return text
def remove_accents(text: str) -> str:  # pylint: disable=too-many-statements
    """
    Remove accents; note: AE replacement and macron replacement should happen elsewhere, if desired.
    :param text: text with undesired accents
    :return: clean text

    >>> remove_accents('suspensám')
    'suspensam'
    >>> remove_accents('quăm')
    'quam'
    >>> remove_accents('aegérrume')
    'aegerrume'
    >>> remove_accents('ĭndignu')
    'indignu'
    >>> remove_accents('îs')
    'is'
    >>> remove_accents('óccidentem')
    'occidentem'
    >>> remove_accents('frúges')
    'fruges'
    """
    # NOTE: several visually identical replacements below handle DISTINCT
    # Unicode forms (precomposed vs. combining accents) — do not deduplicate.
    text = text.replace(r"á", "a")  # suspensám
    text = text.replace(r"Á", "A")
    text = text.replace(r"á", "a")  # Note: this accent is different than the one above!
    text = text.replace(r"Á", "A")
    text = text.replace(r"ă", "a")  # 'quăm'
    text = text.replace(r"Ă", "A")
    text = text.replace(r"à", "a")
    text = text.replace(r"À", "A")
    text = text.replace(r"â", "a")
    text = text.replace(r"Â", "A")
    text = text.replace(r"ä", "a")
    text = text.replace(r"Ä", "A")
    text = text.replace(r"é", "e")  # aegérrume
    text = text.replace(r"è", "e")
    text = text.replace(r"È", "E")
    text = text.replace(r"é", "e")
    text = text.replace(r"É", "E")
    text = text.replace(r"ê", "e")
    text = text.replace(r"Ê", "E")
    text = text.replace(r"ë", "e")
    text = text.replace(r"Ë", "E")
    text = text.replace(r"ĭ", "i")  # ĭndignu
    text = text.replace(r"î", "i")  # 'îs'
    text = text.replace(r"í", "i")
    text = text.replace(r"í", "i")
    text = text.replace(r"î", "i")
    text = text.replace(r"Î", "I")
    text = text.replace(r"ï", "i")
    text = text.replace(r"Ï", "I")
    text = text.replace(r"ó", "o")  # óccidentem
    text = text.replace(r"ô", "o")
    text = text.replace(r"Ô", "O")
    text = text.replace(r"ö", "o")
    text = text.replace(r"Ö", "O")
    text = text.replace(r"û", "u")
    text = text.replace(r"Û", "U")
    text = text.replace(r"ù", "u")
    text = text.replace(r"Ù", "U")
    text = text.replace(r"ü", "u")
    text = text.replace(r"Ü", "U")
    text = text.replace(r"ú", "u")  # frúges
    text = text.replace(r"ÿ", "y")
    text = text.replace(r"Ÿ", "Y")
    text = text.replace(r"ç", "c")
    text = text.replace(r"Ç", "C")
    text = text.replace(r"ë", "e")
    text = text.replace(r"Ë", "E")
    text = text.replace(r"Ȳ", "Y")
    text = text.replace(r"ȳ", "y")
    return text
def remove_macrons(text: str) -> str:
    """
    Remove macrons above vowels.
    :param text: text with macronized vowels
    :return: clean text

    >>> remove_macrons("canō")
    'cano'
    >>> remove_macrons("Īuliī")
    'Iulii'
    """
    macron_pairs = (
        ("ā", "a"), ("Ā", "A"),
        ("ē", "e"), ("Ē", "E"),
        ("ī", "i"), ("Ī", "I"),
        ("ō", "o"), ("Ō", "O"),
        ("ū", "u"), ("Ū", "U"),
    )
    for macron, plain in macron_pairs:
        text = text.replace(macron, plain)
    return text
def swallow_angle_brackets(text: str) -> str:
    """
    Remove angle-bracketed spans together with the words they attach to.

    >>> text = " <O> mea dext<e>ra illa CICERO RUFO Quo<quo>. modo proficiscendum <in> tuis. deesse HS <c> quae metu <exagitatus>, furore <es>set consilium "
    >>> swallow_angle_brackets(text)
    'mea illa CICERO RUFO modo proficiscendum tuis. deesse HS quae metu furore consilium'
    """
    text = swallow(text, ANGLE_BRACKETS)
    # Occasionally an unmatched bracket survives; blank those out one by one.
    text = text.replace("<", " ")
    return text.replace(">", " ")
def disappear_angle_brackets(text: str) -> str:
    """
    Strip every '<' and '>' while keeping the surrounding text;
    no spaces are inserted.
    :param text: text with angle brackets
    :return: text without angle brackets
    """
    return text.replace("<", "").replace(">", "")
def swallow_square_brackets(text: str) -> str:
    """
    Remove square-bracketed spans, brackets included, without substituting
    a space.
    :param text: text to clean
    :return: text with square brackets and their contents removed

    >>> swallow_square_brackets("qui aliquod institui[t] exemplum")
    'qui aliquod institui exemplum'
    >>> swallow_square_brackets("posthac tamen cum haec [tamen] quaeremus,")
    'posthac tamen cum haec quaeremus,'
    """
    cleaned = swallow(text, SQUARE_BRACKETS)
    return cleaned
def swallow_obelized_words(text: str) -> str:
    """
    Remove obelized words, whether enclosed in markers or flagged only on
    the left.  Both plus signs and daggers count as obelization markers.
    :param text: Text with obelized words
    :return: clean text

    >>> swallow_obelized_words("tu Fauonium †asinium† dicas")
    'tu Fauonium dicas'
    >>> swallow_obelized_words("tu Fauonium †asinium dicas")
    'tu Fauonium dicas'
    >>> swallow_obelized_words("meam +similitudinem+")
    'meam'
    >>> swallow_obelized_words("mea +ratio non habet" )
    'mea non habet'
    """
    # Enclosed forms first, then left-flagged forms, for each marker kind.
    for matcher in (OBELIZED_WORDS, OBELIZED_WORD,
                    OBELIZED_PLUS_WORDS, OBELIZED_PLUS_WORD):
        text = swallow(text, matcher)
    return text
def disappear_round_brackets(text: str) -> str:
    """
    Strip round brackets, keeping the enclosed text (editorial expansions
    of abbreviations).
    :param text: Text with round brackets.
    :return: Clean text.

    >>> disappear_round_brackets("trib(unus) mil(itum) leg(ionis) III")
    'tribunus militum legionis III'
    """
    return text.replace("(", "").replace(")", "")
def swallow_editorial(text: str) -> str:
    """
    Remove common editorial marks: round brackets are dropped (content kept),
    while angle-bracketed, square-bracketed and obelized spans are swallowed.
    :param text: Text with editorial marks
    :return: Clean text.

    >>> swallow_editorial("{PRO P. QVINCTIO ORATIO} Quae res in civitate trib(unus) mil(itum) leg(ionis) III tu Fauonium †asinium† dicas meam +similitudinem+ mea +ratio non habet ... ")
    '{PRO P. QVINCTIO ORATIO} Quae res in civitate tribunus militum legionis III tu Fauonium dicas meam mea non habet ...'
    """
    for transform in (disappear_round_brackets, swallow_angle_brackets,
                      swallow_square_brackets, swallow_obelized_words):
        text = transform(text)
    return text
def accept_editorial(text: str) -> str:
    """
    Accept common editorial suggestions: brace spans and obelized words are
    removed, bracket characters are dropped (their content kept), and
    ellipses become spaces.
    :param text: Text with editorial suggestions
    :return: clean text

    >>> accept_editorial("{PRO P. QVINCTIO ORATIO} Quae res in civitate trib(unus) mil(itum) leg(ionis) III tu Fauonium †asinium† dicas meam +similitudinem+ mea +ratio non habet ... ")
    'Quae res in civitate tribunus militum legionis III tu Fauonium dicas meam mea non habet  '
    """
    text = swallow_braces(text)
    text = disappear_round_brackets(text)
    text = swallow_obelized_words(text)
    for mark in ("[", "]", "<", ">"):
        text = text.replace(mark, "")
    return text.replace("...", " ")
def truecase(word: str, case_counter: Dict[str, int]):
    """
    Truecase a word using a Truecase dictionary
    :param word: a word
    :param case_counter: A counter; a dictionary of words/tokens and their relative frequency counts
    :return: the truecased word
    >>> case_counts ={"caesar": 1, "Caesar": 99}
    >>> truecase('CAESAR', case_counts)
    'Caesar'
    """
    lower_n = case_counter.get(word.lower(), 0)
    upper_n = case_counter.get(word.upper(), 0)
    title_n = case_counter.get(word.title(), 0)
    # No evidence at all: leave the word as-is.
    if not (lower_n or upper_n or title_n):
        return word
    # Pick the strictly most frequent casing; ties fall through unchanged.
    if title_n > upper_n and title_n > lower_n:
        return word.title()
    if lower_n > title_n and lower_n > upper_n:
        return word.lower()
    if upper_n > title_n and upper_n > lower_n:
        return word.upper()
    return word
def normalize_lat(
    text: str,
    drop_accents: bool = False,
    drop_macrons: bool = False,
    jv_replacement: bool = False,
    ligature_replacement: bool = False,
) -> str:
    """The function for all default Latin normalization.
    >>> text = "canō Īuliī suspensám quăm aegérrume ĭndignu îs óccidentem frúges Julius Caesar. In vino veritas. mæd prœil"
    >>> normalize_lat(text)
    'canō Īuliī suspensám quăm aegérrume ĭndignu îs óccidentem frúges Julius Caesar. In vino veritas. mæd prœil'
    >>> normalize_lat(text, drop_accents=True)
    'canō Īuliī suspensam quăm aegerrume ĭndignu is óccidentem frúges Julius Caesar. In vino veritas. mæd prœil'
    >>> normalize_lat(text, drop_accents=True, drop_macrons=True)
    'cano Iulii suspensam quăm aegerrume ĭndignu is óccidentem frúges Julius Caesar. In vino veritas. mæd prœil'
    >>> normalize_lat(text, drop_accents=True, drop_macrons=True, jv_replacement=True)
    'cano Iulii suspensam quăm aegerrume ĭndignu is óccidentem frúges Iulius Caesar. In uino ueritas. mæd prœil'
    >>> normalize_lat(text, drop_accents=True, drop_macrons=True, jv_replacement=True, ligature_replacement=True)
    'cano Iulii suspensam quăm aegerrume ĭndignu is óccidentem frúges Iulius Caesar. In uino ueritas. maed proeil'
    """
    normalized = cltk_normalize(text=text)  # type: str
    # Apply the optional transforms in their documented order:
    # macrons, accents, j/v replacement, ligature expansion.
    pipeline = (
        (drop_macrons, remove_macrons),
        (drop_accents, remove_accents),
        (jv_replacement, JV_REPLACER.replace),
        (ligature_replacement, LIGATURE_REPLACER.replace),
    )
    for enabled, transform in pipeline:
        if enabled:
            normalized = transform(normalized)
    return normalized
| {
"repo_name": "kylepjohnson/cltk",
"path": "src/cltk/alphabet/lat.py",
"copies": "4",
"size": "16990",
"license": "mit",
"hash": -7182769970856932000,
"line_mean": 34.2962184874,
"line_max": 339,
"alpha_frac": 0.6411523124,
"autogenerated": false,
"ratio": 2.914816099930604,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5555968412330604,
"avg_score": null,
"num_lines": null
} |
ALPHABET = "ATGC"
COMPLEMENT = {"A": "T", "T": "A", "G": "C", "C": "G"}


def rc(dna):
    """Return the reverse complement of the DNA string *dna*."""
    return "".join(COMPLEMENT[base] for base in reversed(dna))
def compute_distance(s1, s2):
    """Number of characters of *s2* that extend past the end of *s1*,
    given the best suffix/prefix overlap (len(s2) when none exists)."""
    if not s2:
        return 0
    # Try every occurrence of s2's first character as an overlap start,
    # beginning (as the original did) with position 0 unconditionally.
    candidate = 0
    while candidate != -1:
        overlap = len(s1) - candidate
        if s1[candidate:] == s2[:overlap]:
            return len(s2) - overlap
        candidate = s1.find(s2[0], candidate + 1)
    return len(s2)
if __name__ == '__main__':
    # Each line of the input file is one sequencing read.
    # Context manager guarantees the handle is closed (the original
    # left the file open).
    with open('data/data.dat') as handle:
        dnas = [x.strip() for x in handle.readlines()]
    # Try the largest k-mer size first; shrink until a circular string
    # containing every read (or its reverse complement) is found.
    for k in reversed(range(2, len(dnas[0]) + 1)):
        # All k-mers of the reads and of their reverse complements.
        mers = set()
        for s in dnas:
            for i in range(len(s) - k + 1):
                mers.add(s[i:i + k])
                mers.add(rc(s[i:i + k]))
        ans = None
        # Iterative DFS for a cycle in the (k-1)-overlap graph.
        stack = []
        stack.append(([], set()))
        while len(stack) > 0 and ans == None:
            path, vs = stack.pop()
            if len(path) == 0:
                # Seed the search with every k-mer as a starting node.
                for mer in mers:
                    stack.append(([mer], set([mer])))
            else:
                mer = path[-1]
                for a in ALPHABET:
                    nmer = mer[1:] + a
                    if nmer in mers and nmer != rc(mer):
                        if nmer == path[0]:
                            # The walk closed back on its start: cycle found.
                            ans = list(path)
                            break
                        elif not nmer in vs:
                            stack.append((path + [nmer], vs.union(set([nmer]))))
        if ans != None:
            # Collapse the k-mer path into a single circular string.
            output = ans[0]
            for i in range(1, len(ans)):
                output += ans[i][-1]
            output = output[:-(k - 1)]
            # Doubling the string turns circular containment into find().
            doutput = output + output
            success = True
            for dna in dnas:
                if doutput.find(dna) == -1 and doutput.find(rc(dna)) == -1:
                    success = False
            if success:
                # Fixed: `print output` was a Python 2 print statement
                # (a syntax error under Python 3).
                print(output)
                break
"repo_name": "crf1111/Bio-Informatics-Learning",
"path": "Bio-StrongHold/src/Genome_Assembly_Using_Reads.py",
"copies": "1",
"size": "1882",
"license": "mit",
"hash": 51970870166750700,
"line_mean": 22.835443038,
"line_max": 79,
"alpha_frac": 0.398512221,
"autogenerated": false,
"ratio": 3.584761904761905,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44832741257619046,
"avg_score": null,
"num_lines": null
} |
ALPHABET = [chr(i) for i in range(97, 123)]  # 'a'..'z'
ALPHABET_SIZE = len(ALPHABET)


class Mapping(dict):
    """Bidirectional map from long keys to short generated aliases.

    Maps alias -> key (as the dict itself) and key -> alias (in
    ``inverted``); aliases are 'a'..'z' then 'az', 'aa', ... .
    """

    def __init__(self):
        self.inverted = {}  # key -> alias
        self.length = 0     # number of aliases handed out so far

    def to_char(self, number):
        """Return the alias string for 1-based counter *number*."""
        # Fixed: `number / ALPHABET_SIZE` is float division on Python 3
        # and made ALPHABET[index-1] raise TypeError; floor-divide keeps
        # the original Python 2 behaviour on both versions.
        index = number // ALPHABET_SIZE
        first_char = ALPHABET[index - 1] if index else ''
        last_char = ALPHABET[(number % ALPHABET_SIZE) - 1]
        return first_char + last_char

    def add(self, key):
        """Return the alias for *key*, creating a new one if needed."""
        mapped_to = self.inverted.get(key)
        if mapped_to:
            return mapped_to
        self.length += 1
        char = self.to_char(self.length)
        self[char] = key
        self.inverted[key] = char
        return char
def uglify_json(json):
    """Recursively replace every dict key in *json* (in place) with a
    short generated alias.

    :param json: a JSON-like structure of nested dicts/lists.
    :return: dict with the alias ``Mapping`` and the rewritten object.
    """
    def uglify(value):
        # Recurse into containers; scalar values are left untouched.
        if isinstance(value, list):
            uglify_list(value)
        elif isinstance(value, dict):
            uglify_dict(value)

    def uglify_list(list_):
        for item in list_:
            uglify(item)

    def uglify_dict(dict_):
        # Snapshot the items first: the loop body pops and inserts keys,
        # which raised RuntimeError ("dictionary changed size during
        # iteration") on Python 3 with a live .items() view.
        for key, value in list(dict_.items()):
            dict_[mapping.add(key)] = dict_.pop(key)
            uglify(value)

    mapping = Mapping()
    uglify(json)
    return {
        "Mapping": mapping,
        "Objects": json
    }
| {
"repo_name": "laginha/yard",
"path": "src/yard/resources/base/uglify.py",
"copies": "1",
"size": "1227",
"license": "mit",
"hash": 3426945364969444000,
"line_mean": 24.5625,
"line_max": 58,
"alpha_frac": 0.5297473513,
"autogenerated": false,
"ratio": 3.505714285714286,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4535461637014286,
"avg_score": null,
"num_lines": null
} |
"""Alphabetic tokenizer"""
import re
from py_stringmatching import utils
from py_stringmatching.tokenizer.definition_tokenizer import DefinitionTokenizer
class AlphabeticTokenizer(DefinitionTokenizer):
    """Tokenizer that splits text into runs of consecutive letters.

    Parameters:
        return_set (boolean): flag to indicate whether to return a set of
                              tokens. (defaults to False)
    """

    def __init__(self, return_set=False):
        self.al_regex = re.compile('[a-zA-Z]+')
        super(AlphabeticTokenizer, self).__init__(return_set)

    def tokenize(self, input_string):
        """Tokenize input string into alphabetic tokens.

        An alphabetic token is a maximal run of ASCII letters.

        Args:
            input_string (str): Input string

        Returns:
            Token list (list)

        Raises:
            TypeError : If the input is not a string

        Examples:
            >>> al_tok = AlphabeticTokenizer()
            >>> al_tok.tokenize('data99science, data#integration.')
            ['data', 'science', 'data', 'integration']
            >>> al_tok.tokenize('99')
            []
            >>> al_tok = AlphabeticTokenizer(return_set=True)
            >>> al_tok.tokenize('data99science, data#integration.')
            ['data', 'science', 'integration']
        """
        utils.tok_check_for_none(input_string)
        utils.tok_check_for_string_input(input_string)
        tokens = [tok for tok in self.al_regex.findall(input_string) if tok]
        if self.return_set:
            return utils.convert_bag_to_set(tokens)
        return tokens
| {
"repo_name": "Anson-Doan/py_stringmatching",
"path": "py_stringmatching/tokenizer/alphabetic_tokenizer.py",
"copies": "1",
"size": "1672",
"license": "bsd-3-clause",
"hash": -1362291831278575900,
"line_mean": 29.962962963,
"line_max": 88,
"alpha_frac": 0.5927033493,
"autogenerated": false,
"ratio": 4.222222222222222,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5314925571522222,
"avg_score": null,
"num_lines": null
} |
ALPHABET = list("abcdefghijklmnopqrstuvwxyz")


def get_keys(dictionary, value):
    """Return every key of *dictionary* whose value equals *value*."""
    return [key for key, val in dictionary.items() if val == value]
def parse_line(line_data):
    """Return the sector ID of a real room line, or 0 for a decoy.

    A room line looks like 'name-parts-123[abcde]'; the room is real when
    the checksum equals the five most common name letters (ties broken
    alphabetically).
    """
    parts = line_data.split("-")
    name = ' '.join(parts[:-1])
    chunks = parts[-1].split("[")
    sector_id = chunks[0]
    checksum = chunks[1][:-1]
    letter_counts = {char: name.count(char) for char in ALPHABET}
    # Distinct occurrence counts, most frequent first, zero removed.
    distinct_counts = sorted(set(letter_counts.values()), reverse=True)
    distinct_counts.remove(0)
    expected_checksum = ''.join(
        ''.join(sorted(get_keys(letter_counts, count)))
        for count in distinct_counts
    )[:5]
    return int(sector_id) if expected_checksum == checksum else 0
def solve(puzzle_input):
    """Sum the sector IDs of all real rooms in the puzzle input."""
    return sum(parse_line(line) for line in puzzle_input.split("\n"))
def main():
    """Read the puzzle input from disk and print the answer."""
    # Context manager closes the handle even if solve() raises
    # (the original used open/close pairs).
    with open("puzzle_input") as f:
        puzzle_input = f.read()
    print(solve(puzzle_input))


if __name__ == "__main__":
    main()
| {
"repo_name": "Shifterovich/AoC",
"path": "2016/4/part1.py",
"copies": "1",
"size": "1212",
"license": "mit",
"hash": -8505388748147935000,
"line_mean": 20.6428571429,
"line_max": 74,
"alpha_frac": 0.603960396,
"autogenerated": false,
"ratio": 3.5232558139534884,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46272162099534886,
"avg_score": null,
"num_lines": null
} |
ALPHABET = list("abcdefghijklmnopqrstuvwxyz")


# https://github.com/Shifterovich/Crypto
def rot(string, rotInt, letters=ALPHABET):  # To decrypt, use a negative number
    """Caesar-shift each known letter of *string* by *rotInt* places;
    characters not in *letters* pass through unchanged."""
    size = len(letters)
    return ''.join(
        letters[(letters.index(ch) + rotInt) % size] if ch in letters else ch
        for ch in string
    )
def get_keys(dictionary, value):
    """Collect all keys mapped to *value*, in insertion order."""
    return [k for k in dictionary if dictionary[k] == value]
def parse_line(line_data):
    """Decrypt a real room's name; decoy rooms yield ['', ''].

    Returns [sector_id (int), decrypted name] for rooms whose checksum
    matches the five most common name letters.
    """
    parts = line_data.split("-")
    name = ' '.join(parts[:-1])
    chunks = parts[-1].split("[")
    sector_id = chunks[0]
    checksum = chunks[1][:-1]
    letter_counts = {char: name.count(char) for char in ALPHABET}
    # Distinct occurrence counts, most frequent first, zero removed.
    distinct_counts = sorted(set(letter_counts.values()), reverse=True)
    distinct_counts.remove(0)
    expected_checksum = ''.join(
        ''.join(sorted(get_keys(letter_counts, count)))
        for count in distinct_counts
    )[:5]
    if expected_checksum == checksum:
        return [int(sector_id), rot(name.replace("-", " "), int(sector_id))]
    return ['', '']
def solve(puzzle_input):
    """Return the sector ID of the room whose decrypted name matches."""
    search_term = "North".lower()
    decrypted = [parse_line(line) for line in puzzle_input.split("\n")]
    matches = [entry for entry in decrypted if search_term in entry[1]]
    # `matches` holds [sector ID, decrypted name] pairs. There should be
    # exactly one answer, but keeping the full list makes the code easy
    # to tweak; the answer is the first match's sector ID.
    return matches[0][0]
def main():
    """Read the puzzle input from disk and print the answer."""
    # Context manager closes the handle even if solve() raises
    # (the original used open/close pairs).
    with open("puzzle_input") as f:
        puzzle_input = f.read()
    print(solve(puzzle_input))


if __name__ == "__main__":
    main()
| {
"repo_name": "Shifterovich/AoC",
"path": "2016/4/part2.py",
"copies": "1",
"size": "2106",
"license": "mit",
"hash": 110855489001069800,
"line_mean": 25.325,
"line_max": 110,
"alpha_frac": 0.6177587844,
"autogenerated": false,
"ratio": 3.551433389544688,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4669192173944688,
"avg_score": null,
"num_lines": null
} |
alphabet = list(map(chr, range(97, 123)))
dataInput = "hxbxwxba"


def increment(a):
    """Return the letter after *a*, wrapping 'z' around to 'a'."""
    position = alphabet.index(a)
    return "a" if position == 25 else alphabet[position + 1]


def full_increment(a):
    """Increment password string *a*, rippling carries leftward."""
    chars = list(a)
    i = len(chars) - 1
    while True:
        chars[i] = increment(chars[i])
        if chars[i] != "a":  # no carry to propagate
            break
        i -= 1
    return ''.join(chars)
# Test `a` against the rules
# 'Passwords must include one increasing straight of at least three letters'
def rule_one(a):
    """True when *a* contains three consecutive ascending letters."""
    for i in range(len(a) - 2):
        trio = [letter_number(a[i + j]) for j in range(3)]
        if trio[1] == trio[0] + 1 and trio[2] == trio[0] + 2:
            return True
    return False


# 'Passwords may not contain the letters i, o, or l'
def rule_two(a):
    """True when *a* avoids the easily-confused letters i, o and l."""
    return not any(banned in a for banned in ("i", "o", "l"))


# 'Passwords must contain at least two different, non-overlapping pairs of letters'
def rule_three(a):
    """True when *a* has at least two non-overlapping doubled letters.

    Mirrors the original scan, which starts comparing at index 1 and
    therefore never credits a pair at the very start of the string.
    """
    pairs = 0
    j = 1
    while j < len(a) - 1:
        if a[j] == a[j + 1]:
            pairs += 1
            j += 1  # skip past the matched pair so pairs cannot overlap
        j += 1
    return pairs >= 2


def all_rules(a):
    """A candidate passes only if every rule accepts it."""
    return all(rule(a) for rule in (rule_one, rule_two, rule_three))
def letter_number(a):
    # 0-based position of letter *a* in the lowercase alphabet.
    return alphabet.index(a)
def next_password(a):
    # Step through candidates until one satisfies every password rule.
    password = a
    while not all_rules(password):
        password = full_increment(password)
    return password
print(next_password(dataInput))
"repo_name": "gytdau/advent",
"path": "Day11/part1.py",
"copies": "1",
"size": "1608",
"license": "mit",
"hash": -5578565286086075000,
"line_mean": 22.3188405797,
"line_max": 85,
"alpha_frac": 0.5939054726,
"autogenerated": false,
"ratio": 3.2354124748490944,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4329317947449094,
"avg_score": null,
"num_lines": null
} |
alphabet = list(map(chr, range(97, 123)))
dataInput = "hxbxxzaa"


def increment(a):
    """Return the letter after *a*, wrapping 'z' around to 'a'."""
    position = alphabet.index(a)
    return "a" if position == 25 else alphabet[position + 1]


def full_increment(a):
    """Increment password string *a*, rippling carries leftward."""
    chars = list(a)
    i = len(chars) - 1
    while True:
        chars[i] = increment(chars[i])
        if chars[i] != "a":  # no carry to propagate
            break
        i -= 1
    return ''.join(chars)
# Test `a` against the rules
# 'Passwords must include one increasing straight of at least three letters'
def rule_one(a):
    """True when *a* contains three consecutive ascending letters."""
    for i in range(len(a) - 2):
        trio = [letter_number(a[i + j]) for j in range(3)]
        if trio[1] == trio[0] + 1 and trio[2] == trio[0] + 2:
            return True
    return False


# 'Passwords may not contain the letters i, o, or l'
def rule_two(a):
    """True when *a* avoids the easily-confused letters i, o and l."""
    return not any(banned in a for banned in ("i", "o", "l"))


# 'Passwords must contain at least two different, non-overlapping pairs of letters'
def rule_three(a):
    """True when *a* has at least two non-overlapping doubled letters.

    Mirrors the original scan, which starts comparing at index 1 and
    therefore never credits a pair at the very start of the string.
    """
    pairs = 0
    j = 1
    while j < len(a) - 1:
        if a[j] == a[j + 1]:
            pairs += 1
            j += 1  # skip past the matched pair so pairs cannot overlap
        j += 1
    return pairs >= 2


def all_rules(a):
    """A candidate passes only if every rule accepts it."""
    return all(rule(a) for rule in (rule_one, rule_two, rule_three))
def letter_number(a):
    # 0-based position of letter *a* in the lowercase alphabet.
    return alphabet.index(a)
def next_password(a):
    # Step through candidates until one satisfies every password rule.
    password = a
    while not all_rules(password):
        password = full_increment(password)
    return password
print(next_password(dataInput))
"repo_name": "gytdau/advent",
"path": "Day11/part2.py",
"copies": "1",
"size": "1608",
"license": "mit",
"hash": -3561071318660239000,
"line_mean": 22.3188405797,
"line_max": 85,
"alpha_frac": 0.5939054726,
"autogenerated": false,
"ratio": 3.2354124748490944,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9302559538479749,
"avg_score": 0.005351681793869134,
"num_lines": 69
} |
# Spoken forms of digits and punctuation in the aviation alphabet.
ALPHABET = {
    '0': 'zero',
    '1': 'one',
    '2': 'two',
    '3': 'tree',
    '4': 'fower',
    '5': 'fife',
    '6': 'six',
    '7': 'seven',
    '8': 'ait',
    '9': 'niner',
    '-': 'minus',
    '.': 'and',
}


def aviation_numbers(number):
    """Spell *number* digit-by-digit in the aviation alphabet.

    Fixed: the module previously defined aviation_numbers twice; the
    first (loop-based) definition was dead code, immediately shadowed
    by this one.

    >>> aviation_numbers(1969)
    'one niner six niner'
    >>> aviation_numbers(31337)
    'tree one tree tree seven'
    """
    return ' '.join(ALPHABET[x] for x in str(number))
if __name__ == '__main__':
    samples = (
        1969,     # 'one niner six niner'
        31337,    # 'tree one tree tree seven'
        13.37,    # 'one tree and tree seven'
        31.337,   # 'tree one and tree tree seven'
        -1969,    # 'minus one niner six niner'
        -31.337,  # 'minus tree one and tree tree seven'
        -49.35,   # 'minus fower niner and tree fife'
    )
    for sample in samples:
        print(aviation_numbers(sample))
| {
"repo_name": "AstroTech/workshop-python",
"path": "functions/solution/functions_aviation_numbers.py",
"copies": "1",
"size": "1192",
"license": "mit",
"hash": -4713878762042674000,
"line_mean": 19.9122807018,
"line_max": 74,
"alpha_frac": 0.5553691275,
"autogenerated": false,
"ratio": 2.9215686274509802,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.397693775495098,
"avg_score": null,
"num_lines": null
} |
_LEFT = "ABCDEFGHIJKLM"
_RIGHT = "NOPQRSTUVWXYZ"

# Key letters are consumed in pairs (A/B, C/D, ...); each successive pair
# rotates the right half-alphabet one more step to the right.
alphabet = {
    letter: (_LEFT, _RIGHT[13 - i // 2:] + _RIGHT[:13 - i // 2])
    for i, letter in enumerate(_LEFT + _RIGHT)
}


def generate_table(key: str) -> list[tuple[str, str]]:
    """
    Build the per-character substitution tables for *key*.

    >>> generate_table('marvin') # doctest: +NORMALIZE_WHITESPACE
    [('ABCDEFGHIJKLM', 'UVWXYZNOPQRST'), ('ABCDEFGHIJKLM', 'NOPQRSTUVWXYZ'),
    ('ABCDEFGHIJKLM', 'STUVWXYZNOPQR'), ('ABCDEFGHIJKLM', 'QRSTUVWXYZNOP'),
    ('ABCDEFGHIJKLM', 'WXYZNOPQRSTUV'), ('ABCDEFGHIJKLM', 'UVWXYZNOPQRST')]
    """
    return [alphabet[letter] for letter in key.upper()]
def encrypt(key: str, words: str) -> str:
    """
    Encipher *words* with the Porta tables derived from *key*.

    >>> encrypt('marvin', 'jessica')
    'QRACRWU'
    """
    table = generate_table(key)
    # Cycle through the key's tables, one per plaintext character.
    return "".join(
        get_opponent(table[i % len(table)], char)
        for i, char in enumerate(words.upper())
    )
def decrypt(key: str, words: str) -> str:
    """
    >>> decrypt('marvin', 'QRACRWU')
    'JESSICA'
    """
    # The Porta cipher is an involution: enciphering twice with the same
    # key restores the plaintext, so decryption just re-encrypts.
    return encrypt(key, words)
def get_position(table: tuple[str, str], char: str) -> tuple[int, int]:
    """
    Locate *char* in *table* as a (row, column) pair.

    >>> get_position(generate_table('marvin')[0], 'M')
    (0, 12)
    """
    # The character lives in row 0 unless it only appears in row 1.
    if char in table[0]:
        return 0, table[0].index(char)
    return 1, table[1].index(char)
def get_opponent(table: tuple[str, str], char: str) -> str:
    """
    Return the substitution partner of *char*: the character in the same
    column of the other row of *table*.

    >>> get_opponent(generate_table('marvin')[0], 'M')
    'T'
    """
    # get_position only ever returns row 0 or row 1, so the original
    # trailing `else char` branch was unreachable and has been removed.
    row, col = get_position(table, char.upper())
    return table[1 - row][col]
if __name__ == "__main__":
    import doctest

    doctest.testmod()  # First ensure that all our tests are passing...
    """
    Demo:

    Enter key: marvin
    Enter text to encrypt: jessica
    Encrypted: QRACRWU
    Decrypted with key: JESSICA
    """
    # Interactive round-trip: encrypt user input, then decrypt it back.
    key = input("Enter key: ").strip()
    text = input("Enter text to encrypt: ").strip()
    cipher_text = encrypt(key, text)
    print(f"Encrypted: {cipher_text}")
    print(f"Decrypted with key: {decrypt(key, cipher_text)}")
| {
"repo_name": "TheAlgorithms/Python",
"path": "ciphers/porta_cipher.py",
"copies": "1",
"size": "3152",
"license": "mit",
"hash": -5530739248368731000,
"line_mean": 29.6019417476,
"line_max": 76,
"alpha_frac": 0.5881979695,
"autogenerated": false,
"ratio": 3.078125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41663229695,
"avg_score": null,
"num_lines": null
} |
alphabets = 'abcdefghijklmnopqrstuvwxyz'
test = '''g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj.'''
url = 'map'


def finder(letter):
    '''Locate *letter* (case-insensitively) in the alphabet.

    Returns the 0-25 index when found; otherwise returns the lowercased
    character itself, signalling "do not shift".'''
    lowered = letter.lower()
    index = alphabets.find(lowered)
    return lowered if index == -1 else index
def shifter(pos, shift):
    '''Return the letter *shift* places after alphabet position *pos*,
    wrapping around the 26-letter alphabet.'''
    return alphabets[(pos + shift) % 26]
def string_shift(in_str, num):
    '''Shift every alphabetic character of *in_str* by *num* places.

    Non-alphabetic characters pass through unchanged (lowercased by
    finder's fallback, as before).'''
    pieces = []
    for ch in in_str:
        found = finder(ch)
        # finder returns the character itself (a str) when it is not a
        # letter; only integer positions get shifted. Fixed the
        # `type(loc).__name__ != 'str'` anti-pattern with isinstance.
        if isinstance(found, str):
            pieces.append(found)
        else:
            pieces.append(shifter(found, num))
    return ''.join(pieces)
if __name__ == '__main__':
    # Show the challenge text before and after the +2 shift, then the
    # follow-up keyword for the next URL.
    print(test)
    print(string_shift(test,2))
    print(url)
    print(string_shift(url,2))
| {
"repo_name": "DayGitH/Python-Challenges",
"path": "PythonChallenge/ch1.py",
"copies": "1",
"size": "1496",
"license": "mit",
"hash": -653520523575267200,
"line_mean": 28.3333333333,
"line_max": 216,
"alpha_frac": 0.5755347594,
"autogenerated": false,
"ratio": 3.4709976798143853,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4546532439214385,
"avg_score": null,
"num_lines": null
} |
# Printable-ASCII symbol set and three rotor permutations (identity at start).
alphabets = [chr(i) for i in range(32, 126)]
gear_one = [i for i in range(len(alphabets))]
gear_two = [i for i in range(len(alphabets))]
gear_three = [i for i in range(len(alphabets))]
reflector = [i for i in reversed(range(len(alphabets)))]
code = []  # accumulated output characters
gear_one_pos = gear_two_pos = gear_three_pos = 0


def rotator():
    """Advance gear one by one step, carrying into gears two and three.

    Cleanup: dropped the redundant int(len(...)) casts and replaced the
    temp-variable + del head-removal dance with list.pop(0).
    """
    global gear_one_pos
    global gear_two_pos
    global gear_three_pos
    # One rotation step = move the leading wiring entry to the back.
    gear_one.append(gear_one.pop(0))
    gear_one_pos += 1
    # A full revolution of a gear steps the next gear once.
    if gear_one_pos % len(alphabets) == 0:
        gear_two.append(gear_two.pop(0))
        gear_two_pos += 1
        if gear_two_pos % len(alphabets) == 0:
            gear_three.append(gear_three.pop(0))
            gear_three_pos += 1
def engine(input_character):
    """Encipher one character, append it to `code`, then step the gears."""
    signal = alphabets.index(input_character)
    # Forward pass through the three gears.
    for gear in (gear_one, gear_two, gear_three):
        signal = gear[signal]
    signal = reflector[signal]
    # Return pass: invert each gear in reverse order.
    for gear in (gear_three, gear_two, gear_one):
        signal = gear.index(signal)
    code.append(alphabets[signal])
    rotator()
if __name__ == "__main__":
    decode = list(input("Type your message:\n"))
    while True:
        try:
            token = int(input("Please set token:(must be only digits)\n"))
            break
        except ValueError as error:
            # Fixed: the original `except Exception` also swallowed
            # EOFError and looped forever on closed stdin; only retry on
            # genuinely bad (non-integer) input.
            print(error)
    # Pre-spin the gears `token` times so decryption needs the same token.
    for i in range(token):
        rotator()
    for j in decode:
        engine(j)
    print("\n" + "".join(code))
    print(
        f"\nYour Token is {token} please write it down.\nIf you want to decode "
        f"this message again you should input same digits as token!"
    )
| {
"repo_name": "TheAlgorithms/Python",
"path": "hashes/enigma_machine.py",
"copies": "1",
"size": "1705",
"license": "mit",
"hash": -4619744488591786000,
"line_mean": 27.8983050847,
"line_max": 80,
"alpha_frac": 0.5865102639,
"autogenerated": false,
"ratio": 3.105646630236794,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4192156894136794,
"avg_score": null,
"num_lines": null
} |
# 32-letter Ukrainian alphabet plus '_' as the 33rd symbol.
alphabet = "абвгдеєжзиіїйклмнопрстуфхцчшщьюя_"
# Each rotor is a triple of parallel rows used for lookups:
# [symbol row, 1-based position row, permutation row].
# NOTE(review): rot11 and rot22 are alternative wirings that are never
# referenced below — apparently kept for experimentation.
rot11 = [
    alphabet,
    range(1, len(alphabet) + 1),
    [5,7,17,23,28,26,20,21,2,19,13,32,12,9,16,11,10,25,24,8,4,27,3,31,15,14,33,18,1,29,22,30,6]
]
rot22 = [
    range(1, len(alphabet) + 1),
    [20,19,25,22,27,8,13,29,30,12,32,24,1,31,6,7,17,26,2,28,11,4,23,14,16,5,9,18,10,21,3,33,15],
    alphabet
]
rot1 = [
    alphabet,
    range(1, len(alphabet) + 1),
    [8, 15, 29, 23, 22, 7, 1, 28, 5, 32, 27, 9, 20, 14, 21, 19, 6, 13, 26, 17, 10, 4, 25, 12, 18, 3, 30, 33, 16, 11, 24, 31, 2]
]
rot2 = [
    range(1, len(alphabet) + 1),
    [22, 14, 26, 19, 8, 33, 7, 10, 5, 17, 21, 6, 15, 3, 1, 29, 4, 24, 25, 31, 11, 18, 32, 28, 20, 27, 12, 23, 9, 16, 30, 2, 13],
    alphabet
]
def rotor_encrypt(word, start_shift):
    # Encrypt *word* through rotor 1, a moving Caesar shift, and rotor 2.
    # The shift starts at *start_shift* and grows by one per character.
    # Prints a trace table of the intermediate signal positions.
    print("sh s1 c1 p1 c2 p2 s2")
    print("--------------------------")
    sh = start_shift
    res = ""
    for letter in word:
        s1 = letter
        # Rotor 1: plaintext symbol -> contact number -> permuted position.
        c1 = rot1[1][rot1[0].index(s1)]
        p1 = rot1[1][rot1[2].index(c1)]
        # Positions are 1-based, so a modulus of 0 wraps to 33.
        c2 = (p1 + sh) % len(alphabet)
        if c2 == 0:
            c2 = 33
        # Rotor 2: shifted position -> contact -> ciphertext symbol.
        p2 = rot2[0][rot2[1].index(c2)]
        s2 = rot2[2][rot2[0].index(p2)]
        print("%2d %s %2d %2d %2d %2d %s" % (sh, s1, c1, p1, c2, p2, s2))
        sh += 1
        res += s2
    return res
def rotor_decrypt(word):
    # Brute-force decryption: undo rotor_encrypt for every possible
    # starting shift and print each candidate plaintext.
    decryptions = []
    for sh in range(len(alphabet)):
        res = ""
        cs = sh  # current (per-letter, increasing) shift
        for letter in word:
            s2 = letter
            # Invert rotor 2: ciphertext symbol -> position -> contact.
            p2 = rot2[0][rot2[2].index(s2)]
            c2 = rot2[1][rot2[0].index(p2)]
            # Undo the moving Caesar shift (1-based wrap: 0 -> 33).
            p1 = (c2 - cs) % len(alphabet)
            if p1 == 0:
                p1 = 33
            # Invert rotor 1: position -> contact -> plaintext symbol.
            c1 = rot1[2][rot1[1].index(p1)]
            s1 = rot1[0][rot1[1].index(c1)]
            res += s1
            cs += 1
        decryptions.append("Shift: %2d, decryption: %s\n(run rotor_encrypt('%s', %d) for print checking)\n" % (sh, res, res, sh))
    print("\n".join(decryptions))
# rotor_decrypt("рьіхьхьрттх")
rotor_encrypt('метро', 28)
# rotor_decrypt('о_ясо')
"repo_name": "Fly-Style/metaprog_univ",
"path": "Lab1/LearnPy/test.py",
"copies": "1",
"size": "1942",
"license": "mit",
"hash": -672354941539923800,
"line_mean": 27.223880597,
"line_max": 128,
"alpha_frac": 0.5201058201,
"autogenerated": false,
"ratio": 2.1140939597315436,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8004865579951683,
"avg_score": 0.025866839975971955,
"num_lines": 67
} |
"""Alpha combination models."""
import copy
import numpy as np
from mingle.utilities.simulation_utilities import combine_spectra
def alpha_model(alpha, rv, host, companion, limits, new_x=None):
    """Entangled spectrum model: host plus Doppler-shifted companion.

    The companion is RV-shifted and added to the host with flux ratio
    ``alpha``; the result is optionally spline-interpolated onto
    ``new_x`` and trimmed to ``limits``.

    :param alpha: flux ratio of the companion relative to the host.
    :param rv: radial-velocity offset applied to the companion.
    :param host: host spectrum (Spectrum-like object).
    :param companion: companion spectrum (Spectrum-like object).
    :param limits: (lower, upper) wavelength bounds kept in the output.
    :param new_x: optional wavelength axis to interpolate onto,
        e.g. observation.xaxis.
    :return: combined Spectrum object.
    """
    # this copy solved my nan issue.
    companion = copy.copy(companion)
    host = copy.copy(host)
    companion.doppler_shift(rv)
    combined = combine_spectra(host, companion, alpha)
    # NOTE(review): np.any(new_x) is falsy for an all-zero axis as well
    # as for None, so such an axis silently skips interpolation — confirm
    # that callers never pass all-zero axes.
    if np.any(new_x):
        combined.spline_interpolate_to(new_x)
    combined.wav_select(*limits)
    # observation.wav_select(2100, 2200)
    return combined
# alpha_model2 is from Chisqr_of_obersvation.py, instrumented with NaN
# checks to find where an all-NaN result first appears.
def alpha_model2(alpha, rv, host, companion, limits, new_x=None):
    """Entangled spectrum model with NaN diagnostics at each stage.

    Same contract as ``alpha_model`` but uses linear interpolation
    (interpolate1d_to) instead of spline, and prints a message whenever
    an intermediate spectrum becomes entirely NaN.

    :param alpha: flux ratio of the companion relative to the host.
    :param rv: radial-velocity offset applied to the companion.
    :param host: host spectrum (Spectrum-like object).
    :param companion: companion spectrum (Spectrum-like object).
    :param limits: (lower, upper) wavelength bounds kept in the output.
    :param new_x: optional wavelength axis to interpolate onto.
    :return: combined Spectrum object.
    """
    # this copy solved my nan issue.
    companion = copy.copy(companion)
    host = copy.copy(host)
    if np.all(np.isnan(companion.flux)):
        print("companion spectrum is all Nans before RV shift")
    if np.all(np.isnan(host.flux)):
        print("Host spectrum is all Nans before combine")
    companion.doppler_shift(rv)
    if np.all(np.isnan(companion.flux)):
        print("companion spectrum is all Nans after RV shift")
    combined = combine_spectra(host, companion, alpha)
    if np.all(np.isnan(combined.flux)):
        print("Combined spectrum is all Nans before interpolation")
    if np.any(new_x):
        # print(new_x)
        # combined.spline_interpolate_to(new_x)
        combined.interpolate1d_to(new_x)
    if np.all(np.isnan(combined.flux)):
        print("Combined spectrum is all Nans after interpolation")
    combined.wav_select(limits[0], limits[1])
    # observation.wav_select(2100, 2200)
    if np.all(np.isnan(combined.flux)):
        print("Combined spectrum is all Nans after wav_select")
    return combined
def double_shifted_alpha_model(alpha, rv1, rv2, host, companion, limits, new_x=None):
    """Entangled spectrum model with independent RV shifts for both stars.

    Like ``alpha_model`` but Doppler-shifts the host by ``rv1`` and the
    companion by ``rv2`` before combining with flux ratio ``alpha``.

    :param alpha: flux ratio of the companion relative to the host.
    :param rv1: radial-velocity offset applied to the host.
    :param rv2: radial-velocity offset applied to the companion.
    :param host: host spectrum (Spectrum-like object).
    :param companion: companion spectrum (Spectrum-like object).
    :param limits: (lower, upper) wavelength bounds kept in the output.
    :param new_x: optional wavelength axis to interpolate onto,
        e.g. observation.xaxis.
    :return: combined Spectrum object.
    """
    # Copies keep the callers' spectra unmodified (shifts mutate in place).
    companion = copy.copy(companion)
    host = copy.copy(host)
    host.doppler_shift(rv1)
    companion.doppler_shift(rv2)
    combined = combine_spectra(host, companion, alpha)
    # NOTE(review): np.any(new_x) skips interpolation for an all-zero
    # axis as well as for None — confirm callers never pass such axes.
    if np.any(new_x):
        combined.spline_interpolate_to(new_x)
    combined.wav_select(limits[0], limits[1])
    # observation.wav_select(2100, 2200)
    return combined
| {
"repo_name": "jason-neal/companion_simulations",
"path": "obsolete/models/alpha_model.py",
"copies": "1",
"size": "2999",
"license": "mit",
"hash": -3965605230734573000,
"line_mean": 25.7767857143,
"line_max": 85,
"alpha_frac": 0.6692230744,
"autogenerated": false,
"ratio": 3.524089306698002,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.969279327810797,
"avg_score": 0.00010382059800664452,
"num_lines": 112
} |
"""alpha-Compositing of images.
Implements algorithms developed in
Porter, Thomas & Duff, Tom (1984), "Compositing Digital Images",
SIGGRAPH Comput. Graph.,
doi:10.1145/964965.808606
Smith, Alvy Ray (1995), "Image Compositing Fundamentals",
Microsoft Tech Memo 4,
http://www.cs.princeton.edu/courses/archive/fall00/cs426/papers/smith95a.pdf
For an example, open the Ipython notebook compositing.ipynb. Either
run locally by launching
ipython notebook
and then opening the compositing.ipynb file in the browser window that
just popped up, or open compositing.ipynb in the on-line Ipython
notebook viewer (nbviewer):
http://nbviewer.ipython.org/
More instructions on Ipython Notebook are in:
http://ipython.org/ipython-doc/1/interactive/notebook.html
For usage of the classes provided in this module, see docstrings of
classes Cube, FitsFile, Image, and Compositing.
This module composits RGBA images from an arbitrary number of
monochromatic (MC) 2d-intensity/brightness maps. For each MC image
'slice', any color can be specified separately, using standard
pylab/matplotlib notation, e.g. 'r' (red) or 'blue' or '0.5' (middle
gray) or (0.,1.,0.) (green in RGB notation), or #ff0000 (red in
hex). It is recommended to use "pure" RGB colors, i.e. r,g, or b, but
any valid color specification is accepted. Be aware though that using
"mixed" colors can result in unexpected final rendition of the
composite image. Experiment a bit!
For each slice, a separate value of alpha (opaqueness) can be
specified. However, in many cases it is advisable to leave the
assignment of alpha values to the code. In that case, the alpha of the
k-th layer (counted from the bottom layer) will be 1/k. This
dependence ensures that for the observer looking down through all the
N layers, each layer contributes 1/N-th of the total visibility.
1st 2nd ... n-th <-- k-th layer
| | | |
| | | |
|---|---|---|----> Observer
| | | |
| | | |
a1 a2 ... an <-- alpha of k-th layer (opaqueness)
Visible of k-th layer by the observer:
layer total transmitted part at observer's site
 1       a1 * (1-a2) * ... * (1-an)
 2       a2 * (1-a3) * ... * (1-an)
 ...
 n       an
If all layers are to contribute with equal strength to the total
signal (i.e. single-layer strength is 1/N), then it follows from the
above equations that a1 = 1, a2 = 1/2, a3 = 1/3, ..., an = 1/N. Thus,
the total transmitted contribution tr(k) from the k-th layer is a
harmonic series with
tr(k) = \sum_{n=k}^{n=N} 1/n.
However, if you want to bring out the features in a layer more, you
can specify the alphas for the individual layers in a list (see
docstring of Compositing.__call__).
"""
__author__ = "Robert Nikutta <robert.nikutta@gmail.com"
__version__ = "20200912"
import numpy as N
import astropy.io.fits as pf
import os
import matplotlib
class Cube():

    def __init__(self, data, slices=(0, 1, 2)):
        """3d representation of image slices to blend.

        Parameters
        ----------
        data : 3d array
            data.shape = (nz,ny,nx)-style stack of image 'slices'. This
            is the original data set, and need not be identical to the
            one used for blending / compositing (see 'slices').

        slices : int, sequence of ints, or None
            Indices of the image slices from data selected for
            compositing, in blend order (first at the bottom, last on
            top). A single int selects one slice; None selects all.
        """
        self.data = data
        self.update_slices(slices)

    def update_slices(self, slices):
        """Re-select the slices used for compositing and rebuild the cube."""
        if slices is None:
            slices = range(self.data.shape[0])
        else:
            if isinstance(slices, int):
                # BUGFIX: the original did `slices = list(slices)`, which
                # raises TypeError for a bare int; wrap it in a list.
                slices = [slices]
            if not isinstance(slices, (list, tuple, N.ndarray)):
                raise Exception("slices is neither None, nor an integer, nor a sequence of integers.")
        self.slices = slices
        self.__update_cube()

    def __update_cube(self):
        # Fancy-index the selected planes; keeps a leading slice axis.
        self.cube = self.data[self.slices, :, :]
class FitsFile():
    """Thin reader for a FITS cube on disk.

    Exposes the header as .hdr, the axis sizes NAXIS1..3 as .nx, .ny,
    .nw, and the pixel data as .data (read via astropy.io.fits).
    """

    def __init__(self, path):
        # Guard clause: bail out early on a non-file path.
        if not os.path.isfile(path):
            raise Exception("'path' is not a regular file or is missing.")
        try:
            self.hdr = pf.getheader(path)
        except IOError:
            raise Exception("Opening FITS file %s failed." % path)
        # Axis sizes straight from the FITS header.
        self.nw = self.hdr['NAXIS3']
        self.ny = self.hdr['NAXIS2']
        self.nx = self.hdr['NAXIS1']
        self.data = pf.getdata(path)
class Image():

    def __init__(self, data, color, alpha=1., normalize=True):
        """Represent a 2d monochromatic image as RGBA cube, with
        pre-multiplied color channels.

        Parameters
        ----------
        data : 2d array
            A monochromatic image / brightness map of shape (ny,nx).
            Will be alpha-blended with others.

        color : str or 3-tuple
            Any valid pylab designation of a color to be used for this
            2d image. E.g. 'r', 'blue', '0.5', '#ffb500', or (0.,1.,0)
            (the latter is an RGB 3-tuple).

        alpha : float
            The alpha value (opaqueness) of this image. Between
            0. (fully transparent) and 1.0 (fully opaque).

        normalize : bool
            If True, 'data' will be normalized to its peak value first.
        """
        self.color = color
        self.alpha = alpha
        self.alpha_premultiplied = False
        self.data = data
        self.ny, self.nx = self.data.shape
        if normalize:
            # Non-inplace division: avoids mutating the caller's array and
            # avoids failure/truncation on integer input arrays (the
            # original used in-place /=).
            self.data = self.data / self.data.max()
        self.construct_rgba()

    def construct_rgb(self):
        """Build the (ny,nx,3) RGB array: data scaled by the color's RGB tuple."""
        self.rgb_tuple = matplotlib.colors.colorConverter.to_rgb(self.color)  # e.g. color='r'
        self.rgb = N.ones((self.ny, self.nx, 3), dtype=N.float64)
        for channel in range(3):
            # Scale the brightness map into each color channel.
            self.rgb[:, :, channel] = self.data[...] * self.rgb_tuple[channel]

    def construct_rgba(self):
        """Stack RGB with a constant alpha plane and premultiply."""
        self.construct_rgb()
        # BUG FIX: the alpha plane must have shape (ny,nx) to match a
        # single RGB channel; the original used (nx,ny), which broke
        # N.dstack for any non-square image.
        self.alpha_map = N.ones((self.ny, self.nx), dtype=N.float64) * self.alpha
        self.rgba = N.dstack((self.rgb, self.alpha_map))
        self.alpha_premultiply()

    def alpha_premultiply(self):
        """alpha-premultiply the color channels (alpha channel untouched)."""
        for channel in range(3):
            self.rgba[:, :, channel] *= self.alpha_map
        self.alpha_premultiplied = True  # to be used in future versions

    def __call__(self):
        """Nothing yet."""
        pass
class Compositing():

    def __init__(self, obj):
        """Initialize class with 'obj'.

        obj has at least a Cube() instance in obj.cube. See class
        ClumpyFile() or simply class Cube() for an example.
        """
        self.obj = obj

    def __setup(self):
        """Reset some init parameters, to allow for composite image
        re-computation without having to re-compute the Cube() instance.
        """
        self.cube = self.obj.cube.copy()
        self.nz, self.ny, self.nx = self.obj.cube.shape  # important: retain the shape of the original cube
        self.images = []

    def __call__(self, colors, alphas=None, bgcolor='black', pre_multiply=True,
                 normalize_cube=True, normalize_slices=True):
        """Compute the correctly alpha-blended composite image of N
        monochromatic 2d image slices.

        Parameters
        ----------
        colors : list
            Sequence of color specifications for all 2d images to be
            alpha-blended. Any color specification valid in pylab is
            permitted, e.g. 'r', 'green', '0.5', '#ffb500', etc., but
            the use of "pure" RGB colors is recommended for more
            predictable compositing. If self.obj.cube has N image
            slices to blend, len(colors) must be N. Example:
            colors=['r','g','b'].

        alphas : list or None (default)
            If None (default), the alphas (values of opaqueness) of
            each layer will be computed automatically such that each
            layer contributes 1/N to the final image. This is
            recommended in most cases (see docstring of this
            module). However, if you want to bring the features of some
            layers out more, you can specify arbitrary alpha values per
            layer in a list. All alpha values must be between 0. (fully
            transparent) and 1. (fully opaque).

        bgcolor : str
            Allows to specify a custom background color for the final
            image. Black is default and looks good; other colors may
            look a bit washed-out, because the monochromatic background
            image is also properly blended with the final
            background-free composite.

        pre_multiply : bool
            If True (default), the images are pre-multiplied with their
            alphas. This allows for easier/cleaner and more intuitive
            computation of the composite images. See the two literature
            references for details.

        normalize_cube : bool
            If True (default), the cube of 2d images to be blended is
            first normalized to the global peak. Recommended.

        normalize_slices : bool
            If True (default), every single 2d image slice is
            normalized individually first. This is recommended to bring
            out the features in each slice more, but if the individual
            images in .cube are in some physical relation to each other
            (e.g. the brightness maps of a sky object in several
            wavebands), you may want to preserve the images' relative
            strengths in the final image. In that case set
            normalize_slices=False. Note that if the difference between
            individual images is too large (e.g. logarithmic), the
            faint slices may be invisible in the final image (unless
            you tweak their individual alpha values).

        Returns
        -------
        Nothing. The final composite image is in self.image, and is an
        RGBA array. It can be displayed via:
            pylab.imshow(self.image)
        """
        self.__setup()
        self.colors = colors
        if len(self.colors) != self.nz:
            raise Exception("Number of provided colors must match number of images.")
        if normalize_cube:
            self.cube /= self.cube.max()  # normalize to cube's maximum
        # Background frame.
        # BUG FIX: Image expects data.shape == (ny,nx); the original
        # passed (nx,ny), which broke non-square images.
        self.img_bg = Image(N.ones((self.ny, self.nx)), bgcolor, 1.)
        self.images = [self.img_bg]
        # Set alphas of all layers.
        # BUG FIX: identity test instead of '== None' (the latter is
        # ambiguous if a numpy array is ever passed).
        if alphas is None:
            alphas = [1/float(j+2) for j in range(self.nz)]  # +2 because a background 'image' is added
        # Generate RGBA representations of all slices to be blended.
        for j in range(self.nz):
            Image_ = Image(self.cube[j, :, :], self.colors[j],
                           alpha=alphas[j], normalize=normalize_slices)
            self.images.append(Image_)
        # alpha-compositing
        self.compose()
        # output adjustment
        self.stretch_contrast()

    def over_premultiplied(self, A, B):
        """Composite image of 'B over A', if both A and B have
        alpha-premultiplied colors.

        The formula for the resulting composite image is (see docstring
        of this module for literature references):

            C = B + A - beta*A

        where beta is the alpha-channel of the above image B
        (i.e. beta = B[...,-1]). This formula is valid for both the 3
        color channels and the alpha channel.

        Parameters
        ----------
        A : RGBA array, i.e. (ny,nx,4)
            image below (with alpha-premultiplied colors)

        B : RGBA array, i.e. (ny,nx,4)
            image above (with alpha-premultiplied colors)
        """
        # Currently the alpha is a single value per image, but in
        # future we may want to compute images with variable
        # alphas. Thus: array ops. Work on a copy so A is not mutated.
        beta = B[..., -1]
        aux = A.copy()
        for j in range(4):
            aux[:, :, j] *= beta[:, :]
        C = B + A - aux
        return C

    def compose(self):
        """alpha-compositing of all images, using over_premultiplied().

        Starts with the "bottom" image (first image), and progressively
        blends the next higher image, until the list of images is
        exhausted.
        """
        self.image = self.images[0].rgba
        for j in range(1, len(self.images)):
            self.image = self.over_premultiplied(self.image, self.images[j].rgba)

    def stretch_contrast(self):
        """Stretch contrast of the final image.

        Normalize all 3 color channels to the global intensity maximum.
        NOTE(review): an all-black composite (norm == 0) would divide
        by zero here -- confirm whether that input is possible upstream.
        """
        norm = self.image[..., :-1].max()
        self.image[..., :-1] /= norm
# HELPER FUNCTIONS
def simplecube(*images):
    """Build a Compositing instance from a sequence of 2D arrays.

    Parameters
    ----------
    *images : seq of 2D arrays
        Each array in the sequence is one monochromatic image slice.
        They all have to have the same shape. The number of slices can
        be arbitrary.

    Returns
    -------
    c : instance
        An instance of the Compositing class, which can then be used to
        compute and display alpha-composited images of the slice cube.

    Example
    -------
    from numpy import zeros
    from pylab import imshow
    s = (2,2)
    r = zeros(s); g = zeros(s); b = zeros(s); y = zeros(s)
    r[0,0] = g[1,0] = b[1,1] = y[0,1] = 1
    c = simplecube(r,g,b,y)
    c(['r','g','b','y'],bgcolor='k',normalize_slices=True)
    imshow(c.image,interpolation='none')
    """
    stack = N.array(images)
    slice_ids = list(range(len(images)))
    return Compositing(Cube(stack, slices=slice_ids))
| {
"repo_name": "rnikutta/compositing",
"path": "compositing.py",
"copies": "1",
"size": "14533",
"license": "bsd-3-clause",
"hash": -5802967842919820000,
"line_mean": 29.7251585624,
"line_max": 118,
"alpha_frac": 0.6014587491,
"autogenerated": false,
"ratio": 3.9151400862068964,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.998879186902098,
"avg_score": 0.0055613932571831675,
"num_lines": 473
} |
"""`AlphaIMS`, `AlphaAMS`"""
import numpy as np
from collections import OrderedDict
from .base import ProsthesisSystem
from .electrodes import SquareElectrode, DiskElectrode
from .electrode_arrays import ElectrodeGrid
class AlphaIMS(ProsthesisSystem):
    """Alpha-IMS

    This class creates an Alpha-IMS array with 1500 photovoltaic pixels (each
    50um in diameter) as described in [Stingl2013]_, and places it in the
    subretinal space, such that the center of the array is located at (x,y,z),
    given in microns, and the array is rotated by rotation angle ``rot``,
    given in degrees.

    The device consists of 1500 50um-wide square pixels, arranged on a 39x39
    rectangular grid with 72um pixel pitch.

    The array is oriented upright in the visual field, such that an
    array with center (0,0) has the top three rows lie in the lower
    retina (upper visual field).

    An electrode can be addressed by name, row/column index, or integer index
    (into the flattened array).

    .. note::

        Column order is reversed in a left-eye implant.

    Parameters
    ----------
    x/y/z : double
        3D location of the center of the electrode array.
        The coordinate system is centered over the fovea.
        Positive ``x`` values move the electrode into the nasal retina.
        Positive ``y`` values move the electrode into the superior retina.
        Positive ``z`` values move the electrode away from the retina into the
        vitreous humor (sometimes called electrode-retina distance).
        ``z`` can either be a list with 1500 entries or a scalar that is applied
        to all electrodes.
    rot : float
        Rotation angle of the array (deg). Positive values denote
        counter-clock-wise (CCW) rotations in the retinal coordinate
        system.
    eye : {'RE', 'LE'}, optional
        Eye in which array is implanted.
    preprocess : bool or callable, optional
        Either True/False to indicate whether to execute the implant's default
        preprocessing method whenever a new stimulus is assigned, or a custom
        function (callable).
    safe_mode : bool, optional
        If safe mode is enabled, only charge-balanced stimuli are allowed.

    Examples
    --------
    Create an Alpha-IMS array centered on the fovea, at 100um distance from
    the retina, rotated counter-clockwise by 5 degrees:

    >>> from pulse2percept.implants import AlphaIMS
    >>> AlphaIMS(x=0, y=0, z=100, rot=5)  # doctest: +NORMALIZE_WHITESPACE
    AlphaIMS(earray=ElectrodeGrid, eye='RE', preprocess=True,
             safe_mode=False, shape=(39, 39), stim=None)

    Get access to the third electrode in the top row (by name or by row/column
    index):

    >>> alpha_ims = AlphaIMS(x=0, y=0, z=100, rot=0)
    >>> alpha_ims['A3']  # doctest: +NORMALIZE_WHITESPACE
    SquareElectrode(a=50.0, activated=True, name='A3',
                    x=-1224.0, y=-1368.0, z=100.0)
    >>> alpha_ims[0, 2]  # doctest: +NORMALIZE_WHITESPACE
    SquareElectrode(a=50.0, activated=True, name='A3',
                    x=-1224.0, y=-1368.0, z=100.0)
    """
    # Frozen class: User cannot add more class attributes
    __slots__ = ('shape',)

    def __init__(self, x=0, y=0, z=-100, rot=0, eye='RE', stim=None,
                 preprocess=True, safe_mode=False):
        self.eye = eye
        self.preprocess = preprocess
        self.safe_mode = safe_mode
        # 39x39 grid; 21 corner electrodes are removed below -> 1500 pixels.
        self.shape = (39, 39)
        elec_width = 50.0  # um
        e_spacing = 72.0  # um
        # The user might provide a list of z values for each of the
        # 1500 resulting electrodes, not for the 39x39 = 1521 initial ones.
        # In this case, don't pass it to ElectrodeGrid, but overwrite
        # the z values later:
        overwrite_z = isinstance(z, (list, np.ndarray))
        zarr = -100.0 if overwrite_z else z
        self.earray = ElectrodeGrid(self.shape, e_spacing, x=x, y=y, z=zarr,
                                    rot=rot, etype=SquareElectrode,
                                    a=elec_width)
        # Unfortunately, in the left eye the labeling of columns is reversed...
        if eye == 'LE':
            # FIXME: Would be better to have more flexibility in the naming
            # convention. This is a quick-and-dirty fix:
            names = self.earray.electrode_names
            objects = self.earray.electrode_objects
            names = np.array(names).reshape(self.earray.shape)
            # Reverse column names:
            for row in range(self.earray.shape[0]):
                names[row] = names[row][::-1]
            # Build a new ordered dict (same electrode objects, new names):
            electrodes = OrderedDict([])
            for name, obj in zip(names.ravel(), objects):
                electrodes.update({name: obj})
            # Assign the new ordered dict to earray:
            self.earray._electrodes = electrodes
        # Remove electrodes (21 superfluous grid corners/edges):
        extra_elecs = ['AM39', 'AL39', 'AK39', 'AJ39', 'AI39', 'AH39', 'AG39',
                       'AF39', 'AE39', 'AD39', 'AC39',
                       'AM38', 'AL38', 'AK38', 'AJ38', 'AI38', 'AH38', 'AG38',
                       'AF38', 'AE38', 'AD38']
        for elec in extra_elecs:
            self.earray.remove_electrode(elec)
        # Now that the superfluous electrodes have been deleted, adjust the
        # z values:
        if overwrite_z:
            # Specify different height for every electrode in a list:
            z_arr = np.asarray(z).flatten()
            if z_arr.size != self.n_electrodes:
                raise ValueError("If `z` is a list, it must have %d entries, "
                                 "not %d." % (self.n_electrodes, z_arr.size))
            for elec, z_elec in zip(self.earray.electrode_objects, z):
                elec.z = z_elec
        # Beware of race condition: Stim must be set last, because it requires
        # indexing into self.electrodes:
        self.stim = stim

    def _pprint_params(self):
        """Return dict of class attributes to pretty-print"""
        params = super()._pprint_params()
        params.update({'shape': self.shape})
        return params
class AlphaAMS(ProsthesisSystem):
    """Alpha-AMS

    This class creates an Alpha-AMS array with 1600 photovoltaic pixels (each
    30um in diameter) as described in [Stingl2017]_, and places it in the
    subretinal space, such that the center of the array is located at (x,y,z),
    given in microns, and the array is rotated by rotation angle ``rot``,
    given in degrees.

    The device consists of 1600 30um-wide round pixels, arranged on a 40x40
    rectangular grid with 70um pixel pitch.

    The array is oriented upright in the visual field, such that an
    array with center (0,0) has the top three rows lie in the lower
    retina (upper visual field), as shown below:

    An electrode can be addressed by name, row/column index, or integer index
    (into the flattened array).

    .. note::

        Column order is reversed in a left-eye implant.

    Parameters
    ----------
    x/y/z : double
        3D location of the center of the electrode array.
        The coordinate system is centered over the fovea.
        Positive ``x`` values move the electrode into the nasal retina.
        Positive ``y`` values move the electrode into the superior retina.
        Positive ``z`` values move the electrode away from the retina into the
        vitreous humor (sometimes called electrode-retina distance).
        ``z`` can either be a list with 1600 entries or a scalar that is applied
        to all electrodes.
    rot : float
        Rotation angle of the array (deg). Positive values denote
        counter-clock-wise (CCW) rotations in the retinal coordinate
        system.
    eye : {'RE', 'LE'}, optional
        Eye in which array is implanted.
    preprocess : bool or callable, optional
        Either True/False to indicate whether to execute the implant's default
        preprocessing method whenever a new stimulus is assigned, or a custom
        function (callable).
    safe_mode : bool, optional
        If safe mode is enabled, only charge-balanced stimuli are allowed.

    Examples
    --------
    Create an AlphaAMS array centered on the fovea, at 100um distance from
    the retina, rotated counter-clockwise by 5 degrees:

    >>> from pulse2percept.implants import AlphaAMS
    >>> AlphaAMS(x=0, y=0, z=100, rot=5)  # doctest: +NORMALIZE_WHITESPACE
    AlphaAMS(earray=ElectrodeGrid, eye='RE', preprocess=True,
             safe_mode=False, shape=(40, 40), stim=None)

    Get access to the third electrode in the top row (by name or by row/column
    index):

    >>> alpha_ims = AlphaAMS(x=0, y=0, z=100, rot=0)
    >>> alpha_ims['A3']  # doctest: +NORMALIZE_WHITESPACE
    DiskElectrode(activated=True, name='A3', r=15.0, x=-1225.0,
                  y=-1365.0, z=100.0)
    >>> alpha_ims[0, 2]  # doctest: +NORMALIZE_WHITESPACE
    DiskElectrode(activated=True, name='A3', r=15.0, x=-1225.0,
                  y=-1365.0, z=100.0)
    """
    # Frozen class: User cannot add more class attributes
    __slots__ = ('shape',)

    def __init__(self, x=0, y=0, z=0, rot=0, eye='RE', stim=None,
                 preprocess=True, safe_mode=False):
        self.eye = eye
        self.preprocess = preprocess
        self.safe_mode = safe_mode
        self.shape = (40, 40)
        elec_radius = 15.0  # um
        e_spacing = 70.0  # um
        self.earray = ElectrodeGrid(self.shape, e_spacing, x=x, y=y, z=z,
                                    rot=rot, etype=DiskElectrode,
                                    r=elec_radius)
        # Set left/right eye:
        # Unfortunately, in the left eye the labeling of columns is reversed...
        if eye == 'LE':
            # FIXME: Would be better to have more flexibility in the naming
            # convention. This is a quick-and-dirty fix:
            names = self.earray.electrode_names
            objects = self.earray.electrode_objects
            names = np.array(names).reshape(self.earray.shape)
            # Reverse column names:
            for row in range(self.earray.shape[0]):
                names[row] = names[row][::-1]
            # Build a new ordered dict:
            electrodes = OrderedDict([])
            for name, obj in zip(names.ravel(), objects):
                electrodes.update({name: obj})
            # Assign the new ordered dict to earray:
            self.earray._electrodes = electrodes
        # BUG FIX / consistency with AlphaIMS: stim must be assigned *after*
        # the left-eye electrode relabeling above, because assigning a
        # stimulus indexes into self.electrodes. The original assigned it
        # before the relabeling, contradicting its own "must be set last"
        # note.
        self.stim = stim

    def _pprint_params(self):
        """Return dict of class attributes to pretty-print"""
        params = super()._pprint_params()
        params.update({'shape': self.shape})
        return params
| {
"repo_name": "mbeyeler/pulse2percept",
"path": "pulse2percept/implants/alpha.py",
"copies": "1",
"size": "10798",
"license": "bsd-3-clause",
"hash": -6343242073722403000,
"line_mean": 40.6911196911,
"line_max": 80,
"alpha_frac": 0.6146508613,
"autogenerated": false,
"ratio": 3.6966792194453952,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9810853413602061,
"avg_score": 0.00009533342866676199,
"num_lines": 259
} |
# Python 2 script: uses raw_input and (below) print statements.
alpha=int(raw_input("pick a number and press enter "))
beta=int(raw_input("pick another number and press enter "))
#print alpha, beta #for testing purposes, might be added back later to verify correct numbers
end= max(alpha+1, beta+1) #because the index starts at zero it will end at alpha/beta not one number higher
bucket = [];  # sieve flags: 0 = prime candidate, 1 = composite
primes = [];  # collected primes up to 'end'
for i in range(0,end): #creates an array of zeroes length end
    bucket.append(0);
#print bucket
# Sieve of Eratosthenes: for each j with j*j <= end, mark every
# multiple of j (starting at 2*j) as composite.
j=2
while j**2<=end:
    for i in range(j+j, len(bucket), j):
        bucket[i]=1
    #print bucket #soley for testing each step of the sieve
    j=j+1
bucket[0]=1 #this line tells the sieve that 0 is not prime
bucket[1]=1 #this line tells the sieve that 1 is not a prime
#print bucket #for testing the sieve after it has finished (will give you an array of 0s and 1s where the 0s are primes
for i in range(0,len(bucket)): #this for loop puts the prime numbers in bucket into the array named primes
    if bucket[i]==0:
        primes.append(i);
        i=i+1  # no-op: the for loop rebinds i on each iteration
def primeindex(gamma, kappa):
    """Return (GCD, LCM) of gamma and kappa via trial division by the
    precomputed global `primes` list.

    NOTE(review): relies on Python 2 integer division (`/`); under
    Python 3 the repeated divisions would produce floats.
    """
    GCD=1
    LCM=1
    i=0
    # Divide out prime factors until both numbers drop below the current prime.
    while gamma>=primes[i] or kappa>=primes[i]:
        if gamma%primes[i]==0 and kappa%primes[i]==0:
            # Shared factor: contributes to both GCD and LCM.
            gamma=gamma/primes[i]
            kappa=kappa/primes[i]
            GCD=GCD*primes[i]
            LCM=LCM*primes[i]
            #print "alpha divided by ", primes[i], "is ", gamma, "beta divided by ", primes[i], "is ", kappa, "and the greatest common divisor is ", GCD
        elif gamma%primes[i]==0 and kappa%primes[i]!=0:
            # Factor only of gamma: contributes to LCM only.
            gamma=gamma/primes[i]
            LCM=LCM*primes[i]
            #print "alpha divided by ", primes[i], "is ", gamma, "beta divided by ", primes[i], "is ", kappa, "and the least common multiple is ", LCM
        elif gamma%primes[i]!=0 and kappa%primes[i]==0:
            # Factor only of kappa: contributes to LCM only.
            kappa=kappa/primes[i]
            LCM=LCM*primes[i]
            #print "alpha divided by ", primes[i], "is ", gamma, "beta divided by ", primes[i], "is ", kappa, "and the least common multiple is ", LCM
        else:
            # Current prime divides neither: advance to the next prime.
            i=i+1
    return GCD, LCM
# Compute and display the GCD and LCM of the two inputs (Python 2 print).
GCD,LCM=primeindex(alpha, beta)
print "gcd is ",GCD, "lcm is ",LCM
| {
"repo_name": "Greh/coconuts",
"path": "LCMandGCD.py",
"copies": "1",
"size": "2050",
"license": "mit",
"hash": -1933517286368731100,
"line_mean": 39.1960784314,
"line_max": 143,
"alpha_frac": 0.68,
"autogenerated": false,
"ratio": 2.7777777777777777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8041453760582248,
"avg_score": 0.183264803439106,
"num_lines": 51
} |
# Digit alphabets for positional number systems. Slices of the master
# string below give the digit sets for the common bases.
ALPHANUMERICAL_DIGITS= '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
BIN_DIGITS= ALPHANUMERICAL_DIGITS[0:2]        # base 2
OCT_DIGITS= ALPHANUMERICAL_DIGITS[0:8]        # base 8
DECIMAL_DIGITS= ALPHANUMERICAL_DIGITS[0:10]   # base 10
HEX_DIGITS= ALPHANUMERICAL_DIGITS[0:16]       # base 16 (lowercase digits)
ALL_LOWER= ALPHANUMERICAL_DIGITS[10:36]       # 'a'..'z' (base 26)
ALL_UPPER= ALPHANUMERICAL_DIGITS[36:]         # 'A'..'Z' (base 26)
ALPHA_DIGITS= ALPHANUMERICAL_DIGITS[10:]      # letters, lower then upper (base 52)
def convert_from(input, source):
    """
    Convert a string written with the digit set `source` to an integer.

    :param input: String (most significant digit first)
    :param source: all digits for a base; its length is the base
    :return: Int
    """
    base = len(source)
    # Horner's scheme: fold digits left-to-right instead of summing powers.
    value = 0
    for ch in input:
        value = value * base + source.index(ch)
    return value
def convert_to(input, target):
    """
    Convert an integer to its string representation in base len(target).

    :param input: Integer
    :param target: all digits for a base; its length is the base
    :return: string representation in Base n
    """
    base = len(target)
    digits = []
    # Emit at least one digit, so input == 0 yields target[0].
    while True:
        digits.append(target[input % base])
        input = input // base
        if input <= 0:
            break
    return ''.join(reversed(digits))
def convert(input, source, target):
    """
    Re-express `input` (written with digit set `source`) using digit set `target`.

    :param input: string in the source base
    :param source: all digits of the source base
    :param target: all digits of the target base
    :return: string in the target base
    """
    as_integer = convert_from(input=input, source=source)
    return convert_to(input=as_integer, target=target)
import unittest
class TestFirst(unittest.TestCase):
    """Unit tests for the digit alphabets and base-conversion helpers."""
    def testFirst(self):
        # Alias assertEqual under the codewars-style name assert_equals,
        # on both the instance and the class (test/Test both refer to self).
        test = self
        Test = self
        test.assert_equals = Test.assertEqual
        Test.assert_equals = Test.assertEqual
        test.assert_equals(ALL_LOWER, 'abcdefghijklmnopqrstuvwxyz')
        test.assert_equals(ALL_UPPER, 'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
        test.assert_equals(ALPHA_DIGITS, 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
        test.assert_equals(convert("15", DECIMAL_DIGITS, BIN_DIGITS), '1111', '"15" dec -> bin')
        test.assert_equals(convert_from("15", DECIMAL_DIGITS), 15, )
        test.assert_equals(convert_from("15", OCT_DIGITS), 13, )
        test.assert_equals(convert_from("10", BIN_DIGITS), 2, )
        test.assert_equals(convert_from("110", BIN_DIGITS), 6, )
        test.assert_equals(convert_from("100", BIN_DIGITS), 4, )
        test.assert_equals(convert_to(4, BIN_DIGITS), "100")
        test.assert_equals(convert_to(8, OCT_DIGITS), "10")
        test.assert_equals(convert_to(9, OCT_DIGITS), "11")
        test.assert_equals(convert_to(10, OCT_DIGITS), "12")
        test.assert_equals(convert_to(11, OCT_DIGITS), "13")
        test.assert_equals(convert("15", DECIMAL_DIGITS, OCT_DIGITS), '17', '"15" dec -> oct')
        test.assert_equals(convert("1010", BIN_DIGITS, DECIMAL_DIGITS), '10', '"1010" bin -> dec')
        test.assert_equals(convert("1010", BIN_DIGITS, HEX_DIGITS), 'a', '"1010" bin -> hex')
        test.assert_equals(convert("0", DECIMAL_DIGITS, ALPHA_DIGITS), 'a', '"0" dec -> alpha')
        test.assert_equals(convert("27", DECIMAL_DIGITS, ALL_LOWER), 'bb', '"27" dec -> alpha_lower')
        test.assert_equals(convert("hello", ALL_LOWER, HEX_DIGITS), '320048', '"hello" alpha_lower -> hex')
        test.assert_equals(convert("SAME", ALL_UPPER, ALL_UPPER), 'SAME', '"SAME" alpha_upper -> alpha_upper')
| {
"repo_name": "julzhk/codekata",
"path": "BaseConversion.py",
"copies": "1",
"size": "3342",
"license": "mit",
"hash": 922105167746660900,
"line_mean": 39.265060241,
"line_max": 110,
"alpha_frac": 0.6529024536,
"autogenerated": false,
"ratio": 3.3220675944333995,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4474970048033399,
"avg_score": null,
"num_lines": null
} |
"""Alphanumeric tokenizer"""
import re
from py_stringmatching import utils
from py_stringmatching.tokenizer.definition_tokenizer import DefinitionTokenizer
class AlphanumericTokenizer(DefinitionTokenizer):
    """Alphanumeric tokenizer class.

    Splits a string into runs of consecutive ASCII letters and digits.

    Parameters:
        return_set (boolean): flag to indicate whether to return a set of
                              tokens. (defaults to False)
    """
    def __init__(self, return_set=False):
        # One or more consecutive alphanumeric characters form a token.
        self.alnum_regex = re.compile('[a-zA-Z0-9]+')
        super(AlphanumericTokenizer, self).__init__(return_set)

    def tokenize(self, input_string):
        """
        Tokenizes input string into alphanumeric tokens.
        An alphanumeric token is defined as consecutive sequence of alphanumeric characters.
        Args:
            input_string (str): Input string
        Returns:
            Token list (list)
        Raises:
            TypeError : If the input is not a string
        Examples:
            >>> alnum_tok = AlphanumericTokenizer()
            >>> alnum_tok.tokenize('data9,(science), data9#.(integration).88')
            ['data9', 'science', 'data9', 'integration', '88']
            >>> alnum_tok.tokenize('#.&')
            []
            >>> alnum_tok = AlphanumericTokenizer(return_set=True)
            >>> alnum_tok.tokenize('data9,(science), data9#.(integration).88')
            ['data9', 'science', 'integration', '88']
        """
        utils.tok_check_for_none(input_string)
        utils.tok_check_for_string_input(input_string)
        matches = self.alnum_regex.findall(input_string)
        token_list = [tok for tok in matches if tok]
        if self.return_set:
            return utils.convert_bag_to_set(token_list)
        return token_list
| {
"repo_name": "Anson-Doan/py_stringmatching",
"path": "py_stringmatching/tokenizer/alphanumeric_tokenizer.py",
"copies": "1",
"size": "1746",
"license": "bsd-3-clause",
"hash": -6686849175719878000,
"line_mean": 31.3333333333,
"line_max": 92,
"alpha_frac": 0.5962199313,
"autogenerated": false,
"ratio": 4.197115384615385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5293335315915385,
"avg_score": null,
"num_lines": null
} |
# Alpha O. Sall
# 03/24/2014
from flask import Flask, request, Response
import json
import requests
from array import *
from Log import Log
def getCephRestApiUrl(request):
    """Derive the ceph-rest-api base URL from the incoming request's root URL.

    Replaces the 'inkscopeCtrl' path segment with 'ceph-rest-api'.
    """
    root_url = request.url_root
    return root_url.replace("inkscopeCtrl", "ceph-rest-api")
class Pools:
    """Holds the attributes of a Ceph pool.

    Attributes are populated either from a submitted JSON form
    (newpool_attribute) or from the JSON output of 'ceph osd dump'
    (savedpool_attribute). register() creates the pool via the
    ceph-rest-api; callers must set self.cephRestApiUrl beforehand.
    """
    def __init__(self):
        pass

    def newpool_attribute(self, jsonform):
        """Populate attributes from a JSON-encoded form string."""
        jsondata = json.loads(jsonform)
        self.name = jsondata['pool_name']
        self.pg_num = jsondata['pg_num']
        self.pgp_num = jsondata['pg_placement_num']
        self.type = jsondata['type']
        self.size = jsondata['size']
        self.min_size = jsondata['min_size']
        self.crash_replay_interval = jsondata['crash_replay_interval']
        self.crush_ruleset = jsondata['crush_ruleset']
        self.erasure_code_profile = jsondata['erasure_code_profile']
        self.quota_max_objects = jsondata['quota_max_objects']
        self.quota_max_bytes = jsondata['quota_max_bytes']

    def savedpool_attribute(self, ind, jsonfile):
        """Populate attributes from the ind-th pool of an 'osd dump' response.

        jsonfile is a requests-style response object exposing .json().
        """
        # Hoist the repeated ['output']['pools'][ind] lookup (the original
        # re-evaluated it for every single attribute).
        pool = jsonfile.json()['output']['pools'][ind]
        self.name = pool['pool_name']
        self.pg_num = pool['pg_num']
        self.pgp_num = pool['pg_placement_num']
        self.type = pool['type']
        self.size = pool['size']
        self.min_size = pool['min_size']
        self.crash_replay_interval = pool['crash_replay_interval']
        self.crush_ruleset = pool['crush_ruleset']
        self.erasure_code_profile = pool['erasure_code_profile']
        self.quota_max_objects = pool['quota_max_objects']
        self.quota_max_bytes = pool['quota_max_bytes']

    def register(self):
        """PUT a pool-create request to the ceph-rest-api.

        NOTE(review): the URL is built by string concatenation; values
        are assumed URL-safe -- confirm, or switch to requests' params=.
        """
        uri = self.cephRestApiUrl+'osd/pool/create?pool='+self.name+'&pool_type='+self.type+'&pg_num='+str(self.pg_num)+'&pgp_num='+str(self.pgp_num)
        if self.erasure_code_profile != "":
            uri += '&erasure_code_profile='+self.erasure_code_profile
        register_pool = requests.put(uri)
def getindice(id, jsondata):
    """Map a Ceph pool id to its index in the 'osd dump' pools list.

    jsondata is a requests-style response whose .content is the JSON
    output of 'osd dump'. Returns the list index, or the sentinel
    string "Pool id not found" if the id is absent.
    """
    r = json.loads(jsondata.content)
    pool_ids = [p[u'pool'] for p in r['output']['pools']]
    if id not in pool_ids:
        # BUG FIX: the original returned "Pool not found", but every
        # caller (checkpool, pool_manage) tests for "Pool id not found".
        return "Pool id not found"
    # BUG FIX: the original rebound `id` inside the search loop, so
    # later iterations compared against the partial result and could
    # return a wrong index. Use a direct lookup instead.
    return pool_ids.index(id)
def getpoolname(ind, jsondata):
    """Return the name of the ind-th pool from an 'osd dump' JSON response."""
    payload = jsondata.json()
    return str(payload['output']['pools'][ind]['pool_name'])
def checkpool(pool_id, jsondata):
    """Check a pool id for existence (int) or a pool name for availability (str).

    Returns a flask Response whose JSON body is {'status': ..., 'output': {}}:
    status 'OK' means the id exists / the name is free.
    """
    skeleton = {'status': '', 'output': {}}
    if isinstance(pool_id, int):
        ind = getindice(pool_id, jsondata)
        if ind == "Pool id not found":
            skeleton['status'] = ind
        else:
            skeleton['status'] = 'OK'
        return Response(json.dumps(skeleton), mimetype='application/json')
    if isinstance(pool_id, str):
        r = json.loads(jsondata.content)
        # BUG FIX: the original collected the names into array('i', []),
        # which only accepts ints and raised TypeError on the first name.
        # NOTE(review): assumes each entry of r['output'] carries a
        # 'poolname' key -- confirm against the actual API response.
        mypoolsname = [i[u'poolname'] for i in r['output']]
        if pool_id not in mypoolsname:
            skeleton['status'] = 'OK'
        else:
            # Fixed message: was "already exits" with no leading space.
            skeleton['status'] = pool_id + ' already exists. Please enter a new pool name'
        return Response(json.dumps(skeleton), mimetype='application/json')
def geterrors(url, methods):
    """Issue a GET or PUT to `url` and report 'ok' or an error string.

    BUG FIX: the original used Python-2-only `except HTTPError, e`
    syntax with `HTTPError` never imported, and referenced
    `r.status_code` in the handler although `r` is unbound when the
    request itself fails. Catch requests' base exception instead and
    report the exception text.
    """
    try:
        if methods == 'GET':
            r = requests.get(url)
        else:
            r = requests.put(url)
    except requests.exceptions.RequestException as e:
        return 'Error ' + str(e)
    else:
        return 'ok'
# @app.route('/pools/', methods=['GET','POST'])
# @app.route('/pools/<int:id>', methods=['GET','DELETE','PUT'])
def pool_manage(id):
cephRestApiUrl = getCephRestApiUrl(request);
if request.method == 'GET':
if id == None:
r = requests.get(cephRestApiUrl+'osd/lspools.json')
if r.status_code != 200:
return Response(r.raise_for_status())
else:
r = r.content
return Response(r, mimetype='application/json')
else:
data = requests.get(cephRestApiUrl+'osd/dump.json')
if data.status_code != 200:
return 'Error '+str(data.status_code)+' on the request getting pools'
else:
ind = getindice(id, data)
id = ind
skeleton = {'status':'','output':{}}
if id == "Pool id not found":
skeleton['status'] = id
result = json.dumps(skeleton)
return Response(result, mimetype='application/json')
else:
r = data.content
r = json.loads(r)
#r = data.json()
skeleton['status'] = r['status']
skeleton['output'] = r['output']['pools'][id]
result = json.dumps(skeleton)
return Response(result, mimetype='application/json')
elif request.method =='POST':
jsonform = request.form['json']
newpool = Pools()
newpool.cephRestApiUrl = getCephRestApiUrl(request)
newpool.newpool_attribute(jsonform)
newpool.register()
jsondata = requests.get(cephRestApiUrl+'osd/dump.json')
r = jsondata.content
r = json.loads(r)
#r = jsondata.json()
nbpool = len(r['output']['pools'])
poolcreated = Pools()
poolcreated.savedpool_attribute(nbpool-1, jsondata)
# set pool parameter
var_name= ['size', 'min_size', 'crash_replay_interval','crush_ruleset']
param_to_set_list = [newpool.size, newpool.min_size, newpool.crash_replay_interval, newpool.crush_ruleset]
default_param_list = [poolcreated.size, poolcreated.min_size, poolcreated.crash_replay_interval, poolcreated.crush_ruleset]
for i in range(len(default_param_list)):
if param_to_set_list[i] != default_param_list[i]:
r = requests.put(cephRestApiUrl+'osd/pool/set?pool='+str(poolcreated.name)+'&var='+var_name[i]+'&val='+str(param_to_set_list[i]))
else:
pass
# set object or byte limit on pool
field_name = ['max_objects','max_bytes']
param_to_set = [newpool.quota_max_objects, newpool.quota_max_bytes]
default_param = [poolcreated.quota_max_objects, poolcreated.quota_max_bytes]
for i in range(len(default_param)):
if param_to_set[i] != default_param[i]:
r = requests.put(cephRestApiUrl+'osd/pool/set-quota?pool='+str(poolcreated.name)+'&field='+field_name[i]+'&val='+str(param_to_set[i]))
else:
pass
return 'None'
elif request.method == 'DELETE':
data = requests.get(cephRestApiUrl+'osd/dump.json')
# if data.status_code != 200:
# return 'Error '+str(r.status_code)+' on the request getting pools'
# else:
#r = data.json()
r = data.content
r = json.loads(r)
# data = requests.get('http://localhost:8080/ceph-rest-api/osd/dump.json')
ind = getindice(id, data)
id = ind
poolname = r['output']['pools'][id]['pool_name']
poolname = str(poolname)
delete_request = requests.put(cephRestApiUrl+'osd/pool/delete?pool='+poolname+'&pool2='+poolname+'&sure=--yes-i-really-really-mean-it')
return str(delete_request.status_code)
else:
jsonform = request.form['json']
newpool = Pools()
newpool.newpool_attribute(jsonform)
data = requests.get(cephRestApiUrl+'osd/dump.json')
if data.status_code != 200:
return 'Error '+str(data.status_code)+' on the request getting pools'
else:
#r = data.json()
r = data.content
r = json.loads(r)
ind = getindice(id, data)
savedpool = Pools()
savedpool.savedpool_attribute(ind, data)
# rename the poolname
if str(newpool.name) != str(savedpool.name):
r = requests.put(cephRestApiUrl+'osd/pool/rename?srcpool='+str(savedpool.name)+'&destpool='+str(newpool.name))
# set pool parameter
var_name= ['size', 'min_size', 'crash_replay_interval','pg_num','pgp_num','crush_ruleset']
param_to_set_list = [newpool.size, newpool.min_size, newpool.crash_replay_interval, newpool.pg_num, newpool.pgp_num, newpool.crush_ruleset]
default_param_list = [savedpool.size, savedpool.min_size, savedpool.crash_replay_interval, savedpool.pg_num, savedpool.pgp_num, savedpool.crush_ruleset]
for i in range(len(default_param_list)):
if param_to_set_list[i] != default_param_list[i]:
r = requests.put(cephRestApiUrl+'osd/pool/set?pool='+str(newpool.name)+'&var='+var_name[i]+'&val='+str(param_to_set_list[i]))
else:
pass
# set object or byte limit on pool
field_name = ['max_objects','max_bytes']
param_to_set = [newpool.quota_max_objects, newpool.quota_max_bytes]
default_param = [savedpool.quota_max_objects, savedpool.quota_max_bytes]
for i in range(len(default_param)):
if param_to_set[i] != default_param[i]:
r = requests.put(cephRestApiUrl+'osd/pool/set-quota?pool='+str(newpool.name)+'&field='+field_name[i]+'&val='+str(param_to_set[i]))
else:
pass
return str(r.status_code)
# @app.route('/pools/<int:id>/snapshot', methods=['POST'])
def makesnapshot(id):
    """Create the snapshot named in the posted JSON form on pool *id*.

    Returns the Ceph REST API status code as a string.
    """
    api_url = getCephRestApiUrl(request)
    dump = requests.get(api_url + 'osd/dump.json')
    osd_state = json.loads(dump.content)
    # translate the ceph pool id into its index in the dump's pool list
    id = getindice(id, dump)
    poolname = osd_state['output']['pools'][id]['pool_name']
    form = json.loads(request.form['json'])
    snap = form['snapshot_name']
    resp = requests.put(api_url + 'osd/pool/mksnap?pool=' + str(poolname) + '&snap=' + str(snap))
    return str(resp.status_code)
# @app.route('/pools/<int:id>/snapshot/<namesnapshot>', methods=['DELETE'])
def removesnapshot(id, namesnapshot):
    """Remove snapshot *namesnapshot* from pool *id*.

    Returns the Ceph REST API response body on success, or the error
    message as a string if the HTTP request itself failed.
    """
    cephRestApiUrl = getCephRestApiUrl(request)
    data = requests.get(cephRestApiUrl+'osd/dump.json')
    r = json.loads(data.content)
    # translate the ceph pool id into its index in the dump's pool list
    ind = getindice(id, data)
    id = ind
    poolname = r['output']['pools'][id]['pool_name']
    try:
        r = requests.put(cephRestApiUrl+'osd/pool/rmsnap?pool='+str(poolname)+'&snap='+str(namesnapshot))
    except requests.exceptions.RequestException as e:
        # BUG FIX: was `except HTTPException, e` -- HTTPException is never
        # imported in this module, so a failed request raised NameError;
        # also `return e` handed Flask an exception object instead of text.
        return str(e)
    else:
        return r.content
| {
"repo_name": "abrefort/inkscope-debian",
"path": "inkscopeCtrl/poolsCtrl.py",
"copies": "1",
"size": "11366",
"license": "apache-2.0",
"hash": -2182509588389155600,
"line_mean": 36.3881578947,
"line_max": 164,
"alpha_frac": 0.5793594932,
"autogenerated": false,
"ratio": 3.5496564647095563,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9605202722903601,
"avg_score": 0.004762647001191242,
"num_lines": 304
} |
# Alpha O. Sall
# 03/24/2014
from flask import Flask, request, Response, render_template
app = Flask(__name__)#,template_folder='/var/www/inkscope/inkscopeAdm/')
import requests
from array import *
import sys
from urllib2 import HTTPError
import json
from bson.json_util import dumps
import time
import mongoJuiceCore
import poolsCtrl
try:
import poolsCtrlSalt
except:
pass
import osdsCtrl
from S3Ctrl import S3Ctrl, S3Error
from Log import Log
#Added for S3 objects management
from S3ObjectCtrl import *
# Load configuration from file
configfile = "/opt/inkscope/etc/inkscope.conf"
datasource = open(configfile, "r")
conf = json.load(datasource)
datasource.close()
try:
    # dict.get does not raise on a missing key; the try presumably guards
    # against conf not being a dict at all -- TODO confirm
    minion = conf.get("minion")
except:
    pass
#
# mongoDB query facility
#
@app.route('/<db>/<collection>', methods=['GET', 'POST'])
def find(db, collection):
    """Proxy a mongo query on <db>.<collection> to mongoJuiceCore.find."""
    return mongoJuiceCore.find(conf, db, collection)
@app.route('/<db>', methods=['POST'])
def full(db):
    """Proxy a whole-database mongo query on <db> to mongoJuiceCore.full."""
    return mongoJuiceCore.full(conf, db)
#
# Pools management
#
## Ceph Rest API
@app.route('/pools/', methods=['GET','POST'])
@app.route('/pools/<int:id>', methods=['GET','DELETE','PUT'])
def pool_manage(id=None):
    """List/create pools (no id) or get/delete/update one pool via poolsCtrl."""
    return poolsCtrl.pool_manage(id)
@app.route('/pools/<int:id>/snapshot', methods=['POST'])
def makesnapshot(id):
    """Create a snapshot on pool <id> (snapshot name comes from the form)."""
    return poolsCtrl.makesnapshot(id)
@app.route('/pools/<int:id>/snapshot/<namesnapshot>', methods=['DELETE'])
def removesnapshot(id, namesnapshot):
    """Delete snapshot <namesnapshot> from pool <id>."""
    return poolsCtrl.removesnapshot(id, namesnapshot)
## Rest API with Salt
try:
    @app.route('/poolsalt/', methods=['GET','POST'])
    @app.route('/poolsalt/<int:id>', methods=['GET','DELETE','PUT'])
    def pool_manage_salt(id=None):
        """Salt-backed pool management on the configured minion."""
        return poolsCtrlSalt.pool_manage_salt(id, minion)
except:
    # bare except: route registration is silently skipped when the optional
    # salt backend (poolsCtrlSalt import / minion config) is unavailable
    pass
try:
    @app.route('/poolsalt/<int:id>/snapshot', methods=['POST'])
    def makesnapshot_salt(id):
        """Create a snapshot on pool <id> through the salt backend."""
        # BUG FIX: was poolsCtrl.makesnapshot(id, minion) --
        # poolsCtrl.makesnapshot takes a single argument; the salt
        # variant lives in poolsCtrlSalt as makesnapshot_salt(id, minion).
        return poolsCtrlSalt.makesnapshot_salt(id, minion)
except:
    # bare except: skipped when the salt backend is unavailable
    pass
try:
    @app.route('/poolsalt/<int:id>/snapshot/<namesnapshot>', methods=['DELETE'])
    def removesnapshot_salt(id, namesnapshot):
        """Delete snapshot <namesnapshot> from pool <id> through the salt backend."""
        # BUG FIX: was poolsCtrl.removesnapshot(id, namesnapshot, minion) --
        # poolsCtrl.removesnapshot takes two arguments; the salt variant
        # lives in poolsCtrlSalt as removesnapshot_salt(id, namesnapshot, minion).
        return poolsCtrlSalt.removesnapshot_salt(id, namesnapshot, minion)
except:
    # bare except: skipped when the salt backend is unavailable
    pass
#
# Osds management
#
@app.route('/osds', methods=['PUT'])
def osds_manage(id=None):
    """Update osds; the route carries no <id>, so id is always None here."""
    return osdsCtrl.osds_manage(id)
#
# Object storage management
#
# This method return a S3 Object that id is "objId".
# An exception is trhown if the object does not exist or there an issue
@app.route('/S3/object', methods=['GET'])
def getObjectStructure() :
    """Return the S3 object structure as JSON; on S3Error return its reason/code."""
    Log.debug("Calling getObjectStructure() method")
    try :
        return Response(S3ObjectCtrl(conf).getObjectStructure(),mimetype='application/json')
    except S3Error , e :
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)
# User management
@app.route('/S3/user', methods=['GET'])
def listUser():
    """List radosgw users as JSON; on S3Error return its reason with its HTTP code."""
    try:
        return Response(S3Ctrl(conf).listUsers(),mimetype='application/json')
    except S3Error , e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)
@app.route('/S3/user', methods=['POST'])
def createUser():
    """Create a radosgw user from the request payload."""
    try:
        return Response(S3Ctrl(conf).createUser(),mimetype='application/json')
    except S3Error , e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)
@app.route('/S3/user/<string:uid>', methods=['GET'])
def getUser(uid):
    """Return the radosgw user <uid> as JSON."""
    try:
        return Response(S3Ctrl(conf).getUser(uid),mimetype='application/json')
    except S3Error , e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)
@app.route('/S3/user/<string:uid>', methods=['PUT'])
def modifyUser(uid):
    """Modify the radosgw user <uid> from the request payload."""
    try:
        return Response(S3Ctrl(conf).modifyUser(uid),mimetype='application/json')
    except S3Error , e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)
@app.route('/S3/user/<string:uid>', methods=['DELETE'])
def removeUser(uid):
    """Delete the radosgw user <uid>."""
    try:
        return Response(S3Ctrl(conf).removeUser(uid),mimetype='application/json')
    except S3Error , e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)
@app.route('/S3/user/<string:uid>/key/<string:key>', methods=['DELETE'])
def removeUserKey(uid,key):
    """Delete access key <key> of user <uid>."""
    try:
        return Response(S3Ctrl(conf).removeUserKey(uid,key),mimetype='application/json')
    except S3Error , e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)
@app.route('/S3/user/<string:uid>/subuser', methods=['PUT'])
def createSubuser(uid):
    """Create a subuser under user <uid>."""
    try:
        return Response(S3Ctrl(conf).createSubuser(uid),mimetype='application/json')
    except S3Error , e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)
@app.route('/S3/user/<string:uid>/subuser/<string:subuser>', methods=['DELETE'])
def deleteSubuser(uid, subuser):
    """Delete subuser <subuser> of user <uid>."""
    try:
        return Response(S3Ctrl(conf).deleteSubuser(uid, subuser),mimetype='application/json')
    except S3Error , e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)
@app.route('/S3/user/<string:uid>/subuser/<string:subuser>/key', methods=['PUT'])
def createSubuserKey(uid, subuser):
    """Create a key for subuser <subuser> of user <uid>."""
    Log.debug("createSubuserKey")
    try:
        return Response(S3Ctrl(conf).createSubuserKey(uid, subuser),mimetype='application/json')
    except S3Error , e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)
@app.route('/S3/user/<string:uid>/subuser/<string:subuser>/key/<string:key>', methods=['DELETE'])
def deleteSubuserKey(uid, subuser, key):
    """Delete key <key> of subuser <subuser> of user <uid>."""
    Log.debug("deleteSubuserKey")
    try:
        return Response(S3Ctrl(conf).deleteSubuserKey(uid, subuser,key),mimetype='application/json')
    except S3Error , e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)
@app.route('/S3/user/<string:uid>/caps', methods=['PUT', 'POST'])
def saveCapability(uid):
    """Add/replace admin capabilities on user <uid>."""
    Log.debug("saveCapability")
    try:
        return Response(S3Ctrl(conf).saveCapability(uid),mimetype='application/json')
    except S3Error , e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)
@app.route('/S3/user/<string:uid>/caps', methods=['DELETE'])
def deleteCapability(uid):
    """Remove admin capabilities from user <uid>."""
    Log.debug("deleteCapability")
    try:
        return Response(S3Ctrl(conf).deleteCapability(uid),mimetype='application/json')
    except S3Error , e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)
# bucket management
@app.route('/S3/user/<string:uid>/buckets', methods=['GET'])
def getUserBuckets(uid,bucket=None):
    """List the buckets owned by user <uid>; the `bucket` parameter is unused."""
    try:
        return Response(S3Ctrl(conf).getUserBuckets(uid),mimetype='application/json')
    except S3Error , e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)
@app.route('/S3/bucket', methods=['PUT'])
def createBucket():
    """Create a bucket from the request payload."""
    try:
        return Response(S3Ctrl(conf).createBucket(), mimetype='application/json')
    except S3Error , e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)
@app.route('/S3/bucket', methods=['GET'])
def getBuckets():
    """List info for all buckets (getBucketInfo with no bucket name)."""
    try:
        return Response(S3Ctrl(conf).getBucketInfo(None), mimetype='application/json')
    except S3Error , e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)
@app.route('/S3/bucket/<string:bucket>', methods=['GET'])
def getBucketInfo(bucket=None):
    """Return info for bucket <bucket> as JSON."""
    try:
        return Response(S3Ctrl(conf).getBucketInfo(bucket), mimetype='application/json')
    except S3Error , e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)
@app.route('/S3/bucket/<string:bucket>', methods=['DELETE'])
def deleteBucket(bucket):
    """Delete bucket <bucket>."""
    try:
        return Response(S3Ctrl(conf).deleteBucket(bucket), mimetype='application/json')
    except S3Error , e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)
@app.route('/S3/bucket/<string:bucket>/link', methods=['DELETE','PUT'])
def linkBucket(bucket):
    """Link (PUT) or unlink (DELETE) <bucket> to/from the uid given in the form."""
    try:
        uid = request.form['uid']
        if request.method =='PUT':
            return Response(S3Ctrl(conf).linkBucket(uid, bucket), mimetype='application/json')
        else:
            return Response(S3Ctrl(conf).unlinkBucket(uid, bucket), mimetype='application/json')
    except S3Error , e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)
@app.route('/S3/bucket/<string:bucketName>/list', methods=['GET'])
def listBucket(bucketName):
    """List the objects contained in bucket <bucketName>."""
    try:
        return Response(S3Ctrl(conf).listBucket(bucketName), mimetype='application/json')
    except S3Error , e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)
| {
"repo_name": "abrefort/inkscope-debian",
"path": "inkscopeCtrl/inkscopeCtrlcore.py",
"copies": "1",
"size": "8553",
"license": "apache-2.0",
"hash": -5762974667787807000,
"line_mean": 29.5464285714,
"line_max": 100,
"alpha_frac": 0.6587162399,
"autogenerated": false,
"ratio": 3.3319049474094276,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9404109569846038,
"avg_score": 0.017302323492678073,
"num_lines": 280
} |
# Alpha O. Sall
# 03/24/2014
from flask import request, Response
import json
import requests
from array import *
import subprocess
from StringIO import StringIO
from InkscopeError import InkscopeError
import ceph_version
class Pools:
    """Value object holding pool attributes, either parsed from the UI's
    JSON form (newpool_attribute) or read back from an osd dump HTTP
    response (savedpool_attribute)."""

    def __init__(self):
        pass

    def newpool_attribute(self, jsonform):
        """Populate this object from the JSON creation/edition form."""
        jsondata = json.loads(jsonform)
        self.name = jsondata['pool_name']
        self.pg_num = jsondata['pg_num']
        self.pgp_num = jsondata['pg_placement_num']
        self.type = jsondata['type']
        self.size = jsondata['size']
        self.min_size = jsondata['min_size']
        self.crash_replay_interval = jsondata['crash_replay_interval']
        self.crush_rule = jsondata['crush_rule']
        self.erasure_code_profile = jsondata['erasure_code_profile']
        self.quota_max_objects = jsondata['quota_max_objects']
        self.quota_max_bytes = jsondata['quota_max_bytes']
        if ceph_version.major >= 12:
            # luminous+ exposes crush rules by name and pool applications
            self.crush_rule_name = jsondata['crush_rule_name']
            self.application_metadata = jsondata['application_metadata']

    def savedpool_attribute(self, ind, jsonfile):
        """Populate this object from entry *ind* of the 'pools' list of an
        osd dump HTTP response (*jsonfile* is a requests response)."""
        r = jsonfile.json()
        self.name = r['output']['pools'][ind]['pool_name']
        self.pg_num = r['output']['pools'][ind]['pg_num']
        self.pgp_num = r['output']['pools'][ind]['pg_placement_num']
        self.type = r['output']['pools'][ind]['type']
        self.size = r['output']['pools'][ind]['size']
        self.min_size = r['output']['pools'][ind]['min_size']
        self.crash_replay_interval = r['output']['pools'][ind]['crash_replay_interval']
        # BUG FIX: was `if ceph_version<12`, which compared the *module*
        # object to an int (never the version number), so the pre-luminous
        # branch could never be selected as intended -- use .major as the
        # rest of the file does (cf. newpool_attribute above).
        if ceph_version.major < 12:
            self.crush_rule = r['output']['pools'][ind]['crush_ruleset']
        else:
            self.crush_rule = r['output']['pools'][ind]['crush_rule']
            self.application_metadata = r['output']['pools'][ind]['application_metadata']
        self.erasure_code_profile = r['output']['pools'][ind]['erasure_code_profile']
        self.quota_max_objects = r['output']['pools'][ind]['quota_max_objects']
        self.quota_max_bytes = r['output']['pools'][ind]['quota_max_bytes']

    def register(self):
        """Create the pool through the Ceph REST API (response is ignored).
        self.cephRestApiUrl must have been set by the caller beforehand."""
        uri = self.cephRestApiUrl+'osd/pool/create?pool='+self.name+'&pool_type='+self.type+'&pg_num='+str(self.pg_num)+'&pgp_num='+str(self.pgp_num)
        if self.erasure_code_profile != "":
            uri += '&erasure_code_profile='+self.erasure_code_profile
        register_pool = requests.put(uri)
# else:
class PoolsCtrl:
def __init__(self,conf):
self.cluster_name = conf['cluster']
ceph_rest_api_subfolder = conf.get("ceph_rest_api_subfolder", "")
ceph_rest_api_subfolder = ceph_rest_api_subfolder.strip('/')
if ceph_rest_api_subfolder != '':
ceph_rest_api_subfolder = "/" + ceph_rest_api_subfolder
self.cephRestApiUrl = "http://"+conf.get("ceph_rest_api", "")+ceph_rest_api_subfolder+"/api/v0.1/"
pass
    def getCephRestApiUrl(self):
        """Return the ceph-rest-api base URL computed in __init__."""
        return self.cephRestApiUrl
def getindice(self,id, jsondata):
r = jsondata.content
r = json.loads(r)
mypoolsnum = array('i',[])
for i in r['output']['pools']:
mypoolsnum.append(i[u'pool'])
if id not in mypoolsnum:
return "Pool not found"
else:
for i in range(len(mypoolsnum)):
if mypoolsnum[i]==id:
id=i
return id
def getpoolname(self,ind, jsondata):
r = jsondata.json()
poolname = r['output']['pools'][ind]['pool_name']
return str(poolname)
def checkpool(self,pool_id, jsondata):
skeleton = {'status':'','output':{}}
if isinstance(pool_id, int):
ind = self.getindice(pool_id, jsondata)
id = ind
if id == "Pool id not found":
skeleton['status'] = id
result = json.dumps(skeleton)
return Response(result, mimetype='application/json')
else:
skeleton['status'] = 'OK'
result = json.dumps(skeleton)
return Response(result, mimetype='application/json')
if isinstance(pool_id, str):
r = jsondata.content
r = json.loads(r)
mypoolsname = array('i',[])
for i in r['output']:
mypoolsname.append(i[u'poolname'])
if pool_id not in mypoolsname:
skeleton['status'] = 'OK'
result = json.dumps(skeleton)
return Response(result, mimetype='application/json')
else:
skeleton['status'] = pool_id+'already exits. Please enter a new pool name'
result = json.dumps(skeleton)
return Response(result, mimetype='application/json')
    def pool_list(self):
        """Run `ceph osd lspools --format=json` locally and return its stdout
        wrapped in a StringIO."""
        args = ['ceph',
                'osd',
                'lspools',
                '--format=json',
                '--cluster='+ self.cluster_name ]
        output = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0]
        output_io = StringIO(output)
        return output_io
    # @app.route('/pools/', methods=['GET','POST'])
    # @app.route('/pools/<int:id>', methods=['GET','DELETE','PUT'])
    def pool_manage(self,id):
        """Dispatch pool management on the Flask request method:

        GET without id -> pool list; GET with id -> one pool's dump entry;
        POST -> create a pool from the posted JSON form and apply its
        parameters/quotas; DELETE -> delete pool *id*; anything else
        (PUT) -> rename/modify pool *id* from the form.
        Raises InkscopeError when the Ceph REST API calls fail.
        """
        # print "ceph_version_major", ceph_version.major
        cephRestApiUrl = self.getCephRestApiUrl();
        if request.method == 'GET':
            if id == None: # get poolList
                r = requests.get(cephRestApiUrl+'osd/lspools.json')
                if r.status_code != 200:
                    return Response(r.raise_for_status())
                else:
                    r = r.content
                    return Response(r, mimetype='application/json')
            else:
                data = requests.get(cephRestApiUrl+'osd/dump.json')
                if data.status_code != 200:
                    raise InkscopeError( data.status_code, 'Error '+str(data.status_code)+' on the request getting pools: '+data.content)
                else:
                    # translate pool id into its index in the dump list
                    ind = self.getindice(id, data)
                    id = ind
                    skeleton = {'status':'','output':{}}
                    if id == "Pool id not found":
                        skeleton['status'] = id
                        result = json.dumps(skeleton)
                        return Response(result, mimetype='application/json')
                    else:
                        r = data.content
                        r = json.loads(r)
                        #r = data.json()
                        skeleton['status'] = r['status']
                        skeleton['output'] = r['output']['pools'][id]
                        result = json.dumps(skeleton)
                        return Response(result, mimetype='application/json')
        elif request.method == 'POST':
            jsonform = request.form['json']
            newpool = Pools()
            newpool.cephRestApiUrl = cephRestApiUrl
            newpool.newpool_attribute(jsonform)
            newpool.register()
            # re-read the dump; the new pool is assumed to be the last entry
            jsondata = requests.get(cephRestApiUrl+'osd/dump.json')
            r = jsondata.content
            r = json.loads(r)
            #r = jsondata.json()
            nbpool = len(r['output']['pools'])
            poolcreated = Pools()
            poolcreated.savedpool_attribute(nbpool-1, jsondata)
            # set pool parameter (only those differing from the defaults)
            var_name = ['size', 'min_size', 'crash_replay_interval']
            param_to_set_list = [newpool.size, newpool.min_size, newpool.crash_replay_interval]
            default_param_list = [poolcreated.size, poolcreated.min_size, poolcreated.crash_replay_interval]
            if ceph_version.major >= 12:
                var_name.append('crush_rule')
                param_to_set_list.append(newpool.crush_rule_name)
                default_param_list.append("")
            else:
                var_name.append('crush_ruleset')
                param_to_set_list.append(newpool.crush_rule)
                default_param_list.append(poolcreated.crush_rule)
            for i in range(len(default_param_list)):
                if param_to_set_list[i] != default_param_list[i]:
                    r = requests.put(cephRestApiUrl+'osd/pool/set?pool='+str(poolcreated.name)+'&var='+var_name[i]+'&val='+str(param_to_set_list[i]))
                else:
                    pass
            # enable application
            if ceph_version.major >= 12:
                url = cephRestApiUrl + "osd/pool/application/enable?pool="+str(poolcreated.name)+"&app="+newpool.application_metadata
                requests.put(url)
            # set object or byte limit on pool
            field_name = ['max_objects','max_bytes']
            param_to_set = [newpool.quota_max_objects, newpool.quota_max_bytes]
            default_param = [poolcreated.quota_max_objects, poolcreated.quota_max_bytes]
            for i in range(len(default_param)):
                if param_to_set[i] != default_param[i]:
                    r = requests.put(cephRestApiUrl+'osd/pool/set-quota?pool='+str(poolcreated.name)+'&field='+field_name[i]+'&val='+str(param_to_set[i]))
                else:
                    pass
            return 'None'
        elif request.method == 'DELETE':
            data = requests.get(cephRestApiUrl+'osd/dump.json')
            # if data.status_code != 200:
            # return 'Error '+str(r.status_code)+' on the request getting pools'
            # else:
            #r = data.json()
            r = data.content
            r = json.loads(r)
            # data = requests.get('http://localhost:8080/ceph-rest-api/osd/dump.json')
            ind = self.getindice(id, data)
            id = ind
            poolname = r['output']['pools'][id]['pool_name']
            poolname = str(poolname)
            delete_request = requests.put(cephRestApiUrl+'osd/pool/delete?pool='+poolname+'&pool2='+poolname+'&sure=--yes-i-really-really-mean-it')
            print "Delete code ", delete_request.status_code
            print "Delete message ",delete_request.content
            if delete_request.status_code != 200:
                raise InkscopeError(delete_request.status_code, delete_request.content)
            return "pool has been deleted"
        else:
            # "PUT" = Modify
            jsonform = request.form['json']
            newpool = Pools()
            newpool.newpool_attribute(jsonform)
            data = requests.get(cephRestApiUrl+'osd/dump.json')
            if data.status_code != 200:
                raise InkscopeError( data.status_code, 'Error '+str(data.status_code)+' on the request getting pools: '+data.content)
            else:
                #r = data.json()
                r = data.content
                r = json.loads(r)
                ind = self.getindice(id, data)
                savedpool = Pools()
                savedpool.savedpool_attribute(ind, data)
                # rename the poolname
                if str(newpool.name) != str(savedpool.name):
                    r = requests.put(cephRestApiUrl+'osd/pool/rename?srcpool='+str(savedpool.name)+'&destpool='+str(newpool.name))
                # set pool parameter (only those differing from saved values)
                var_name= ['size', 'min_size', 'crash_replay_interval','pg_num','pgp_num']
                param_to_set_list = [newpool.size, newpool.min_size, newpool.crash_replay_interval, newpool.pg_num, newpool.pgp_num]
                default_param_list = [savedpool.size, savedpool.min_size, savedpool.crash_replay_interval, savedpool.pg_num, savedpool.pgp_num]
                if ceph_version.major >= 12:
                    var_name.append('crush_rule')
                    param_to_set_list.append(newpool.crush_rule_name)
                    default_param_list.append(savedpool.crush_rule)
                else:
                    var_name.append('crush_ruleset')
                    param_to_set_list.append(newpool.crush_rule)
                    default_param_list.append(savedpool.crush_rule)
                # accumulate per-parameter failures; returned to the caller
                message = ""
                for i in range(len(default_param_list)):
                    if param_to_set_list[i] != default_param_list[i]:
                        print "set ", var_name[i], " to ", str(param_to_set_list[i])
                        r = requests.put(cephRestApiUrl+'osd/pool/set?pool='+str(newpool.name)+'&var='+var_name[i]+'&val='+str(param_to_set_list[i]))
                        if r.status_code != 200:
                            message= message+ "Can't set "+ var_name[i]+ " to "+ str(param_to_set_list[i])+ " : "+ r.content+"<br>"
                    else:
                        pass
                # enable application
                if ceph_version.major >= 12:
                    url = cephRestApiUrl + "osd/pool/application/enable?pool=" + str(
                        newpool.name) + "&app=" + newpool.application_metadata + "&force=--yes-i-really- mean-it"
                    requests.put(url)
                # set object or byte limit on pool
                field_name = ['max_objects','max_bytes']
                param_to_set = [newpool.quota_max_objects, newpool.quota_max_bytes]
                default_param = [savedpool.quota_max_objects, savedpool.quota_max_bytes]
                for i in range(len(default_param)):
                    if param_to_set[i] != default_param[i]:
                        r = requests.put(cephRestApiUrl+'osd/pool/set-quota?pool='+str(newpool.name)+'&field='+field_name[i]+'&val='+str(param_to_set[i]))
                        if r.status_code != 200:
                            message= message+ "Can't set "+ field_name[i]+ " to "+ str(param_to_set[i])+ " : "+ r.content+"<br>"
                    else:
                        pass
                return message
    # @app.route('/pools/<int:id>/snapshot', methods=['POST'])
    def makesnapshot(self,id):
        """Create the snapshot named in the posted JSON form on pool *id*.
        Raises InkscopeError if the Ceph REST API call fails."""
        cephRestApiUrl = self.getCephRestApiUrl();
        data = requests.get(cephRestApiUrl+'osd/dump.json')
        #r = data.json()
        r = data.content
        r = json.loads(r)
        # translate pool id into its index in the dump list
        ind = self.getindice(id,data)
        id = ind
        poolname = r['output']['pools'][id]['pool_name']
        jsondata = request.form['json']
        jsondata = json.loads(jsondata)
        snap = jsondata['snapshot_name']
        r = requests.put(cephRestApiUrl+'osd/pool/mksnap?pool='+str(poolname)+'&snap='+str(snap))
        if r.status_code != 200:
            raise InkscopeError(r.status_code, r.content)
        return r.content
# @app.route('/pools/<int:id>/snapshot/<namesnapshot>', methods=['DELETE'])
def removesnapshot(self,id, namesnapshot):
cephRestApiUrl = self.getCephRestApiUrl();
data = requests.get(cephRestApiUrl+'osd/dump.json')
#r = data.json()
r = data.content
r = json.loads(r)
ind = self.getindice(id,data)
id = ind
poolname = r['output']['pools'][id]['pool_name']
r = requests.put(cephRestApiUrl+'osd/pool/rmsnap?pool='+str(poolname)+'&snap='+str(namesnapshot))
if r.status_code != 200:
raise InkscopeError(r.status_code, r.content)
return r.content | {
"repo_name": "inkscope/inkscope",
"path": "inkscopeCtrl/poolsCtrl.py",
"copies": "1",
"size": "15558",
"license": "apache-2.0",
"hash": -9137341098622071000,
"line_mean": 41.0513513514,
"line_max": 154,
"alpha_frac": 0.5357372413,
"autogenerated": false,
"ratio": 3.8471810089020773,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48829182502020774,
"avg_score": null,
"num_lines": null
} |
# Alpha O. Sall
# 07/2014
from flask import Flask, request, Response
import json
import requests
from array import *
import salt.client
local = salt.client.LocalClient()
def getCephRestApiUrl(request):
    """Derive the ceph-rest-api root URL from the incoming request's URL root."""
    return request.url_root.replace("inkscopeCtrl", "ceph-rest-api")
class Pools:
    """Holder for pool attributes parsed from the UI's JSON creation form."""

    def __init__(self):
        pass

    def newpool_attribute(self, jsonform):
        """Populate this object from the JSON form string *jsonform*."""
        form = json.loads(jsonform)
        self.name = form['pool_name']
        self.pg_num = form['pg_num']
        self.pgp_num = form['pg_placement_num']
        self.size = form['size']
        self.min_size = form['min_size']
        self.crash_replay_interval = form['crash_replay_interval']
        self.crush_ruleset = form['crush_ruleset']
        self.quota_max_objects = form['quota_max_objects']
        self.quota_max_bytes = form['quota_max_bytes']
def getpoolname(ind, jsondata):
    """Return the pool_name at index *ind* of an osd dump response, as str."""
    pools = jsondata.json()['output']['pools']
    return str(pools[ind]['pool_name'])
def create_pool(poolname, pg_num, pgp_num,minion):
    """Create a pool via `ceph osd pool create` run through salt on *minion*.
    The return value is a placeholder string, not the actual job result."""
    cmd = local.cmd(minion,'cmd.run',['ceph osd pool create %s %d %d' % (poolname, pg_num, pgp_num)])
    return 'salt-run jobs.lookup_jid last_job_id'
def set_pool_values(poolname, minion='v-intceph06', **kwargs):
    """Set each var=value pair in *kwargs* on pool *poolname* via salt cmd.run.

    *minion* defaults to the previously hard-coded target so existing
    callers keep the same behaviour; pass it explicitly to target another
    host (sibling helpers create_pool/del_pool already take it).
    The return value is a placeholder string, not the actual job result.
    """
    for key in kwargs:
        cmd = local.cmd(minion,'cmd.run',['ceph osd pool set %s %s %d' % (poolname, key, kwargs[key])])
    return 'salt-run jobs.lookup_jid last_job_id'
def set_pool_quotas(poolname, minion='v-intceph06', **kwargs):
    """Set quota fields (max_objects/max_bytes) on *poolname* via salt cmd.run.

    *minion* defaults to the previously hard-coded target so existing
    callers keep the same behaviour; pass it explicitly to target another
    host (sibling helpers create_pool/del_pool already take it).
    The return value is a placeholder string, not the actual job result.
    """
    for key, value in kwargs.iteritems():
        cmd = local.cmd(minion,'cmd.run',['ceph osd pool set-quota %s %s %d' % (poolname, key, value)])
    return 'salt-run last job id'
def del_pool(poolname, minion):
    """Delete *poolname* on *minion* (ceph requires the name twice plus the
    --yes-i-really-really-mean-it safety flag)."""
    cmd = local.cmd(minion,'cmd.run',['ceph osd pool delete %s %s --yes-i-really-really-mean-it' % (poolname, poolname)])
    #jobs = local.cmd(minion,'cmd.run',['salt-run jobs.list_jobs'])
    return 'salt-run jobs.lookup_jid last_job_id'
# @app.route('/poolsalt/', methods=['GET','POST'])
# @app.route('/poolsalt/<int:id>', methods=['GET','DELETE','PUT'])
def pool_manage_salt(id, minion):
    """Pool management executed through salt 'cmd.run' on *minion*.

    GET without id -> pool list; GET with id -> one pool's dump entry;
    POST -> create a pool and apply its parameters/quotas;
    DELETE -> delete pool *id*; anything else (PUT) -> modify a pool.
    NOTE(review): the PUT branch is copied from the non-salt controller and
    references names not defined in this module (cephRestApiUrl, getindice,
    Pools.savedpool_attribute) -- it raises NameError if ever reached.
    """
    if request.method == 'GET':
        if id == None:
            my_cmd = local.cmd(minion,'cmd.run',['ceph osd lspools --format=json'])
            list_pools = json.loads(my_cmd[minion])
            skeleton = {'status':'OK','output':[]}
            for i in list_pools:
                skeleton['output'].append(i)
            result = json.dumps(skeleton)
            return Response(result, mimetype='application/json')
        else:
            cmd = local.cmd(minion,'cmd.run',['ceph osd dump --format=json'])
            result = json.loads(cmd[minion])
            all_pools = result['pools']
            skeleton = {'status':'OK','output':''}
            for i in range(len(all_pools)):
                if id == all_pools[i]['pool']:
                    indice = i
                    break
            try:
                # UnboundLocalError on `indice` means the id was not found
                skeleton = {'status':'OK','output':all_pools[indice]}
                the_pool = json.dumps(skeleton)
                return Response(the_pool, mimetype='application/json')
            except UnboundLocalError:
                return 'Pool id not found'
    elif request.method =='POST':
        jsonform = request.form['json']
        newpool = Pools()
        newpool.newpool_attribute(jsonform)
        a=create_pool(newpool.name, newpool.pg_num, newpool.pgp_num, minion)
        kwargs = {'size':newpool.size, 'min_size':newpool.min_size, 'crash_replay_interval':newpool.crash_replay_interval,'crush_ruleset':newpool.crush_ruleset}
        b=set_pool_values(newpool.name, **kwargs)
        quotas = {'max_objects':newpool.quota_max_objects, 'max_bytes': newpool.quota_max_bytes}
        c=set_pool_quotas(newpool.name, **quotas)
        return c
    elif request.method == 'DELETE':
        cmd = local.cmd(minion,'cmd.run',['ceph osd lspools --format=json'])
        result = json.loads(cmd[minion])
        for i in range(len(result)):
            if id == result[i]['poolnum']:
                poolname = str(result[i]['poolname'])
        e=del_pool(str(poolname), minion)
        return e
    else:
        # jsonform = request.form['json']
        # newpool = Pools()
        # newpool.newpool_attribute(jsonform)
        #
        # a=rename_pool(newpool.name, newpool.pg_num, newpool.pgp_num, minion)
        # kwargs = {'size':newpool.size, 'min_size':newpool.min_size, 'crash_replay_interval':newpool.crash_replay_interval,'crush_ruleset':newpool.crush_ruleset}
        # b=set_pool_values(newpool.name, **kwargs)
        # quotas = {'max_objects':newpool.quota_max_objects, 'max_bytes': newpool.quota_max_bytes}
        # c=set_pool_quotas(newpool.name, **quotas)
        # return c
        data = requests.get(cephRestApiUrl+'osd/dump.json')
        if data.status_code != 200:
            return 'Error '+str(data.status_code)+' on the request getting pools'
        else:
            #r = data.json()
            r = data.content
            r = json.loads(r)
            ind = getindice(id, data)
            savedpool = Pools()
            savedpool.savedpool_attribute(ind, data)
            # rename the poolname
            if str(newpool.name) != str(savedpool.name):
                r = requests.put(cephRestApiUrl+'osd/pool/rename?srcpool='+str(savedpool.name)+'&destpool='+str(newpool.name))
            # set pool parameter
            var_name= ['size', 'min_size', 'crash_replay_interval','pg_num','pgp_num','crush_ruleset']
            param_to_set_list = [newpool.size, newpool.min_size, newpool.crash_replay_interval, newpool.pg_num, newpool.pgp_num, newpool.crush_ruleset]
            default_param_list = [savedpool.size, savedpool.min_size, savedpool.crash_replay_interval, savedpool.pg_num, savedpool.pgp_num, savedpool.crush_ruleset]
            for i in range(len(default_param_list)):
                if param_to_set_list[i] != default_param_list[i]:
                    r = requests.put(cephRestApiUrl+'osd/pool/set?pool='+str(newpool.name)+'&var='+var_name[i]+'&val='+str(param_to_set_list[i]))
                else:
                    pass
            # set object or byte limit on pool
            field_name = ['max_objects','max_bytes']
            param_to_set = [newpool.quota_max_objects, newpool.quota_max_bytes]
            default_param = [savedpool.quota_max_objects, savedpool.quota_max_bytes]
            for i in range(len(default_param)):
                if param_to_set[i] != default_param[i]:
                    r = requests.put(cephRestApiUrl+'osd/pool/set-quota?pool='+str(newpool.name)+'&field='+field_name[i]+'&val='+str(param_to_set[i]))
                else:
                    pass
            return str(r.status_code)
def makesnapshot_salt(id, minion):
    """Create the snapshot named in the posted form on pool *id*.

    NOTE(review): despite its name this goes through the Ceph REST API, not
    salt, and calls getindice which is not defined in this module (NameError
    at runtime) -- confirm against the non-salt controller.
    """
    cephRestApiUrl = getCephRestApiUrl(request);
    data = requests.get(cephRestApiUrl+'osd/dump.json')
    #r = data.json()
    r = data.content
    r = json.loads(r)
    ind = getindice(id,data)
    id = ind
    poolname = r['output']['pools'][id]['pool_name']
    jsondata = request.form['json']
    jsondata = json.loads(jsondata)
    snap = jsondata['snapshot_name']
    r = requests.put(cephRestApiUrl+'osd/pool/mksnap?pool='+str(poolname)+'&snap='+str(snap))
    return str(r.status_code)
def removesnapshot_salt(id, namesnapshot, minion):
cephRestApiUrl = getCephRestApiUrl(request);
data = requests.get(cephRestApiUrl+'osd/dump.json')
#r = data.json()
r = data.content
r = json.loads(r)
ind = getindice(id,data)
id = ind
poolname = r['output']['pools'][id]['pool_name']
try:
r = requests.put(cephRestApiUrl+'osd/pool/rmsnap?pool='+str(poolname)+'&snap='+str(namesnapshot))
except HTTPException, e:
return e
else:
return r.content | {
"repo_name": "abrefort/inkscope-debian",
"path": "inkscopeCtrl/poolsCtrlSalt.py",
"copies": "1",
"size": "7938",
"license": "apache-2.0",
"hash": 8368572895368493000,
"line_mean": 38.301980198,
"line_max": 164,
"alpha_frac": 0.5981355505,
"autogenerated": false,
"ratio": 3.3423157894736844,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9400879493815137,
"avg_score": 0.007914369231709349,
"num_lines": 202
} |
# Alpha O. Sall
# Alain Dechorgnat
# 03/24/2014
# 2015-12 A. Dechorgnat: add login security (inspired from http://thecircuitnerd.com/flask-login-tokens/)
from flask import Flask, Response, redirect
from flask_login import (LoginManager, login_required, login_user,
current_user, logout_user, UserMixin)
from itsdangerous import URLSafeTimedSerializer
from datetime import timedelta
from hashlib import md5
from bson.json_util import dumps
from InkscopeError import InkscopeError
app = Flask(__name__)
app.secret_key = "Mon Nov 30 17:20:29 2015"
app.config["REMEMBER_COOKIE_DURATION"] = timedelta(days=14)
#Login_serializer used to encryt and decrypt the cookie token for the remember
#me option of flask-login
login_serializer = URLSafeTimedSerializer(app.secret_key)
login_manager = LoginManager()
login_manager.init_app(app)
from subprocess import CalledProcessError
import mongoJuiceCore
from poolsCtrl import PoolsCtrl,Pools
import osdsCtrl
from rbdCtrl import RbdCtrl
from StringIO import StringIO
#import probesCtrl
from S3Ctrl import S3Ctrl, S3Error
from S3ObjectCtrl import *
import ceph_version
def hash_pass(password):
    """Salt *password* with the app secret key and return its md5 hex digest."""
    return md5(password + app.secret_key).hexdigest()
# Load configuration from file
configfile = "/opt/inkscope/etc/inkscope.conf"
datasource = open(configfile, "r")
conf = json.load(datasource)
datasource.close()
# displayed version: last recorded git commit if present, else a fixed fallback
if conf['inkscope_last_commit'] is None or conf['inkscope_last_commit'] == "":
    version = "1.5.0"
else:
    version = conf['inkscope_last_commit']
# control inkscope users collection in mongo: seed the two default accounts
# (admin/admin and a passwordless guest) the first time the app starts
db = mongoJuiceCore.getClient(conf, 'inkscope')
if db.inkscope_users.count() == 0:
    print "list of users is empty: populating with default users"
    user = {"name":"admin",
            "password": hash_pass("admin"),
            "roles":["admin"]}
    db.inkscope_users.insert(user)
    user = {"name":"guest",
            "password": hash_pass(""),
            "roles":["supervizor"]}
    db.inkscope_users.insert(user)
#
# Security
#
class User(UserMixin):
    """Flask-Login user backed by the mongo ``inkscope_users`` collection."""

    def __init__(self, name, password, roles):
        self.id = name            # user name doubles as the Flask-Login id
        self.password = password  # salted md5 hex digest (see hash_pass)
        self.roles = roles        # list of role names, e.g. ["admin"]

    @staticmethod
    def get(userid):
        """
        Static method to search the database and see if userid exists. If it
        does exist then return a User Object. If not then return None as
        required by Flask-Login.
        """
        u = db.inkscope_users.find_one({"name":userid})
        if u:
            return User(u['name'], u['password'], u['roles'])
        return None

    def get_auth_token(self):
        """
        Encode a secure token for cookie: [username, password hash] signed
        with the app secret via login_serializer.
        """
        data = [str(self.id), self.password]
        return login_serializer.dumps(data)
@login_manager.user_loader
def load_user(userid):
    """
    Flask-Login user_loader callback.
    The user_loader function asks this function to get a User Object or return
    None based on the userid.
    The userid was stored in the session environment by Flask-Login.
    user_loader stores the returned User object in current_user during every
    flask request.
    """
    return User.get(userid)

@login_manager.token_loader
def load_token(token):
    """
    Flask-Login token_loader callback.
    The token_loader function asks this function to take the token that was
    stored on the user's computer, process it to check if it's valid and then
    return a User Object if it's valid or None if it's not valid.
    """
    # The token itself was generated by User.get_auth_token, so it is up to
    # us to know the format of the token data itself.
    # The token was encrypted using itsdangerous.URLSafeTimedSerializer which
    # allows us to have a max_age on the token itself. When the cookie is stored
    # on the user's computer it also has an expiry date, but that could be changed
    # by the user, so this feature allows us to enforce the expiry date of the
    # token server side and not rely on the user's cookie to expire.
    max_age = app.config["REMEMBER_COOKIE_DURATION"].total_seconds()
    # Decrypt the security token, data = [username, hashpass]
    # NOTE(review): loads() raises itsdangerous.BadSignature/SignatureExpired on
    # a tampered or expired token and nothing here catches it -- confirm
    # Flask-Login handles that upstream.
    data = login_serializer.loads(token, max_age=max_age)
    # Find the user
    user = User.get(data[0])
    # Check password hash and return user or None
    if user and data[1] == user.password:
        return user
    return None
@app.route("/login/", methods=["GET", "POST"])
def login_page():
    """
    Web Page to Display Login Form and process form.
    """
    if request.method == "POST":
        user = User.get(request.form['name'])
        # If we found a user based on username then compare that the submitted
        # password matches the password in the database. The password is stored
        # in a salted hash format, so you must hash the password before comparing
        # it.
        if user and hash_pass(request.form['password']) == user.password:
            login_user(user, remember=True)
            return redirect(request.args.get("next") or "/inkscopeViz/index.html")
        else:
            return redirect('/inkscopeViz/login.html?result=failed')
    # plain GET: just send the browser to the login form
    return redirect("/inkscopeViz/login.html", code=302)

@app.route('/logout')
def logout():
    # drop the session / remember-me cookie and go back to the login form
    logout_user()
    return redirect("/inkscopeViz/login.html", code=302)
#
# global management
#
@app.route('/conf.json', methods=['GET'])
@login_required # called by every page, so force to be identified
def conf_manage():
    """Return the inkscope configuration as JSON.

    Admins get the full (mutated, module-level) ``conf`` dict enriched with
    runtime info; other roles only get a restricted subset.
    """
    # force platform field to invite admin to give a name to this instance
    conf['platform'] = conf.get('platform')
    if conf['platform'] is None or conf['platform'] == "":
        conf['platform'] = "fill 'platform' field in inkscope.conf"
    if 'admin' in current_user.roles:
        conf['version'] = version
        conf['ceph_version'] = ceph_version.version
        conf['ceph_version_name'] = ceph_version.name
        conf['roles'] = current_user.roles
        conf['username'] = current_user.id
        return Response(json.dumps(conf), mimetype='application/json')
    else:
        # restricted view for non-admin users.
        # dict.get never raises, so the bare try/except:pass that used to
        # wrap the influxdb_endpoint lookup was dead weight and is removed.
        conflite = {}
        conflite['version'] = version
        conflite['ceph_version'] = ceph_version.version
        conflite['ceph_version_name'] = ceph_version.name
        conflite['roles'] = current_user.roles
        conflite['platform'] = conf.get('platform')
        conflite['cluster'] = conf.get('cluster')
        conflite['username'] = current_user.id
        conflite['influxdb_endpoint'] = conf.get('influxdb_endpoint')
        return Response(json.dumps(conflite), mimetype='application/json')
#
# inkscope users management
#
@app.route('/inkscope_user/', methods=['GET'])
def inkscope_user_list():
    # Return all inkscope user documents as JSON.
    # NOTE(review): unlike the other user endpoints this one has no
    # @login_required, so the full user list (including password hashes)
    # is reachable without authentication -- confirm this is intentional.
    return Response(dumps(db.inkscope_users.find()))
@app.route('/inkscope_user/<id>', methods=['GET', 'POST', 'PUT', 'DELETE'])
@login_required
def inkscope_user_manage(id):
    """CRUD on a single inkscope user document (keyed by ``name``).

    GET returns the user, POST creates it, PUT replaces it, DELETE removes it.
    Everything except GET requires the ``admin`` role.
    """
    if request.method == 'GET':
        # user info
        return Response(dumps(db.inkscope_users.find_one({"name":id})))
    elif request.method == 'POST':
        # user creation
        if 'admin' not in current_user.roles:
            return Response("Not enough permissions to do this", status=403)
        if db.inkscope_users.find_one({"name":id}):
            return Response("This user already exists", status=403)
        user = json.loads(request.data)
        user['password'] = hash_pass(user['password'])
        db.inkscope_users.insert(user)
        return Response('ok', status=201)
    elif request.method == 'PUT':
        # user modification; the password is only replaced when the client
        # sends a 'newpassword' field.
        if 'admin' not in current_user.roles:
            return Response("Not enough permissions to do this", status=403)
        user = json.loads(request.data)
        if 'newpassword' in user:
            user['password'] = hash_pass(user['newpassword'])
            del user['newpassword']
        # Previous code did an unconditional `del user['_id']` (KeyError when
        # the payload carries no _id) and printed the document -- password hash
        # included -- to stdout; both defects removed.
        user.pop('_id', None)
        db.inkscope_users.replace_one({"name":id}, user)
        return Response('ok')
    elif request.method == 'DELETE':
        # user deletion (a user may not delete himself)
        if 'admin' not in current_user.roles:
            return Response("Not enough permissions to do this", status=403)
        if current_user.id == id:
            return Response("You can't delete yourself", status=403)
        else:
            db.inkscope_users.remove({"name":id})
            return Response('ok')
@app.route('/inkscope_user_role/', methods=['GET'])
def inkscope_user_role_list():
    # static list of roles known to the UI
    roles = ["admin","admin_rgw","admin_rbd","admin_pool","supervizor"]
    return Response(dumps(roles), mimetype='application/json')

#
# mongoDB query facility
#
@app.route('/<db>/<collection>', methods=['GET', 'POST'])
def find(db, collection):
    # generic mongo passthrough; the ``db`` URL parameter shadows the
    # module-level mongo handle inside this function only
    return mongoJuiceCore.find(conf, db, collection)

@app.route('/<db>', methods=['POST'])
def full(db):
    # passthrough for whole-database queries
    return mongoJuiceCore.full(conf, db)
#
# Pools management
#
@app.route('/poolList/', methods=['GET'])
def pool_list():
    """Return the list of pools as JSON."""
    try:
        return Response(PoolsCtrl(conf).pool_list(), mimetype='application/json')
    except InkscopeError as e:
        return Response(e.message, e.status)

@app.route('/pools/', methods=['GET', 'POST'])
@app.route('/pools/<int:id>', methods=['GET', 'DELETE', 'PUT'])
def pool_manage(id=None):
    """CRUD entry point for pools; ``id`` is None for the collection routes."""
    try:
        return PoolsCtrl(conf).pool_manage(id)
    except InkscopeError as e:
        return Response(e.message, e.status)

@app.route('/pools/<int:id>/snapshot', methods=['POST'])
def makesnapshot(id):
    """Create a snapshot of pool ``id``."""
    try:
        return PoolsCtrl(conf).makesnapshot(id)
    except InkscopeError as e:
        return Response(e.message, e.status)

@app.route('/pools/<int:id>/snapshot/<namesnapshot>', methods=['DELETE'])
def removesnapshot(id, namesnapshot):
    """Delete snapshot ``namesnapshot`` from pool ``id``."""
    try:
        return PoolsCtrl(conf).removesnapshot(id, namesnapshot)
    except InkscopeError as e:
        return Response(e.message, e.status)
#
# RBD management
#
#
# Images
#
# NOTE: the old 'except CalledProcessError, e' comma form is Python-2-only
# syntax (removed in Python 3); 'except ... as e' below is equivalent and
# also valid on the Python 2.6+ this file targets.
@app.route('/RBD/images', methods=['GET'])
def getImagesList():
    """List all RBD images."""
    try:
        return Response(RbdCtrl(conf).list_images(), mimetype='application/json')
    except CalledProcessError as e:
        return Response(e.output, status=500)

@app.route('/RBD/images/<string:pool_name>/<string:image_name>', methods=['GET'])
def getImagesInfo(pool_name, image_name):
    """Return details of one RBD image."""
    try:
        return Response(RbdCtrl(conf).image_info(pool_name, image_name), mimetype='application/json')
    except CalledProcessError as e:
        return Response(e.output, status=500)

@app.route('/RBD/images/<string:pool_name>/<string:image_name>', methods=['PUT'])
def createImage(pool_name, image_name):
    """Create an RBD image in the given pool."""
    try:
        return Response(RbdCtrl(conf).create_image(pool_name, image_name), mimetype='application/json')
    except CalledProcessError as e:
        return Response(e.output, status=500)

@app.route('/RBD/images/<string:pool_name>/<string:image_name>/<string:action>', methods=['POST'])
def modifyImage(pool_name, image_name , action):
    """Apply ``action`` to an RBD image."""
    try:
        return Response(RbdCtrl(conf).modify_image(pool_name, image_name, action), mimetype='application/json')
    except CalledProcessError as e:
        return Response(e.output, status=500)

@app.route('/RBD/images/<string:pool_name>/<string:image_name>', methods=['DELETE'])
def deleteImage(pool_name, image_name):
    """Delete an RBD image."""
    try:
        return Response(RbdCtrl(conf).delete_image(pool_name, image_name), mimetype='application/json')
    except CalledProcessError as e:
        return Response(e.output, status=500)

#
# Snapshots
#
@app.route('/RBD/snapshots/<string:pool_name>/<string:image_name>/<string:snap_name>', methods=['GET'])
def infoImageSnapshot(pool_name, image_name,snap_name):
    """Return details of one image snapshot."""
    try:
        return Response(RbdCtrl(conf).info_image_snapshot(pool_name, image_name, snap_name), mimetype='application/json')
    except CalledProcessError as e:
        return Response(e.output, status=500)

@app.route('/RBD/snapshots/<string:pool_name>/<string:image_name>/<string:snap_name>', methods=['PUT'])
def createImageSnapshot(pool_name, image_name,snap_name):
    """Create a snapshot of an RBD image."""
    try:
        return Response(RbdCtrl(conf).create_image_snapshot(pool_name, image_name, snap_name), mimetype='application/json')
    except CalledProcessError as e:
        return Response(e.output, status=500)

@app.route('/RBD/snapshots/<string:pool_name>/<string:image_name>/<string:snap_name>', methods=['DELETE'])
def deleteImageSnapshot(pool_name, image_name,snap_name):
    """Delete a snapshot of an RBD image."""
    try:
        return Response(RbdCtrl(conf).delete_image_snapshot(pool_name, image_name, snap_name), mimetype='application/json')
    except CalledProcessError as e:
        return Response(e.output, status=500)

@app.route('/RBD/snapshots/<string:pool_name>/<string:image_name>/<string:snap_name>/<string:action>', methods=['POST'])
def actionOnImageSnapshot(pool_name, image_name,snap_name, action):
    """Apply ``action`` (e.g. rollback/protect) to an image snapshot."""
    try:
        return Response(RbdCtrl(conf).action_on_image_snapshot(pool_name, image_name, snap_name, action), mimetype='application/json')
    except CalledProcessError as e:
        return Response(e.output, status=500)
#
# Probes management
#
#@app.route('/probes/<string:probe_type>/<string:probe_name>/<string:action>', methods=['POST'])
#def actionOnProbe(probe_type, probe_name, action):
# print "Calling probesCtrl.action_on_probe() method", action
# try:
# return Response(probesCtrl.action_on_probe(probe_type, probe_name, action), mimetype='application/json')
# except CalledProcessError, e:
# return Response(e.output, status=500)
#
#
# Osds management
#
@app.route('/osds', methods=['PUT'])
def osds_manage(id=None):
    # NOTE(review): the route declares no <id> parameter, so ``id`` is always
    # None here -- confirm whether an '/osds/<id>' rule is missing.
    return osdsCtrl.osds_manage(id)
#
# Object storage management
#
# This method return a S3 Object that id is "objId".
# An exception is trhown if the object does not exist or there an issue
# NOTE: the old 'except S3Error , e' comma form is Python-2-only syntax
# (removed in Python 3); 'except ... as e' below is equivalent and also
# valid on the Python 2.6+ this file targets.
@app.route('/S3/object', methods=['GET'])
def getObjectStructure():
    """Return the S3 object structure; S3Error maps to its HTTP code."""
    Log.debug("Calling getObjectStructure() method")
    try:
        return Response(S3ObjectCtrl(conf).getObjectStructure(),mimetype='application/json')
    except S3Error as e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)

# User management
@app.route('/S3/user', methods=['GET'])
def listUser():
    """List radosgw users."""
    try:
        return Response(S3Ctrl(conf).listUsers(),mimetype='application/json')
    except S3Error as e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)

@app.route('/S3/user', methods=['POST'])
def createUser():
    """Create a radosgw user from the request payload."""
    try:
        return Response(S3Ctrl(conf).createUser(),mimetype='application/json')
    except S3Error as e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)

@app.route('/S3/user/<string:uid>', methods=['GET'])
def getUser(uid):
    """Return one radosgw user."""
    try:
        return Response(S3Ctrl(conf).getUser(uid),mimetype='application/json')
    except S3Error as e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)

@app.route('/S3/user/<string:uid>', methods=['PUT'])
def modifyUser(uid):
    """Modify one radosgw user."""
    try:
        return Response(S3Ctrl(conf).modifyUser(uid),mimetype='application/json')
    except S3Error as e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)

@app.route('/S3/user/<string:uid>', methods=['DELETE'])
def removeUser(uid):
    """Delete one radosgw user."""
    try:
        return Response(S3Ctrl(conf).removeUser(uid),mimetype='application/json')
    except S3Error as e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)

@app.route('/S3/user/<string:uid>/key/<string:key>', methods=['DELETE'])
def removeUserKey(uid,key):
    """Remove one access key from a user."""
    try:
        return Response(S3Ctrl(conf).removeUserKey(uid,key),mimetype='application/json')
    except S3Error as e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)

@app.route('/S3/user/<string:uid>/subuser', methods=['PUT'])
def createSubuser(uid):
    """Create a subuser under ``uid``."""
    try:
        return Response(S3Ctrl(conf).createSubuser(uid),mimetype='application/json')
    except S3Error as e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)

@app.route('/S3/user/<string:uid>/subuser/<string:subuser>', methods=['DELETE'])
def deleteSubuser(uid, subuser):
    """Delete a subuser."""
    try:
        return Response(S3Ctrl(conf).deleteSubuser(uid, subuser),mimetype='application/json')
    except S3Error as e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)

@app.route('/S3/user/<string:uid>/subuser/<string:subuser>/key', methods=['PUT'])
def createSubuserKey(uid, subuser):
    """Create an access key for a subuser."""
    Log.debug("createSubuserKey")
    try:
        return Response(S3Ctrl(conf).createSubuserKey(uid, subuser),mimetype='application/json')
    except S3Error as e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)

@app.route('/S3/user/<string:uid>/subuser/<string:subuser>/key', methods=['DELETE'])
def deleteSubuserKey(uid, subuser):
    """Delete a subuser's access key."""
    Log.debug("deleteSubuserKey")
    try:
        return Response(S3Ctrl(conf).deleteSubuserKey(uid, subuser),mimetype='application/json')
    except S3Error as e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)

@app.route('/S3/user/<string:uid>/caps', methods=['PUT', 'POST'])
def saveCapability(uid):
    """Add/update a capability on a user."""
    Log.debug("saveCapability")
    try:
        return Response(S3Ctrl(conf).saveCapability(uid),mimetype='application/json')
    except S3Error as e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)

@app.route('/S3/user/<string:uid>/caps', methods=['DELETE'])
def deleteCapability(uid):
    """Remove a capability from a user."""
    Log.debug("deleteCapability")
    try:
        return Response(S3Ctrl(conf).deleteCapability(uid),mimetype='application/json')
    except S3Error as e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)
# bucket management
# NOTE: Python-2-only 'except S3Error , e' comma syntax normalized to
# 'except ... as e' (valid from Python 2.6 on, required by Python 3).
@app.route('/S3/user/<string:uid>/buckets', methods=['GET'])
def getUserBuckets(uid,bucket=None):
    """List the buckets owned by ``uid`` (``bucket`` kept for interface compat; unused)."""
    try:
        return Response(S3Ctrl(conf).getUserBuckets(uid),mimetype='application/json')
    except S3Error as e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)

@app.route('/S3/bucket', methods=['PUT'])
def createBucket():
    """Create a bucket from the request payload."""
    try:
        return Response(S3Ctrl(conf).createBucket(), mimetype='application/json')
    except S3Error as e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)

@app.route('/S3/bucket', methods=['GET'])
def getBuckets():
    """List all buckets (getBucketInfo with no name)."""
    try:
        return Response(S3Ctrl(conf).getBucketInfo(None), mimetype='application/json')
    except S3Error as e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)

@app.route('/S3/bucket/<string:bucket>', methods=['GET'])
def getBucketInfo(bucket=None):
    """Return details of one bucket."""
    try:
        return Response(S3Ctrl(conf).getBucketInfo(bucket), mimetype='application/json')
    except S3Error as e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)

@app.route('/S3/bucket/<string:bucket>', methods=['DELETE'])
def deleteBucket(bucket):
    """Delete a bucket."""
    try:
        return Response(S3Ctrl(conf).deleteBucket(bucket), mimetype='application/json')
    except S3Error as e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)

@app.route('/S3/bucket/<string:bucket>/acl', methods=['GET'])
def getBucketACL(bucket):
    """Return the ACL of a bucket."""
    try:
        return Response(S3Ctrl(conf).getBucketACL(bucket), mimetype='application/json')
    except S3Error as e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)

@app.route('/S3/bucket/<string:bucket>/<string:user>/acl', methods=['GET'])
def getUserACL(user, bucket):
    """Return one user's access on a bucket."""
    try:
        return Response(S3Ctrl(conf).getUserAccess(bucket, user), mimetype='application/json')
    except S3Error as e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)

@app.route('/S3/bucket/<string:bucket>/<string:user>/acl', methods=['PUT'])
def modifyACL(user, bucket):
    """Grant ``user`` access on ``bucket``."""
    try:
        return Response(S3Ctrl(conf).grantAccess(user, bucket), mimetype='application/json')
    except InkscopeError as e:
        # BUGFIX: arguments were swapped (Response(e.status, e.message)),
        # sending the status code as the body and the message as the HTTP
        # status; every other handler uses Response(message, status).
        return Response(e.message, e.status)

@app.route('/S3/bucket/<string:bucket>/<string:user>/noacl', methods=['PUT'])
def revokeAccess(user, bucket):
    """Revoke ``user``'s access on ``bucket``."""
    try:
        return Response(S3Ctrl(conf).revokeAccess(user, bucket), mimetype='application/json')
    except S3Error as e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)

@app.route('/S3/bucket/<string:bucket>/link', methods=['DELETE','PUT'])
def linkBucket(bucket):
    """Link (PUT) or unlink (DELETE) a bucket to/from the form's ``uid``."""
    try:
        uid = request.form['uid']
        if request.method =='PUT':
            return Response(S3Ctrl(conf).linkBucket(uid, bucket), mimetype='application/json')
        else:
            return Response(S3Ctrl(conf).unlinkBucket(uid, bucket), mimetype='application/json')
    except S3Error as e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)

@app.route('/S3/bucket/<string:bucketName>/list', methods=['GET'])
def listBucket(bucketName):
    """List the contents of a bucket."""
    try:
        return Response(S3Ctrl(conf).listBucket(bucketName), mimetype='application/json')
    except S3Error as e:
        Log.err(e.__str__())
        return Response(e.reason, status=e.code)
| {
"repo_name": "inkscope/inkscope",
"path": "inkscopeCtrl/inkscopeCtrlcore.py",
"copies": "1",
"size": "21791",
"license": "apache-2.0",
"hash": -5159411330744859000,
"line_mean": 32.8895800933,
"line_max": 134,
"alpha_frac": 0.664127392,
"autogenerated": false,
"ratio": 3.5129775914879895,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46771049834879896,
"avg_score": null,
"num_lines": null
} |
"""Alpha probability distribution."""
import numpy
from scipy import special
import chaospy
from ..baseclass import SimpleDistribution, ShiftScaleDistribution
class alpha(SimpleDistribution):
    """Standard Alpha distribution with shape parameter ``a``."""

    def __init__(self, a=1):
        super(alpha, self).__init__(dict(a=a))

    def _cdf(self, x, a):
        # F(x) = Phi(a - 1/x) / Phi(a), with Phi the standard normal CDF
        return special.ndtr(a-1./x)/special.ndtr(a)

    def _ppf(self, q, a):
        quantile = 1.0/(a-special.ndtri(q*special.ndtr(a)))
        # the inverse formula diverges as q -> 1; clamp to the upper bound there
        return numpy.where(q == 1, self._upper(a), quantile)

    def _pdf(self, x, a):
        density = (numpy.e**(-.5*(a-1./x)**2)
                   / (numpy.sqrt(2*numpy.pi)*x**2*special.ndtr(a)))
        # the density formula divides by x; define it as 0 at x == 0
        return numpy.where(x == 0, 0, density)

    def _lower(self, a):
        return 0.

    def _upper(self, a):
        # quantile at 1 - 1e-10 used as a finite stand-in for the infinite support
        return 1./(a-special.ndtri((1-1e-10)*special.ndtr(a)))
class Alpha(ShiftScaleDistribution):
    """
    Alpha distribution.
    Args:
        shape (float, Distribution):
            Shape parameter
        scale (float, Distribution):
            Scale Parameter
        shift (float, Distribution):
            Location of lower threshold
    Examples:
        >>> distribution = chaospy.Alpha(6)
        >>> distribution
        Alpha(6)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([ 0.   ,  0.146,  0.16 ,  0.174,  0.194, 63.709])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([ 0.   , 13.104, 15.108, 12.759,  7.449,  0.   ])
        >>> distribution.sample(4).round(3)
        array([0.178, 0.139, 0.23 , 0.165])
    """
    def __init__(self, shape=1, scale=1, shift=0):
        # delegate the math to the standard-form ``alpha`` distribution and let
        # ShiftScaleDistribution apply the affine map x -> x*scale + shift
        super(Alpha, self).__init__(
            dist=alpha(shape),
            scale=scale,
            shift=shift,
            repr_args=[shape],
        )
| {
"repo_name": "jonathf/chaospy",
"path": "chaospy/distributions/collection/alpha.py",
"copies": "1",
"size": "1953",
"license": "mit",
"hash": -3789500369627800000,
"line_mean": 26.125,
"line_max": 66,
"alpha_frac": 0.5294418843,
"autogenerated": false,
"ratio": 3.344178082191781,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4373619966491781,
"avg_score": null,
"num_lines": null
} |
# Alpha release.
import re
# Ask user for string to be transliterated to Cyrillic, including soft and hard signs.
latin_string = input("What is the Latin text you want transliterated to Cyrillic? Include hard and soft signs by using apostrophes and quotation marks respectively.\n> ")
latin_string = latin_string.lower() # If the Latin string has any capitals, convert them to lowercase
# Mechanism to search the string and replace letters with Cyrillic equivalents, e.g. d to д.
# For unclear situations (e.g. deciding between и and й), it will either make an educated
# guess based on the context or ask the user what Cyrillic letter was actually meant.
if re.search("j", latin_string):
    uses_j = True # Sign that i = и only
else:
    uses_j = False # Sign that i probably also = й
cyrillic_string = latin_string
# Latin -> Cyrillic single-letter substitutions, applied in order via re.sub.
substitutions = [
    ("a", "а"),
    ("b", "б"),
    ("c", "ц"),
    ("č", "ч"),
    ("d", "д"),
    # NOTE(review): the find-key below appears to be CYRILLIC "е" (U+0435)
    # rather than Latin "e" -- if so, Latin "e" is never transliterated and
    # the е-based corrections further down can never match. Verify and fix.
    ("е", "е"),
    ("f", "ф"),
    ("g", "г"),
    ("h", "х"),
    ("i", "и"),
    ("k", "к"),
    ("l", "л"),
    ("m", "м"),
    ("n", "н"),
    ("o", "о"),
    ("p", "п"),
    ("q", "я"),
    ("r", "р"),
    ("s", "с"),
    ("š", "ш"),
    ("t", "т"),
    ("u", "у"),
    ("v", "в"),
    ("w", "в"),
    ("x", "х"),
    ("y", "ы"),
    ("z", "з"),
    ("ž", "ж"),
    ("\'", "ь"),
    ("\"", "ъ"),
    ]
for find, replace in substitutions:
    cyrillic_string = re.sub(find, replace, cyrillic_string)
# Properly replace J
if uses_j == True:
    cyrillic_string = re.sub("j", "й", cyrillic_string)
else:
    # Ask user in vowel + и combinations which ones are really й.
    pass # Placeholder
# Context-sensitive cleanup rules, applied in order (order matters: several
# later rules rely on the output of earlier ones, e.g. the йе/э chains).
corrections = [
    ("тс", "ц"), # Fix ц.
    ("цк", "тск"), # Fix some common -тс- problems.
    ("цх", "ч"),
    ("кх", "х"),
    ("сх", "ш"),
    ("зх", "ж"),
    ("шч", "щ"), # Шч cannot occur in Russian orthography.
    ("([аеиоуы])ы", "\\1й"), # Rather safe as -ыы never occurs at the end of words.
    ("[ый]у", "ю"),
    ("[ый]а", "я"),
    ("гаян", "гайан"), # E.g. Гайана
    ("(^| )ые", "\\1е"), # Only needs to activate at the beginning of words. -ые is actually quite a common grammatical ending.
    ("ыети", "иети"), # Workaround for эти/йети problem as both would go to ети otherwise.
    ("йети", "иети"),
    ("йе", "е"),
    ("иети", "йети"),
    # Corrects spelling of words like Йемен and Йеллоунайф, and others with йе.
    ("егер", "йегер"), # Name.
    ("ейтелес", "йейтелес"), # Name.
    ("ейтс", "йейтс"), # Name.
    ("ейттелес", "йейттелес"), # Name.
    ("еллоу", "йеллоу"),
    ("емен", "йемен"),
    ("(^| )ен", "\\1йен"), # Alone, as a name.
    ("енни", "йенни"), # Name.
    ("енс", "йенс"), # Name.
    ("еспер", "йеспер"), # Name.
    ("есс", "йесс"), # As part of a name.
    ("[ый]о", "ё"),
    # Corrects spelling of words when э should be used instead of е.
    ("(^| )ето", "\\1это"), # Words like лето mess this up, so it's only changed at the beginning.
    ("(^| )ети", "\\1эти"), # Same thing here.
    ("едмонтон", "эдмонтон"), # Geographical name.
    ("ейнштейн", "эйнштейн"), # Name.
    ("еква", "эква"), # e.g. экватор
    ("експ", "эксп"), # e.g. эксперт
    ("(^| )екст", "\\1экст"), # e.g. экстремизм
    ("екзам", "экзам"), # e.g. экзамен
    ("елаёпласт", "элайопласт"),
    ("електр", "электр"), # e.g. электричество
    ("елемент", "элемент"),
    ("енерг", "энерг"), # E.g. энергия
    ("ентроп", "энтроп"), # E.g. энтропия
    ("епидеми", "эпидеми"), # E.g. эпидемия
    ("ерик", "эрик"), # Name.
    ("аеро", "аэро"), # Prefix.
    ("естони", "эстони"), # E.g. Эстония
    ("ефиопи", "эфиопи"), # E.g. Эфиопия
    ("етаж", "этаж"),
    ("економи", "экономи"), # E.g. экономия
    ("екзо", "экзо"),
    ("ендо", "эндо"),
    ("(^| )ерби", "\\1эрби"), # e.g. эрбий.
    ("еритр", "эритр"), # e.g. Эритрея
    ("етимологи", "этимологи"), # e.g. этимология
    ("едуард", "эдуард"), # Name.
    ("емили", "эмили"), # Name, e.g. Эмлиля
    ("реп", "рэп"),
    # э will never appear after ь/ъ. The correct letter is "e".
    ("([ьъ])э", "\\1e"),
    # Corrects spelling of words like район, майор, and cases with йо.
    ("аёва", "айова"), # Geographical name.
    ("баёнет", "байонет"),
    ("ваёминг", "вайоминг"), # Geographical name.
    ("ваёл", "вайол"), # E.g. вайола
    ("заён", "зайон"),
    ("коёт", "койот"),
    ("маёнез", "майонез"),
    ("маёр", "майор"),
    ("маётта", "майотта"), # Geographical name.
    ("огаё", "огайо"), # Geographical name.
    ("ораён", "орайон"),
    ("паёл", "пайол"),
    ("раён", "район"),
    ("тоёта", "тойота"),
    # More instances of -йо- in a word here
    ("(^| )ёг", "\\1йог"), # At the beginning of words, e.g. йогa, йогурт. The only exceptions are minor geographical names.
    ("(^| )ёд", "\\1йод"), # As a word, not as a part of a word. мёд is an actual word, for example.
    ("ёжеф", "йожеф"), # Name.
    ("ёзеф", "йозеф"), # Name.
    ("ёрг", "йорг"), # Part of some common names. Some words start with ёрг, but they are all minor geographical names.
    ("ёрдан", "йордан"), # Name.
    ("(^| )ёрк", "\\1йорк"), # Alone, e.g. Нью Йорк. Used in a lot of words, e.g. шестёрка.
    ("ёсеф", "йосеф"), # Name.
    ("ёсиф", "йосиф"), # Name.
    ("ёта", "йота"), # Name of the Greek letter iota. Also common in words.
    ("ёун", "йоун"), # Name.
    ("ёхан", "йохан"), # Name.
    ("ёшкар", "йошкар"), # Part of a name of a city in Russia, Йошкар-Ола.
    ("ёшуа", "йошуа"), # Name.
    ("ёэ", "йоэ"), # Probably part of a name, since ёэ can never occur.
    # NOTE(review): the character class below appears to mix Cyrillic "е"
    # with what looks like a LATIN "o" -- if so, the -ого ending is never
    # corrected to -го pronunciation spelling. Verify the codepoints.
    ("([еo])во( |$)", "\\1го\\2"), # Only needs to activate at the end of words.
    ]
for find, replace in corrections:
    cyrillic_string = re.sub(find, replace, cyrillic_string)
# TODO: Check if any i/j conflicts need to be resolved
# I know that a common one is moi, which can go either way if there are no j's...
print("\"{}\" would be transliterated as:\n{}".format(latin_string, cyrillic_string))
| {
"repo_name": "TheHockeyist/russian-untransliterator",
"path": "code.py",
"copies": "1",
"size": "6920",
"license": "mit",
"hash": -6374143347282309000,
"line_mean": 34.8658536585,
"line_max": 264,
"alpha_frac": 0.5414824889,
"autogenerated": false,
"ratio": 2.171280915466962,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3212763404366962,
"avg_score": null,
"num_lines": null
} |
"""alpha URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import RedirectView
import webapp.urls
urlpatterns = [
    url(r'^$', RedirectView.as_view(url='webapp/')),   # site root redirects to the webapp
    url(r'^admin/', admin.site.urls),
    url(r'^webapp/', include(webapp.urls)),
    url(r'^i18n/', include('django.conf.urls.i18n')),  # language-switching views
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)  # serve media files in dev
if settings.DEBUG: # pragma: nocover
    # expose django-debug-toolbar only when DEBUG is on
    import debug_toolbar
    urlpatterns = [
        url(r'^__debug__/', include(debug_toolbar.urls)),
    ] + urlpatterns
| {
"repo_name": "abertal/alpha",
"path": "alpha/urls.py",
"copies": "2",
"size": "1290",
"license": "bsd-3-clause",
"hash": -2646009800374124500,
"line_mean": 35.8571428571,
"line_max": 79,
"alpha_frac": 0.6992248062,
"autogenerated": false,
"ratio": 3.505434782608696,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 35
} |
""" ALPHA VERSION
As of currently, this version of the calculator does not address expressions such as: "1 + --1" or "1 + -(-1)" properly.
This will be addressed in the next version of the calculator
"""
import operator

# Operator tables ordered by precedence: index 0 = lowest (+, -),
# index 1 = (*, /, mod), index 2 = highest (^).
# NOTE(review): operator.div exists only on Python 2 (this file also uses
# raw_input/print statements); on Python 3 this would need truediv/floordiv.
SUPPORTED_OPERATORS = (
    {'+': operator.add,
     '-': operator.sub},
    {'/': operator.div,
     '*': operator.mul,
     'mod': operator.mod},
    {'^': operator.pow })
def IsTokenOperator(token):
    """True if *token* is one of the supported operator symbols."""
    return any(token in opset for opset in SUPPORTED_OPERATORS)
def GetTokenOperatorFunc(token):
    """Return the binary callable implementing *token*; raise ValueError if unknown."""
    for opset in SUPPORTED_OPERATORS:
        func = opset.get(token)
        if func is not None:
            return func
    raise ValueError("token not a supported operator")
def ComparePrecedence(operator1, operator2):
    """Compare operator precedence.

    Returns 1 if *operator1* binds tighter than *operator2*, -1 if looser,
    0 when equal (including when neither token is a known operator).
    """
    def _rank(tok):
        # index of the (last) precedence group containing tok, -1 if none
        rank = -1
        for level, opset in enumerate(SUPPORTED_OPERATORS):
            if tok in opset:
                rank = level
        return rank

    rank1 = _rank(operator1)
    rank2 = _rank(operator2)
    if rank1 > rank2:
        return 1
    if rank1 < rank2:
        return -1
    return 0
def _TokenizeMerges(prev, ch):
    # Same merge rules as the original tokenizer: letters/digits glue to
    # letters/digits (numbers, names like "mod"), and '.' glues to digits
    # on either side (decimals). Everything else starts a new token.
    return ((ch.isdigit() or ch.isalpha()) and (prev.isdigit() or prev.isalpha())
            or (ch.isdigit() and prev == '.')
            or (ch == '.' and prev.isdigit()))

def TokenizeExp(exp):
    """Split an expression string into a list of tokens.

    Numbers (including decimals) and alphanumeric words are multi-character
    tokens; any other non-space character (operators, parentheses) is a token
    on its own; whitespace only separates tokens.

    Fixes two defects of the previous version: a single-character expression
    ("5") returned no tokens at all, and leading whitespace was emitted as a
    bogus whitespace token.
    """
    tokens = []
    current = []  # characters of the number/word token being built
    for ch in exp:
        if current and _TokenizeMerges(current[-1], ch):
            current.append(ch)
        else:
            if current:
                tokens.append("".join(current))
            # whitespace never starts a token; any other char does
            current = [] if ch.isspace() else [ch]
    if current:
        tokens.append("".join(current))
    return tokens
def InfixToRPN(exp):
    """uses Dijkstra's "Shunting Yard" Algorithm"""
    # Converts an infix expression string into a reverse-Polish list where
    # numbers are already floats and operators remain token strings.
    outputQueue = []
    operatorStack = []
    tokens = TokenizeExp(exp)
    for token in tokens:
        try:
            # if token is a number
            num = float(token)
            outputQueue.append(num)
        except ValueError:
            # token not a number
            if IsTokenOperator(token):
                # if operator: first pop operators of higher-or-equal precedence.
                # NOTE(review): popping on *equal* precedence treats every
                # operator as left-associative, so 2^3^2 parses as (2^3)^2;
                # '^' is conventionally right-associative -- confirm intended.
                # (A '(' on the stack ranks below every operator, so popping
                # stops there.)
                while (len(operatorStack)
                       and ComparePrecedence(token, operatorStack[-1]) <= 0):
                    operator = operatorStack.pop()  # local; shadows the operator module
                    outputQueue.append(operator)
                operatorStack.append(token)
            elif token == '(':
                operatorStack.append(token)
            elif token == ')':
                try:
                    # unwind operators back to the matching '('
                    while operatorStack[-1] != '(':
                        operator = operatorStack.pop()
                        outputQueue.append(operator)
                    operatorStack.pop()
                except IndexError:
                    raise ValueError("misplaced parentheses in the expression")
            else:
                raise ValueError("expression in a wrong format")
    # flush the remaining operators; a leftover '(' means unbalanced parens
    while len(operatorStack) > 0:
        operator = operatorStack.pop()
        if operator == '(':
            raise ValueError("misplaced parenthesis in the expression")
        outputQueue.append(operator)
    return outputQueue
def ParseRPN(exp):
    """Evaluate a reverse-Polish expression string and return the float result."""
    operandsStack = []
    tokens = TokenizeExp(exp)
    for token in tokens:
        try:
            # if token is a number
            value = float(token)
            operandsStack.append(value)
        except ValueError:
            if IsTokenOperator(token):
                operator = GetTokenOperatorFunc(token)
                if len(operandsStack) > 1:
                    operand2 = operandsStack.pop()
                    operand1 = operandsStack.pop()
                    value = operator(operand1, operand2)
                else:
                    # only one operand left: treat the operator as unary by
                    # supplying 0 as the left operand (e.g. "-5" -> 0 - 5)
                    value = operator(0, operandsStack.pop())
                operandsStack.append(value)
            else:
                raise ValueError("expression in a wrong format")
    return operandsStack.pop()

def ParseInfix(exp):
    """Evaluate an infix expression by converting it to RPN first."""
    rpnOutputQueue = InfixToRPN(exp)
    return ParseRPN(" ".join([str(token) for token in rpnOutputQueue]))
# Simple Python 2 REPL: read an expression, evaluate it, print the result.
while True:
    try:
        exp = raw_input(">>> ")
        ret = ParseInfix(exp)
        print ret
    except ValueError:
        # malformed expression.
        # NOTE(review): only ValueError is handled; division by zero
        # (ZeroDivisionError) or Ctrl-D (EOFError) will crash the loop.
        print "error"
| {
"repo_name": "CptDemocracy/Python",
"path": "Misc/infixCalc.py",
"copies": "1",
"size": "4658",
"license": "mit",
"hash": -5174823535974456000,
"line_mean": 29.4444444444,
"line_max": 120,
"alpha_frac": 0.5010734221,
"autogenerated": false,
"ratio": 4.253881278538813,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5254954700638813,
"avg_score": null,
"num_lines": null
} |
# Alright, this is how we regression test our little fake processor
# We startup a new (blank slate each time)
# Run a couple of commands, then make sure the output makes sense
def over18Bit(number):
    """Raise RuntimeError if *number* does not fit in an unsigned 18-bit word.

    An 18-bit word holds 0 .. 2**18 - 1, so 2**18 itself is already out of
    range.  The original check used ``>`` which let the off-by-one value
    2**18 slip through undetected.
    """
    # NOTE(review): negative values are not rejected here; unclear from this
    # file whether signed values ever reach this check -- confirm.
    if number >= 2**18:
        raise RuntimeError("Failure to maintain word size")
from bitUtils import *
def testConversions():
# Test bit array conversion functions
print "Testing Bit Conversions"
i = 0
for i in xrange(2**18):
assert i == baToInt(intToBA(i, 32)), "wtf!"
i += 1
print "Success"
# Arithmatic operations can be run at any time,
# with any stack state, so lets generate all the possible combinations
import itertools
import FA18A_functions
# Woah this line is hairy. Make all possible arithmatic/logic op combos
# Four-op instruction words are first generated, then filtered so that the
# op in the last slot reports minBitCount() == 3 -- presumably because the
# fourth slot of an F18A word only has 3 usable bits; confirm in
# FA18A_functions.
allArithmaticCombos = itertools.combinations_with_replacement(FA18A_functions.ARLMOpList, 4)
allValidArithmaticCombos = filter(lambda ops: ops[3].minBitCount() == 3, allArithmaticCombos)
# Words of one, two or three ops are added unfiltered.
allValidArithmaticCombos += list(itertools.combinations_with_replacement(FA18A_functions.ARLMOpList, 3))
allValidArithmaticCombos += list(itertools.combinations_with_replacement(FA18A_functions.ARLMOpList, 2))
allValidArithmaticCombos += list(itertools.combinations_with_replacement(FA18A_functions.ARLMOpList, 1))
# import random
# random.shuffle(allArithmaticCombos)
print "Testing All arithmatic combos (%i):" % len(allValidArithmaticCombos)
for n, opList in enumerate(allValidArithmaticCombos[::-1]):
    # For each one, it should serialize and deserizlize back to the correct bitstream
    packedBits = FA18A_functions.packInstructionsToBits(opList)
    newOpList = FA18A_functions.unpackInstructionsFromBits(packedBits)
    if len(opList) == 2:
        # Extra debug output, printed for the two-op case only.
        print "packed to %i bits:" % len(packedBits), packedBits
        print n, [op.code for op in opList]
        print " ".join([str(op) for op in opList])
        print opList
        print newOpList
    assert all( [op1 == op2 for op1, op2 in zip(opList, newOpList)] ), "Op serializtion failure!"
| {
"repo_name": "meawoppl/GA144Tools",
"path": "unitTests.py",
"copies": "1",
"size": "2024",
"license": "mit",
"hash": 6251771849126285000,
"line_mean": 34.5087719298,
"line_max": 104,
"alpha_frac": 0.7341897233,
"autogenerated": false,
"ratio": 3.4074074074074074,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9610529297653756,
"avg_score": 0.006213566610730338,
"num_lines": 57
} |
# als_data_preprocessor.py
#
# Standalone Python/Spark program to perform data pre-processing..
# Reads Ratings data and meta data to combine where necessary
# and encode labels to a form fit for processing.
#
#
# Usage: spark-submit data_preprocessor.py <inputdatafile>
# Example usage: spark-submit data_preprocessor.py ratings.csv
#
#
import sys
import pandas as pd
import numpy as np
import csv
import gzip
from sklearn import preprocessing
from pyspark import SparkContext, SparkConf, SQLContext
# Raise the limit on results collected back to the Spark driver to 8 GB.
conf = (SparkConf().set("spark.driver.maxResultSize", "8g"))
#to read data from gzip files
def parse(path):
    """Yield one record per line from a gzipped file of Python literals.

    Each line is expected to be a Python dict literal (the Amazon
    review-data dump format).
    """
    # FIX: use a context manager so the file handle is closed even when the
    # caller abandons the generator early; the original left it open until GC.
    with gzip.open(path, 'rb') as g:
        for l in g:
            # SECURITY: eval() executes arbitrary code from the file -- only
            # run this on trusted data (ast.literal_eval would be the safe
            # alternative if every line is a plain literal).
            yield eval(l)
#make a dataframe
def getDF(path):
    """Load a gzipped literal-per-line file into a pandas DataFrame.

    Rows are indexed 0..N-1 in file order, columns come from the dict keys.
    """
    records = dict(enumerate(parse(path)))
    return pd.DataFrame.from_dict(records, orient='index')
# Column names of the raw ratings CSV.  Currently only referenced by the
# commented-out pd.read_csv(..., names=names) call inside labelEncoder.
names = [
    'user_id',
    'product_id',
    'rating',
    'timestamp',
]
def labelEncoder(in_csv):
    """Label-encode the categorical id columns of the ratings CSV *in_csv*.

    Expects a header row with ``user_id`` and ``product_id`` columns.
    Both id columns are replaced with sklearn integer label codes and the
    result is written to ``ratings_als.csv`` ('#'-separated, no header).

    NOTE(review): ``encoded_df = input_data`` below aliases rather than
    copies, so the column assignments mutate ``input_data`` in place.
    """
    #using pandas read the csv and append column names from names
    # input_data = pd.read_csv(in_csv, sep=",", names=names)
    input_data = pd.read_csv(in_csv, sep=",")
    #print input_data.head()
    #
    # One encoder per id column so the two code spaces stay independent.
    user_id_en = preprocessing.LabelEncoder()
    product_id_en = preprocessing.LabelEncoder()
    user_id_en.fit(input_data.user_id)
    product_id_en.fit(input_data.product_id)
    encoded_df = input_data
    encoded_df.user_id = user_id_en.transform(input_data.user_id)
    encoded_df.product_id = product_id_en.transform(input_data.product_id)
    #encoded_df.to_csv('encoded_data_w_index_headers.csv', sep='::',index = False)
    encoded_df.to_csv('ratings_als.csv', sep='#', index = False, header=None)
    #return encoded_df
    #return input_data
if __name__ == "__main__":
# if len(sys.argv) !=3:
# print >> sys.stderr, "Usage: data_preprocessor <ratings_file> <metadata_gzip_file>"
# exit(-1)
sc = SparkContext(appName="DataProcessor", conf=conf)
sqlContext = SQLContext(sc)
# ## Use this if the file being read is a JSON that is gzipped.
# metadata_df = getDF(sys.argv[1])
# metadata_df.rename(columns={'asin': 'product_id'}, inplace=True)
# metadata_df.drop('description', axis=1, inplace=True)
# metadata_df.drop('price', axis=1, inplace=True)
# metadata_df.drop('salesRank', axis=1, inplace=True)
# metadata_df.drop('imUrl', axis=1, inplace=True)
# metadata_df.drop('brand', axis=1, inplace=True)
# metadata_df.drop('related', axis=1, inplace=True)
# #metadata_df.to_csv('metadata.csv', sep=',')
# metadata_df.to_csv('temp_metadata.csv', sep=',', index = False)
labelEncoder(sys.argv[1])
#labelEncoder(temp_metadata.csv)
# input_df.drop('timestamp', axis=1, inplace=True)
# input_df.to_csv('input.csv', sep=',', index = False)
sc.stop()
| {
"repo_name": "shreyas15/Product-Recommender-Engine",
"path": "als_data_preprocessor.py",
"copies": "1",
"size": "2900",
"license": "mit",
"hash": -6342272674900172000,
"line_mean": 29.2083333333,
"line_max": 93,
"alpha_frac": 0.6627586207,
"autogenerated": false,
"ratio": 3.2044198895027622,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4367178510202762,
"avg_score": null,
"num_lines": null
} |
"""A L shape attached with a joint and constrained to not tip over.
"""
__version__ = "$Id:$"
__docformat__ = "reStructuredText"
import random, sys
import pygame
from pygame.locals import *
from pygame.color import *
import pymunk as pm
def to_pygame(p):
    """Convert a pymunk point to integer pygame screen coordinates.

    Flips the y axis (pymunk's y grows upward, pygame's grows downward)
    for a 600px-tall window.
    """
    screen_x = int(p.x)
    screen_y = int(-p.y + 600)
    return screen_x, screen_y
def main():
    """Run the slide-and-pinjoint demo: an L shape pinned at its centre and
    held back by a slide joint, with balls dropped onto it until quit.

    Controls: window close / ESC quits, 'p' saves a screenshot.
    """
    pygame.init()
    screen = pygame.display.set_mode((600, 600))
    pygame.display.set_caption("Joints. Just wait and the L will tip over")
    clock = pygame.time.Clock()
    running = True
    ### Physics stuff
    space = pm.Space()
    space.gravity = 0.0, -900.0
    ## Balls
    balls = []
    ### static stuff
    # NOTE(review): pm.Body() with no arguments -- presumably an
    # infinite-mass anchor body in this pymunk version; confirm.
    rot_center_body = pm.Body()
    rot_center_body.position = (300,300)
    ### To hold back the L
    rot_limit_body = pm.Body()
    rot_limit_body.position = (200,300)
    ### The moving L shape (endpoints in body-local coordinates)
    l1 = [(-150, 0), (255.0, 0.0)]
    l2 = [(-150.0, 0), (-150.0, 50.0)]
    body = pm.Body(10,10000)
    body.position = (300,300)
    lines = [pm.Segment(body, l1[0], l1[1], 5.0)
            ,pm.Segment(body, l2[0], l2[1], 5.0)
            ]
    space.add(body)
    space.add(lines)
    ### The L rotates around this
    rot_center = pm.PinJoint(body, rot_center_body, (0,0), (0,0))
    ### And is constrained by this
    joint_limit = 25
    rot_limit = pm.SlideJoint(body, rot_limit_body, (-100,0), (0,0), 0, joint_limit)
    space.add(rot_center, rot_limit)
    # Countdown in frames until the next ball spawns.
    ticks_to_next_ball = 10
    while running:
        for event in pygame.event.get():
            if event.type == QUIT:
                running = False
            elif event.type == KEYDOWN and event.key == K_ESCAPE:
                running = False
            elif event.type == KEYDOWN and event.key == K_p:
                pygame.image.save(screen, "slide_and_pinjoint.png")
        ticks_to_next_ball -= 1
        if ticks_to_next_ball <= 0:
            # Spawn a new ball every 25 frames at a random x near the top.
            ticks_to_next_ball = 25
            mass = 1
            radius = 14
            inertia = pm.moment_for_circle(mass, 0, radius, (0,0))
            body = pm.Body(mass, inertia)
            x = random.randint(120,380)
            body.position = x, 550
            shape = pm.Circle(body, radius, (0,0))
            space.add(body, shape)
            balls.append(shape)
        ### Clear screen
        screen.fill(THECOLORS["white"])
        ### Draw stuff
        # Collect balls that fell below y=150 while drawing the rest.
        balls_to_remove = []
        for ball in balls:
            if ball.body.position.y < 150: balls_to_remove.append(ball)
            p = to_pygame(ball.body.position)
            pygame.draw.circle(screen, THECOLORS["blue"], p, int(ball.radius), 2)
        for ball in balls_to_remove:
            space.remove(ball, ball.body)
            balls.remove(ball)
        for line in lines:
            # Transform segment endpoints from body-local to world
            # coordinates, then to screen coordinates.
            body = line.body
            pv1 = body.position + line.a.rotated(body.angle)
            pv2 = body.position + line.b.rotated(body.angle)
            p1 = to_pygame(pv1)
            p2 = to_pygame(pv2)
            pygame.draw.lines(screen, THECOLORS["lightgray"], False, [p1,p2], 4)
        ### The rotation center of the L shape
        pygame.draw.circle(screen, THECOLORS["red"], to_pygame(rot_center_body.position), 5)
        ### The limits where it can move.
        pygame.draw.circle(screen, THECOLORS["green"], to_pygame(rot_limit_body.position), joint_limit, 2)
        ### Update physics
        # 10 physics substeps per rendered frame at 50 FPS.
        dt = 1.0/50.0/10.0
        for x in range(10):
            space.step(dt)
        ### Flip screen
        pygame.display.flip()
        clock.tick(50)
# Script entry point; propagate main()'s return value as the exit code.
if __name__ == '__main__':
    sys.exit(main())
| {
"repo_name": "sneharavi12/DeepLearningFinals",
"path": "pymunk-pymunk-4.0.0/examples/slide_and_pinjoint.py",
"copies": "5",
"size": "3744",
"license": "mit",
"hash": 1364345080804963600,
"line_mean": 28.7142857143,
"line_max": 106,
"alpha_frac": 0.5360576923,
"autogenerated": false,
"ratio": 3.4285714285714284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6464629120871429,
"avg_score": null,
"num_lines": null
} |
"""A L shape attached with a joint and constrained to not tip over.
This example is also used in the Get Started Tutorial.
"""
__docformat__ = "reStructuredText"
import random
import sys
import pygame
import pymunk
import pymunk.pygame_util
# Fixed seed so the ball spawn positions are reproducible between runs.
random.seed(1)
def add_ball(space):
    """Drop a new 14px-radius ball into *space* at a random x position."""
    ball_mass, ball_radius = 1, 14
    moment = pymunk.moment_for_circle(ball_mass, 0, ball_radius, (0, 0))
    ball_body = pymunk.Body(ball_mass, moment)
    ball_body.position = random.randint(120, 380), 50
    ball_shape = pymunk.Circle(ball_body, ball_radius, (0, 0))
    ball_shape.friction = 1
    space.add(ball_body, ball_shape)
    return ball_shape
def add_L(space):
    """Build the pivoting inverted-L platform plus its two constraint joints.

    Returns the two segment shapes that make up the L.
    """
    pivot_anchor = pymunk.Body(body_type=pymunk.Body.STATIC)
    pivot_anchor.position = (300, 300)
    limit_anchor = pymunk.Body(body_type=pymunk.Body.STATIC)
    limit_anchor.position = (200, 300)

    platform = pymunk.Body(10, 10000)
    platform.position = (300, 300)
    long_arm = pymunk.Segment(platform, (-145, 0), (255.0, 0.0), 1)
    short_arm = pymunk.Segment(platform, (-145, 0), (-145.0, -25.0), 1)
    long_arm.friction = 1
    short_arm.friction = 1

    # The L rotates around the pivot and is held back by the slide joint.
    pin = pymunk.PinJoint(platform, pivot_anchor, (0, 0), (0, 0))
    max_slide = 25
    slide = pymunk.SlideJoint(
        platform, limit_anchor, (-100, 0), (0, 0), 0, max_slide
    )
    space.add(long_arm, short_arm, platform, pin, slide)
    return long_arm, short_arm
def main():
    """Run the demo loop: drop balls onto the constrained L shape.

    Window close / ESC quits; 'p' saves a screenshot.
    """
    pygame.init()
    screen = pygame.display.set_mode((600, 600))
    pygame.display.set_caption("Joints. Just wait and the L will tip over")
    clock = pygame.time.Clock()
    space = pymunk.Space()
    # Positive y gravity: pymunk.pygame_util draws with pygame's
    # downward-growing y axis.
    space.gravity = (0.0, 900.0)
    lines = add_L(space)
    balls = []
    draw_options = pymunk.pygame_util.DrawOptions(screen)
    # Countdown in frames until the next ball spawns.
    ticks_to_next_ball = 10
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit(0)
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                sys.exit(0)
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_p:
                pygame.image.save(screen, "slide_and_pinjoint.png")
        ticks_to_next_ball -= 1
        if ticks_to_next_ball <= 0:
            # Spawn a new ball every 25 frames.
            ticks_to_next_ball = 25
            ball_shape = add_ball(space)
            balls.append(ball_shape)
        # Remove balls that fell past y=450 (screen coordinates, y down).
        balls_to_remove = []
        for ball in balls:
            if ball.body.position.y > 450:
                balls_to_remove.append(ball)
        for ball in balls_to_remove:
            space.remove(ball, ball.body)
            balls.remove(ball)
        space.step(1 / 50.0)
        screen.fill((255, 255, 255))
        space.debug_draw(draw_options)
        pygame.display.flip()
        clock.tick(50)
if __name__ == "__main__":
main()
| {
"repo_name": "viblo/pymunk",
"path": "examples/slide_and_pinjoint.py",
"copies": "1",
"size": "2895",
"license": "mit",
"hash": 2024984011289739000,
"line_mean": 26.5714285714,
"line_max": 87,
"alpha_frac": 0.6013816926,
"autogenerated": false,
"ratio": 3.1297297297297297,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9229719956652549,
"avg_score": 0.0002782931354359926,
"num_lines": 105
} |
"""A L shape attached with a joint and constrained to not tip over.
"""
__version__ = "$Id:$"
__docformat__ = "reStructuredText"
import random
import pygame
from pygame.locals import *
from pygame.color import *
import pymunk as pm
def to_pygame(p):
    """Map a pymunk point onto the 600px pygame window (y axis flipped)."""
    flipped_y = int(-p.y + 600)
    return int(p.x), flipped_y
def main():
    """Run the slide-and-pinjoint demo: an L shape pinned at its centre and
    held back by a slide joint, with balls dropped onto it until quit.

    Controls: window close / ESC quits, 'p' saves a screenshot.
    """
    pygame.init()
    screen = pygame.display.set_mode((600, 600))
    pygame.display.set_caption("Joints. Just wait and the L will tip over")
    clock = pygame.time.Clock()
    running = True
    ### Physics stuff
    space = pm.Space()
    space.gravity = 0.0, -900.0
    ## Balls
    balls = []
    ### static stuff
    # NOTE(review): pm.Body() with no arguments -- presumably an
    # infinite-mass anchor body in this pymunk version; confirm.
    rot_center_body = pm.Body()
    rot_center_body.position = (300,300)
    ### To hold back the L
    rot_limit_body = pm.Body()
    rot_limit_body.position = (200,300)
    ### The moving L shape (endpoints in body-local coordinates)
    l1 = [(-150, 0), (255.0, 0.0)]
    l2 = [(-150.0, 0), (-150.0, 50.0)]
    body = pm.Body(10,10000)
    body.position = (300,300)
    lines = [pm.Segment(body, l1[0], l1[1], 5.0)
            ,pm.Segment(body, l2[0], l2[1], 5.0)
            ]
    space.add(body)
    space.add(lines)
    ### The L rotates around this
    rot_center = pm.PinJoint(body, rot_center_body, (0,0), (0,0))
    ### And is constrained by this
    joint_limit = 25
    rot_limit = pm.SlideJoint(body, rot_limit_body, (-100,0), (0,0), 0, joint_limit)
    space.add(rot_center, rot_limit)
    # Countdown in frames until the next ball spawns.
    ticks_to_next_ball = 10
    while running:
        for event in pygame.event.get():
            if event.type == QUIT:
                running = False
            elif event.type == KEYDOWN and event.key == K_ESCAPE:
                running = False
            elif event.type == KEYDOWN and event.key == K_p:
                pygame.image.save(screen, "slide_and_pinjoint.png")
        ticks_to_next_ball -= 1
        if ticks_to_next_ball <= 0:
            # Spawn a new ball every 25 frames at a random x near the top.
            ticks_to_next_ball = 25
            mass = 1
            radius = 14
            inertia = pm.moment_for_circle(mass, 0, radius, (0,0))
            body = pm.Body(mass, inertia)
            x = random.randint(120,380)
            body.position = x, 550
            shape = pm.Circle(body, radius, (0,0))
            space.add(body, shape)
            balls.append(shape)
        ### Clear screen
        screen.fill(THECOLORS["white"])
        ### Draw stuff
        # Collect balls that fell below y=150 while drawing the rest.
        balls_to_remove = []
        for ball in balls:
            if ball.body.position.y < 150: balls_to_remove.append(ball)
            p = to_pygame(ball.body.position)
            pygame.draw.circle(screen, THECOLORS["blue"], p, int(ball.radius), 2)
        for ball in balls_to_remove:
            space.remove(ball, ball.body)
            balls.remove(ball)
        for line in lines:
            # Transform segment endpoints from body-local to world
            # coordinates, then to screen coordinates.
            body = line.body
            pv1 = body.position + line.a.rotated(body.angle)
            pv2 = body.position + line.b.rotated(body.angle)
            p1 = to_pygame(pv1)
            p2 = to_pygame(pv2)
            pygame.draw.lines(screen, THECOLORS["lightgray"], False, [p1,p2], 4)
        ### The rotation center of the L shape
        pygame.draw.circle(screen, THECOLORS["red"], to_pygame(rot_center_body.position), 5)
        ### The limits where it can move.
        pygame.draw.circle(screen, THECOLORS["green"], to_pygame(rot_limit_body.position), joint_limit, 2)
        ### Update physics
        # 10 physics substeps per rendered frame at 50 FPS.
        dt = 1.0/50.0/10.0
        for x in range(10):
            space.step(dt)
        ### Flip screen
        pygame.display.flip()
        clock.tick(50)
# BUG FIX: unlike the sibling copy of this example, this module never
# imports ``sys`` at the top of the file, so ``sys.exit(main())`` raised
# NameError on exit.  A local import in the guard keeps the fix
# self-contained.
if __name__ == '__main__':
    import sys
    sys.exit(main())
| {
"repo_name": "cfobel/python___pymunk",
"path": "examples/slide_and_pinjoint.py",
"copies": "1",
"size": "3865",
"license": "mit",
"hash": -4147716911107770400,
"line_mean": 28.6746031746,
"line_max": 106,
"alpha_frac": 0.5184993532,
"autogenerated": false,
"ratio": 3.5200364298724955,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.940068439966235,
"avg_score": 0.027570276682029084,
"num_lines": 126
} |
# Also from the bipartite
import datetime
from tulip import tlp
# start the clock -- wall-clock timestamp used for the runtime report
# printed at the bottom of this script
start_script = datetime.datetime.now()
# The updateVisualization(centerViews = True) function can be called
# during script execution to update the opened views
# The pauseScript() function can be called to pause the script execution.
# To resume the script execution, you will have to click on the "Run script " button.
# The runGraphScript(scriptFile, graph) function can be called to launch
# another edited script on a tlp.Graph object.
# The scriptFile parameter defines the script name to call (in the form [a-zA-Z0-9_]+.py)
# The main(graph) function must be defined
# to run the script on the current graph
def main(graph):
    """Compute a funding-weighted bar power score for each organisation node.

    For every node whose ``projectNode`` flag is False, ``wBarPower`` is set
    to the mean of the out-edges' ``barPower`` values weighted by their
    ``ecMaxContribution``.

    FIX: a node with no out-edges (or an all-zero ``ecMaxContribution``
    sum) previously raised ZeroDivisionError; it now gets wBarPower = 0.0.
    """
    barPower = graph.getDoubleProperty("barPower")
    viewLayout = graph.getLayoutProperty("viewLayout")
    TentativeSIC = graph.getStringProperty("TentativeSIC")
    acronym = graph.getStringProperty("acronym")
    activityType = graph.getStringProperty("activityType")
    birthDate = graph.getIntegerProperty("birthDate")
    call = graph.getStringProperty("call")
    city = graph.getStringProperty("city")
    commDate = graph.getDoubleProperty("commDate")
    country = graph.getStringProperty("country")
    ecContribution = graph.getDoubleProperty("ecContribution")
    ecMaxContribution = graph.getDoubleProperty("ecMaxContribution")
    endDate = graph.getStringProperty("endDate")
    endOfParticipation = graph.getBooleanProperty("endOfParticipation")
    fundingScheme = graph.getStringProperty("fundingScheme")
    intimacy = graph.getDoubleProperty("intimacy")
    manager = graph.getBooleanProperty("manager")
    myMoney = graph.getDoubleProperty("myMoney")
    name = graph.getStringProperty("name")
    numPartners = graph.getIntegerProperty("numPartners")
    numProjects = graph.getIntegerProperty("numProjects")
    objective = graph.getStringProperty("objective")
    orgId = graph.getStringProperty("orgId")
    organizationUrl = graph.getStringProperty("organizationUrl")
    postCode = graph.getStringProperty("postCode")
    programme = graph.getStringProperty("programme")
    projectNode = graph.getBooleanProperty("projectNode")
    projectUrl = graph.getStringProperty("projectUrl")
    rcn = graph.getStringProperty("rcn")
    role = graph.getStringProperty("role")
    shortName = graph.getStringProperty("shortName")
    startDate = graph.getStringProperty("startDate")
    status = graph.getStringProperty("status")
    street = graph.getStringProperty("street")
    topics = graph.getStringProperty("topics")
    totalCost = graph.getDoubleProperty("totalCost")
    viewBorderColor = graph.getColorProperty("viewBorderColor")
    viewBorderWidth = graph.getDoubleProperty("viewBorderWidth")
    viewColor = graph.getColorProperty("viewColor")
    viewFont = graph.getStringProperty("viewFont")
    viewFontAwesomeIcon = graph.getStringProperty("viewFontAwesomeIcon")
    viewFontSize = graph.getIntegerProperty("viewFontSize")
    viewIcon = graph.getStringProperty("viewIcon")
    viewLabel = graph.getStringProperty("viewLabel")
    viewLabelBorderColor = graph.getColorProperty("viewLabelBorderColor")
    viewLabelBorderWidth = graph.getDoubleProperty("viewLabelBorderWidth")
    viewLabelColor = graph.getColorProperty("viewLabelColor")
    viewLabelPosition = graph.getIntegerProperty("viewLabelPosition")
    viewMetric = graph.getDoubleProperty("viewMetric")
    viewRotation = graph.getDoubleProperty("viewRotation")
    viewSelection = graph.getBooleanProperty("viewSelection")
    viewShape = graph.getIntegerProperty("viewShape")
    viewSize = graph.getSizeProperty("viewSize")
    viewSrcAnchorShape = graph.getIntegerProperty("viewSrcAnchorShape")
    viewSrcAnchorSize = graph.getSizeProperty("viewSrcAnchorSize")
    viewTexture = graph.getStringProperty("viewTexture")
    viewTgtAnchorShape = graph.getIntegerProperty("viewTgtAnchorShape")
    viewTgtAnchorSize = graph.getSizeProperty("viewTgtAnchorSize")
    # NOTE(review): most of the property lookups above are unused in this
    # function; they are kept verbatim in case the script relies on
    # get*Property creating missing properties as a side effect -- confirm
    # against the Tulip API before pruning.
    # initialize the weighted barPower property. This is a node property
    wBarPower = graph.getDoubleProperty('wBarPower')
    for n in graph.getNodes():
        if projectNode[n] == False:
            # this time I start from organisations.
            denominator = 0
            numerator = 0
            for e in graph.getOutEdges(n):
                denominator += ecMaxContribution[e]
                numerator += barPower[e] * ecMaxContribution[e]
            if denominator:
                wBarPower[n] = numerator/denominator
            else:
                # No out-edges or zero total funding ceiling: define the
                # score as 0.0 instead of crashing on division by zero.
                wBarPower[n] = 0.0
# Report the total wall-clock runtime of the script.
end_script = datetime.datetime.now()
print ('Runtime: ' + str (end_script - start_script))
| {
"repo_name": "spaghetti-open-data/ODFest2017-horizon2020-network",
"path": "H2020_Code_2017/compute_weighted_bar_power.py",
"copies": "1",
"size": "4440",
"license": "mit",
"hash": 1529456325798259200,
"line_mean": 44.306122449,
"line_max": 89,
"alpha_frac": 0.7725225225,
"autogenerated": false,
"ratio": 3.9713774597495526,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5243899982249552,
"avg_score": null,
"num_lines": null
} |
#also known as the why you don't do inheritance cos it is evil like a bad fantasy villian from princess bride.
class Parent(object):
def implict(self):
print 'parent implict()'
def override(self):
print 'parent crash override'
def altered(self):
print 'parent altered'
class Child(Parent):
def __init__(self, stuff):
self.stuff = stuff
super(Child, self).__init__()
def override(self):
print 'child crash override'
def altered(self):
print 'child altered start'
super(Child, self).altered()
print 'child altered end'
class Other(object):
def override(self):
print 'other crash override'
def implict(self):
print 'other implict'
def altered(self):
print 'other altered'
class NovaChild(object):
def __init__(self):
self.other = Other()
def implict(self):
self.other.implict()
def override(self):
print 'novachild crash override'
def altered(self):
print 'novachild start'
self.other.altered()
print 'novachild end'
# --- Demo driver: exercise each lookup case on both hierarchies ----------
parent = Parent()
child = Child("stuff")
# Child inherits implict() from Parent unchanged.
parent.implict()
child.implict()
# Child overrides override() completely.
parent.override()
child.override()
# Child's altered() wraps the parent version via super().
parent.altered()
child.altered()
# Same three cases with composition instead of inheritance.
novachild = NovaChild()
novachild.implict()
novachild.override()
novachild.altered() | {
"repo_name": "vanonselenp/Learning",
"path": "Python/LPTHW/ex44.py",
"copies": "1",
"size": "1208",
"license": "mit",
"hash": -7639811511596243000,
"line_mean": 17.0447761194,
"line_max": 110,
"alpha_frac": 0.7086092715,
"autogenerated": false,
"ratio": 3.073791348600509,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4282400620100508,
"avg_score": null,
"num_lines": null
} |
# also using qt designer to get quick visual preview of how the window should look like. Please install qt designer to open the .ui file. It CAN be converted to python code, but its like a translated-from-c++ version and very inelegant. Trying to define the functions individually for easier debugging/edits.
# WMS's attempt at starting the graphical user interface effort for the benefit of future members of the Nanomaterials lab ("if only everything can be done with a single click of a single button")
# from PyQt4 import QtCore, QtGui
def stageInit(self):
    """Create the stage-control widgets and pack them into a grid layout.

    Builds the homing/origin buttons, numeric line edits for velocity and
    step size, and two LCD read-outs for the stage position, storing all of
    them (plus the layout and a container widget) as attributes on *self*.
    """
    self.home_position = QtWidgets.QPushButton("Move to absolute home (0,0)")
    self.set_origin = QtWidgets.QPushButton("Set as (0,0)")
    self.velocity = QtWidgets.QLineEdit()
    self.velocity.setValidator(QtGui.QIntValidator(0,1000))
    self.velocity.setFont(QtGui.QFont("Arial",20))
    self.step_size = QtWidgets.QLineEdit()
    # NOTE(review): QIntValidator is given a float lower bound (0.5) --
    # a QDoubleValidator was probably intended here; confirm.
    self.step_size.setValidator(QtGui.QIntValidator(0.5,1000))
    self.step_size.setFont(QtGui.QFont("Arial",20))
    # need to link to stagecontrol to read position of controllers
    self.lcdx = QtWidgets.QLCDNumber()
    self.lcdy = QtWidgets.QLCDNumber()
    self.stage_layout = QtWidgets.QGridLayout()
    self.stage_layout.addWidget(self.home_position)
    self.stage_layout.addWidget(self.set_origin)
    self.stage_layout.addWidget(self.velocity)
    self.stage_layout.addWidget(self.step_size)
    self.stage_layout.addWidget(self.lcdx)
    self.stage_layout.addWidget(self.lcdy)
    # self.stage_layout.addWidget(self.stage_layout)
    self.stage_widget = QtWidgets.QWidget()
    # MENU
    # NOTE(review): ``file_menu`` is not defined anywhere in this function,
    # so the next line would raise NameError if executed -- this looks like
    # pasted archive code; confirm before reuse.
    file_menu.triggered[QtWidgets.QAction].connect(self.processtrigger)
def processtrigger(self,q):
    """Debug hook for QMenu.triggered: report which action fired."""
    action_label = q.text()
    print(action_label + " is triggered")
"repo_name": "sunjerry019/photonLauncher",
"path": "micron/project_guimicro/_archive/archive.py",
"copies": "1",
"size": "1746",
"license": "apache-2.0",
"hash": 4562587701307450400,
"line_mean": 41.6097560976,
"line_max": 308,
"alpha_frac": 0.7422680412,
"autogenerated": false,
"ratio": 3.6074380165289255,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4849706057728925,
"avg_score": null,
"num_lines": null
} |
class SplicingAnnotationData:
def ArrayType(self):
self._array_type = array_type
return self._array_type
def Probeset(self): return self._probeset
def setProbeset(self,probeset): self._probeset = probeset
def ExonID(self): return self._exonid
def setDisplayExonID(self,exonid): self._exonid = exonid
def GeneID(self): return self._geneid
def Symbol(self):
symbol = ''
if self.GeneID() in annotate_db:
y = annotate_db[self.GeneID()]
symbol = y.Symbol()
return symbol
def ExternalGeneID(self): return self._external_gene
def ProbesetType(self):
###e.g. Exon, junction, constitutive(gene)
return self._probeset_type
def GeneStructure(self): return self._block_structure
def SecondaryExonID(self): return self._block_exon_ids
def setSecondaryExonID(self,ids): self._block_exon_ids = ids
def setLocationData(self, chromosome, strand, probeset_start, probeset_stop):
self._chromosome = chromosome; self._strand = strand
self._start = probeset_start; self._stop = probeset_stop
def LocationSummary(self):
location = self.Chromosome()+':'+self.ProbeStart()+'-'+self.ProbeStop()+'('+self.Strand()+')'
return location
def Chromosome(self): return self._chromosome
def Strand(self): return self._strand
def ProbeStart(self): return self._start
def ProbeStop(self): return self._stop
def ProbesetClass(self):
###e.g. core, extendended, full
return self._probest_class
def ExternalExonIDs(self): return self._external_exonids
def ExternalExonIDList(self):
external_exonid_list = string.split(self.ExternalExonIDs(),'|')
return external_exonid_list
def Constitutive(self): return self._constitutive_status
def setTranscriptCluster(self,secondary_geneid): self._secondary_geneid = secondary_geneid
def setNovelExon(self,novel_exon): self._novel_exon = novel_exon
def NovelExon(self): return self._novel_exon
def SecondaryGeneID(self): return self._secondary_geneid
def setExonRegionID(self,exon_region): self._exon_region = exon_region
def ExonRegionID(self): return self._exon_region
def SplicingEvent(self):
splice_event = self._splicing_event
if len(splice_event)!=0:
if splice_event[0] == '|': splice_event = splice_event[1:]
return splice_event
def SplicingCall(self): return self._splicing_call
def SpliceJunctions(self): return self._splice_junctions
def Delete(self): del self
def Report(self):
output = self.ArrayType() +'|'+ self.ExonID() +'|'+ self.ExternalGeneID()
return output
def __repr__(self): return self.Report()
class AltMouseData(SplicingAnnotationData):
def __init__(self,affygene,exons,ensembl,block_exon_ids,block_structure,probe_type_call):
self._geneid = affygene; self._external_gene = ensembl; self._exonid = exons; self._secondary_geneid = ensembl
self._probeset_type = probe_type_call; self._block_structure = block_structure; self._block_exon_ids = block_exon_ids
self._external_exonids = 'NA';
self._constitutive_status = 'no'
self._splicing_event = ''
self._secondary_geneid = 'NA'
self._exon_region = ''
if self._probeset_type == 'gene': self._constitutive_status = 'yes'
else: self._constitutive_status = 'no'
class AffyExonSTData(SplicingAnnotationData):
def __init__(self,ensembl_gene_id,exon_id,ens_exon_ids, constitutive_call_probeset, exon_region, splicing_event, splice_junctions, splicing_call):
self._geneid = ensembl_gene_id; self._external_gene = ensembl_gene_id; self._exonid = exon_id
self._constitutive_status = constitutive_call_probeset#; self._start = probeset_start; self._stop = probeset_stop
self._external_exonids = ens_exon_ids; #self._secondary_geneid = transcript_cluster_id#; self._chromosome = chromosome; self._strand = strand
self._exon_region=exon_region; self._splicing_event=splicing_event; self._splice_junctions=splice_junctions; self._splicing_call = splicing_call
if self._exonid[0] == 'U': self._probeset_type = 'UTR'
elif self._exonid[0] == 'E': self._probeset_type = 'exonic'
elif self._exonid[0] == 'I': self._probeset_type = 'intronic'
class AffyExonSTDataAbbreviated(SplicingAnnotationData):
def __init__(self,ensembl_gene_id,exon_id,splicing_call):
self._geneid = ensembl_gene_id; self._exonid = exon_id; self._splicing_call = splicing_call
def importSplicingAnnotations(array_type,Species,probeset_type,avg_ss_for_all,root_dir):
global filter_probesets_by; filter_probesets_by = probeset_type
global species; species = Species; global avg_all_for_ss; avg_all_for_ss = avg_ss_for_all; global exon_db; exon_db={}
global summary_data_db; summary_data_db={}; global remove_intronic_junctions; remove_intronic_junctions = 'no'
if array_type == 'RNASeq':
probeset_annotations_file = root_dir+'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_junctions.txt'
else: probeset_annotations_file = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_probesets.txt'
filtered_arrayids={};filter_status='no'
constitutive_probeset_db,exon_db,genes_being_analyzed = importSplicingAnnotationDatabase(probeset_annotations_file,array_type,filtered_arrayids,filter_status)
return exon_db, constitutive_probeset_db
def importSplicingAnnotationDatabase(filename,array_type,filtered_arrayids,filter_status):
begin_time = time.time()
probesets_included_by_new_evidence = 0; export_exon_regions = 'yes'
if 'fake' in array_type: array_type = string.replace(array_type,'-fake',''); original_arraytype = 'RNASeq'
else: original_arraytype = array_type
if filter_status == 'no': global gene_transcript_cluster_db; gene_transcript_cluster_db={}; gene_transcript_cluster_db2={}; global last_exon_region_db; last_exon_region_db = {}
else: new_exon_db={}
fn=filepath(filename)
last_gene = ' '; last_exon_region = ''
constitutive_probeset_db = {}; constitutive_gene = {}
count = 0; x = 0; constitutive_original = {}
#if filter_status == 'yes': exon_db = {}
if array_type == 'AltMouse':
for line in open(fn,'rU').xreadlines():
probeset_data = cleanUpLine(line) #remove endline
probeset,affygene,exons,transcript_num,transcripts,probe_type_call,ensembl,block_exon_ids,block_structure,comparison_info = string.split(probeset_data,'\t')
###note: currently exclude comparison_info since not applicable for existing analyses
if x == 0: x = 1
else:
if exons[-1] == '|': exons = exons[0:-1]
if affygene[-1] == '|': affygene = affygene[0:-1]; constitutive_gene[affygene]=[]
if probe_type_call == 'gene': constitutive_call = 'yes' #looked through the probe annotations and the gene seems to be the most consistent constitutive feature
else: constitutive_call = 'no'
include_call,constitutive_call = ProbesetCalls(array_type,'',exons,constitutive_call,'')
if include_call == 'yes':
probe_data = AltMouseData(affygene,exons,ensembl,block_exon_ids,block_structure,probe_type_call) #this used to just have affygene,exon in the values (1/17/05)
exon_db[probeset] = probe_data
if filter_status == 'yes': new_exon_db[probeset] = probe_data
if constitutive_call == 'yes': constitutive_probeset_db[probeset] = affygene
genes_being_analyzed = constitutive_gene
else:
for line in open(fn,'rU').xreadlines():
probeset_data = cleanUpLine(line) #remove endline
if x == 0: x = 1
else:
try: probeset_id, exon_id, ensembl_gene_id, transcript_cluster_id, chromosome, strand, probeset_start, probeset_stop, affy_class, constitutive_call_probeset, external_exonid, ens_const_exons, exon_region, exon_region_start, exon_region_stop, splicing_event, splice_junctions = string.split(probeset_data,'\t')
except Exception: print probeset_data;force_error
if affy_class == 'free': affy_class = 'full' ### Don't know what the difference is
include_call,constitutive_call = ProbesetCalls(array_type,affy_class,splicing_event,constitutive_call_probeset,external_exonid)
#if 'ENSG00000163904:E11.5' in probeset_id: print probeset_data
#print array_type,affy_class,splicing_event,constitutive_call_probeset,external_exonid,constitutive_call,include_call;kill
if array_type == 'junction' and '.' not in exon_id: exon_id = string.replace(exon_id,'-','.'); exon_region = string.replace(exon_region,'-','.')
if ensembl_gene_id != last_gene: new_gene = 'yes'
else: new_gene = 'no'
if filter_status == 'no' and new_gene == 'yes':
if '.' in exon_id: ### Exclude junctions
if '-' not in last_exon_region and 'E' in last_exon_region: last_exon_region_db[last_gene] = last_exon_region
else: last_exon_region_db[last_gene] = last_exon_region
last_gene = ensembl_gene_id
if len(exon_region)>1: last_exon_region = exon_region ### some probeset not linked to an exon region
###Record the transcript clusters assoicated with each gene to annotate the results later on
if constitutive_call_probeset!=constitutive_call: probesets_included_by_new_evidence +=1#; print probeset_id,[splicing_event],[constitutive_call_probeset];kill
proceed = 'no'; as_call = 0
if array_type == 'RNASeq' or array_type == 'junction': include_call = 'yes' ### Constitutive expression is not needed
if remove_intronic_junctions == 'yes':
if 'E' not in exon_id: include_call = 'no' ### Remove junctions that only have splice-sites within an intron or UTR
if include_call == 'yes' or constitutive_call == 'yes':
#if proceed == 'yes':
as_call = EvidenceOfAltSplicing(splicing_event)
if filter_status == 'no':
probe_data = AffyExonSTDataAbbreviated(ensembl_gene_id, exon_id, as_call)
if array_type != 'RNASeq':
probe_data.setTranscriptCluster(transcript_cluster_id)
try:
if export_exon_regions == 'yes':
probe_data.setExonRegionID(exon_region)
except Exception: null=[]
else:
probe_data = AffyExonSTData(ensembl_gene_id, exon_id, external_exonid, constitutive_call, exon_region, splicing_event, splice_junctions, as_call)
probe_data.setLocationData(chromosome, strand, probeset_start, probeset_stop)
if array_type != 'RNASeq':
probe_data.setTranscriptCluster(transcript_cluster_id)
else:
probe_data.setNovelExon(affy_class)
if filter_status == 'yes':
try: ### saves memory
null = filtered_arrayids[probeset_id]
new_exon_db[probeset_id] = probe_data
except KeyError: null = []
else: exon_db[probeset_id] = probe_data
if constitutive_call == 'yes' and filter_status == 'no': ###only perform function when initially running
constitutive_probeset_db[probeset_id] = ensembl_gene_id
try: constitutive_gene[ensembl_gene_id].append(probeset_id)
except Exception: constitutive_gene[ensembl_gene_id] = [probeset_id]
###Only consider transcript clusters that make up the constitutive portion of the gene or that are alternatively regulated
if array_type != 'RNASeq':
try: gene_transcript_cluster_db[ensembl_gene_id].append(transcript_cluster_id)
except KeyError: gene_transcript_cluster_db[ensembl_gene_id] = [transcript_cluster_id]
if constitutive_call_probeset == 'yes' and filter_status == 'no': ###only perform function when initially running
try: constitutive_original[ensembl_gene_id].append(probeset_id)
except KeyError: constitutive_original[ensembl_gene_id] = [probeset_id]
if array_type != 'RNASeq':
try: gene_transcript_cluster_db2[ensembl_gene_id].append(transcript_cluster_id)
except KeyError: gene_transcript_cluster_db2[ensembl_gene_id] = [transcript_cluster_id]
###If no constitutive probesets for a gene as a result of additional filtering (removing all probesets associated with a splice event), add these back
original_probesets_add = 0; genes_being_analyzed = {}
for gene in constitutive_gene: genes_being_analyzed[gene]=[]
for gene in constitutive_original:
if gene not in constitutive_gene:
genes_being_analyzed[gene] = [gene]
constitutive_gene[gene]=[]
original_probesets_add +=1
gene_transcript_cluster_db[gene] = gene_transcript_cluster_db2[gene]
for probeset in constitutive_original[gene]: constitutive_probeset_db[probeset] = gene
#if array_type == 'junction' or array_type == 'RNASeq':
### Added the below in 1.16!!!
### If no constitutive probesets for a gene assigned, assign all gene probesets
for probeset in exon_db:
gene = exon_db[probeset].GeneID()
proceed = 'no'
exonid = exon_db[probeset].ExonID()
### Rather than add all probesets, still filter based on whether the probeset is in an annotated exon
if 'E' in exonid and 'I' not in exonid and '_' not in exonid: proceed = 'yes'
if proceed == 'yes':
if gene not in constitutive_gene:
constitutive_probeset_db[probeset] = gene
genes_being_analyzed[gene] = [gene]
### DO NOT ADD TO constitutive_gene SINCE WE WANT ALL mRNA ALIGNING EXONS/JUNCTIONS TO BE ADDED!!!!
#constitutive_gene[gene]=[]
gene_transcript_cluster_db = eliminate_redundant_dict_values(gene_transcript_cluster_db)
#if affygene == 'ENSMUSG00000023089': print [abs(fold_change_log)],[log_fold_cutoff];kill
if array_type == 'RNASeq':
import RNASeq
try: last_exon_region_db = RNASeq.importExonAnnotations(species,'distal-exon','')
except Exception: null=[]
constitutive_original=[]; constitutive_gene=[]
#clearObjectsFromMemory(exon_db); constitutive_probeset_db=[];genes_being_analyzed=[] ### used to evaluate how much memory objects are taking up
#print 'remove_intronic_junctions:',remove_intronic_junctions
#print constitutive_gene['ENSMUSG00000031170'];kill ### Determine if avg_ss_for_all is working
if original_arraytype == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
print len(exon_db),id_name,'stored as instances of SplicingAnnotationData in memory'
#print len(constitutive_probeset_db),'array IDs stored as constititive'
#print probesets_included_by_new_evidence, 'array IDs were re-annotated as NOT constitutive based on mRNA evidence'
if array_type != 'AltMouse': print original_probesets_add, 'genes not viewed as constitutive as a result of filtering',id_name,'based on splicing evidence, added back'
end_time = time.time(); time_diff = int(end_time-begin_time)
#print filename,"import finished in %d seconds" % time_diff
if filter_status == 'yes': return new_exon_db
else:
summary_data_db['gene_assayed'] = len(genes_being_analyzed)
try: exportDenominatorGenes(genes_being_analyzed)
except Exception: null=[]
return constitutive_probeset_db,exon_db,genes_being_analyzed
def exportDenominatorGenes(genes_being_analyzed):
    """Write the GO-Elite denominator gene file (tab-delimited GeneID/SystemCode).

    Exports every gene in genes_being_analyzed to
    <root_dir>/GO-Elite/denominator/AS.denominator.txt with the Ensembl
    system code 'En'. For AltMouse arrays, each gene ID is translated to
    its external gene ID via annotate_db when an annotation is present.
    Relies on module globals: root_dir, export, array_type, annotate_db.
    """
    output_path = root_dir + 'GO-Elite/denominator/AS.denominator.txt'
    output_file = export.ExportFile(output_path)
    system_code = 'En'
    output_file.write("GeneID\tSystemCode\n")
    for gene in genes_being_analyzed:
        gene_id = gene
        if array_type == 'AltMouse':
            ### Translate to the external (e.g., Affymetrix) gene ID when annotated
            try:
                gene_id = annotate_db[gene].ExternalGeneID()
            except KeyError:
                pass  ### no annotation - keep the original gene ID
        output_file.write(gene_id + '\t' + system_code + '\n')
    try:
        output_file.close()
    except Exception:
        pass  ### best-effort close, matching the file's error-tolerant style
def performExpressionAnalysis(filename,constitutive_probeset_db,exon_db,annotate_db,dataset_name):
#if analysis_method == 'splicing-index': returnLargeGlobalVars();kill ### used to ensure all large global vars from the reciprocal junction analysis have been cleared from memory
#returnLargeGlobalVars()
"""import list of expression values for arrayids and calculates statistics"""
global fold_dbase; global original_conditions; global normalization_method
stats_dbase = {}; fold_dbase={}; ex_db={}; si_db=[]; bad_row_import = {}; count=0
global array_group_name_db; array_group_name_db = {}
global array_group_db; array_group_db = {};
global array_raw_group_values; array_raw_group_values = {}; global original_array_names; original_array_names=[]
global max_replicates; global equal_replicates; global array_group_list
array_index_list = [] ###Use this list for permutation analysis
fn=filepath(filename); line_num = 1
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line); t = string.split(data,'\t'); probeset = t[0]
if t[0]== '#': null=[] ### Don't import line
elif line_num == 1:
line_num += 1 #makes this value null for the next loop of actual array data
###Below ocucrs if the data is raw opposed to precomputed
if ':' in t[1]:
array_group_list = []; x=0 ###gives us an original index value for each entry in the group
for entry in t[1:]:
original_array_names.append(entry)
aa = string.split(entry,':')
try: array_group,array_name = aa
except Exception: array_name = string.join(aa[1:],':'); array_group = aa[0]
try:
array_group_db[array_group].append(x)
array_group_name_db[array_group].append(array_name)
except KeyError:
array_group_db[array_group] = [x]
array_group_name_db[array_group] = [array_name]
### below only occurs with a new group addition
array_group_list.append(array_group) #use this to generate comparisons in the below linked function
x += 1
else:
#try: print data_type
#except Exception,exception:
#print exception
#print traceback.format_exc()
print_out = 'The AltAnalyze filtered expression file "'+filename+'" is not propperly formatted.\n Review formatting requirements if this file was created by another application.\n'
print_out += "\nFirst line\n"+line
try: UI.WarningWindow(print_out,'Exit'); print print_out
except Exception: print print_out
badExit()
else:
#if probeset in exon_db:
#if exon_db[probeset].GeneID() == 'ENSG00000139970':
###Use the index values from above to assign each expression value to a new database
temp_group_array = {}
line_num+=1
for group in array_group_db:
if count == 0: array_index_list.append(array_group_db[group])
for array_index in array_group_db[group]:
try: exp_val = float(t[array_index+1])
except Exception:
if 'Gene_ID' not in line: bad_row_import[probeset]=line; exp_val = 1
###appended is the numerical expression value for each array in the group (temporary array)
try: temp_group_array[group].append(exp_val) #add 1 since probeset is the first column
except KeyError: temp_group_array[group] = [exp_val]
if count == 0: array_index_list.sort(); count = 1
####store the group database within the probeset database entry
try:
null = exon_db[probeset] ###To conserve memory, don't store any probesets not used for downstream analyses (e.g. not linked to mRNAs)
#if 'ENSG00000139970' in probeset:
#print [max_exp]
#print t[1:];kill
#max_exp = max(map(float, t[1:]))
#if len(array_raw_group_values)>10000: break
#if max_exp>math.log(70,2):
array_raw_group_values[probeset] = temp_group_array
except KeyError:
#print probeset
pass
print len(array_raw_group_values), 'sequence identifiers imported out of', line_num-1
if len(bad_row_import)>0:
print len(bad_row_import), "Rows with an unexplained import error processed and deleted."
print "Example row:"; x=0
for i in bad_row_import:
if x==0: print bad_row_import[i]
try: del array_raw_group_values[i]
except Exception: null=[]
x+=1
### If no gene expression reporting probesets were imported, update constitutive_probeset_db to include all mRNA aligning probesets
cs_genedb={}; missing_genedb={}; addback_genedb={}; rnaseq_cs_gene_db={}
for probeset in constitutive_probeset_db:
gene = constitutive_probeset_db[probeset]
#if gene == 'ENSG00000185008': print [probeset]
try:
null=array_raw_group_values[probeset]; cs_genedb[gene]=[]
if gene == probeset: rnaseq_cs_gene_db[gene]=[] ### If RPKM normalization used, use the gene expression values already calculated
except Exception: missing_genedb[gene]=[] ### Collect possible that are missing from constitutive database (verify next)
for gene in missing_genedb:
try: null=cs_genedb[gene]
except Exception: addback_genedb[gene]=[]
for probeset in array_raw_group_values:
try:
gene = exon_db[probeset].GeneID()
try:
null=addback_genedb[gene]
if 'I' not in probeset and 'U' not in probeset: ### No intron or UTR containing should be used for constitutive expression
null=string.split(probeset,':')
if len(null)<3: ### No trans-gene junctions should be used for constitutive expression
constitutive_probeset_db[probeset]=gene
except Exception: null=[]
except Exception: null=[]
for probeset in constitutive_probeset_db:
gene = constitutive_probeset_db[probeset]
#if gene == 'ENSG00000185008': print [[probeset]]
### Only examine values for associated exons when determining RNASeq constitutive expression (when exon data is present)
normalization_method = 'raw'
if array_type == 'RNASeq':
junction_count=0; constitutive_probeset_db2={}
for uid in constitutive_probeset_db:
if '-' in uid: junction_count+=1
if len(rnaseq_cs_gene_db)>0: ### If filtered RPKM gene-level expression data present, use this instead (and only this)
normalization_method = 'RPKM'
constitutive_probeset_db={} ### Re-set this database
for gene in rnaseq_cs_gene_db:
constitutive_probeset_db[gene]=gene
elif junction_count !=0 and len(constitutive_probeset_db) != junction_count:
### occurs when there is a mix of junction and exon IDs
for uid in constitutive_probeset_db:
if '-' not in uid: constitutive_probeset_db2[uid] = constitutive_probeset_db[uid]
constitutive_probeset_db = constitutive_probeset_db2; constitutive_probeset_db2=[]
"""
for probeset in constitutive_probeset_db:
gene = constitutive_probeset_db[probeset]
if gene == 'ENSG00000185008': print [probeset]
"""
###Build all putative splicing events
global alt_junction_db; global exon_dbase; global critical_exon_db; critical_exon_db={}
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
### Applies to reciprocal junction analyses only
if array_type == 'AltMouse':
alt_junction_db,critical_exon_db,exon_dbase,exon_inclusion_db,exon_db = ExonAnnotate_module.identifyPutativeSpliceEvents(exon_db,constitutive_probeset_db,array_raw_group_values,agglomerate_inclusion_probesets,onlyAnalyzeJunctions)
print 'Number of Genes with Examined Splice Events:',len(alt_junction_db)
elif (array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null':
import JunctionArray
alt_junction_db,critical_exon_db,exon_dbase,exon_inclusion_db,exon_db = JunctionArray.getPutativeSpliceEvents(species,array_type,exon_db,agglomerate_inclusion_probesets,root_dir)
print 'Number of Genes with Examined Splice Events:',len(alt_junction_db)
#alt_junction_db=[]; critical_exon_db=[]; exon_dbase=[]; exon_inclusion_db=[]
if agglomerate_inclusion_probesets == 'yes':
array_raw_group_values = agglomerateInclusionProbesets(array_raw_group_values,exon_inclusion_db)
exon_inclusion_db=[]
### For datasets with high memory requirements (RNASeq), filter the current and new databases
### Begin this function after agglomeration to ensure agglomerated probesets are considered
reciprocal_probesets = {}
if array_type == 'junction' or array_type == 'RNASeq':
for affygene in alt_junction_db:
for event in alt_junction_db[affygene]:
reciprocal_probesets[event.InclusionProbeset()]=[]
reciprocal_probesets[event.ExclusionProbeset()]=[]
not_evalutated={}
for probeset in array_raw_group_values:
try: null=reciprocal_probesets[probeset]
except Exception:
### Don't remove constitutive probesets
try: null=constitutive_probeset_db[probeset]
except Exception: not_evalutated[probeset]=[]
#print 'Removing',len(not_evalutated),'exon/junction IDs not evaulated for splicing'
for probeset in not_evalutated:
del array_raw_group_values[probeset]
###Check to see if we have precomputed expression data or raw to be analyzed
x=0; y=0; array_raw_group_values2={}; probesets_to_delete=[] ### Record deleted probesets
if len(array_raw_group_values)==0:
print_out = "No genes were considered 'Expressed' based on your input options. Check to make sure that the right species database is indicated and that the right data format has been selected (e.g., non-log versus log expression)."
try: UI.WarningWindow(print_out,'Exit')
except Exception: print print_out; print "Exiting program"
badExit()
elif len(array_raw_group_values)>0:
###array_group_list should already be unique and correctly sorted (see above)
for probeset in array_raw_group_values:
data_lists=[]
for group_name in array_group_list:
data_list = array_raw_group_values[probeset][group_name] ###nested database entry access - baseline expression
if global_addition_factor > 0: data_list = addGlobalFudgeFactor(data_list,'log')
data_lists.append(data_list)
if len(array_group_list)==2:
data_list1 = data_lists[0]; data_list2 = data_lists[-1]; avg1 = statistics.avg(data_list1); avg2 = statistics.avg(data_list2)
log_fold = avg2 - avg1
try:
#t,df,tails = statistics.ttest(data_list1,data_list2,2,3) #unpaired student ttest, calls p_value function
#t = abs(t); df = round(df) #Excel doesn't recognize fractions in a DF
#p = statistics.t_probability(t,df)
p = statistics.runComparisonStatistic(data_list1,data_list2,probability_statistic)
if p == -1:
if len(data_list1)>1 and len(data_list2)>1:
print_out = "The probability statistic selected ("+probability_statistic+") is not compatible with the\nexperimental design. Please consider an alternative statistic or correct the problem.\nExiting AltAnalyze."
try: UI.WarningWindow(print_out,'Exit')
except Exception: print print_out; print "Exiting program"
badExit()
else: p = 1
except Exception: p = 1
fold_dbase[probeset] = [0]; fold_dbase[probeset].append(log_fold)
stats_dbase[probeset]=[avg1]; stats_dbase[probeset].append(p)
###replace entries with the two lists for later permutation analysis
if p == -1: ### should by p == 1: Not sure why this filter was here, but mistakenly removes probesets where there is just one array for each group
del fold_dbase[probeset];del stats_dbase[probeset]; probesets_to_delete.append(probeset); x += 1
if x == 1: print 'Bad data detected...', data_list1, data_list2
elif (avg1 < expression_threshold and avg2 < expression_threshold and p > p_threshold) and array_type != 'RNASeq': ### Inserted a filtering option to exclude small variance, low expreession probesets
del fold_dbase[probeset];del stats_dbase[probeset]; probesets_to_delete.append(probeset); x += 1
else: array_raw_group_values2[probeset] = [data_list1,data_list2]
else: ###Non-junction analysis can handle more than 2 groups
index=0
for data_list in data_lists:
try: array_raw_group_values2[probeset].append(data_list)
except KeyError: array_raw_group_values2[probeset] = [data_list]
if len(array_group_list)>2: ### Thus, there is some variance for this probeset
### Create a complete stats_dbase containing all fold changes
if index==0:
avg_baseline = statistics.avg(data_list); stats_dbase[probeset] = [avg_baseline]
else:
avg_exp = statistics.avg(data_list)
log_fold = avg_exp - avg_baseline
try: fold_dbase[probeset].append(log_fold)
except KeyError: fold_dbase[probeset] = [0,log_fold]
index+=1
if array_type == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
array_raw_group_values = array_raw_group_values2; array_raw_group_values2=[]
print x, id_name,"excluded prior to analysis... predicted not detected"
global original_avg_const_exp_db; global original_fold_dbase
global avg_const_exp_db; global permute_lists; global midas_db
if len(array_raw_group_values)>0:
adj_fold_dbase, nonlog_NI_db, conditions, gene_db, constitutive_gene_db, constitutive_fold_change, original_avg_const_exp_db = constitutive_exp_normalization(fold_dbase,stats_dbase,exon_db,constitutive_probeset_db)
stats_dbase=[] ### No longer needed after this point
original_fold_dbase = fold_dbase; avg_const_exp_db = {}; permute_lists = []; y = 0; original_conditions = conditions; max_replicates,equal_replicates = maxReplicates()
gene_expression_diff_db = constitutive_expression_changes(constitutive_fold_change,annotate_db) ###Add in constitutive fold change filter to assess gene expression for ASPIRE
while conditions > y:
avg_const_exp_db = constitutive_exp_normalization_raw(gene_db,constitutive_gene_db,array_raw_group_values,exon_db,y,avg_const_exp_db); y+=1
#print len(avg_const_exp_db),constitutive_gene_db['ENSMUSG00000054850']
###Export Analysis Results for external splicing analysis (e.g. MiDAS format)
if run_MiDAS == 'yes' and normalization_method != 'RPKM': ### RPKM has negative values which will crash MiDAS
status = ResultsExport_module.exportTransitResults(array_group_list,array_raw_group_values,array_group_name_db,avg_const_exp_db,adj_fold_dbase,exon_db,dataset_name,apt_location)
print "Finished exporting input data for MiDAS analysis"
try: midas_db = ResultsExport_module.importMidasOutput(dataset_name)
except Exception: midas_db = {} ### Occurs if there are not enough samples to calculate a MiDAS p-value
else: midas_db = {}
###Provides all pairwise permuted group comparisons
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
permute_lists = statistics.permute_arrays(array_index_list)
### Now remove probesets from the analysis that were used to evaluate gene expression
for probeset in constitutive_probeset_db:
try: null = reciprocal_probesets[probeset]
except Exception:
try: del array_raw_group_values[probeset]
except Exception: null=[]
not_evalutated=[]; reciprocal_probesets=[]
constitutive_probeset_db=[]
### Above, all conditions were examined when more than 2 are present... change this so that only the most extreeem are analyzed further
if len(array_group_list)>2 and analysis_method == 'splicing-index' and (array_type == 'exon' or array_type == 'gene' or explicit_data_type != 'null'): ### USED FOR MULTIPLE COMPARISONS
print 'Calculating splicing-index values for multiple group comparisons (please be patient)...',
"""
if len(midas_db)==0:
print_out = 'Warning!!! MiDAS failed to run for multiple groups. Please make\nsure there are biological replicates present for your groups.\nAltAnalyze requires replicates for multi-group (more than two) analyses.'
try: UI.WarningWindow(print_out,'Exit')
except Exception: print print_out; print "Exiting program"
badExit()"""
if filter_for_AS == 'yes':
for probeset in exon_db:
as_call = exon_db[probeset].SplicingCall()
if as_call == 0:
try: del nonlog_NI_db[probeset]
except KeyError: null=[]
if export_NI_values == 'yes':
export_exon_regions = 'yes'
### Currently, we don't deal with raw adjusted expression values, just group, so just export the values for each group
summary_output = root_dir+'AltResults/RawSpliceData/'+species+'/'+analysis_method+'/'+dataset_name[:-1]+'.txt'
print "Exporting all normalized intensities to:\n"+summary_output
adjoutput = export.ExportFile(summary_output)
title = string.join(['Gene\tExonID\tprobesetID']+original_array_names,'\t')+'\n'; adjoutput.write(title)
### Pick which data lists have the most extreem values using the NI_dbase (adjusted folds for each condition)
original_increment = int(len(nonlog_NI_db)/20); increment = original_increment; interaction = 0
for probeset in nonlog_NI_db:
if interaction == increment: increment+=original_increment; print '*',
interaction +=1
geneid = exon_db[probeset].GeneID(); ed = exon_db[probeset]
index=0; NI_list=[] ### Add the group_name to each adj fold value
for NI in nonlog_NI_db[probeset]:
NI_list.append((NI,index)); index+=1 ### setup to sort for the extreeme adj folds and get associated group_name using the index
raw_exp_vals = array_raw_group_values[probeset]
adj_exp_lists={} ### Store the adjusted expression values for each group
if geneid in avg_const_exp_db:
k=0; gi=0; adj_exp_vals = []
for exp_list in raw_exp_vals:
for exp in exp_list:
adj_exp_val = exp-avg_const_exp_db[geneid][k]
try: adj_exp_lists[gi].append(adj_exp_val)
except Exception: adj_exp_lists[gi] = [adj_exp_val]
if export_NI_values == 'yes': adj_exp_vals.append(str(adj_exp_val))
k+=1
gi+=1
if export_NI_values == 'yes':
#print geneid+'-'+probeset, adj_exp_val, [ed.ExonID()];kill
if export_exon_regions == 'yes':
try: ### Thid will only work if ExonRegionID is stored in the abreviated AffyExonSTData object - useful in comparing results between arrays (exon-region centric)
if (array_type == 'exon' or array_type == 'gene') or '-' not in ed.ExonID(): ### only include exon entries not junctions
exon_regions = string.split(ed.ExonRegionID(),'|')
for er in exon_regions:
if len(er)>0: er = er
else:
try: er = ed.ExonID()
except Exception: er = 'NA'
ev = string.join([geneid+'\t'+er+'\t'+probeset]+adj_exp_vals,'\t')+'\n'
if len(filtered_probeset_db)>0:
if probeset in filtered_probeset_db: adjoutput.write(ev) ### This is used when we want to restrict to only probesets known to already by changed
else: adjoutput.write(ev)
except Exception:
ev = string.join([geneid+'\t'+'NA'+'\t'+probeset]+adj_exp_vals,'\t')+'\n'; adjoutput.write(ev)
NI_list.sort()
examine_pairwise_comparisons = 'yes'
if examine_pairwise_comparisons == 'yes':
k1=0; k2=0; filtered_NI_comps = []
NI_list_rev = list(NI_list); NI_list_rev.reverse()
NI1,index1 = NI_list[k1]; NI2,index2 = NI_list_rev[k2]; abs_SI = abs(math.log(NI1/NI2,2))
if abs_SI<alt_exon_logfold_cutoff:
### Indicates that no valid matches were identified - hence, exit loop and return an NI_list with no variance
NI_list = [NI_list[0],NI_list[0]]
else:
### Indicates that no valid matches were identified - hence, exit loop and return an NI_list with no variance
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2-constit_exp1
#print 'original',abs_SI,k1,k2, ge_fold, constit_exp1, constit_exp2
if abs(ge_fold) < log_fold_cutoff: filtered_NI_comps.append([abs_SI,k1,k2])
else:
for i1 in NI_list:
k2=0
for i2 in NI_list_rev:
NI1,index1 = i1; NI2,index2 = i2; abs_SI = abs(math.log(NI1/NI2,2))
#constit_exp1 = original_avg_const_exp_db[geneid][index1]
#constit_exp2 = original_avg_const_exp_db[geneid][index2]
#ge_fold = constit_exp2-constit_exp1
#if abs(ge_fold) < log_fold_cutoff: filtered_NI_comps.append([abs_SI,k1,k2])
#print k1,k2, i1, i2, abs_SI, abs(ge_fold), log_fold_cutoff, alt_exon_logfold_cutoff
if abs_SI<alt_exon_logfold_cutoff: break
else:
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2-constit_exp1
if abs(ge_fold) < log_fold_cutoff:
filtered_NI_comps.append([abs_SI,k1,k2])
#if k1 == 49 or k1 == 50 or k1 == 51: print probeset, abs_SI, k1, k2, abs(ge_fold),log_fold_cutoff, index1, index2, NI1, NI2, constit_exp1,constit_exp2
k2+=1
k1+=1
if len(filtered_NI_comps)>0:
#print filtered_NI_comps
#print NI_list_rev
#print probeset,geneid
#print len(filtered_NI_comps)
#print original_avg_const_exp_db[geneid]
filtered_NI_comps.sort()
si,k1,k2 = filtered_NI_comps[-1]
NI_list = [NI_list[k1],NI_list_rev[k2]]
"""
NI1,index1 = NI_list[0]; NI2,index2 = NI_list[-1]
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2-constit_exp1
print probeset, si, ge_fold, NI_list"""
#print k1,k2;sys.exit()
index1 = NI_list[0][1]; index2 = NI_list[-1][1]
nonlog_NI_db[probeset] = [NI_list[0][0],NI_list[-1][0]] ### Update the values of this dictionary
data_list1 = array_raw_group_values[probeset][index1]; data_list2 = array_raw_group_values[probeset][index2]
avg1 = statistics.avg(data_list1); avg2 = statistics.avg(data_list2); log_fold = avg2 - avg1
group_name1 = array_group_list[index1]; group_name2 = array_group_list[index2]
try:
#t,df,tails = statistics.ttest(data_list1,data_list2,2,3) #unpaired student ttest, calls p_value function
#t = abs(t); df = round(df); ttest_exp_p = statistics.t_probability(t,df)
ttest_exp_p = statistics.runComparisonStatistic(data_list1,data_list2,probability_statistic)
except Exception: ttest_exp_p = 1
fold_dbase[probeset] = [0]; fold_dbase[probeset].append(log_fold)
if ttest_exp_p == -1: del fold_dbase[probeset]; probesets_to_delete.append(probeset); x += 1
elif avg1 < expression_threshold and avg2 < expression_threshold and (ttest_exp_p > p_threshold and ttest_exp_p != 1): ### Inserted a filtering option to exclude small variance, low expreession probesets
del fold_dbase[probeset]; probesets_to_delete.append(probeset); x += 1
else:
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2-constit_exp1
normInt1 = (avg1-constit_exp1); normInt2 = (avg2-constit_exp2)
adj_fold = normInt2 - normInt1
splicing_index = -1*adj_fold; abs_splicing_index = abs(splicing_index)
#print probeset, splicing_index, ge_fold, index1, index2
#normIntList1 = adj_exp_lists[index1]; normIntList2 = adj_exp_lists[index2]
all_nI=[]
for g_index in adj_exp_lists: all_nI.append(adj_exp_lists[g_index])
try: normIntensityP = statistics.OneWayANOVA(all_nI) #[normIntList1,normIntList2] ### This stays an ANOVA independent of the algorithm choosen since groups number > 2
except Exception: normIntensityP = 'NA'
if (normInt1*normInt2)<0: opposite_SI_log_mean = 'yes'
else: opposite_SI_log_mean = 'no'
abs_log_ratio = abs(ge_fold)
if probeset in midas_db:
try: midas_p = float(midas_db[probeset])
except ValueError: midas_p = 'NA'
else: midas_p = 'NA'
#if 'ENSG00000059588' in geneid: print probeset, splicing_index, constit_exp1, constit_exp2, ge_fold,group_name2+'_vs_'+group_name1, index1, index2
if abs_splicing_index>alt_exon_logfold_cutoff and (midas_p < p_threshold or midas_p == 'NA'): #and abs_log_ratio>1 and ttest_exp_p<0.05: ###and ge_threshold_count==2
exonid = ed.ExonID(); critical_exon_list = [1,[exonid]]
ped = ProbesetExpressionData(avg1, avg2, log_fold, adj_fold, ttest_exp_p, group_name2+'_vs_'+group_name1)
sid = ExonData(splicing_index,probeset,critical_exon_list,geneid,normInt1,normInt2,normIntensityP,opposite_SI_log_mean)
sid.setConstitutiveExpression(constit_exp1); sid.setConstitutiveFold(ge_fold); sid.setProbesetExpressionData(ped)
si_db.append((splicing_index,sid))
else:
### Also record the data for probesets that are excluded... Used by DomainGraph
eed = ExcludedExonData(splicing_index,geneid,normIntensityP)
ex_db[probeset] = eed
if array_type == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
print len(si_db),id_name,"with evidence of Alternative expression"
original_fold_dbase = fold_dbase; si_db.sort()
summary_data_db['denominator_exp_events']=len(nonlog_NI_db)
del avg_const_exp_db; del gene_db; del constitutive_gene_db; gene_expression_diff_db={}
if export_NI_values == 'yes': adjoutput.close()
### Above, all conditions were examined when more than 2 are present... change this so that only the most extreeem are analyzed further
elif len(array_group_list)>2 and (array_type == 'junction' or array_type == 'RNASeq' or array_type == 'AltMouse'): ### USED FOR MULTIPLE COMPARISONS
excluded_probeset_db={}
group_sizes = []; original_array_indices = permute_lists[0] ###p[0] is the original organization of the group samples prior to permutation
for group in original_array_indices: group_sizes.append(len(group))
if analysis_method == 'linearregres': ### For linear regression, these scores are non-long
original_array_raw_group_values = copy.deepcopy(array_raw_group_values)
for probeset in array_raw_group_values:
ls_concatenated=[]
for group in array_raw_group_values[probeset]: ls_concatenated+=group
ls_concatenated = statistics.log_fold_conversion_fraction(ls_concatenated)
array_raw_group_values[probeset] = ls_concatenated
pos1=0; pos2=0; positions=[]
for group in group_sizes:
if pos1 == 0: pos2 = group; positions.append((pos1,pos2))
else: pos2 = pos1+group; positions.append((pos1,pos2))
pos1 = pos2
if export_NI_values == 'yes':
export_exon_regions = 'yes'
### Currently, we don't deal with raw adjusted expression values, just group, so just export the values for each group
summary_output = root_dir+'AltResults/RawSpliceData/'+species+'/'+analysis_method+'/'+dataset_name[:-1]+'.txt'
print "Exporting all normalized intensities to:\n"+summary_output
adjoutput = export.ExportFile(summary_output)
title = string.join(['gene\tprobesets\tExonRegion']+original_array_names,'\t')+'\n'; adjoutput.write(title)
events_examined= 0; denominator_events=0; fold_dbase=[]; adj_fold_dbase=[]; scores_examined=0
splice_event_list=[]; splice_event_list_mx=[]; splice_event_list_non_mx=[]; event_mx_temp = []; permute_p_values={}; probeset_comp_db={}#use this to exclude duplicate mx events
for geneid in alt_junction_db:
affygene = geneid
for event in alt_junction_db[geneid]:
if array_type == 'AltMouse':
#event = [('ei', 'E16-E17'), ('ex', 'E16-E18')]
#critical_exon_db[affygene,tuple(critical_exons)] = [1,'E'+str(e1a),'E'+str(e2b)] --- affygene,tuple(event) == key, 1 indicates both are either up or down together
event_call = event[0][0] + '-' + event[1][0]
exon_set1 = event[0][1]; exon_set2 = event[1][1]
probeset1 = exon_dbase[affygene,exon_set1]
probeset2 = exon_dbase[affygene,exon_set2]
critical_exon_list = critical_exon_db[affygene,tuple(event)]
if array_type == 'junction' or array_type == 'RNASeq':
event_call = 'ei-ex' ### Below objects from JunctionArrayEnsemblRules - class JunctionInformation
probeset1 = event.InclusionProbeset(); probeset2 = event.ExclusionProbeset()
exon_set1 = event.InclusionJunction(); exon_set2 = event.ExclusionJunction()
try: novel_event = event.NovelEvent()
except Exception: novel_event = 'known'
critical_exon_list = [1,event.CriticalExonSets()]
key,jd = formatJunctionData([probeset1,probeset2],geneid,critical_exon_list[1])
if array_type == 'junction' or array_type == 'RNASeq':
try: jd.setSymbol(annotate_db[geneid].Symbol())
except Exception: null=[]
#if '|' in probeset1: print probeset1, key,jd.InclusionDisplay();kill
probeset_comp_db[key] = jd ### This is used for the permutation analysis and domain/mirBS import
dI_scores=[]
if probeset1 in nonlog_NI_db and probeset2 in nonlog_NI_db and probeset1 in array_raw_group_values and probeset2 in array_raw_group_values:
events_examined+=1
if analysis_method == 'ASPIRE':
index1=0; NI_list1=[]; NI_list2=[] ### Add the group_name to each adj fold value
for NI in nonlog_NI_db[probeset1]: NI_list1.append(NI)
for NI in nonlog_NI_db[probeset2]: NI_list2.append(NI)
for NI1_g1 in NI_list1:
NI2_g1 = NI_list2[index1]; index2=0
for NI1_g2 in NI_list1:
try: NI2_g2 = NI_list2[index2]
except Exception: print index1, index2, NI_list1, NI_list2;kill
if index1 != index2:
b1 = NI1_g1; e1 = NI1_g2
b2 = NI2_g1; e2 = NI2_g2
try:
dI = statistics.aspire_stringent(b1,e1,b2,e2); Rin = b1/e1; Rex = b2/e2
if (Rin>1 and Rex<1) or (Rin<1 and Rex>1):
if dI<0: i1,i2 = index2,index1 ### all scores should indicate upregulation
else: i1,i2=index1,index2
dI_scores.append((abs(dI),i1,i2))
except Exception:
#if array_type != 'RNASeq': ### RNASeq has counts of zero and one that can cause the same result between groups and probesets
#print probeset1, probeset2, b1, e1, b2, e2, index1, index2, events_examined;kill
### Exception - Occurs for RNA-Seq but can occur for array data under extreemly rare circumstances (Rex=Rin even when different b1,e1 and b2,ed values)
null=[]
index2+=1
index1+=1
dI_scores.sort()
if analysis_method == 'linearregres':
log_fold,i1,i2 = getAllPossibleLinearRegressionScores(probeset1,probeset2,positions,group_sizes)
dI_scores.append((log_fold,i1,i2))
raw_exp_vals1 = original_array_raw_group_values[probeset1]; raw_exp_vals2 = original_array_raw_group_values[probeset2]
else: raw_exp_vals1 = array_raw_group_values[probeset1]; raw_exp_vals2 = array_raw_group_values[probeset2]
adj_exp_lists1={}; adj_exp_lists2={} ### Store the adjusted expression values for each group
if geneid in avg_const_exp_db:
gi=0; l=0; adj_exp_vals = []; anova_test=[]
for exp_list in raw_exp_vals1:
k=0; anova_group=[]
for exp in exp_list:
adj_exp_val1 = exp-avg_const_exp_db[geneid][l]
try: adj_exp_lists1[gi].append(adj_exp_val1)
except Exception: adj_exp_lists1[gi] = [adj_exp_val1]
adj_exp_val2 = raw_exp_vals2[gi][k]-avg_const_exp_db[geneid][l]
try: adj_exp_lists2[gi].append(adj_exp_val2)
except Exception: adj_exp_lists2[gi] = [adj_exp_val2]
anova_group.append(adj_exp_val2-adj_exp_val1)
if export_NI_values == 'yes':
#if analysis_method == 'ASPIRE':
adj_exp_vals.append(str(adj_exp_val2-adj_exp_val1))
### BELOW CODE PRODUCES THE SAME RESULT!!!!
"""folds1 = statistics.log_fold_conversion_fraction([exp])
folds2 = statistics.log_fold_conversion_fraction([raw_exp_vals2[gi][k]])
lr_score = statistics.convert_to_log_fold(statistics.simpleLinRegress(folds1,folds2))
adj_exp_vals.append(str(lr_score))"""
k+=1; l+=0
gi+=1; anova_test.append(anova_group)
if export_NI_values == 'yes':
if export_exon_regions == 'yes':
exon_regions = string.join(critical_exon_list[1],'|')
exon_regions = string.split(exon_regions,'|')
for er in exon_regions:
ev = string.join([geneid+'\t'+probeset1+'-'+probeset2+'\t'+er]+adj_exp_vals,'\t')+'\n'
if len(filtered_probeset_db)>0:
if probeset1 in filtered_probeset_db and probeset2 in filtered_probeset_db:
adjoutput.write(ev) ### This is used when we want to restrict to only probesets known to already by changed
else: adjoutput.write(ev)
try: anovaNIp = statistics.OneWayANOVA(anova_test) ### This stays an ANOVA independent of the algorithm choosen since groups number > 2
except Exception: anovaNIp='NA'
if len(dI_scores)>0 and geneid in avg_const_exp_db:
dI,index1,index2 = dI_scores[-1]; count=0
probesets = [probeset1, probeset2]; index=0
key,jd = formatJunctionData([probeset1,probeset2],affygene,critical_exon_list[1])
if array_type == 'junction' or array_type == 'RNASeq':
try: jd.setSymbol(annotate_db[affygene].Symbol())
except Exception:null=[]
probeset_comp_db[key] = jd ### This is used for the permutation analysis and domain/mirBS import
if max_replicates >2 or equal_replicates==2: permute_p_values[(probeset1,probeset2)] = [anovaNIp, 'NA', 'NA', 'NA']
index=0
for probeset in probesets:
if analysis_method == 'linearregres':
data_list1 = original_array_raw_group_values[probeset][index1]; data_list2 = original_array_raw_group_values[probeset][index2]
else: data_list1 = array_raw_group_values[probeset][index1]; data_list2 = array_raw_group_values[probeset][index2]
baseline_exp = statistics.avg(data_list1); experimental_exp = statistics.avg(data_list2); fold_change = experimental_exp - baseline_exp
group_name1 = array_group_list[index1]; group_name2 = array_group_list[index2]
try:
ttest_exp_p = statistics.runComparisonStatistic(data_list1,data_list2,probability_statistic)
except Exception: ttest_exp_p = 'NA'
if ttest_exp_p==1: ttest_exp_p = 'NA'
if index == 0:
try: adj_fold = statistics.avg(adj_exp_lists1[index2]) - statistics.avg(adj_exp_lists1[index1])
except Exception:
print raw_exp_vals1,raw_exp_vals2, avg_const_exp_db[geneid]
print probeset,probesets,adj_exp_lists1,adj_exp_lists2,index1,index2;kill
ped1 = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p, group_name2+'_vs_'+group_name1)
else:
adj_fold = statistics.avg(adj_exp_lists2[index2]) - statistics.avg(adj_exp_lists2[index1])
ped2 = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p, group_name2+'_vs_'+group_name1)
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2-constit_exp1
index+=1
try:
pp1 = statistics.runComparisonStatistic(adj_exp_lists1[index1], adj_exp_lists1[index2],probability_statistic)
pp2 = statistics.runComparisonStatistic(adj_exp_lists2[index1], adj_exp_lists2[index2],probability_statistic)
except Exception: pp1 = 'NA'; pp2 = 'NA'
if analysis_method == 'ASPIRE' and len(dI_scores)>0:
p1 = JunctionExpressionData(adj_exp_lists1[index1], adj_exp_lists1[index2], pp1, ped1)
p2 = JunctionExpressionData(adj_exp_lists2[index1], adj_exp_lists2[index2], pp2, ped2)
### ANOVA p-replaces the below p-value
"""try: baseline_scores, exp_scores, pairwiseNIp = calculateAllASPIREScores(p1,p2)
except Exception: baseline_scores = [0]; exp_scores=[dI]; pairwiseNIp = 0 """
#if pairwiseNIp == 'NA': pairwiseNIp = 0 ### probably comment out
if len(dI_scores)>0:
scores_examined+=1
if probeset in midas_db:
try: midas_p = float(midas_db[probeset])
except ValueError: midas_p = 'NA'
else: midas_p = 'NA'
if dI>alt_exon_logfold_cutoff and (anovaNIp < p_threshold or perform_permutation_analysis == 'yes' or anovaNIp == 'NA' or anovaNIp == 1): #and abs_log_ratio>1 and ttest_exp_p<0.05: ###and ge_threshold_count==2
#print [dI, probeset1,probeset2, anovaNIp, alt_exon_logfold_cutoff];kill
ejd = ExonJunctionData(dI,probeset1,probeset2,pp1,pp2,'upregulated',event_call,critical_exon_list,affygene,ped1,ped2)
ejd.setConstitutiveFold(ge_fold); ejd.setConstitutiveExpression(constit_exp1)
if array_type == 'RNASeq':
ejd.setNovelEvent(novel_event)
splice_event_list.append((dI,ejd))
else: excluded_probeset_db[affygene+':'+critical_exon_list[1][0]] = probeset1, affygene, dI, 'NA', anovaNIp
statistics.adjustPermuteStats(permute_p_values)
ex_db = splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db
original_fold_dbase = fold_dbase; original_avg_const_exp_db=[]; nonlog_NI_db = []; fold_dbase=[]
summary_data_db['denominator_exp_events']=events_examined
del avg_const_exp_db; del gene_db; del constitutive_gene_db; gene_expression_diff_db={}
if export_NI_values == 'yes': adjoutput.close()
print len(splice_event_list), 'alternative exons out of %s exon events examined' % events_examined
fold_dbase=[]; original_fold_dbase=[]; exon_db=[]; constitutive_gene_db=[]; addback_genedb=[]
gene_db=[]; missing_genedb=[]
"""
print 'local vars'
all = [var for var in locals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(locals()[var])>500: print var, len(locals()[var])
except Exception: null=[]
"""
return conditions,adj_fold_dbase,nonlog_NI_db,dataset_name,gene_expression_diff_db,midas_db,ex_db,si_db
class ProbesetExpressionData:
    """Expression statistics for one probeset in one group comparison.

    Stores baseline/experimental (log2) expression, the raw fold change,
    the gene-expression-adjusted fold, the expression t-test p-value and a
    free-text comparison annotation (e.g. 'groupB_vs_groupA').  Accessors
    return strings, matching how values are written to the result files.
    """
    def __init__(self, baseline_exp, experimental_exp, fold_change, adj_fold, ttest_raw_exp, annotation):
        self.baseline_exp = baseline_exp; self.experimental_exp = experimental_exp
        self.fold_change = fold_change; self.adj_fold = adj_fold
        self.ttest_raw_exp = ttest_raw_exp; self.annotation = annotation
    def BaselineExp(self): return str(self.baseline_exp)
    def ExperimentalExp(self): return str(self.experimental_exp)
    def FoldChange(self): return str(self.fold_change)
    def AdjFold(self): return str(self.adj_fold)
    def ExpPval(self): return str(self.ttest_raw_exp)
    def Annotation(self): return self.annotation
    def __repr__(self):
        # Bug fix: FoldChange was previously called without 'self.',
        # raising NameError whenever an instance was repr'd/printed.
        return self.BaselineExp()+'|'+self.FoldChange()
def agglomerateInclusionProbesets(array_raw_group_values, exon_inclusion_db):
    """Merge expression profiles of inclusion probesets reporting the same
    splice event into one agglomerated entry.

    For every exclusion probeset with more than one associated inclusion
    probeset, the profiles of those inclusion probesets (when both sides
    are present in array_raw_group_values) are averaged via
    combine_profiles() and stored under a joined ID ('psA|psB|...').
    Original per-probeset entries are left in place.
    """
    for excl_probeset in exon_inclusion_db:
        incl_probesets = exon_inclusion_db[excl_probeset]
        profiles = []
        if len(incl_probesets) > 1:
            for incl_probeset in incl_probesets:
                # Only merge events where both junction sides were measured
                if incl_probeset in array_raw_group_values and excl_probeset in array_raw_group_values:
                    profiles.append(array_raw_group_values[incl_probeset])
        if profiles:
            # Average the collected profiles and register them under the
            # concatenated inclusion-probeset ID (same naming scheme as
            # ExonAnnotate_module.identifyPutativeSpliceEvents).
            merged_id = '|'.join(incl_probesets)
            array_raw_group_values[merged_id] = combine_profiles(profiles)
    return array_raw_group_values
def combine_profiles(profile_list):
    """Element-wise average of a list of expression-profile dictionaries.

    Each profile maps a group key to a list of expression values.  Keys
    and list lengths are taken from the *first* profile; at each position,
    values from every profile containing that key are averaged with
    statistics.avg().  Returns a new {key: [averaged values]} dict.
    """
    if not profile_list:
        return {}
    # The first profile defines the keys and per-key list lengths.
    template = profile_list[0]
    combined = {}
    for key in template:
        size = len(template[key])
        averaged = []
        for position in range(size):
            column = [db[key][position] for db in profile_list if key in db]
            averaged.append(statistics.avg(column))
        combined[key] = averaged
    return combined
def constitutive_exp_normalization(fold_db,stats_dbase,exon_db,constitutive_probeset_db):
    """For every expression value, normalize to the expression of the constitutive gene features for that condition,
    then store those ratios (probeset_exp/avg_constitutive_exp) and regenerate expression values relative only to the
    baseline avg_constitutive_exp, for all conditions, to normalize out gene expression changes

    Returns (adj_fold_dbase, nonlog_NI_db, conditions, gene_db,
    constitutive_gene_db, constitutive_fold_change, avg_const_exp_db).
    Reads module globals: remove_transcriptional_regulated_genes,
    only_include_constitutive_containing_genes,
    factor_out_expression_changes, array_type, log_fold_cutoff,
    summary_data_db; writes global gene_analyzed."""
    #print "\nParameters:"
    #print "Factor_out_expression_changes:",factor_out_expression_changes
    #print "Only_include_constitutive_containing_genes:",only_include_constitutive_containing_genes
    #print "\nAdjusting probeset average intensity values to factor out condition specific expression changes for optimal splicing descrimination"
    gene_db = {}; constitutive_gene_db = {}
    ### organize everything by gene
    # Number of conditions is taken from the first probeset's fold list.
    for probeset in fold_db: conditions = len(fold_db[probeset]); break
    remove_diff_exp_genes = remove_transcriptional_regulated_genes
    # Transcriptional filtering only makes sense for 2-group comparisons.
    if conditions > 2: remove_diff_exp_genes = 'no'
    for probeset in exon_db:
        affygene = exon_db[probeset].GeneID() #exon_db[probeset] = affygene,exons,ensembl,block_exon_ids,block_structure,comparison_info
        if probeset in fold_db:
            try: gene_db[affygene].append(probeset)
            except KeyError: gene_db[affygene] = [probeset]
            if probeset in constitutive_probeset_db and (only_include_constitutive_containing_genes == 'yes' or factor_out_expression_changes == 'no'):
                #the second conditional is used to exlcude constitutive data if we wish to use all probesets for
                #background normalization rather than just the designated 'gene' probesets.
                if probeset in stats_dbase:
                    try: constitutive_gene_db[affygene].append(probeset)
                    except KeyError: constitutive_gene_db[affygene] = [probeset]
    if len(constitutive_gene_db)>0:
        ###This is blank when there are no constitutive and the above condition is implemented
        gene_db2 = constitutive_gene_db
    else: gene_db2 = gene_db
    avg_const_exp_db = {}
    for affygene in gene_db2:
        probeset_list = gene_db2[affygene]
        x = 0
        while x < conditions:
            ### average all exp values for constitutive probesets for each condition
            exp_list=[]
            for probeset in probeset_list:
                # Reconstruct the absolute expression: fold relative to
                # baseline plus the baseline expression itself.
                probe_fold_val = fold_db[probeset][x]
                baseline_exp = stats_dbase[probeset][0]
                exp_val = probe_fold_val + baseline_exp
                exp_list.append(exp_val)
            avg_const_exp = statistics.avg(exp_list)
            try: avg_const_exp_db[affygene].append(avg_const_exp)
            except KeyError: avg_const_exp_db[affygene] = [avg_const_exp]
            x += 1
    adj_fold_dbase={}; nonlog_NI_db={}; constitutive_fold_change={}
    for affygene in avg_const_exp_db: ###If we only wish to include propper constitutive probes, this will ensure we only examine those genes and probesets that are constitutive
        probeset_list = gene_db[affygene]
        x = 0
        while x < conditions:
            exp_list=[]
            for probeset in probeset_list:
                expr_to_subtract = avg_const_exp_db[affygene][x]
                baseline_const_exp = avg_const_exp_db[affygene][0]
                probe_fold_val = fold_db[probeset][x]
                baseline_exp = stats_dbase[probeset][0]
                exp_val = probe_fold_val + baseline_exp
                # Work in non-log space for the ratio computations below.
                exp_val_non_log = statistics.log_fold_conversion_fraction(exp_val)
                expr_to_subtract_non_log = statistics.log_fold_conversion_fraction(expr_to_subtract)
                baseline_const_exp_non_log = statistics.log_fold_conversion_fraction(baseline_const_exp)
                if factor_out_expression_changes == 'yes':
                    exp_splice_valff = exp_val_non_log/expr_to_subtract_non_log
                else: #if no, then we just normalize to the baseline constitutive expression in order to keep gene expression effects (useful if you don't trust constitutive feature expression levels)
                    exp_splice_valff = exp_val_non_log/baseline_const_exp_non_log
                constitutive_fold_diff = expr_to_subtract_non_log/baseline_const_exp_non_log
                ###To calculate adjusted expression, we need to get the fold change in the constitutive avg (expr_to_subtract/baseline_const_exp) and divide the experimental expression
                ###By this fold change.
                ge_adj_exp_non_log = exp_val_non_log/constitutive_fold_diff #gives a GE adjusted expression
                try: ge_adj_exp = math.log(ge_adj_exp_non_log,2)
                # 'dog' is undefined: intentional NameError to halt after
                # printing diagnostics (same idiom as ';kill' elsewhere).
                except ValueError: print probeset,ge_adj_exp_non_log,constitutive_fold_diff,exp_val_non_log,exp_val,baseline_exp, probe_fold_val, dog
                adj_probe_fold_val = ge_adj_exp - baseline_exp
                ### Here we normalize probeset expression to avg-constitutive expression by dividing probe signal by avg const.prove sig (should be < 1)
                ### refered to as steady-state normalization
                if array_type != 'AltMouse' or (probeset not in constitutive_probeset_db):
                    """Can't use constitutive gene features since these have no variance for pearson analysis
                    Python will approximate numbers to a small decimal point range. If the first fold value is
                    zero, often, zero will be close to but not exactly zero. Correct below """
                    try:
                        adj_fold_dbase[probeset].append(adj_probe_fold_val)
                    except KeyError:
                        if abs(adj_probe_fold_val - 0) < 0.0000001: #make zero == exactly to zero
                            adj_probe_fold_val = 0
                        adj_fold_dbase[probeset] = [adj_probe_fold_val]
                    try: nonlog_NI_db[probeset].append(exp_splice_valff) ###ratio of junction exp relative to gene expression at that time-point
                    except KeyError: nonlog_NI_db[probeset] = [exp_splice_valff]
                    n = 0
                    #if expr_to_subtract_non_log != baseline_const_exp_non_log: ###otherwise this is the first value in the expression array
                    if x!=0: ###previous expression can produce errors when multiple group averages have identical values
                        fold_change = expr_to_subtract_non_log/baseline_const_exp_non_log
                        fold_change_log = math.log(fold_change,2)
                        constitutive_fold_change[affygene] = fold_change_log
                        ### If we want to remove any genes from the analysis with large transcriptional changes
                        ### that may lead to false positive splicing calls (different probeset kinetics)
                        if remove_diff_exp_genes == 'yes':
                            if abs(fold_change_log) > log_fold_cutoff:
                                del constitutive_fold_change[affygene]
                                try: del adj_fold_dbase[probeset]
                                except KeyError: n = 1
                                try: del nonlog_NI_db[probeset]
                                except KeyError: n = 1
                    """elif expr_to_subtract_non_log == baseline_const_exp_non_log: ###This doesn't make sense, since n can't equal 1 if the conditional is false (check this code again later 11/23/07)
                        if n == 1:
                            del adj_fold_dbase[probeset]
                            del nonlog_NI_db[probeset]"""
            x += 1
    print "Intensity normalization complete..."
    if factor_out_expression_changes == 'no':
        adj_fold_dbase = fold_db #don't change expression values
    print len(constitutive_fold_change), "genes undergoing analysis for alternative splicing/transcription"
    summary_data_db['denominator_exp_genes']=len(constitutive_fold_change)
    """
    mir_gene_count = 0
    for gene in constitutive_fold_change:
        if gene in gene_microRNA_denom: mir_gene_count+=1
    print mir_gene_count, "Genes with predicted microRNA binding sites undergoing analysis for alternative splicing/transcription"
    """
    global gene_analyzed; gene_analyzed = len(constitutive_gene_db)
    return adj_fold_dbase, nonlog_NI_db, conditions, gene_db, constitutive_gene_db,constitutive_fold_change, avg_const_exp_db
class TranscriptionData:
    """Gene-level constitutive (transcriptional) fold change plus any
    RNA-processing/binding-factor annotation for that gene."""
    def __init__(self, constitutive_fold, rna_processing_annotation):
        self._constitutive_fold = constitutive_fold; self._rna_processing_annotation = rna_processing_annotation
    def ConstitutiveFold(self): return self._constitutive_fold
    def ConstitutiveFoldStr(self): return str(self._constitutive_fold)
    def RNAProcessing(self): return self._rna_processing_annotation
    def __repr__(self):
        # Bug fix: RNAProcessing was previously called without 'self.',
        # raising NameError whenever an instance was repr'd/printed.
        return self.ConstitutiveFoldStr()+'|'+self.RNAProcessing()
def constitutive_expression_changes(constitutive_fold_change, annotate_db):
    """Package each gene's constitutive fold change as a TranscriptionData
    object for the ASPIRE gene-expression filter.

    An RNA-processing/binding-factor annotation is attached only when the
    gene is present in annotate_db and its annotation is longer than four
    characters (short codes are ignored).
    """
    gene_expression_diff_db = {}
    for affygene in constitutive_fold_change:
        fold = constitutive_fold_change[affygene]
        annotation = ''
        if affygene in annotate_db:
            candidate = annotate_db[affygene].RNAProcessing()
            if len(candidate) > 4:
                annotation = candidate
        ###Add in evaluation of RNA-processing/binding factor
        gene_expression_diff_db[affygene] = TranscriptionData(fold, annotation)
    return gene_expression_diff_db
def constitutive_exp_normalization_raw(gene_db,constitutive_gene_db,array_raw_group_values,exon_db,y,avg_const_exp_db):
    """normalize expression for raw expression data (only for non-baseline data)

    Appends, for group index y, the per-condition average constitutive
    expression of each gene to avg_const_exp_db (mutated in place and also
    returned).  Genes without constitutive probesets fall back to all of
    their probesets.  Reads globals only_include_constitutive_containing_genes
    and analysis_method; may create global normalized_raw_exp_ratios."""
    #avg_true_const_exp_db[affygene] = [avg_const_exp]
    temp_avg_const_exp_db={}
    # Number of conditions taken from the first probeset's value list.
    for probeset in array_raw_group_values:
        conditions = len(array_raw_group_values[probeset][y]); break #number of raw expresson values to normalize
    for affygene in gene_db:
        ###This is blank when there are no constitutive or the above condition is implemented
        if affygene in constitutive_gene_db:
            probeset_list = constitutive_gene_db[affygene]
            z = 1
        else: ###so we can analyze splicing independent of gene expression even if no 'gene' feature is present
            probeset_list = gene_db[affygene]
            z = 0
        x = 0
        while x < conditions:
            ### average all exp values for constitutive probesets for each conditionF
            exp_list=[]
            for probeset in probeset_list:
                try: exp_val = array_raw_group_values[probeset][y][x] ### try statement is used for constitutive probes that were deleted due to filtering in performExpressionAnalysis
                except KeyError: continue
                exp_list.append(exp_val)
            # Empty exp_list makes statistics.avg raise -> flagged 'null'.
            try: avg_const_exp = statistics.avg(exp_list)
            except Exception: avg_const_exp = 'null'
            if only_include_constitutive_containing_genes == 'yes' and avg_const_exp != 'null':
                # Only record averages built from true constitutive probes.
                if z == 1:
                    try: avg_const_exp_db[affygene].append(avg_const_exp)
                    except KeyError: avg_const_exp_db[affygene] = [avg_const_exp]
                    try: temp_avg_const_exp_db[affygene].append(avg_const_exp)
                    except KeyError: temp_avg_const_exp_db[affygene] = [avg_const_exp]
            elif avg_const_exp != 'null': ###***
                try: avg_const_exp_db[affygene].append(avg_const_exp)
                except KeyError: avg_const_exp_db[affygene] = [avg_const_exp]
                try: temp_avg_const_exp_db[affygene].append(avg_const_exp)
                except KeyError: temp_avg_const_exp_db[affygene] = [avg_const_exp]
            x += 1
    if analysis_method == 'ANOVA':
        global normalized_raw_exp_ratios; normalized_raw_exp_ratios = {}
        for affygene in gene_db:
            probeset_list = gene_db[affygene]
            for probeset in probeset_list:
                # NOTE(review): 'x' is left over from the loop above (== conditions)
                # and is never reset or incremented here, so this while loop
                # either never runs or never terminates depending on the
                # global 'group_size' -- looks like dead/broken code; confirm
                # against an ANOVA run before relying on it.
                while x < group_size:
                    new_ratios = [] ### Calculate expression ratios relative to constitutive expression
                    exp_val = array_raw_group_values[probeset][y][x]
                    const_exp_val = temp_avg_const_exp_db[affygene][x]
                    ###Since the above dictionary is agglomerating all constitutive expression values for permutation,
                    ###we need an unbiased way to grab just those relevant const. exp. vals. (hence the temp dictionary)
                    #non_log_exp_val = statistics.log_fold_conversion_fraction(exp_val)
                    #non_log_const_exp_val = statistics.log_fold_conversion_fraction(const_exp_val)
                    #non_log_exp_ratio = non_log_exp_val/non_log_const_exp_val
                    log_exp_ratio = exp_val - const_exp_val
                    try: normalized_raw_exp_ratios[probeset].append(log_exp_ratio)
                    except KeyError: normalized_raw_exp_ratios[probeset] = [log_exp_ratio]
    return avg_const_exp_db
######### Z Score Analyses #######
class ZScoreData:
    """Enrichment statistics for a single element (protein domain or
    microRNA binding site): changed/measured gene counts, z-score, the
    null z-score, permutation p-value and BH-adjusted p-value.

    Numeric accessors return strings, matching the export format.
    SetP/SetAdjP must be called before PermuteP/AdjP are read.
    """
    def __init__(self, element, changed, measured, zscore, null_z, gene_symbols):
        self._element = element
        self._changed = changed
        self._measured = measured
        self._zscore = zscore
        self._null_z = null_z
        self._gene_symbols = gene_symbols
    def ElementID(self): return self._element
    def Changed(self): return str(self._changed)
    def Measured(self): return str(self._measured)
    def AssociatedWithElement(self): return str(self._gene_symbols)
    def ZScore(self): return str(self._zscore)
    def SetP(self, p): self._permute_p = p
    def PermuteP(self): return str(self._permute_p)
    def SetAdjP(self, adjp): self._adj_p = adjp
    def AdjP(self): return str(self._adj_p)
    def PercentChanged(self):
        # Percentage of measured genes that changed; 0 on bad/zero counts.
        try:
            pc = float(self.Changed())/float(self.Measured())*100
        except Exception:
            pc = 0
        return str(pc)
    def NullZ(self): return self._null_z
    def Report(self):
        return self.ElementID()
    def __repr__(self): return self.Report()
class FDRStats(ZScoreData):
    # Minimal wrapper used when only a p-value needs BH adjustment: stores
    # the permutation p directly and inherits PermuteP()/SetAdjP() from
    # ZScoreData.  NOTE: AdjP() is only valid after SetAdjP() was called
    # (e.g. by adjustPermuteStats); before that it raises AttributeError.
    def __init__(self,p): self._permute_p = p
    def AdjP(self): return str(self._adj_p)
def countGenesForElement(permute_input_list, probeset_to_gene, probeset_element_db):
    """Count the number of distinct genes linked to each element.

    permute_input_list: probesets to tally; probesets missing from either
    lookup table are silently skipped.  Returns {element: unique gene count}.
    """
    element_gene_db = {}
    for probeset in permute_input_list:
        try:
            element_list = probeset_element_db[probeset]
            gene = probeset_to_gene[probeset]
        except KeyError:
            continue  # probeset absent from one of the lookup tables
        for element in element_list:
            element_gene_db.setdefault(element, []).append(gene)
    ### Collapse each gene list to its unique-gene count
    for element in element_gene_db:
        element_gene_db[element] = len(set(element_gene_db[element]))
    return element_gene_db
def formatGeneSymbolHits(geneid_list):
    """Convert gene IDs to display symbols and join with ', '.

    Symbols come from the global annotate_db; when a gene has no entry or
    an empty symbol, the raw gene ID is used instead.
    """
    symbols = []
    for geneid in geneid_list:
        symbol = ''
        if geneid in annotate_db:
            symbol = annotate_db[geneid].Symbol()
        if len(symbol) < 1:
            symbol = geneid  # no usable annotation: fall back to the ID
        symbols.append(symbol)
    return ', '.join(symbols)
def zscore(r, n, N, R):
    """Over-representation z-score: r hits among the n genes carrying an
    element, given R regulated genes out of N examined (normal
    approximation to the hypergeometric; see statistics.zscore)."""
    expected = n*(R/N)
    # Same multiplication order as the classic formula to keep float
    # results bit-identical.
    variance = n*(R/N)*(1-(R/N))*(1-((n-1)/(N-1)))
    return (r - expected)/math.sqrt(variance)
def calculateZScores(hit_count_db, denom_count_db, total_gene_denom_count, total_gene_hit_count, element_type):
    """Compute an over-representation z-score for every element and record
    the results in the module-level dictionaries
    (original_domain_z_score_data / original_microRNA_z_score_data and
    permuted_z_scores).

    Returns (N, R): total genes examined and total regulated (AS) genes as
    floats, for use by the permutation machinery.
    """
    N = float(total_gene_denom_count)  ### Genes examined
    R = float(total_gene_hit_count)    ### AS genes
    for element in denom_count_db:
        n = float(denom_count_db[element])  ### all genes associated with element
        if element in hit_count_db:
            hit_genes = hit_count_db[element]
            r = float(len(hit_genes))  ### regulated genes associated with element
            gene_symbols = formatGeneSymbolHits(hit_genes)
        else:
            r = 0
            gene_symbols = ''
        try:
            z = zscore(r, n, N, R)
        except Exception:
            z = 0  # degenerate counts can zero the variance (div-by-zero)
        try:
            null_z = zscore(0, n, N, R)
        except Exception:
            null_z = 0
        zsd = ZScoreData(element, r, n, z, null_z, gene_symbols)
        if element_type == 'domain':
            original_domain_z_score_data[element] = zsd
        elif element_type == 'microRNA':
            original_microRNA_z_score_data[element] = zsd
        permuted_z_scores[element] = [z]  # true score always lives at index 0
        if perform_element_permutation_analysis == 'no':
            ### Fisher's exact test as a faster alternative to permutation
            p = FishersExactTest(r, n, R, N)
            zsd.SetP(p)
    return N, R
######### Begin Permutation Analysis #######
def calculatePermuteZScores(permute_element_inputs, element_denominator_gene_count, N, R):
    """Append the |z-score| of every permuted input set to the global
    permuted_z_scores lists.

    permute_element_inputs: list of {element: permuted hit count} dicts.
    Kept lean since this runs once per permutation.
    """
    for element_input_gene_count in permute_element_inputs:
        for element in element_input_gene_count:
            r = element_input_gene_count[element]
            n = element_denominator_gene_count[element]
            try:
                z = statistics.zscore(r, n, N, R)
            except Exception:
                z = 0  # degenerate table: treat as no enrichment
            permuted_z_scores[element].append(abs(z))
            #if element == '0005488':
            #a.append(r)
def calculatePermuteStats(original_element_z_score_data):
    """Derive a permutation p-value for every element from the scores in
    the global permuted_z_scores and store it via each ZScoreData.SetP."""
    for element in original_element_z_score_data:
        zsd = original_element_z_score_data[element]
        scores = permuted_z_scores[element]
        true_z = abs(scores[0])       # index 0 holds the real score
        null_scores = scores[1:]      ### Exclude the true value
        nullz = zsd.NullZ()
        # Pad with null z-scores for permutations that produced no genes
        # for this element -- but only when they would actually count
        # towards the p-value (i.e. equal the observed z).
        if abs(nullz) == true_z:
            padding = permutations - len(null_scores)
            null_scores += [abs(nullz)] * padding
        if len(null_scores) > 0:
            p = permute_p(null_scores, true_z)
        else:
            p = 1
        #if p>1: p=1
        zsd.SetP(p)
def FishersExactTest(r, n, R, N):
    """Two-tailed Fisher's exact p-value for element over-representation.

    r: regulated genes carrying the element, n: all genes carrying it,
    R: all regulated genes, N: all genes examined.  The 2x2 table is
    (in-element hit/miss) vs (out-of-element hit/miss).
    """
    in_elem_hit = int(r)
    in_elem_miss = int(n - r)
    out_elem_hit = int(R - r)
    out_elem_miss = int(N - R - (n - r))
    table = [[in_elem_hit, in_elem_miss], [out_elem_hit, out_elem_miss]]
    try:
        ### Scipy version - cuts runtime by ~1/3rd
        oddsratio, pvalue = stats.fisher_exact(table)
        return pvalue
    except Exception:
        # Fall back to the bundled pure-Python implementation
        ft = fishers_exact_test.FishersExactTest(table)
        return ft.two_tail_p()
def adjustPermuteStats(original_element_z_score_data):
    # Benjamini-Hochberg step-up adjustment of every element's permutation
    # p-value; adjusted values are stored back via zsd.SetAdjP().
    #1. Sort ascending the original input p value vector. Call this spval. Keep the original indecies so you can sort back.
    #2. Define a new vector called tmp. tmp= spval. tmp will contain the BH p values.
    #3. m is the length of tmp (also spval)
    #4. i=m-1
    #5 tmp[ i ]=min(tmp[i+1], min((m/i)*spval[ i ],1)) - second to last, last, last/second to last
    #6. i=m-2
    #7 tmp[ i ]=min(tmp[i+1], min((m/i)*spval[ i ],1))
    #8 repeat step 7 for m-3, m-4,... until i=1
    #9. sort tmp back to the original order of the input p values.
    spval=[]
    for element in original_element_z_score_data:
        zsd = original_element_z_score_data[element]
        p = float(zsd.PermuteP())
        spval.append([p,element])
    # NOTE: tmp is an alias of spval (same list object), NOT a copy; this
    # still works because the descending loop reads index i before
    # overwriting it.  'x' is assigned but never used.
    spval.sort(); tmp = spval; m = len(spval); i=m-2; x=0 ###Step 1-4
    while i > -1:
        # Entry i becomes (adjusted_p, element); the largest p (index m-1)
        # is left unadjusted, matching the standard BH procedure.
        tmp[i]=min(tmp[i+1][0], min((float(m)/(i+1))*spval[i][0],1)),tmp[i][1]; i -= 1
    for (adjp,element) in tmp:
        zsd = original_element_z_score_data[element]
        zsd.SetAdjP(adjp)
    spval=[]
def permute_p(null_list, true_value):
    """Permutation p-value: fraction of the configured number of
    permutations (global 'permutations') whose null score meets or
    exceeds true_value.

    The denominator is deliberately the global permutation count rather
    than len(null_list), since the two can differ when null padding was
    skipped.  ###Multiply probabilty x2?
    """
    exceed_count = 0
    for value in null_list:
        if value >= true_value:
            exceed_count += 1
    #if true_value > 8: global a; a = null_list; print true_value,y,x;kill
    return float(exceed_count)/float(permutations)
######### End Permutation Analysis #######
def exportZScoreData(original_element_z_score_data,element_type):
    # Write per-element enrichment results to
    # AltResults/AlternativeOutput/<dataset><method>-<element_type>-zscores.txt,
    # sorted by permutation p (ties broken so larger 'measured' counts
    # come first).  Uses globals root_dir, dataset_name, analysis_method,
    # permuted_z_scores and the project 'export' module.
    element_output = root_dir+'AltResults/AlternativeOutput/' + dataset_name + analysis_method+'-'+element_type+'-zscores.txt'
    data = export.ExportFile(element_output)
    headers = [element_type+'-Name','Number Changed','Number Measured','Percent Changed', 'Zscore','PermuteP','AdjP','Changed GeneSymbols']
    headers = string.join(headers,'\t')+'\n'
    data.write(headers); sort_results=[]
    #print "Results for",len(original_element_z_score_data),"elements exported to",element_output
    for element in original_element_z_score_data:
        zsd=original_element_z_score_data[element]
        # 'kill' is undefined: intentional NameError halt when an element
        # is missing its permutation stats (same idiom used elsewhere).
        try: results = [zsd.Changed(), zsd.Measured(), zsd.PercentChanged(), zsd.ZScore(), zsd.PermuteP(), zsd.AdjP(), zsd.AssociatedWithElement()]
        except AttributeError: print element,len(permuted_z_scores[element]);kill
        results = [element] + results
        results = string.join(results,'\t') + '\n'
        # -1/Measured sorts equal p-values so larger denominators rank first
        sort_results.append([float(zsd.PermuteP()),-1/float(zsd.Measured()),results])
    sort_results.sort()
    for values in sort_results:
        results = values[2]
        data.write(results)
    data.close()
def getInputsForPermutationAnalysis(exon_db):
    """Build the permutation-analysis denominator.

    Returns (probeset_to_gene, denominator_list).  When the global
    filter_for_AS is 'yes', only probesets whose SplicingCall() equals 1
    are kept; otherwise every probeset in exon_db qualifies.
    """
    ### Filter fold_dbase, which is the proper denominator
    probeset_to_gene = {}
    denominator_list = []
    for probeset in exon_db:
        if filter_for_AS == 'yes':
            include = exon_db[probeset].SplicingCall() == 1
        else:
            include = True
        if include:
            probeset_to_gene[probeset] = exon_db[probeset].GeneID()
            denominator_list.append(probeset)
    return probeset_to_gene, denominator_list
def getJunctionSplicingAnnotations(regulated_exon_junction_db):
    # Build {junction_probeset(s): annotation} for regulated junctions by
    # importing critical-exon annotations (built by the exon-array
    # pipeline) and linking them back to the junction probesets.  When a
    # junction maps to several critical exons the annotations are merged
    # into a single AffyExonSTData object.  Uses globals array_type,
    # explicit_data_type, root_dir, species, annotate-import helpers.
    filter_status = 'yes'
    ########### Import critical exon annotation for junctions, build through the exon array analysis pipeline - link back to probesets
    filtered_arrayids={}; critical_probeset_annotation_db={}
    if array_type == 'RNASeq' and explicit_data_type == 'null':
        critical_exon_annotation_file = root_dir+'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_exons.txt'
    elif array_type == 'RNASeq' and explicit_data_type != 'null':
        critical_exon_annotation_file = root_dir+'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_junctions.txt'
    else:
        critical_exon_annotation_file = "AltDatabase/"+species+"/"+array_type+"/"+species+"_Ensembl_"+array_type+"_probesets.txt"
        # Chained 'filename=' assignment appears to be a leftover; only
        # critical_exon_annotation_file is used afterwards.
        critical_exon_annotation_file = filename=getFilteredFilename(critical_exon_annotation_file)
    for uid in regulated_exon_junction_db:
        gene = regulated_exon_junction_db[uid].GeneID()
        critical_exons = regulated_exon_junction_db[uid].CriticalExons()
        """### It appears that each critical exon for junction arrays can be a concatenation of multiple exons, making this unnecessary
        if len(critical_exons)>1 and array_type == 'junction':
            critical_exons_joined = string.join(critical_exons,'|')
            filtered_arrayids[gene+':'+critical_exon].append(uid)"""
        for critical_exon in critical_exons:
            # Inner try/except is the usual append-or-create idiom; the
            # TypeError branch ('kill' undefined) is an intentional halt
            # on malformed keys.
            try:
                try: filtered_arrayids[gene+':'+critical_exon].append(uid)
                except TypeError: print gene, critical_exon, uid;kill
            except KeyError: filtered_arrayids[gene+':'+critical_exon]=[uid]
    critical_exon_annotation_db = importSplicingAnnotationDatabase(critical_exon_annotation_file,'exon-fake',filtered_arrayids,filter_status);null=[] ###The file is in exon centric format, so designate array_type as exon
    for key in critical_exon_annotation_db:
        ced = critical_exon_annotation_db[key]
        for junction_probesets in filtered_arrayids[key]:
            try: critical_probeset_annotation_db[junction_probesets].append(ced) ###use for splicing and Exon annotations
            except KeyError: critical_probeset_annotation_db[junction_probesets] = [ced]
    for junction_probesets in critical_probeset_annotation_db:
        if len(critical_probeset_annotation_db[junction_probesets])>1: ###Thus multiple exons associated, must combine annotations
            exon_ids=[]; external_exonids=[]; exon_regions=[]; splicing_events=[]
            for ed in critical_probeset_annotation_db[junction_probesets]:
                ensembl_gene_id = ed.GeneID(); transcript_cluster_id = ed.ExternalGeneID()
                exon_ids.append(ed.ExonID()); external_exonids.append(ed.ExternalExonIDs()); exon_regions.append(ed.ExonRegionID()); se = string.split(ed.SplicingEvent(),'|')
                for i in se: splicing_events.append(i)
            splicing_events = unique.unique(splicing_events) ###remove duplicate entries
            # Collapse the per-exon annotations into one pipe-joined record
            exon_id = string.join(exon_ids,'|'); external_exonid = string.join(external_exonids,'|'); exon_region = string.join(exon_regions,'|'); splicing_event = string.join(splicing_events,'|')
            probe_data = AffyExonSTData(ensembl_gene_id, exon_id, external_exonid, '', exon_region, splicing_event, '','')
            if array_type != 'RNASeq': probe_data.setTranscriptCluster(transcript_cluster_id)
            critical_probeset_annotation_db[junction_probesets] = probe_data
        else:
            # Single annotation: unwrap the one-element list
            critical_probeset_annotation_db[junction_probesets] = critical_probeset_annotation_db[junction_probesets][0]
    return critical_probeset_annotation_db
def determineExternalType(external_probeset_db):
    """Classify an imported external probeset result table and reduce it to
    {probeset: (score, p-value)}.

    external_probeset_db -- {key: [row, row, ...]} as produced upstream; a
        'TC' key whose first row is a column-header list marks a JETTA export,
        anything else is treated as a generic two-value-column file.
    Returns (external_probeset_db2, ext_type) where ext_type is 'JETTA' or
    'generic' and external_probeset_db2 maps probeset -> (score, p-value).
    """
    external_probeset_db2 = {}
    if 'TC' in external_probeset_db:
        ext_type = 'JETTA'
        ### Map each column-header NAME to its position. Bug fix: this was
        ### temp_index[i]=i, which made every header lookup below fail and
        ### crashed later at row[ps_index] with an uncaught NameError.
        temp_index = {}; i = 0
        for name in external_probeset_db['TC'][0]: temp_index[name] = i; i += 1
        if 'PS:norm_expr_fold_change' in temp_index: NI_fold_index = temp_index['PS:norm_expr_fold_change']
        if 'MADS:pv_1over2' in temp_index: MADS_p1_index = temp_index['MADS:pv_1over2']
        if 'MADS:pv_2over1' in temp_index: MADS_p2_index = temp_index['MADS:pv_2over1']
        ### NOTE(review): re-assigns MADS_p2_index from the MADS column when a
        ### 'TC:expr_fold_change' header exists - looks like a copy/paste
        ### remnant; behavior kept as-is pending confirmation.
        if 'TC:expr_fold_change' in temp_index: MADS_p2_index = temp_index['MADS:pv_2over1']
        if 'PsId' in temp_index: ps_index = temp_index['PsId']
        for tc in external_probeset_db:
            ### The header row is iterated too; its non-numeric cells fail the
            ### float() conversions and fall back to the default value 1.
            for row in external_probeset_db[tc]:
                try: NI_fold = float(row[NI_fold_index])
                except Exception: NI_fold = 1
                try: MADSp1 = float(row[MADS_p1_index])
                except Exception: MADSp1 = 1
                try: MADSp2 = float(row[MADS_p2_index])
                except Exception: MADSp2 = 1 ### bug fix: previously reset MADSp1 instead of MADSp2
                ### Report the more significant of the two directional MADS p-values
                if MADSp1 < MADSp2: pval = MADSp1
                else: pval = MADSp2
                probeset = row[ps_index]
                external_probeset_db2[probeset] = NI_fold, pval
    else:
        ext_type = 'generic'
        ### Inspect the two value columns to decide which holds a score
        ### (absolute values can exceed 1) and which a p-value (bounded by 1).
        a = []; b = []
        for id in external_probeset_db:
            try: a.append(abs(float(external_probeset_db[id][0][0])))
            except Exception: null = []
            try: b.append(abs(float(external_probeset_db[id][0][1])))
            except Exception: null = []
        a.sort(); b.sort(); pval_index = None; score_index = None
        if len(a) > 0:
            if max(a) > 1: score_index = 0
            else: pval_index = 0
        if len(b) > 0:
            if max(b) > 1: score_index = 1
            else: pval_index = 1
        for id in external_probeset_db:
            if score_index != None: score = external_probeset_db[id][0][score_index]
            else: score = 1
            if pval_index != None: pval = external_probeset_db[id][0][pval_index]
            else: pval = 1
            external_probeset_db2[id] = score, pval
    return external_probeset_db2, ext_type
def importExternalProbesetData(dataset_dir):
excluded_probeset_db={}; splice_event_list=[]; p_value_call={}; permute_p_values={}; gene_expression_diff_db={}
analyzed_probeset_db = {}
external_probeset_db = importExternalDBList(dataset_dir)
external_probeset_db, ext_type = determineExternalType(external_probeset_db)
for probeset in exon_db: analyzed_probeset_db[probeset] = []
### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specifc splicing pattern)
if len(filtered_probeset_db)>0:
temp_db={}
for probeset in analyzed_probeset_db: temp_db[probeset]=[]
for probeset in temp_db:
try: filtered_probeset_db[probeset]
except KeyError: del analyzed_probeset_db[probeset]
### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specifc splicing annotation)
if filter_for_AS == 'yes':
for probeset in exon_db:
as_call = exon_db[probeset].SplicingCall()
if as_call == 0:
try: del analyzed_probeset_db[probeset]
except KeyError: null=[]
for probeset in analyzed_probeset_db:
ed = exon_db[probeset]; geneid = ed.GeneID()
td = TranscriptionData('',''); gene_expression_diff_db[geneid] = td
if probeset in external_probeset_db:
exonid = ed.ExonID(); critical_exon_list = [1,[exonid]]
splicing_index,normIntensityP = external_probeset_db[probeset]
group1_ratios=[]; group2_ratios=[];exp_log_ratio=''; ttest_exp_p='';normIntensityP='';opposite_SI_log_mean=''
sid = ExonData(splicing_index,probeset,critical_exon_list,geneid,group1_ratios,group2_ratios,normIntensityP,opposite_SI_log_mean)
splice_event_list.append((splicing_index,sid))
else:
### Also record the data for probesets that are excluded... Used by DomainGraph
eed = ExcludedExonData(0,geneid,'NA')
excluded_probeset_db[probeset] = eed
print len(splice_event_list), 'pre-filtered external results imported...\n'
return splice_event_list, p_value_call, permute_p_values, excluded_probeset_db, gene_expression_diff_db
def splicingAnalysisAlgorithms(nonlog_NI_db,fold_dbase,dataset_name,gene_expression_diff_db,exon_db,ex_db,si_db,dataset_dir):
protein_exon_feature_db={}; global regulated_exon_junction_db; global critical_exon_annotation_db; global probeset_comp_db; probeset_comp_db={}
if original_conditions == 2: print "Beginning to run", analysis_method, "algorithm on",dataset_name[0:-1],"data"
if run_from_scratch == 'Annotate External Results':
splice_event_list, p_value_call, permute_p_values, excluded_probeset_db, gene_expression_diff_db = importExternalProbesetData(dataset_dir)
elif analysis_method == 'ASPIRE' or analysis_method == 'linearregres':
original_exon_db = exon_db
if original_conditions > 2:
splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db = ex_db
splice_event_list, p_value_call, permute_p_values, exon_db, regulated_exon_junction_db = furtherProcessJunctionScores(splice_event_list, probeset_comp_db, permute_p_values)
else:
splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db = analyzeJunctionSplicing(nonlog_NI_db)
splice_event_list, p_value_call, permute_p_values, exon_db, regulated_exon_junction_db = furtherProcessJunctionScores(splice_event_list, probeset_comp_db, permute_p_values)
elif analysis_method == 'splicing-index':
regulated_exon_junction_db = {}
if original_conditions > 2:
excluded_probeset_db = ex_db; splice_event_list = si_db;
clearObjectsFromMemory(ex_db); clearObjectsFromMemory(si_db)
ex_db=[]; si_db=[]; permute_p_values={}; p_value_call=''
else: splice_event_list, p_value_call, permute_p_values, excluded_probeset_db = analyzeSplicingIndex(fold_dbase)
elif analysis_method == 'FIRMA':
regulated_exon_junction_db = {}
splice_event_list, p_value_call, permute_p_values, excluded_probeset_db = FIRMAanalysis(fold_dbase)
global permuted_z_scores; permuted_z_scores={}; global original_domain_z_score_data; original_domain_z_score_data={}
global original_microRNA_z_score_data; original_microRNA_z_score_data={}
nonlog_NI_db=[] ### Clear memory of this large dictionary
try: clearObjectsFromMemory(original_avg_const_exp_db); clearObjectsFromMemory(array_raw_group_values)
except Exception: null=[]
try: clearObjectsFromMemory(avg_const_exp_db)
except Exception: null=[]
try: clearObjectsFromMemory(alt_junction_db)
except Exception: null=[]
try: clearObjectsFromMemory(fold_dbase); fold_dbase=[]
except Exception: null=[]
microRNA_full_exon_db,microRNA_count_db,gene_microRNA_denom = ExonAnalyze_module.importmicroRNADataExon(species,array_type,exon_db,microRNA_prediction_method,explicit_data_type,root_dir)
#print "MicroRNA data imported"
if use_direct_domain_alignments_only == 'yes':
protein_ft_db_len,domain_associated_genes = importProbesetAligningDomains(exon_db,'gene')
else: protein_ft_db_len,domain_associated_genes = importProbesetProteinCompDomains(exon_db,'gene','exoncomp')
if perform_element_permutation_analysis == 'yes':
probeset_to_gene,denominator_list = getInputsForPermutationAnalysis(exon_db)
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
exon_gene_array_translation_file = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_'+array_type+'-exon_probesets.txt'
try: exon_array_translation_db = importGeneric(exon_gene_array_translation_file)
except Exception: exon_array_translation_db={} ### Not present for all species
exon_hits={}; clearObjectsFromMemory(probeset_comp_db); probeset_comp_db=[]
###Run analyses in the ExonAnalyze_module module to assess functional changes
for (score,ed) in splice_event_list:
geneid = ed.GeneID()
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
pl = string.split(ed.Probeset1(),'|'); probeset1 = pl[0] ### When agglomerated, this is important
uid = (probeset1,ed.Probeset2())
else: uid = ed.Probeset1()
gene_exon = geneid,uid; exon_hits[gene_exon] = ed
#print probeset1,ed.Probeset1(),ed.Probeset2(),gene_exon,ed.CriticalExons()
dataset_name_original = analysis_method+'-'+dataset_name[8:-1]
global functional_attribute_db; global protein_features
### Possibly Block-out code for DomainGraph export
########### Re-import the exon_db for significant entries with full annotaitons
exon_db={}; filtered_arrayids={}; filter_status='yes' ###Use this as a means to save memory (import multiple times - only storing different types relevant information)
for (score,entry) in splice_event_list:
try: probeset = original_exon_db[entry.Probeset1()].Probeset()
except Exception: probeset = entry.Probeset1()
pl = string.split(probeset,'|'); probeset = pl[0]; filtered_arrayids[probeset] = [] ### When agglomerated, this is important
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
try: probeset = entry.Probeset2(); filtered_arrayids[probeset] = []
except AttributeError: null =[] ###occurs when running Splicing
exon_db = importSplicingAnnotationDatabase(probeset_annotations_file,array_type,filtered_arrayids,filter_status);null=[] ###replace existing exon_db (probeset_annotations_file should be a global)
###domain_gene_changed_count_db is the number of genes for each domain that are found for regulated probesets
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
if use_direct_domain_alignments_only == 'yes':
protein_features,domain_gene_changed_count_db,functional_attribute_db = importProbesetAligningDomains(regulated_exon_junction_db,'probeset')
else: protein_features,domain_gene_changed_count_db,functional_attribute_db = importProbesetProteinCompDomains(regulated_exon_junction_db,'probeset','exoncomp')
else:
if use_direct_domain_alignments_only == 'yes':
protein_features,domain_gene_changed_count_db,functional_attribute_db = importProbesetAligningDomains(exon_db,'probeset')
else: protein_features,domain_gene_changed_count_db,functional_attribute_db = importProbesetProteinCompDomains(exon_db,'probeset','exoncomp')
filtered_microRNA_exon_db = ExonAnalyze_module.filterMicroRNAProbesetAssociations(microRNA_full_exon_db,exon_hits)
microRNA_full_exon_db=[]
###add microRNA data to functional_attribute_db
microRNA_hit_gene_count_db = {}; all_microRNA_gene_hits={}; microRNA_attribute_db={}; probeset_mirBS_db={}
for (affygene,uid) in filtered_microRNA_exon_db: ###example ('G7091354', 'E20|') [('hsa-miR-130a', 'Pbxip1'), ('hsa-miR-130a', 'Pbxip1'
###3-1-08
miR_list = []
microRNA_symbol_list = filtered_microRNA_exon_db[(affygene,uid)]
for mir_key in microRNA_symbol_list:
microRNA,gene_symbol,miR_seq, miR_sources = mir_key
#if 'ENS' in microRNA: print microRNA; kill ### bug in some miRNA annotations introduced in the build process
specific_microRNA_tuple = (microRNA,'~')
try: microRNA_hit_gene_count_db[microRNA].append(affygene)
except KeyError: microRNA_hit_gene_count_db[microRNA] = [affygene]
###Create a database with the same structure as "protein_exon_feature_db"(below) for over-representation analysis (direction specific), after linking up splice direction data
try: microRNA_attribute_db[(affygene,uid)].append(specific_microRNA_tuple)
except KeyError: microRNA_attribute_db[(affygene,uid)] = [specific_microRNA_tuple]
miR_data = microRNA+':'+miR_sources
miR_list.append(miR_data) ###Add miR information to the record
function_type = ('miR-sequence: ' +'('+miR_data+')'+miR_seq,'~') ###Add miR sequence information to the sequence field of the report
try: functional_attribute_db[(affygene,uid)].append(function_type)
except KeyError: functional_attribute_db[(affygene,uid)]=[function_type]
#print (affygene,uid), [function_type];kill
if perform_element_permutation_analysis == 'yes':
try: probeset_mirBS_db[uid].append(microRNA)
except KeyError: probeset_mirBS_db[uid] = [microRNA]
miR_str = string.join(miR_list,','); miR_str = '('+miR_str+')'
function_type = ('microRNA-target'+miR_str,'~')
try: functional_attribute_db[(affygene,uid)].append(function_type)
except KeyError: functional_attribute_db[(affygene,uid)]=[function_type]
all_microRNA_gene_hits[affygene] = []
###Replace the gene list for each microRNA hit with count data
microRNA_hit_gene_count_db = eliminate_redundant_dict_values(microRNA_hit_gene_count_db)
###Combines any additional feature alignment info identified from 'ExonAnalyze_module.characterizeProteinLevelExonChanges' (e.g. from Ensembl or junction-based queries rather than exon specific) and combines
###this with this database of (Gene,Exon)=[(functional element 1,'~'),(functional element 2,'~')] for downstream result file annotatations
domain_hit_gene_count_db = {}; all_domain_gene_hits = {}; probeset_domain_db={}
for entry in protein_features:
gene,uid = entry
for data_tuple in protein_features[entry]:
domain,call = data_tuple
try: protein_exon_feature_db[entry].append(data_tuple)
except KeyError: protein_exon_feature_db[entry] = [data_tuple]
try: domain_hit_gene_count_db[domain].append(gene)
except KeyError: domain_hit_gene_count_db[domain] = [gene]
all_domain_gene_hits[gene]=[]
if perform_element_permutation_analysis == 'yes':
try: probeset_domain_db[uid].append(domain)
except KeyError: probeset_domain_db[uid] = [domain]
protein_features=[]; domain_gene_changed_count_db=[]
###Replace the gene list for each microRNA hit with count data
domain_hit_gene_count_db = eliminate_redundant_dict_values(domain_hit_gene_count_db)
############ Perform Element Over-Representation Analysis ############
"""Domain/FT Fishers-Exact test: with "protein_exon_feature_db" (transformed to "domain_hit_gene_count_db") we can analyze over-representation of domain/features WITHOUT taking into account exon-inclusion or exclusion
Do this using: "domain_associated_genes", which contains domain tuple ('Tyr_pkinase', 'IPR001245') as a key and count in unique genes as the value in addition to
Number of genes linked to splice events "regulated" (SI and Midas p<0.05), number of genes with constitutive probesets
MicroRNA Fishers-Exact test: "filtered_microRNA_exon_db" contains gene/exon to microRNA data. For each microRNA, count the representation in spliced genes microRNA (unique gene count - make this from the mentioned file)
Do this using: "microRNA_count_db"""
domain_gene_counts = {} ### Get unique gene counts for each domain
for domain in domain_associated_genes:
domain_gene_counts[domain] = len(domain_associated_genes[domain])
total_microRNA_gene_hit_count = len(all_microRNA_gene_hits)
total_microRNA_gene_denom_count = len(gene_microRNA_denom)
Nm,Rm = calculateZScores(microRNA_hit_gene_count_db,microRNA_count_db,total_microRNA_gene_denom_count,total_microRNA_gene_hit_count,'microRNA')
gene_microRNA_denom =[]
summary_data_db['miRNA_gene_denom'] = total_microRNA_gene_denom_count
summary_data_db['miRNA_gene_hits'] = total_microRNA_gene_hit_count
summary_data_db['alt_events']=len(splice_event_list)
total_domain_gene_hit_count = len(all_domain_gene_hits)
total_domain_gene_denom_count = protein_ft_db_len ###genes connected to domain annotations
Nd,Rd = calculateZScores(domain_hit_gene_count_db,domain_gene_counts,total_domain_gene_denom_count,total_domain_gene_hit_count,'domain')
microRNA_hit_gene_counts={}; gene_to_miR_db={} ### Get unique gene counts for each miR and the converse
for microRNA in microRNA_hit_gene_count_db:
microRNA_hit_gene_counts[microRNA] = len(microRNA_hit_gene_count_db[microRNA])
for gene in microRNA_hit_gene_count_db[microRNA]:
try: gene_to_miR_db[gene].append(microRNA)
except KeyError: gene_to_miR_db[gene] = [microRNA]
gene_to_miR_db = eliminate_redundant_dict_values(gene_to_miR_db)
if perform_element_permutation_analysis == 'yes':
###Begin Domain/microRNA Permute Analysis
input_count = len(splice_event_list) ### Number of probesets or probeset pairs (junction array) alternatively regulated
original_increment = int(permutations/20); increment = original_increment
start_time = time.time(); print 'Permuting the Domain/miRBS analysis %d times' % permutations
x=0; permute_domain_inputs=[]; permute_miR_inputs=[]
while x<permutations:
if x == increment: increment+=original_increment; print '*',
permute_input_list = random.sample(denominator_list,input_count); x+=1
permute_domain_input_gene_counts = countGenesForElement(permute_input_list,probeset_to_gene,probeset_domain_db)
permute_domain_inputs.append(permute_domain_input_gene_counts)
permute_miR_input_gene_counts = countGenesForElement(permute_input_list,probeset_to_gene,probeset_mirBS_db)
permute_miR_inputs.append(permute_miR_input_gene_counts)
calculatePermuteZScores(permute_domain_inputs,domain_gene_counts,Nd,Rd)
calculatePermuteZScores(permute_miR_inputs,microRNA_hit_gene_counts,Nm,Rm)
calculatePermuteStats(original_domain_z_score_data)
calculatePermuteStats(original_microRNA_z_score_data)
adjustPermuteStats(original_domain_z_score_data)
adjustPermuteStats(original_microRNA_z_score_data)
exportZScoreData(original_domain_z_score_data,'ft-domain')
exportZScoreData(original_microRNA_z_score_data,'microRNA')
end_time = time.time(); time_diff = int(end_time-start_time)
print "Enrichment p-values for Domains/miRBS calculated in %d seconds" % time_diff
denominator_list=[]
try: clearObjectsFromMemory(original_microRNA_z_score_data)
except Exception: null=[]
microRNA_hit_gene_count_db={}; microRNA_hit_gene_counts={};
clearObjectsFromMemory(permuted_z_scores); permuted_z_scores=[]; original_domain_z_score_data=[]
if (array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null')) and analysis_method != 'splicing-index':
critical_probeset_annotation_db = getJunctionSplicingAnnotations(regulated_exon_junction_db)
probeset_aligning_db = importProbesetAligningDomains(regulated_exon_junction_db,'perfect_match')
else: probeset_aligning_db = importProbesetAligningDomains(exon_db,'perfect_match')
############ Export exon/junction level results ############
splice_event_db={}; protein_length_list=[]; aspire_gene_results={}
critical_gene_exons={}; unique_exon_event_db={}; comparison_count={}; direct_domain_gene_alignments={}
functional_attribute_db2={}; protein_exon_feature_db2={}; microRNA_exon_feature_db2={}
external_exon_annot={}; gene_exon_region={}; gene_smallest_p={}; gene_splice_event_score={}; alternatively_reg_tc={}
aspire_output = root_dir+'AltResults/AlternativeOutput/' + dataset_name + analysis_method+'-exon-inclusion-results.txt'
data = export.ExportFile(aspire_output)
goelite_output = root_dir+'GO-Elite/AltExon/AS.'+ dataset_name + analysis_method+'.txt'
goelite_data = export.ExportFile(goelite_output); gcn=0
#print 'LENGTH OF THE GENE ANNOTATION DATABASE',len(annotate_db)
if array_type != 'AltMouse':
DG_output = root_dir+'AltResults/DomainGraph/' + dataset_name + analysis_method+'-DomainGraph.txt'
DG_data = export.ExportFile(DG_output)
### Write out only the inclusion hits to a subdir
SRFinder_inclusion = root_dir+'GO-Elite/exon/' + dataset_name + analysis_method+'-inclusion.txt'
SRFinder_in_data = export.ExportFile(SRFinder_inclusion)
SRFinder_in_data.write('probeset\tSystemCode\tdeltaI\tp-value\n')
### Write out only the exclusion hits to a subdir
SRFinder_exclusion = root_dir+'GO-Elite/exon/' + dataset_name + analysis_method+'-exclusion.txt'
SRFinder_ex_data = export.ExportFile(SRFinder_exclusion)
SRFinder_ex_data.write('probeset\tSystemCode\tdeltaI\tp-value\n')
### Write out only the denominator set to a subdir
SRFinder_denom = root_dir+'GO-Elite/exon_denominator/' + species+'-'+array_type+'.txt'
SRFinder_denom_data = export.ExportFile(SRFinder_denom)
SRFinder_denom_data.write('probeset\tSystemCode\n')
ens_version = unique.getCurrentGeneDatabaseVersion()
ProcessedSpliceData_output = string.replace(DG_output,'DomainGraph','ProcessedSpliceData') ### This is the same as the DG export but without converting the probeset IDs for non-exon arrays
ProcessedSpliceData_data = export.ExportFile(ProcessedSpliceData_output)
if ens_version == '':
try:
elite_db_versions = UI.returnDirectoriesNoReplace('/AltDatabase')
if len(elite_db_versions)>0: ens_version = elite_db_versions[0]
except Exception: null=[]
ens_version = string.replace(ens_version,'EnsMart','ENS_')
DG_data.write(ens_version+"\n")
DG_data.write("Probeset\tGeneID\tRegulation call\tSI\tSI p-value\tMiDAS p-value\n")
ProcessedSpliceData_data.write("ExonID(s)\tGeneID\tRegulation call\t"+analysis_method+"\t"+analysis_method+" p-value\tMiDAS p-value\n")
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
if perform_permutation_analysis == 'yes': p_value_type = 'permutation-values'
else: p_value_type = 'FDR-'+p_value_call
if array_type == 'AltMouse': gene_name = 'AffyGene'; extra_transcript_annotation = 'block_structure'; extra_exon_annotation = 'splice_event_description'
if array_type == 'junction' or array_type == 'RNASeq':
gene_name = 'Ensembl'; extra_transcript_annotation = 'transcript cluster ID'; extra_exon_annotation = 'distal exon-region-ID'
goelite_data.write("GeneID\tSystemCode\tscore\tp-value\tSymbol\tExonIDs\n")
if array_type == 'RNASeq':
id1='junctionID-1'; id2='junctionID-2'; loc_column='exon/junction locations'
extra_transcript_annotation = 'Known/Novel Feature'
else: id1='probeset1'; id2='probeset2'; loc_column='probeset locations'
title = [gene_name,analysis_method,'symbol','description','exons1','exons2','regulation_call','event_call',id1,'norm-p1',id2,'norm-p2','fold1','fold2']
title +=['adj-fold1' ,'adj-fold2' ,extra_transcript_annotation,'critical_up_exons','critical_down_exons','functional_prediction','uniprot-ens_feature_predictions']
title +=['peptide_predictions','exp1','exp2','ens_overlapping_domains','constitutive_baseline_exp',p_value_call,p_value_type,'permutation-false-positives']
title +=['gene-expression-change', extra_exon_annotation ,'ExternalExonIDs','ExonRegionID','SplicingEvent','ExonAnnotationScore','large_splicing_diff',loc_column]
else:
goelite_data.write("GeneID\tSystemCode\tSI\tSI p-value\tMiDAS p-value\tSymbol\tExonID\n")
if analysis_method == 'splicing-index':
NIpval = 'SI_rawp'; splicing_score = 'Splicing-Index'; lowestp = 'lowest_p (MIDAS or SI)'; AdjPcolumn = 'Deviation-Value'; #AdjPcolumn = 'SI_adjp'
else:
NIpval = 'FIRMA_rawp'; splicing_score = 'FIRMA_fold'; lowestp = 'lowest_p (MIDAS or FIRMA)'; AdjPcolumn = 'Deviation-Value'; #AdjPcolumn = 'FIRMA_adjp'
if array_type == 'RNASeq':
id1='junctionID'; pval_column='junction p-value'; loc_column='junction location'
else: id1='probeset'; pval_column='probeset p-value'; loc_column='probeset location'
if array_type == 'RNASeq': secondary_ID_title = 'Known/Novel Feature'
else: secondary_ID_title = 'alternative gene ID'
title= ['Ensembl',splicing_score,'symbol','description','exons','regulation_call',id1,pval_column,lowestp,'midas p-value','fold','adjfold']
title+=['up_exons','down_exons','functional_prediction','uniprot-ens_feature_predictions','peptide_predictions','ens_overlapping_domains','baseline_probeset_exp']
title+=['constitutive_baseline_exp',NIpval,AdjPcolumn,'gene-expression-change']
title+=[secondary_ID_title, 'ensembl exons', 'consitutive exon', 'exon-region-ID', 'exon annotations','distal exon-region-ID',loc_column]
title = string.join(title,'\t') + '\n'
try:
if original_conditions>2: title = string.replace(title,'regulation_call','conditions_compared')
except Exception: null=[]
data.write(title)
### Calculate adjusted normalized intensity p-values
fdr_exon_stats={}
if analysis_method != 'ASPIRE' and 'linearregres' not in analysis_method:
for (score,entry) in splice_event_list: ### These are all "significant entries"
fds = FDRStats(entry.TTestNormalizedRatios())
fdr_exon_stats[entry.Probeset1()] = fds
for probeset in excluded_probeset_db: ### These are all "non-significant entries"
fds = FDRStats(excluded_probeset_db[probeset].TTestNormalizedRatios())
fdr_exon_stats[probeset] = fds
try: adjustPermuteStats(fdr_exon_stats)
except Exception: null=[]
### Calculate score average and stdev for each gene to alter get a Deviation Value
gene_deviation_db={}
for (score,entry) in splice_event_list:
dI = entry.Score(); geneID = entry.GeneID()
try: gene_deviation_db[geneID].append(dI)
except Exception: gene_deviation_db[geneID] = [dI]
for i in excluded_probeset_db:
entry = excluded_probeset_db[i]
try: dI = entry.Score(); geneID = entry.GeneID()
except Exception: geneID = entry[1]; dI = entry[-1]
try: gene_deviation_db[geneID].append(dI)
except Exception: None ### Don't include genes with no hits
for geneID in gene_deviation_db:
try:
avg_dI=statistics.avg(gene_deviation_db[geneID])
stdev_dI=statistics.stdev(gene_deviation_db[geneID])
gene_deviation_db[geneID] = avg_dI,stdev_dI
except Exception:
gene_deviation_db[geneID] = 'NA','NA'
event_count = 0
for (score,entry) in splice_event_list:
event_count += 1
dI = entry.Score(); probeset1 = entry.Probeset1(); regulation_call = entry.RegulationCall(); event_call = entry.EventCall();critical_exon_list = entry.CriticalExonTuple()
probeset1_display = probeset1; selected_probeset = probeset1
if agglomerate_inclusion_probesets == 'yes':
if array_type == 'AltMouse':
exons1 = original_exon_db[probeset1].ExonID()
try: probeset1 = original_exon_db[probeset1].Probeset()
except Exception: null=[]
else:
probeset1 = probeset1; exons1 = original_exon_db[probeset1].ExonID()
try: selected_probeset = original_exon_db[probeset1].Probeset()
except Exception: selected_probeset = probeset1
else:
try: exons1 = exon_db[probeset1].ExonID()
except Exception:
print probeset1, len(exon_db)
for i in exon_db: print i; break
kill
critical_probeset_list = [selected_probeset]
affygene = entry.GeneID()
### Calculate deviation value for each exon
avg_dI,stdev_dI = gene_deviation_db[affygene]
try: DV = deviation(dI,avg_dI,stdev_dI) ### Note: the dI values are always in log2 space, independent of platform
except Exception: DV = 'NA'
if affygene in annotate_db: description = annotate_db[affygene].Description(); symbol = annotate_db[affygene].Symbol()
else: description = ''; symbol = ''
ped1 = entry.ProbesetExprData1(); adjfold1 = ped1.AdjFold(); exp1 = ped1.BaselineExp(); fold1 = ped1.FoldChange(); rawp1 = ped1.ExpPval()
### Get Constitutive expression values
baseline_const_exp = entry.ConstitutiveExpression() ### For multiple group comparisosn
#if affygene in gene_expression_diff_db: mean_fold_change = gene_expression_diff_db[affygene].ConstitutiveFoldStr()
try: mean_fold_change = str(entry.ConstitutiveFold()) ### For multi-condition analyses, the gene expression is dependent on the conditions compared
except Exception: mean_fold_change = gene_expression_diff_db[affygene].ConstitutiveFoldStr()
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
probeset2 = entry.Probeset2(); exons2 = exon_db[probeset2].ExonID(); rawp1 = str(entry.TTestNormalizedRatios()); rawp2 = str(entry.TTestNormalizedRatios2()); critical_probeset_list.append(probeset2)
ped2 = entry.ProbesetExprData2(); adjfold2 = ped2.AdjFold(); exp2 = ped2.BaselineExp(); fold2 = ped2.FoldChange()
try: location_summary=original_exon_db[selected_probeset].LocationSummary()+'|'+original_exon_db[probeset2].LocationSummary()
except Exception:
try: location_summary=exon_db[selected_probeset].LocationSummary()+'|'+exon_db[probeset2].LocationSummary()
except Exception: location_summary=''
if array_type == 'AltMouse':
extra_transcript_annotation = exon_db[probeset1].GeneStructure()
else:
try: extra_exon_annotation = last_exon_region_db[affygene]
except KeyError: extra_exon_annotation = ''
try:
tc1 = original_exon_db[probeset1].SecondaryGeneID()
tc2 = original_exon_db[probeset2].SecondaryGeneID() ### Transcript Cluster
probeset_tc = makeUnique([tc1,tc2])
extra_transcript_annotation = string.join(probeset_tc,'|')
try: alternatively_reg_tc[affygene] += probeset_tc
except KeyError: alternatively_reg_tc[affygene] = probeset_tc
except Exception: extra_transcript_annotation=''
if array_type == 'RNASeq':
try: extra_transcript_annotation = entry.NovelEvent() ### Instead of secondary gene ID, list known vs. novel reciprocal junction annotation
except Exception: None
exp_list = [float(exp1),float(exp2),float(exp1)+float(fold1),float(exp2)+float(fold2)]; exp_list.sort(); exp_list.reverse()
probeset_tuple = (probeset1,probeset2)
else:
try: exp_list = [float(exp1),float(exp1)+float(fold1)]; exp_list.sort(); exp_list.reverse()
except Exception: exp_list = ['']
probeset_tuple = (probeset1)
highest_exp = exp_list[0]
###Use permuted p-value or lowest expression junction p-value based on the situtation
###This p-value is used to filter out aspire events for further analyses
if len(p_value_call)>0:
if probeset_tuple in permute_p_values:
lowest_raw_p, pos_permute, total_permute, false_pos = permute_p_values[probeset_tuple]
else: lowest_raw_p = "NA"; pos_permute = "NA"; total_permute = "NA"; false_pos = "NA"
else:
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method: raw_p_list = [entry.TTestNormalizedRatios(),entry.TTestNormalizedRatios2()] #raw_p_list = [float(rawp1),float(rawp2)]; raw_p_list.sort()
else:
try: raw_p_list = [float(entry.TTestNormalizedRatios())] ###Could also be rawp1, but this is more appropriate
except Exception: raw_p_list = [1] ### Occurs when p='NA'
raw_p_list.sort()
lowest_raw_p = raw_p_list[0]; pos_permute = "NA"; total_permute = "NA"; false_pos = "NA"
if perform_permutation_analysis == 'yes':
p_value_extra = str(pos_permute)+' out of '+str(total_permute)
else: p_value_extra = str(pos_permute)
up_exons = ''; down_exons = ''; up_exon_list = []; down_exon_list = []; gene_exon_list=[]
exon_data = critical_exon_list
variable = exon_data[0]
if variable == 1 and regulation_call == 'upregulated':
for exon in exon_data[1]:
up_exons = up_exons + exon + ',';up_exon_list.append(exon)
key = affygene,exon+'|'; gene_exon_list.append(key)
elif variable == 1 and regulation_call == 'downregulated':
for exon in exon_data[1]:
down_exons = down_exons + exon + ',';down_exon_list.append(exon)
key = affygene,exon+'|';gene_exon_list.append(key)
else:
try: exon1 = exon_data[1][0]; exon2 = exon_data[1][1]
except Exception: print exon_data;kill
if adjfold1 > 0:
up_exons = up_exons + exon1 + ',';down_exons = down_exons + exon2 + ','
up_exon_list.append(exon1); down_exon_list.append(exon2)
key = affygene,exon1+'|'; gene_exon_list.append(key);key = affygene,exon2+'|'; gene_exon_list.append(key)
else:
up_exons = up_exons + exon2 + ',';down_exons = down_exons + exon1 + ','
up_exon_list.append(exon2); down_exon_list.append(exon1)
key = affygene,exon1+'|'; gene_exon_list.append(key); key = affygene,exon2+'|'; gene_exon_list.append(key)
up_exons = up_exons[0:-1];down_exons = down_exons[0:-1]
try: ### Get comparisons group annotation data for multigroup comparison analyses
if original_conditions>2:
try: regulation_call = ped1.Annotation()
except Exception: null=[]
except Exception: null=[]
###Format functional results based on exon level fold change
null = []
#global a; a = exon_hits; global b; b=microRNA_attribute_db; kill
"""if 'G7100684@J934332_RC@j_at' in critical_probeset_list:
print probeset1, probeset2, gene, critical_probeset_list, 'blah'
if ('G7100684', ('G7100684@J934333_RC@j_at', 'G7100684@J934332_RC@j_at')) in functional_attribute_db:
print functional_attribute_db[('G7100684', ('G7100684@J934333_RC@j_at', 'G7100684@J934332_RC@j_at'))];blah
blah"""
new_functional_attribute_str, functional_attribute_list2, seq_attribute_str,protein_length_list = format_exon_functional_attributes(affygene,critical_probeset_list,functional_attribute_db,up_exon_list,down_exon_list,protein_length_list)
new_uniprot_exon_feature_str, uniprot_exon_feature_list, null, null = format_exon_functional_attributes(affygene,critical_probeset_list,protein_exon_feature_db,up_exon_list,down_exon_list,null)
null, microRNA_exon_feature_list, null, null = format_exon_functional_attributes(affygene,critical_probeset_list,microRNA_attribute_db,up_exon_list,down_exon_list,null)
if len(new_functional_attribute_str) == 0: new_functional_attribute_str = ' '
if len(new_uniprot_exon_feature_str) == 0: new_uniprot_exon_feature_str = ' '
if len(seq_attribute_str) > 12000: seq_attribute_str = 'The sequence is too long to report for spreadsheet analysis'
### Add entries to a database to quantify the number of reciprocal isoforms regulated
reciprocal_isoform_data = [len(critical_exon_list[1]),critical_exon_list[1],event_call,regulation_call]
try: float((lowest_raw_p))
except ValueError: lowest_raw_p=0
if (float((lowest_raw_p))<=p_threshold or false_pos < 2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try: unique_exon_event_db[affygene].append(reciprocal_isoform_data)
except KeyError: unique_exon_event_db[affygene] = [reciprocal_isoform_data]
### Add functional attribute information to a new database
for item in uniprot_exon_feature_list:
attribute = item[0]
exon = item[1]
if (float((lowest_raw_p))<=p_threshold or false_pos < 2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try: protein_exon_feature_db2[affygene,attribute].append(exon)
except KeyError: protein_exon_feature_db2[affygene,attribute]=[exon]
### Add functional attribute information to a new database
"""Database not used for exon/junction data export but for over-representation analysis (direction specific)"""
for item in microRNA_exon_feature_list:
attribute = item[0]
exon = item[1]
if (float((lowest_raw_p))<=p_threshold or false_pos < 2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try: microRNA_exon_feature_db2[affygene,attribute].append(exon)
except KeyError: microRNA_exon_feature_db2[affygene,attribute]=[exon]
### Add functional attribute information to a new database
for item in functional_attribute_list2:
attribute = item[0]
exon = item[1]
if (float((lowest_raw_p))<=p_threshold or false_pos < 2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try: functional_attribute_db2[affygene,attribute].append(exon)
except KeyError: functional_attribute_db2[affygene,attribute]=[exon]
try:
abs_fold = abs(float(mean_fold_change)); fold_direction = 'down'; fold1_direction = 'down'; fold2_direction = 'down'
large_splicing_diff1 = 0; large_splicing_diff2 = 0; large_splicing_diff = 'null'; opposite_splicing_pattern = 'no'
if float(mean_fold_change)>0: fold_direction = 'up'
if float(fold1)>0: fold1_direction = 'up'
if fold1_direction != fold_direction:
if float(fold1)>float(mean_fold_change): large_splicing_diff1 = float(fold1)-float(mean_fold_change)
except Exception:
fold_direction = ''; large_splicing_diff = ''; opposite_splicing_pattern = ''
if analysis_method != 'ASPIRE' and 'linearregres' not in analysis_method: ed = exon_db[probeset1]
else:
try: ed = critical_probeset_annotation_db[selected_probeset,probeset2]
except KeyError:
try: ed = exon_db[selected_probeset] ###not useful data here, but the objects need to exist
except IOError: ed = original_exon_db[probeset1]
ucsc_splice_annotations = ["retainedIntron","cassetteExon","strangeSplice","altFivePrime","altThreePrime","altPromoter","bleedingExon"]
custom_annotations = ["alt-3'","alt-5'","alt-C-term","alt-N-term","cassette-exon","cassette-exon","exon-region-exclusion","intron-retention","mutually-exclusive-exon","trans-splicing"]
custom_exon_annotations_found='no'; ucsc_annotations_found = 'no'; exon_annot_score=0
if len(ed.SplicingEvent())>0:
for annotation in ucsc_splice_annotations:
if annotation in ed.SplicingEvent(): ucsc_annotations_found = 'yes'
for annotation in custom_annotations:
if annotation in ed.SplicingEvent(): custom_exon_annotations_found = 'yes'
if custom_exon_annotations_found == 'yes' and ucsc_annotations_found == 'no': exon_annot_score = 3
elif ucsc_annotations_found == 'yes' and custom_exon_annotations_found == 'no': exon_annot_score = 4
elif ucsc_annotations_found == 'yes' and custom_exon_annotations_found == 'yes': exon_annot_score = 5
else: exon_annot_score = 2
try: gene_splice_event_score[affygene].append(exon_annot_score) ###store for gene level results
except KeyError: gene_splice_event_score[affygene] = [exon_annot_score]
try: gene_exon_region[affygene].append(ed.ExonRegionID()) ###store for gene level results
except KeyError: gene_exon_region[affygene] = [ed.ExonRegionID()]
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
if float(fold2)>0: fold2_direction = 'up'
if fold2_direction != fold_direction:
if float(fold2)>float(mean_fold_change):
large_splicing_diff2 = float(fold2)-float(mean_fold_change)
if abs(large_splicing_diff2) > large_splicing_diff1: large_splicing_diff = str(large_splicing_diff2)
else: large_splicing_diff = str(large_splicing_diff1)
if fold1_direction != fold2_direction and abs(float(fold1))>0.4 and abs(float(fold2))>0.4 and abs(float(mean_fold_change))< max([float(fold2),float(fold1)]):
opposite_splicing_pattern = 'yes'
### Annotate splicing events based on exon_strucuture data
if array_type == 'AltMouse':
extra_exon_annotation = ExonAnnotate_module.annotate_splice_event(exons1,exons2,extra_transcript_annotation)
try: splice_event_db[extra_exon_annotation] += 1
except KeyError: splice_event_db[extra_exon_annotation] = 1
try:
direct_domain_alignments = probeset_aligning_db[selected_probeset,probeset2]
try: direct_domain_gene_alignments[affygene]+=', '+direct_domain_alignments
except KeyError: direct_domain_gene_alignments[affygene]=direct_domain_alignments
except KeyError: direct_domain_alignments = ' '
splicing_event = ed.SplicingEvent()
if array_type == 'RNASeq':
splicing_event = checkForTransSplicing(probeset1_display,splicing_event)
splicing_event = checkForTransSplicing(probeset2,splicing_event)
exp1 = covertLogExpressionToNonLog(exp1)
exp2 = covertLogExpressionToNonLog(exp2)
baseline_const_exp = covertLogExpressionToNonLog(baseline_const_exp)
fold1 = covertLogFoldToNonLog(fold1)
fold2 = covertLogFoldToNonLog(fold2)
adjfold1 = covertLogFoldToNonLog(adjfold1)
adjfold2 = covertLogFoldToNonLog(adjfold2)
mean_fold_change = covertLogFoldToNonLog(mean_fold_change)
### Annotate splicing events based on pre-computed and existing annotations
values= [affygene,dI,symbol,fs(description),exons1,exons2,regulation_call,event_call,probeset1_display,rawp1,probeset2,rawp2,fold1,fold2,adjfold1,adjfold2]
values+=[extra_transcript_annotation,up_exons,down_exons,fs(new_functional_attribute_str),fs(new_uniprot_exon_feature_str),fs(seq_attribute_str),exp1,exp2,fs(direct_domain_alignments)]
values+=[str(baseline_const_exp),str(lowest_raw_p),p_value_extra,str(false_pos),mean_fold_change,extra_exon_annotation]
values+=[ed.ExternalExonIDs(),ed.ExonRegionID(),splicing_event,str(exon_annot_score),large_splicing_diff,location_summary]
exon_sets = abs(float(dI)),regulation_call,event_call,exons1,exons2,''
### Export significant reciprocol junction pairs and scores
values_ps = [probeset1+'|'+probeset2,affygene,'changed',dI,'NA',str(lowest_raw_p)]; values_ps = string.join(values_ps,'\t')+'\n'
try: ProcessedSpliceData_data.write(values_ps)
except Exception: None
values_ge = [affygene,'En',dI,str(lowest_raw_p),symbol,probeset1_display+' | '+probeset2]; values_ge = string.join(values_ge,'\t')+'\n'
if array_type == 'junction' or array_type == 'RNASeq': ### Only applies to reciprocal junction sensitive platforms (but not currently AltMouse)
goelite_data.write(values_ge)
if array_type == 'junction' or array_type == 'RNASeq': ### Only applies to reciprocal junction sensitive platforms (but not currently AltMouse)
try: exon_probeset = exon_array_translation_db[affygene+':'+exon_data[1][0]][0]; probeset1 = exon_probeset; gcn+=1
except Exception: probeset1 = None #probeset1 = affygene+':'+exon_data[1][0]
try:
null = int(probeset1) ### Must be an int to work in DomainGraph
values_dg = [probeset1,affygene,'changed',dI,'NA',str(lowest_raw_p)]; values_dg = string.join(values_dg,'\t')+'\n'
if array_type == 'junction' or array_type == 'RNASeq':
DG_data.write(values_dg)
values_srf = string.join([probeset1,'Ae',dI,str(lowest_raw_p)],'\t')+'\n'
if float(dI)>0:
SRFinder_ex_data.write(values_srf)
elif float(dI)<0:
SRFinder_in_data.write(values_srf)
except Exception: null=[]
else:
si_pvalue = lowest_raw_p
if si_pvalue == 1: si_pvalue = 'NA'
if probeset1 in midas_db:
midas_p = str(midas_db[probeset1])
if float(midas_p)<lowest_raw_p: lowest_raw_p = float(midas_p) ###This is the lowest and SI-pvalue
else: midas_p = ''
###Determine what type of exon-annotations are present to assign a confidence score
if affygene in annotate_db: ###Determine the transcript clusters used to comprise a splice event (genes and exon specific)
try:
gene_tc = annotate_db[affygene].TranscriptClusterIDs()
try: probeset_tc = [ed.SecondaryGeneID()]
except Exception: probeset_tc = [affygene]
for transcript_cluster in gene_tc: probeset_tc.append(transcript_cluster)
probeset_tc = makeUnique(probeset_tc)
except Exception: probeset_tc = ''; gene_tc=''
else:
try:
try: probeset_tc = [ed.SecondaryGeneID()]
except Exception: probeset_tc = [affygene]
probeset_tc = makeUnique(probeset_tc)
except Exception: probeset_tc = ''; gene_tc=''
cluster_number = len(probeset_tc)
try: alternatively_reg_tc[affygene] += probeset_tc
except KeyError: alternatively_reg_tc[affygene] = probeset_tc
try: last_exon_region = last_exon_region_db[affygene]
except KeyError: last_exon_region = ''
if cluster_number>1: exon_annot_score = 1
direct_domain_alignments = ' '
if array_type == 'exon' or array_type == 'gene' or explicit_data_type != 'null':
try:
direct_domain_alignments = probeset_aligning_db[probeset1]
try: direct_domain_gene_alignments[affygene]+=', '+direct_domain_alignments
except KeyError: direct_domain_gene_alignments[affygene]=direct_domain_alignments
except KeyError: direct_domain_alignments = ' '
else:
try: direct_domain_alignments = probeset_aligning_db[affygene+':'+exons1]
except KeyError: direct_domain_alignments = ''
if array_type == 'RNASeq':
exp1 = covertLogExpressionToNonLog(exp1)
baseline_const_exp = covertLogExpressionToNonLog(baseline_const_exp)
fold1 = covertLogFoldToNonLog(fold1)
adjfold1 = covertLogFoldToNonLog(adjfold1)
mean_fold_change = covertLogFoldToNonLog(mean_fold_change)
try: adj_SIp=fdr_exon_stats[probeset1].AdjP()
except Exception: adj_SIp = 'NA'
try: secondary_geneid = ed.SecondaryGeneID()
except Exception: secondary_geneid = affygene
if array_type == 'RNASeq':
secondary_geneid = ed.NovelExon()
### Write Splicing Index results
values= [affygene,dI,symbol,fs(description),exons1,regulation_call,probeset1,rawp1,str(lowest_raw_p),midas_p,fold1,adjfold1]
values+=[up_exons,down_exons,fs(new_functional_attribute_str),fs(new_uniprot_exon_feature_str),fs(seq_attribute_str),fs(direct_domain_alignments),exp1]
values+=[str(baseline_const_exp),str(si_pvalue),DV,mean_fold_change,secondary_geneid, ed.ExternalExonIDs()]
values+=[ed.Constitutive(),ed.ExonRegionID(),ed.SplicingEvent(),last_exon_region,ed.LocationSummary()] #str(exon_annot_score)
if probeset1 in filtered_probeset_db: values += filtered_probeset_db[probeset1]
exon_sets = abs(float(dI)),regulation_call,event_call,exons1,exons1,midas_p
probeset = probeset1 ### store original ID (gets converted below)
### Write DomainGraph results
try: midas_p = str(midas_db[probeset1])
except KeyError: midas_p = 'NA'
### Export significant exon/junction IDs and scores
values_ps = [probeset1,affygene,'changed',dI,'NA',str(lowest_raw_p)]; values_ps = string.join(values_ps,'\t')+'\n'
try: ProcessedSpliceData_data.write(values_ps)
except Exception: None
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
if (array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null':
try: exon_probeset = exon_array_translation_db[affygene+':'+exon_data[1][0]][0]; probeset1 = exon_probeset; gcn+=1
except Exception: probeset1 = None ### don't write out a line
else:
try: exon_probeset = exon_array_translation_db[probeset1][0]; probeset1 = exon_probeset; gcn+=1
except Exception: probeset1=None; #null=[]; #print gcn, probeset1;kill - force an error - new in version 2.0.8
try:
null = int(probeset1)
values_dg = [probeset1,affygene,'changed',dI,str(si_pvalue),midas_p]; values_dg = string.join(values_dg,'\t')+'\n'
DG_data.write(values_dg)
values_srf = string.join([probeset1,'Ae',dI,str(lowest_raw_p)],'\t')+'\n'
if float(dI)>0:
SRFinder_ex_data.write(values_srf)
elif float(dI)<0:
SRFinder_in_data.write(values_srf)
except Exception: null=[]
values_ge = [affygene,'En',dI,str(si_pvalue),midas_p,symbol,probeset]; values_ge = string.join(values_ge,'\t')+'\n'
goelite_data.write(values_ge)
if len(ed.SplicingEvent())>2:
try: external_exon_annot[affygene].append(ed.SplicingEvent())
except KeyError: external_exon_annot[affygene] = [ed.SplicingEvent()]
try: values = string.join(values,'\t')+'\n'
except Exception: print values;kill
data.write(values)
###Process data for gene level reports
if float((lowest_raw_p))<=p_threshold or false_pos < 2 or lowest_raw_p == 1:
try: comparison_count[affygene] += 1
except KeyError: comparison_count[affygene] = 1
try: aspire_gene_results[affygene].append(exon_sets)
except KeyError: aspire_gene_results[affygene] = [exon_sets]
for exon in up_exon_list:
exon_info = exon,'upregulated'
try: critical_gene_exons[affygene].append(exon_info)
except KeyError: critical_gene_exons[affygene] = [exon_info]
for exon in down_exon_list:
exon_info = exon,'downregulated'
try: critical_gene_exons[affygene].append(exon_info)
except KeyError: critical_gene_exons[affygene] = [exon_info]
data.close()
print event_count, analysis_method, "results written to:", aspire_output,'\n'
try: clearObjectsFromMemory(original_exon_db)
except Exception: null=[]
exon_array_translation_db=[]; original_exon_db=[]; probeset_to_gene=[]
### Finish writing the DomainGraph export file with non-significant probesets
if array_type != 'AltMouse':
for probeset in excluded_probeset_db:
eed = excluded_probeset_db[probeset]
try: midas_p = str(midas_db[probeset])
except KeyError: midas_p = 'NA'
### Export significant exon/junction IDs and scores
try: values_ps = [probeset,eed.GeneID(),'UC',eed.Score(),str(eed.TTestNormalizedRatios()),midas_p]
except Exception: excl_probeset, geneid, score, rawp, pvalue = eed; values_ps = [probeset,geneid,'UC', str(score), str(rawp), str(pvalue)]
values_ps = string.join(values_ps,'\t')+'\n'; ProcessedSpliceData_data.write(values_ps)
### Write DomainGraph results
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
try: exon_probeset = exon_array_translation_db[probeset][0]; probeset = exon_probeset; gcn+=1
except Exception: probeset=None; # null=[] - force an error - new in version 2.0.8
try: values_dg = [probeset,eed.GeneID(),'UC',eed.Score(),str(eed.TTestNormalizedRatios()),midas_p]
except Exception:
try:
excl_probeset, geneid, score, rawp, pvalue = eed
if ':' in probeset: probeset = excl_probeset ### Example: ENSMUSG00000029213:E2.1, make this just the numeric exclusion probeset - Not sure if DG handles non-numeric
values_dg = [probeset,geneid,'UC', str(score), str(rawp), str(pvalue)]
except Exception: None
try:
null=int(probeset)
values_dg = string.join(values_dg,'\t')+'\n'; DG_data.write(values_dg)
except Exception: null=[]
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
for id in exon_array_translation_db:
SRFinder_denom_data.write(exon_array_translation_db[id]+'\tAe\n')
else:
for probeset in original_exon_db:
SRFinder_denom_data.write(probeset+'\tAe\n')
DG_data.close()
SRFinder_in_data.close()
SRFinder_ex_data.close()
SRFinder_denom_data.close()
for affygene in direct_domain_gene_alignments:
domains = string.split(direct_domain_gene_alignments[affygene],', ')
domains = unique.unique(domains); domains = string.join(domains,', ')
direct_domain_gene_alignments[affygene] = domains
### functional_attribute_db2 will be reorganized so save the database with another. Use this
functional_attribute_db = functional_attribute_db2
functional_attribute_db2 = reorganize_attribute_entries(functional_attribute_db2,'no')
external_exon_annot = eliminate_redundant_dict_values(external_exon_annot)
protein_exon_feature_db = protein_exon_feature_db2
protein_exon_feature_db2 = reorganize_attribute_entries(protein_exon_feature_db2,'no')
############ Export Gene Data ############
up_splice_val_genes = 0; down_dI_genes = 0; diff_exp_spliced_genes = 0; diff_spliced_rna_factor = 0
ddI = 0; udI = 0
summary_data_db['direct_domain_genes']=len(direct_domain_gene_alignments)
summary_data_db['alt_genes']=len(aspire_gene_results)
critical_gene_exons = eliminate_redundant_dict_values(critical_gene_exons)
aspire_output_gene = root_dir+'AltResults/AlternativeOutput/' + dataset_name + analysis_method + '-exon-inclusion-GENE-results.txt'
data = export.ExportFile(aspire_output_gene)
if array_type == 'AltMouse': goelite_data.write("GeneID\tSystemCode\n")
title = ['AffyGene','max_dI','midas-p (corresponding)','symbol','external gene ID','description','regulation_call','event_call']
title +=['number_of_comparisons','num_effected_exons','up_exons','down_exons','functional_attribute','uniprot-ens_exon_features','direct_domain_alignments']
title +=['pathways','mean_fold_change','exon-annotations','exon-region IDs','alternative gene ID','splice-annotation score']
title = string.join(title,'\t')+'\n'
data.write(title)
for affygene in aspire_gene_results:
if affygene in annotate_db:
description = annotate_db[affygene].Description()
symbol = annotate_db[affygene].Symbol()
ensembl = annotate_db[affygene].ExternalGeneID()
if array_type != 'AltMouse' and array_type != 'RNASeq': transcript_clusters = alternatively_reg_tc[affygene]; transcript_clusters = makeUnique(transcript_clusters); transcript_clusters = string.join(transcript_clusters,'|')
else: transcript_clusters = affygene
rna_processing_factor = annotate_db[affygene].RNAProcessing()
else: description='';symbol='';ensembl=affygene;rna_processing_factor=''; transcript_clusters=''
if ensembl in go_annotations: wpgo = go_annotations[ensembl]; goa = wpgo.Combined()
else: goa = ''
if array_type == 'AltMouse':
if len(ensembl) >0: goelite_data.write(ensembl+'\tL\n')
try: gene_splice_event_score[affygene].sort(); top_se_score = str(gene_splice_event_score[affygene][-1])
except KeyError: top_se_score = 'NA'
try: gene_regions = gene_exon_region[affygene]; gene_regions = makeUnique(gene_regions); gene_regions = string.join(gene_regions,'|')
except KeyError: gene_regions = 'NA'
if analysis_method == 'ASPIRE' or analysis_method == 'linearregres': number_of_comparisons = str(comparison_count[affygene])
else: number_of_comparisons = 'NA'
results_list = aspire_gene_results[affygene]
results_list.sort(); results_list.reverse()
max_dI = str(results_list[0][0])
regulation_call = results_list[0][1]
event_call = results_list[0][2]
midas_p = results_list[0][-1]
num_critical_exons = str(len(critical_gene_exons[affygene]))
try: direct_domain_annots = direct_domain_gene_alignments[affygene]
except KeyError: direct_domain_annots = ' '
down_exons = ''; up_exons = ''; down_list=[]; up_list=[]
for exon_info in critical_gene_exons[affygene]:
exon = exon_info[0]; call = exon_info[1]
if call == 'downregulated':
down_exons = down_exons + exon + ','
down_list.append(exon)
ddI += 1
if call == 'upregulated':
up_exons = up_exons + exon + ','
up_list.append(exon)
udI += 1
down_exons = down_exons[0:-1]
up_exons = up_exons[0:-1]
up_exons = add_a_space(up_exons); down_exons = add_a_space(down_exons)
functional_annotation =''
if affygene in functional_attribute_db2:
number_of_functional_attributes = str(len(functional_attribute_db2[affygene]))
attribute_list = functional_attribute_db2[affygene]
attribute_list.sort()
for attribute_exon_info in attribute_list:
exon_attribute = attribute_exon_info[0]
exon_list = attribute_exon_info[1]
functional_annotation = functional_annotation + exon_attribute
exons = '('
for exon in exon_list: exons = exons + exon + ','
exons = exons[0:-1] + '),'
if add_exons_to_annotations == 'yes': functional_annotation = functional_annotation + exons
else: functional_annotation = functional_annotation + ','
functional_annotation = functional_annotation[0:-1]
uniprot_exon_annotation = ''
if affygene in protein_exon_feature_db2:
number_of_functional_attributes = str(len(protein_exon_feature_db2[affygene]))
attribute_list = protein_exon_feature_db2[affygene]; attribute_list.sort()
for attribute_exon_info in attribute_list:
exon_attribute = attribute_exon_info[0]
exon_list = attribute_exon_info[1]
uniprot_exon_annotation = uniprot_exon_annotation + exon_attribute
exons = '('
for exon in exon_list: exons = exons + exon + ','
exons = exons[0:-1] + '),'
if add_exons_to_annotations == 'yes': uniprot_exon_annotation = uniprot_exon_annotation + exons
else: uniprot_exon_annotation = uniprot_exon_annotation + ','
uniprot_exon_annotation = uniprot_exon_annotation[0:-1]
if len(uniprot_exon_annotation) == 0: uniprot_exon_annotation = ' '
if len(functional_annotation) == 0: functional_annotation = ' '
if affygene in gene_expression_diff_db:
mean_fold_change = gene_expression_diff_db[affygene].ConstitutiveFoldStr()
try:
if abs(float(mean_fold_change)) > log_fold_cutoff: diff_exp_spliced_genes += 1
except Exception: diff_exp_spliced_genes = diff_exp_spliced_genes
else: mean_fold_change = 'NC'
if len(rna_processing_factor) > 2: diff_spliced_rna_factor +=1
###Add annotations for where in the gene structure these exons are (according to Ensembl)
if affygene in external_exon_annot: external_gene_annot = string.join(external_exon_annot[affygene],', ')
else: external_gene_annot = ''
if array_type == 'RNASeq':
mean_fold_change = covertLogFoldToNonLog(mean_fold_change)
values =[affygene,max_dI,midas_p,symbol,ensembl,fs(description),regulation_call,event_call,number_of_comparisons]
values+=[num_critical_exons,up_exons,down_exons,functional_annotation]
values+=[fs(uniprot_exon_annotation),fs(direct_domain_annots),fs(goa),mean_fold_change,external_gene_annot,gene_regions,transcript_clusters,top_se_score]
values = string.join(values,'\t')+'\n'
data.write(values)
### Use results for summary statistics
if len(up_list)>len(down_list): up_splice_val_genes +=1
else: down_dI_genes +=1
data.close()
print "Gene-level results written"
###yes here indicates that although the truncation events will initially be filtered out, later they will be added
###back in without the non-truncation annotations....if there is no second database (in this case functional_attribute_db again)
###IF WE WANT TO FILTER OUT NON-NMD ENTRIES WHEN NMD IS PRESENT (FOR A GENE) MUST INCLUDE functional_attribute_db AS THE SECOND VARIABLE!!!!
###Currently, yes does nothing
functional_annotation_db, null = grab_summary_dataset_annotations(functional_attribute_db,'','yes')
upregulated_genes = 0; downregulated_genes = 0
###Calculate the number of upregulated and downregulated genes
for affygene in gene_expression_diff_db:
fold_val = gene_expression_diff_db[affygene].ConstitutiveFold()
try:
if float(fold_val) > log_fold_cutoff: upregulated_genes += 1
elif abs(float(fold_val)) > log_fold_cutoff: downregulated_genes += 1
except Exception: null=[]
upregulated_rna_factor = 0; downregulated_rna_factor = 0
###Calculate the total number of putative RNA-processing/binding factors differentially regulated
for affygene in gene_expression_diff_db:
gene_fold = gene_expression_diff_db[affygene].ConstitutiveFold()
rna_processing_factor = gene_expression_diff_db[affygene].RNAProcessing()
if len(rna_processing_factor) > 1:
if gene_fold>log_fold_cutoff: upregulated_rna_factor += 1
elif abs(gene_fold)>log_fold_cutoff: downregulated_rna_factor += 1
###Generate three files for downstream functional summary
### functional_annotation_db2 is output to the same function as functional_annotation_db, ranked_uniprot_list_all to get all ranked uniprot annotations,
### and ranked_uniprot_list_coding_only to get only coding ranked uniprot annotations
functional_annotation_db2, ranked_uniprot_list_all = grab_summary_dataset_annotations(protein_exon_feature_db,'','') #functional_attribute_db
null, ranked_uniprot_list_coding_only = grab_summary_dataset_annotations(protein_exon_feature_db,functional_attribute_db,'') #functional_attribute_db
functional_attribute_db=[]; protein_exon_feature_db=[]
###Sumarize changes in avg protein length for each splice event
up_protein_list=[];down_protein_list=[]; protein_length_fold_diff=[]
for [down_protein,up_protein] in protein_length_list:
up_protein = float(up_protein); down_protein = float(down_protein)
down_protein_list.append(down_protein); up_protein_list.append(up_protein)
if up_protein > 10 and down_protein > 10:
fold_change = up_protein/down_protein; protein_length_fold_diff.append(fold_change)
median_fold_diff = statistics.median(protein_length_fold_diff)
try: down_avg=int(statistics.avg(down_protein_list)); up_avg=int(statistics.avg(up_protein_list))
except Exception: down_avg=0; up_avg=0
try:
try:
down_std=int(statistics.stdev(down_protein_list)); up_std=int(statistics.stdev(up_protein_list))
except ValueError: ###If 'null' is returned fro stdev
down_std = 0;up_std = 0
except Exception:
down_std = 0;up_std = 0
if len(down_protein_list)>1 and len(up_protein_list)>1:
try:
#t,df,tails = statistics.ttest(down_protein_list,up_protein_list,2,3)
#t = abs(t);df = round(df)
#print 'ttest t:',t,'df:',df
#p = str(statistics.t_probability(t,df))
p = str(statistics.runComparisonStatistic(down_protein_list,up_protein_list,probability_statistic))
#print dataset_name,p
except Exception: p = 'NA'
if p == 1: p = 'NA'
else: p = 'NA'
###Calculate unique reciprocal isoforms for exon-inclusion, exclusion and mutual-exclusive events
unique_exon_inclusion_count=0;unique_exon_exclusion_count=0;unique_mutual_exclusive_count=0;
unique_exon_event_db = eliminate_redundant_dict_values(unique_exon_event_db)
for affygene in unique_exon_event_db:
isoform_entries = unique_exon_event_db[affygene]
possibly_redundant=[]; non_redundant=[]; check_for_redundant=[]
for entry in isoform_entries:
if entry[0] == 1: ### If there is only one regulated exon
possibly_redundant.append(entry)
else:
non_redundant.append(entry)
critical_exon_list = entry[1]
for exon in critical_exon_list:
check_for_redundant.append(exon)
for entry in possibly_redundant:
exon = entry[1][0]
if exon not in check_for_redundant:
non_redundant.append(entry)
for entry in non_redundant:
if entry[2] == 'ei-ex':
if entry[3] == 'upregulated': unique_exon_inclusion_count += 1
else: unique_exon_exclusion_count += 1
else: unique_mutual_exclusive_count += 1
udI = unique_exon_inclusion_count; ddI = unique_exon_exclusion_count; mx = unique_mutual_exclusive_count
###Add splice event information to the functional_annotation_db
for splice_event in splice_event_db:count = splice_event_db[splice_event]; functional_annotation_db.append((splice_event,count))
if analysis_method == 'splicing-index' or analysis_method == 'FIRMA': udI='NA'; ddI='NA'
summary_results_db[dataset_name[0:-1]] = udI,ddI,mx,up_splice_val_genes,down_dI_genes,(up_splice_val_genes + down_dI_genes),upregulated_genes, downregulated_genes, diff_exp_spliced_genes, upregulated_rna_factor,downregulated_rna_factor,diff_spliced_rna_factor,down_avg,down_std,up_avg,up_std,p,median_fold_diff,functional_annotation_db
result_list = exportComparisonSummary(dataset_name,summary_data_db,'log')
###Re-set this variable (useful for testing purposes)
clearObjectsFromMemory(gene_expression_diff_db)
clearObjectsFromMemory(splice_event_list); clearObjectsFromMemory(si_db); si_db=[]
clearObjectsFromMemory(fdr_exon_stats)
try: clearObjectsFromMemory(excluded_probeset_db); clearObjectsFromMemory(ex_db); ex_db=[]
except Exception: ex_db=[]
clearObjectsFromMemory(exon_db)
#clearObjectsFromMemory(annotate_db)
critical_probeset_annotation_db=[]; gene_expression_diff_db=[]; domain_associated_genes=[]; permute_p_values=[]
permute_miR_inputs=[]; seq_attribute_str=[]; microRNA_count_db=[]; excluded_probeset_db=[]; fdr_exon_stats=[]
splice_event_list=[]; critical_exon_db_len=len(critical_exon_db)#; critical_exon_db=[] deleting here will cause a global instance problem
all_domain_gene_hits=[]; gene_splice_event_score=[]; unique_exon_event_db=[]; probeset_aligning_db=[]; ranked_uniprot_list_all=[];
filtered_microRNA_exon_db=[]; permute_domain_inputs=[]; functional_annotation_db2=[]; functional_attribute_db2=[]; protein_length_list=[];
ranked_uniprot_list_coding_only=[]; miR_str=[]; permute_input_list=[]; microRNA_exon_feature_db2=[]; alternatively_reg_tc=[];
direct_domain_gene_alignments=[]; aspire_gene_results=[]; domain_gene_counts=[]; functional_annotation=[]; protein_exon_feature_db2=[];
microRNA_attribute_db=[]; probeset_mirBS_db=[]; exon_hits=[]; critical_gene_exons=[]; gene_exon_region=[]; exon_db=[]; external_exon_annot=[];
values=[]; down_protein_list=[]; functional_annotation_db=[]; protein_length_fold_diff=[]; comparison_count=[]; filtered_arrayids=[];
domain_hit_gene_count_db=[]; up_protein_list=[]; probeset_domain_db=[]
try: goelite_data.close()
except Exception: null=[]
"""
print 'local vars'
all = [var for var in locals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(locals()[var])>500: print var, len(locals()[var])
except Exception: null=[]
"""
return summary_results_db, summary_results_db2, aspire_output, aspire_output_gene, critical_exon_db_len
def deviation(dI,avg_dI,stdev_dI):
    """Return, as a string, the absolute z-score-like deviation of dI from
    avg_dI in units of stdev_dI, with every log2 fold value first converted
    back to non-log (signed fold) space."""
    value = covertLogFoldToNonLogFloat(dI)
    mean = covertLogFoldToNonLogFloat(avg_dI)
    spread = covertLogFoldToNonLogFloat(stdev_dI)
    return str(abs((value - mean) / spread))
def covertLogExpressionToNonLog(log_val):
    """Convert a log2 expression value back to non-log space and return it
    as a string.

    For RPKM-normalized data the value is simply 2**log_val; for all other
    normalizations the pipeline added a pseudocount of 1 before logging, so
    1 is subtracted after un-logging.

    NOTE(review): depends on the module-level global ``normalization_method``.
    """
    unlogged = math.pow(2, float(log_val))
    if normalization_method != 'RPKM':
        unlogged -= 1  # remove the pre-log pseudocount
    return str(unlogged)
def covertLogFoldToNonLog(log_val):
    """Convert a log2 fold change to a signed non-log fold-change string.

    Positive (or zero) log folds become 2**x; negative log folds become
    -(1/2**x), the conventional +/- fold representation. Non-numeric input
    (e.g. 'NA') is passed through unchanged, stringified.
    """
    try:
        magnitude = float(log_val)
        if magnitude < 0:
            result = -1 / math.pow(2, magnitude)
        else:
            result = math.pow(2, magnitude)
    except Exception:
        result = log_val  # best-effort: non-numeric values pass through
    return str(result)
def covertLogFoldToNonLogFloat(log_val):
    """Convert a log2 fold change to a signed non-log fold change, returned
    as a float. Unlike covertLogFoldToNonLog, non-numeric input is not
    guarded against (float() raises)."""
    x = float(log_val)
    return -1 / math.pow(2, x) if x < 0 else math.pow(2, x)
def checkForTransSplicing(uid,splicing_event):
    """Append a 'trans-splicing' annotation when the junction UID spans two
    different genes.

    A UID of the form geneA:geneB:position (three or more ':'-delimited
    fields) where the first gene ID is not a substring of the second
    indicates trans-splicing.

    Fix: an empty splicing_event previously received '|trans-splicing' with
    a spurious leading delimiter; '|' is now used only when joining to an
    existing annotation.
    """
    fields = uid.split(':')
    if len(fields) > 2:
        if fields[0] not in fields[1]:  # two different genes
            if len(splicing_event) > 0:
                splicing_event += '|trans-splicing'
            else:
                splicing_event = 'trans-splicing'
    return splicing_event
def fs(text):
    """Wrap *text* in double quotes so embedded commas are not interpreted
    as field delimiters by spreadsheet programs.

    NOTE(review): embedded double quotes are not escaped -- assumed absent
    from the annotation strings passed in.
    """
    return ''.join(['"', text, '"'])
def analyzeSplicingIndex(fold_dbase):
    """The Splicing Index (SI) represents the log ratio of the exon intensities between the two tissues after normalization
    to the gene intensities in each sample: SIi = log2((e1i/g1j)/(e2i/g2j)), for the i-th exon of the j-th gene in tissue
    type 1 or 2. The splicing indices are then subjected to a t-test to probe for differential inclusion of the exon into the gene.
    In order to determine if the change in isoform expression was statistically significant, a simple two-tailed t-test was carried
    out on the isoform ratios by grouping the 10 samples from either "tumor" or "normal" tissue.
    The method ultimately producing the highest proportion of true positives was to retain only: a) exons with a DABG p-value < 0.05,
    b) genes with a signal > 70, c) exons with a log ratio between tissues (i.e., the gene-level normalized fold change) > 0.5,
    d) Splicing Index p-values < 0.005 and e) Core exons.
    Gardina PJ, Clark TA, Shimada B, Staples MK, Yang Q, Veitch J, Schweitzer A, Awad T, Sugnet C, Dee S, Davies C, Williams A, Turpaz Y.
    Alternative splicing and differential gene expression in colon cancer detected by a whole genome exon array.
    BMC Genomics. 2006 Dec 27;7:325. PMID: 17192196
    """
    ### Returns (splicing_index_hash, p_value_call, permute_p_values, excluded_probeset_db);
    ### relies on many module-level globals (exon_db, avg_const_exp_db, midas_db, thresholds, ...)
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specifc splicing pattern)
    if len(filtered_probeset_db)>0:
        temp_db={}
        for probeset in fold_dbase: temp_db[probeset]=[]
        for probeset in temp_db:
            try: filtered_probeset_db[probeset]
            except KeyError: del fold_dbase[probeset]
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specifc splicing annotation)
    if filter_for_AS == 'yes':
        proceed = 0
        for probeset in exon_db:
            as_call = exon_db[probeset].SplicingCall()
            if as_call == 0:
                try: del fold_dbase[probeset]
                except KeyError: null=[]
    ### Used to the export relative individual adjusted probesets fold changes used for splicing index values
    if export_NI_values == 'yes':
        summary_output = root_dir+'AltResults/RawSpliceData/'+species+'/'+analysis_method+'/'+dataset_name[:-1]+'.txt'
        data = export.ExportFile(summary_output)
        title = string.join(['gene\tExonID\tprobesets']+original_array_names,'\t')+'\n'; data.write(title)
    print 'Calculating splicing-index values (please be patient)...',
    if array_type == 'RNASeq': id_name = 'exon/junction IDs'
    else: id_name = 'array IDs'
    print len(fold_dbase),id_name,'beging examined'
    ###original_avg_const_exp_db contains constitutive mean expression values per group: G6953871 [7.71, 7.66]
    ###array_raw_group_values: Raw expression values in list of groups: G7072464@J935416_RC@j_at ([1.79, 2.16, 2.22], [1.68, 2.24, 1.97, 1.92, 2.12])
    ###avg_const_exp_db contains the raw constitutive expression values in a single list
    ### Accumulators: (SI, ExonData) tuples, excluded probesets (kept for DomainGraph), and progress counters
    splicing_index_hash=[]; excluded_probeset_db={}; denominator_probesets=0; interaction = 0
    original_increment = int(len(exon_db)/20); increment = original_increment
    for probeset in exon_db:
        ed = exon_db[probeset]
        #include_probeset = ed.IncludeProbeset()
        if interaction == increment: increment+=original_increment; print '*',
        interaction +=1
        include_probeset = 'yes' ###Moved this filter to import of the probeset relationship file
        ###Examines user input parameters for inclusion of probeset types in the analysis
        if include_probeset == 'yes':
            geneid = ed.GeneID()
            if probeset in fold_dbase and geneid in original_avg_const_exp_db: ###used to search for array_raw_group_values, but when filtered by expression changes, need to filter by adj_fold_dbase
                denominator_probesets+=1
                ###Includes probesets with a calculated constitutive expression value for each gene and expression data for that probeset
                group_index = 0; si_interim_group_db={}; si_interim_group_str_db={}; ge_threshold_count=0; value_count = 0
                for group_values in array_raw_group_values[probeset]:
                    """gene_expression_value = math.pow(2,original_avg_const_exp_db[geneid][group_index])
                    ###Check to see if gene expression is > threshod for both conditions
                    if gene_expression_value>gene_expression_threshold:ge_threshold_count+=1"""
                    value_index = 0; ratio_hash=[]; ratio_str_hash=[]
                    for value in group_values: ###Calculate normalized ratio's for each condition and save raw values for later permutation
                        #exp_val = math.pow(2,value);ge_val = math.pow(2,avg_const_exp_db[geneid][value_count]) ###To calculate a ttest we need the raw constitutive expression values, these are not in group list form but are all in a single list so keep count.
                        exp_val = value;ge_val = avg_const_exp_db[geneid][value_count]
                        exp_ratio = exp_val-ge_val; ratio_hash.append(exp_ratio); ratio_str_hash.append(str(exp_ratio))
                        value_index +=1; value_count +=1
                    si_interim_group_db[group_index] = ratio_hash
                    si_interim_group_str_db[group_index] = ratio_str_hash
                    group_index+=1
                ### Only the first two groups are compared for the SI statistic
                group1_ratios = si_interim_group_db[0]; group2_ratios = si_interim_group_db[1]
                group1_mean_ratio = statistics.avg(group1_ratios); group2_mean_ratio = statistics.avg(group2_ratios)
                if export_NI_values == 'yes':
                    try: er = ed.ExonID()
                    except Exception: er = 'NA'
                    ev = string.join([geneid+'\t'+er+'\t'+probeset]+si_interim_group_str_db[0]+si_interim_group_str_db[1],'\t')+'\n'; data.write(ev)
                #if ((math.log(group1_mean_ratio,2))*(math.log(group2_mean_ratio,2)))<0: opposite_SI_log_mean = 'yes'
                if (group1_mean_ratio*group2_mean_ratio)<0: opposite_SI_log_mean = 'yes'
                else: opposite_SI_log_mean = 'no'
                ### Compute the SI statistics; unexpected failures for this probeset are silently skipped below
                try:
                    if calculate_normIntensity_p == 'yes':
                        try:
                            normIntensityP = statistics.runComparisonStatistic(group1_ratios,group2_ratios,probability_statistic)
                        except Exception: normIntensityP = 'NA' ### Occurs when analyzing two groups with no variance
                    else: normIntensityP = 'NA' ### Set to an always signficant value
                    if normIntensityP == 1: normIntensityP = 'NA'
                    splicing_index = group1_mean_ratio-group2_mean_ratio; abs_splicing_index = abs(splicing_index)
                    #if probeset == '3061323': print abs_splicing_index,normIntensityP,ed.ExonID(),group1_mean_ratio,group2_mean_ratio,math.log(group1_mean_ratio,2),math.log(group2_mean_ratio,2),((math.log(group1_mean_ratio,2))*(math.log(group2_mean_ratio,2))),opposite_SI_log_mean; kill
                    if probeset in midas_db:
                        try: midas_p = float(midas_db[probeset])
                        except ValueError:
                            midas_p = 0
                            #if abs_splicing_index>1 and normIntensityP < 0.05: print probeset,normIntensityP, abs_splicing_index;kill
                    else: midas_p = 0
                    #print ed.GeneID(),ed.ExonID(),probeset,splicing_index,normIntensityP,midas_p,group1_ratios,group2_ratios
                    ### Retain only probesets passing the SI magnitude, normalized-intensity p and MiDAS p thresholds
                    if abs_splicing_index>alt_exon_logfold_cutoff and (normIntensityP < p_threshold or normIntensityP == 'NA' or normIntensityP == 1) and midas_p < p_threshold:
                        exonid = ed.ExonID(); critical_exon_list = [1,[exonid]]
                        constit_exp1 = original_avg_const_exp_db[geneid][0]
                        constit_exp2 = original_avg_const_exp_db[geneid][1]
                        ge_fold=constit_exp2-constit_exp1
                        ### Re-define all of the pairwise values now that the two Splicing-Index groups to report have been determined
                        data_list1 = array_raw_group_values[probeset][0]; data_list2 = array_raw_group_values[probeset][1]
                        baseline_exp = statistics.avg(data_list1); experimental_exp = statistics.avg(data_list2); fold_change = experimental_exp - baseline_exp
                        try:
                            ttest_exp_p = statistics.runComparisonStatistic(data_list1,data_list2,probability_statistic)
                        except Exception: ttest_exp_p = 1
                        normInt1 = (baseline_exp-constit_exp1); normInt2 = (experimental_exp-constit_exp2); adj_fold = normInt2 - normInt1
                        ped = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p, '')
                        sid = ExonData(splicing_index,probeset,critical_exon_list,geneid,group1_ratios,group2_ratios,normIntensityP,opposite_SI_log_mean)
                        sid.setConstitutiveExpression(constit_exp1); sid.setConstitutiveFold(ge_fold); sid.setProbesetExpressionData(ped)
                        splicing_index_hash.append((splicing_index,sid))
                    else:
                        ### Also record the data for probesets that are excluded... Used by DomainGraph
                        eed = ExcludedExonData(splicing_index,geneid,normIntensityP)
                        excluded_probeset_db[probeset] = eed
                except Exception:
                    null = [] ###If this occurs, then most likely, the exon and constitutive probeset are the same
    print 'Splicing Index analysis complete'
    if export_NI_values == 'yes': data.close()
    ### Sort results by SI score, largest first
    splicing_index_hash.sort(); splicing_index_hash.reverse()
    print len(splicing_index_hash),id_name,"with evidence of Alternative expression"
    p_value_call=''; permute_p_values = {}; summary_data_db['denominator_exp_events']=denominator_probesets
    return splicing_index_hash,p_value_call,permute_p_values, excluded_probeset_db
def importResiduals(filename,probe_probeset_db):
    """Stream a residuals file and compute FIRMA scores gene-by-gene.

    The file is tab-delimited with a header row; the first column has the
    form 'uid-probe' and the remaining columns are per-sample residual
    values. Rows whose probe is absent from probe_probeset_db are skipped
    (via the broad except below). Whenever the uid changes, the
    accumulated probeset->residuals map for the previous gene is handed
    to calculateFIRMAScores and reset.
    NOTE(review): rows are assumed to be grouped by uid; an interleaved
    file would split one gene across several calculateFIRMAScores calls.
    """
    fn=filepath(filename); key_db = {}; x=0; prior_uid = ''; uid_gene_db={}
    for line in open(fn,'rU').xreadlines():
        ### Skip '#' comment lines, then the single header row (x toggles 0 -> 1)
        if x == 0 and line[0] == '#': null=[]
        elif x == 0: x+=1
        else:
            data = cleanUpLine(line)
            t = string.split(data,'\t')
            uid = t[0]; uid,probe = string.split(uid,'-')
            try:
                probeset = probe_probeset_db[probe]; residuals = t[1:]
                if uid == prior_uid:
                    try: uid_gene_db[probeset].append(residuals) ### Don't need to keep track of the probe ID
                    except KeyError: uid_gene_db[probeset] = [residuals]
                else: ### Hence, we have finished storing all residual data for that gene
                    if len(uid_gene_db)>0: calculateFIRMAScores(uid_gene_db); uid_gene_db={}
                    try: uid_gene_db[probeset].append(residuals) ### Don't need to keep track of the probe ID
                    except KeyError: uid_gene_db[probeset] = [residuals]
                    prior_uid = uid
            except Exception: null=[]
    ### For the last gene imported
    if len(uid_gene_db)>0: calculateFIRMAScores(uid_gene_db)
def calculateFIRMAScores(uid_gene_db):
    """Convert one gene's per-probe residuals into per-sample FIRMA scores.

    uid_gene_db maps probeset -> list of residual rows (one row per probe,
    one value per sample). For each probeset the median residual per
    sample index is taken across its probes; each median is then divided
    by the gene-wide median absolute deviation (MAD) of all residuals.
    Results are stored in the module-level 'firma_scores' dict as
    probeset -> {sample_index: FIRMA score}.
    """
    probeset_residuals={}; all_gene_residuals=[]; total_probes=0
    for probeset in uid_gene_db:
        residuals_list = uid_gene_db[probeset]; sample_db={}; total_probes+=len(residuals_list)
        ### For all probes in a probeset, calculate the median residual for each sample
        for residuals in residuals_list:
            index=0
            for residual in residuals:
                try: sample_db[index].append(float(residual))
                except KeyError: sample_db[index] = [float(residual)]
                all_gene_residuals.append(float(residual))
                index+=1
        for index in sample_db:
            median_residual = statistics.median(sample_db[index])
            sample_db[index] = median_residual
        probeset_residuals[probeset] = sample_db
    ### Calculate the Median absolute deviation
    """http://en.wikipedia.org/wiki/Absolute_deviation
    The median absolute deviation (also MAD) is the median absolute deviation from the median. It is a robust estimator of dispersion.
    For the example {2, 2, 3, 4, 14}: 3 is the median, so the absolute deviations from the median are {1, 1, 0, 1, 11} (or reordered as
    {0, 1, 1, 1, 11}) with a median absolute deviation of 1, in this case unaffected by the value of the outlier 14.
    Here, the global gene median will be expressed as res_gene_median.
    """
    res_gene_median = statistics.median(all_gene_residuals); subtracted_residuals=[]
    for residual in all_gene_residuals: subtracted_residuals.append(abs(res_gene_median-residual))
    gene_MAD = statistics.median(subtracted_residuals)
    #if '3263614' in probeset_residuals: print len(all_gene_residuals),all_gene_residuals
    for probeset in probeset_residuals:
        sample_db = probeset_residuals[probeset]
        for index in sample_db:
            median_residual = sample_db[index]
            try:
                ### Normalize by the gene MAD; if gene_MAD is 0 the raw
                ### median residual is silently left in place
                firma_score = median_residual/gene_MAD
                sample_db[index] = firma_score
            except Exception: null=[]
            #if probeset == '3263614': print index, median_residual, firma_score, gene_MAD
        firma_scores[probeset] = sample_db
def importProbeToProbesets(fold_dbase):
    """Drive the FIRMA score computation for every gene under study.

    Builds probe->probeset lookups in batches of up to max_gene_count
    genes so each residual-file pass stays memory-bounded, and calls
    importResiduals (which in turn calls calculateFIRMAScores) per batch.
    Verifies that the APT residuals file exists before starting and exits
    with a warning window if not. Scores accumulate in the module-level
    'firma_scores' dict.
    """
    #print "Importing probe-to-probeset annotations (please be patient)..."
    filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_probeset-probes.txt'
    probeset_to_include={}
    gene2examine={}
    ### Although we want to restrict the analysis to probesets in fold_dbase, we don't want to effect the FIRMA model - filter later
    for probeset in fold_dbase:
        try: ed = exon_db[probeset]; gene2examine[ed.GeneID()]=[]
        except Exception: null=[]
    for gene in original_avg_const_exp_db: gene2examine[gene]=[]
    for probeset in exon_db:
        ed = exon_db[probeset]; geneid = ed.GeneID()
        if geneid in gene2examine:
            gene2examine[geneid].append(probeset) ### Store these so we can break things up
            probeset_to_include[probeset]=[]
    probeset_probe_db = importGenericFilteredDBList(filename,probeset_to_include)
    ### Get Residuals filename and verify it's presence
    #print "Importing comparison residuals..."
    filename_objects = string.split(dataset_name[:-1],'.p'); filename = filename_objects[0]+'.txt'
    ### Two-group comparisons use the 'residuals' export; multi-group, the full dataset
    if len(array_group_list)==2:
        filename = import_dir = root_dir+'AltExpression/FIRMA/residuals/'+array_type+'/'+species+'/'+filename
    else: filename = import_dir = root_dir+'AltExpression/FIRMA/FullDatasets/'+array_type+'/'+species+'/'+filename
    status = verifyFile(filename)
    if status != 'found':
        print_out = 'The residual file:'; print_out+= filename
        print_out+= 'was not found in the default location.\nPlease make re-run the analysis from the Beginning.'
        try: UI.WarningWindow(print_out,'Exit')
        except Exception: print print_out
        print traceback.format_exc(); badExit()
    print "Calculating FIRMA scores..."
    input_count = len(gene2examine) ### Number of probesets or probeset pairs (junction array) alternatively regulated
    original_increment = int(input_count/20); increment = original_increment
    start_time = time.time(); x=0
    probe_probeset_db={}; gene_count=0; total_gene_count = 0; max_gene_count=3000; round = 1
    for gene in gene2examine:
        gene_count+=1; total_gene_count+=1; x+=1
        #if x == increment: increment+=original_increment; print '*',
        for probeset in gene2examine[gene]:
            for probe in probeset_probe_db[probeset]: probe_probeset_db[probe] = probeset
        if gene_count == max_gene_count:
            ### Import residuals and calculate primary sample/probeset FIRMA scores
            importResiduals(filename,probe_probeset_db)
            #print max_gene_count*round,"genes"
            print '*',
            gene_count=0; probe_probeset_db={}; round+=1 ### Reset these variables and re-run
    probeset_probe_db={}
    ### Analyze residuals for the remaining probesets (< max_gene_count)
    importResiduals(filename,probe_probeset_db)
    end_time = time.time(); time_diff = int(end_time-start_time)
    print "FIRMA scores calculted for",total_gene_count, "genes in %d seconds" % time_diff
def FIRMAanalysis(fold_dbase):
    """The FIRMA method calculates a score for each probeset and for each samples within a group of arrays, independent
    of group membership. However, in AltAnalyze, these analyses are performed dependent on group. The FIRMA score is calculated
    by obtaining residual values (residuals is a variable for each probe that can't be explained by the GC content or intensity
    of that probe) from APT, for all probes corresponding to a metaprobeset (Ensembl gene in AltAnalyze). These probe residuals
    are imported and the ratio of the median residual per probeset per sample divided by the absolute standard deviation of the
    median of all probes for all samples for that gene."""
    ### Returns (firma_hash, p_value_call, permute_p_values, excluded_probeset_db) -
    ### same contract as analyzeSplicingIndex so callers can treat both uniformly.
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specifc splicing pattern)
    if len(filtered_probeset_db)>0:
        temp_db={}
        for probeset in fold_dbase: temp_db[probeset]=[]
        for probeset in temp_db:
            try: filtered_probeset_db[probeset]
            except KeyError: del fold_dbase[probeset]
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specifc splicing annotation)
    if filter_for_AS == 'yes':
        proceed = 0
        for probeset in exon_db:
            as_call = exon_db[probeset].SplicingCall()
            if as_call == 0:
                try: del fold_dbase[probeset]
                except KeyError: null=[]
    #print 'Beginning FIRMA analysis (please be patient)...'
    ### Used to the export relative individual adjusted probesets fold changes used for splicing index values
    if export_NI_values == 'yes':
        sample_names_ordered = [] ### note: Can't use original_array_names since the order is potentially different (FIRMA stores sample data as indeces within dictionary keys)
        for group_name in array_group_list: ### THIS LIST IS USED TO MAINTAIN CONSISTENT GROUP ORDERING DURING ANALYSIS
            for sample_name in array_group_name_db[group_name]: sample_names_ordered.append(sample_name)
        summary_output = root_dir+'AltResults/RawSpliceData/'+species+'/'+analysis_method+'/'+dataset_name[:-1]+'.txt'
        data = export.ExportFile(summary_output)
        title = string.join(['gene-probesets']+sample_names_ordered,'\t')+'\n'; data.write(title)
    ### Import probes for probesets to be analyzed
    global firma_scores; firma_scores = {}
    importProbeToProbesets(fold_dbase)
    print 'FIRMA scores obtained for',len(firma_scores),'probests.'
    ### Group sample scores for each probeset and calculate statistics
    firma_hash=[]; excluded_probeset_db={}; denominator_probesets=0; interaction = 0
    original_increment = int(len(firma_scores)/20); increment = original_increment
    for probeset in firma_scores:
        if probeset in fold_dbase: ### Filter based on expression
            ed = exon_db[probeset]; geneid = ed.GeneID()
            if interaction == increment: increment+=original_increment; print '*',
            interaction +=1; denominator_probesets+=1
            sample_db = firma_scores[probeset]
            ###Use the index values from performExpressionAnalysis to assign each expression value to a new database
            firma_group_array = {}
            for group_name in array_group_db:
                for array_index in array_group_db[group_name]:
                    firma_score = sample_db[array_index]
                    try: firma_group_array[group_name].append(firma_score)
                    except KeyError: firma_group_array[group_name] = [firma_score]
            ###array_group_list should already be unique and correctly sorted (see above)
            firma_lists=[]; index=0
            for group_name in array_group_list:
                firma_list = firma_group_array[group_name]
                ### For >2 groups, carry (mean, scores, group index) so the extreme pair can be picked below
                if len(array_group_list)>2: firma_list = statistics.avg(firma_list), firma_list, index
                firma_lists.append(firma_list); index+=1
            if export_NI_values == 'yes': ### DO THIS HERE SINCE firma_lists IS SORTED BELOW!!!!
                try: er = ed.ExonID()
                except Exception: er = 'NA'
                export_list = [geneid+'\t'+er+'\t'+probeset]; export_list2=[]
                for firma_ls in firma_lists:
                    if len(array_group_list)>2: firma_ls =firma_ls[1] ### See above modification of firma_list object for multiple group anlaysis
                    export_list+=firma_ls
                for i in export_list: export_list2.append(str(i))
                ev = string.join(export_list2,'\t')+'\n'; data.write(ev)
            if len(array_group_list)==2:
                firma_list1 = firma_lists[0]; firma_list2 = firma_lists[-1]; firma_avg1 = statistics.avg(firma_list1); firma_avg2 = statistics.avg(firma_list2)
                index1=0; index2=1 ### Only two groups, thus only two indeces
            else: ### The below code deals with identifying the comparisons which yeild the greatest FIRMA difference
                firma_lists.sort(); index1=firma_lists[0][-1]; index2 = firma_lists[-1][-1]
                firma_list1 = firma_lists[0][1]; firma_list2 = firma_lists[-1][1]; firma_avg1 = firma_lists[0][0]; firma_avg2 = firma_lists[-1][0]
            if calculate_normIntensity_p == 'yes':
                try:
                    normIntensityP = statistics.runComparisonStatistic(firma_list1,firma_list2,probability_statistic)
                except Exception: normIntensityP = 'NA' ### Occurs when analyzing two groups with no variance
            else: normIntensityP = 'NA'
            if normIntensityP == 1: normIntensityP = 'NA'
            firma_fold_change = firma_avg2 - firma_avg1
            firma_fold_change = -1*firma_fold_change ### Make this equivalent to Splicing Index fold which is also relative to experimental not control
            if (firma_avg2*firma_avg1)<0: opposite_FIRMA_scores = 'yes'
            else: opposite_FIRMA_scores = 'no'
            if probeset in midas_db:
                try: midas_p = float(midas_db[probeset])
                except ValueError: midas_p = 0
            else: midas_p = 0
            #if probeset == '3263614': print firma_fold_change, normIntensityP, midas_p,'\n',firma_list1, firma_list2, [p_threshold];kill
            ### Retain only probesets passing the FIRMA fold, normalized-intensity p and MiDAS p thresholds
            if abs(firma_fold_change)>alt_exon_logfold_cutoff and (normIntensityP < p_threshold or normIntensityP == 'NA') and midas_p < p_threshold:
                exonid = ed.ExonID(); critical_exon_list = [1,[exonid]]
                #gene_expression_values = original_avg_const_exp_db[geneid]
                constit_exp1 = original_avg_const_exp_db[geneid][index1]
                constit_exp2 = original_avg_const_exp_db[geneid][index2]
                ge_fold = constit_exp2-constit_exp1
                ### Re-define all of the pairwise values now that the two FIRMA groups to report have been determined
                data_list1 = array_raw_group_values[probeset][index1]; data_list2 = array_raw_group_values[probeset][index2]
                baseline_exp = statistics.avg(data_list1); experimental_exp = statistics.avg(data_list2); fold_change = experimental_exp - baseline_exp
                group_name1 = array_group_list[index1]; group_name2 = array_group_list[index2]
                try:
                    ttest_exp_p = statistics.runComparisonStatistic(data_list1,data_list2,probability_statistic)
                except Exception: ttest_exp_p = 1
                normInt1 = (baseline_exp-constit_exp1); normInt2 = (experimental_exp-constit_exp2); adj_fold = normInt2 - normInt1
                ped = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p, group_name2+'_vs_'+group_name1)
                fid = ExonData(firma_fold_change,probeset,critical_exon_list,geneid,data_list1,data_list2,normIntensityP,opposite_FIRMA_scores)
                fid.setConstitutiveExpression(constit_exp1); fid.setConstitutiveFold(ge_fold); fid.setProbesetExpressionData(ped)
                firma_hash.append((firma_fold_change,fid))
                #print [[[probeset,firma_fold_change,normIntensityP,p_threshold]]]
            else:
                ### Also record the data for probesets that are excluded... Used by DomainGraph
                eed = ExcludedExonData(firma_fold_change,geneid,normIntensityP)
                excluded_probeset_db[probeset] = eed
    print 'FIRMA analysis complete'
    if export_NI_values == 'yes': data.close()
    ### Sort results by FIRMA fold change, largest first
    firma_hash.sort(); firma_hash.reverse()
    print len(firma_hash),"Probesets with evidence of Alternative expression out of",len(excluded_probeset_db)+len(firma_hash)
    p_value_call=''; permute_p_values = {}; summary_data_db['denominator_exp_events']=denominator_probesets
    return firma_hash,p_value_call,permute_p_values, excluded_probeset_db
def getFilteredFilename(filename):
    """Redirect *filename* to its '-filtered' variant for junction arrays.

    For any other array type the path is returned untouched.
    """
    if array_type == 'junction':
        return filename.replace('.txt', '-filtered.txt')
    return filename
def getExonVersionFilename(filename):
    """Redirect *filename* into the explicit-data-type subdirectory when set.

    Applies only to junction/RNASeq runs with a non-'null'
    explicit_data_type; every occurrence of the array type in the path is
    expanded to 'array_type/explicit_data_type'. Falls back to the
    original path when the redirected file cannot be verified on disk.
    """
    fallback = filename
    if array_type in ('junction', 'RNASeq') and explicit_data_type != 'null':
        filename = filename.replace(array_type, array_type+'/'+explicit_data_type)
        ### Make sure the file exists, otherwise, use the original
        #print [[filename]]
        if verifyFile(filename) != 'found':
            filename = fallback
    return filename
def importProbesetAligningDomains(exon_db,report_type):
    """Associate probesets with directly and indirectly aligning protein domains.

    Loads the (optionally '-filtered') direct and indirect
    domain-alignment tables and, depending on report_type, returns:
      - 'perfect_match': probeset -> formatted domain-string lookup,
      - 'probeset': (gene_protein_ft_db, domain_gene_count_db,
        protein_functional_attribute_db),
      - otherwise: (len(gene_protein_ft_db), domain_gene_count_db).
    For AltMouse (or junction/RNASeq with a 'null' explicit data type),
    exon_db is a probeset-pair keyed junction database and is first
    re-keyed by gene:critical_exon.
    """
    filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_domain_aligning_probesets.txt'
    filename=getFilteredFilename(filename)
    probeset_aligning_db = importGenericDBList(filename)
    filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_indirect_domain_aligning_probesets.txt'
    filename=getFilteredFilename(filename)
    probeset_indirect_aligning_db = importGenericDBList(filename)
    if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
        new_exon_db={}; splicing_call_db={}
        for probeset_pair in exon_db:
            ### For junction analyses exon_db is really regulated_exon_junction_db, containing the inclusion,exclusion probeset tuple and an object as values
            ed = exon_db[probeset_pair]; geneid = ed.GeneID(); critical_exons = ed.CriticalExons()
            for exon in critical_exons:
                new_key = geneid+':'+exon
                try: new_exon_db[new_key].append(probeset_pair)
                except KeyError: new_exon_db[new_key] = [probeset_pair]
                try: splicing_call_db[new_key].append(ed.SplicingCall())
                except KeyError: splicing_call_db[new_key] = [ed.SplicingCall()]
        for key in new_exon_db:
            probeset_pairs = new_exon_db[key]; probeset_pair = probeset_pairs[0] ### grab one of the probeset pairs
            ed = exon_db[probeset_pair]; geneid = ed.GeneID()
            jd = SimpleJunctionData(geneid,'','','',probeset_pairs) ### use only those necessary fields for this function (probeset pairs will be called as CriticalExons)
            splicing_call_db[key].sort(); splicing_call = splicing_call_db[key][-1]; jd.setSplicingCall(splicing_call) ### Bug from 1.15 to have key be new_key?
            new_exon_db[key] = jd
        exon_db = new_exon_db
    gene_protein_ft_db={};domain_gene_count_db={};protein_functional_attribute_db={}; probeset_aligning_db2={}
    splicing_call_db=[]; new_exon_db=[] ### Clear memory
    ### First pass: domains the probeset aligns to directly
    for probeset in exon_db:
        #if probeset == '107650':
        #if probeset in probeset_aligning_db: print probeset_aligning_db[probeset];kill
        if probeset in probeset_aligning_db:
            proceed = 'no'
            if filter_for_AS == 'yes':
                as_call = exon_db[probeset].SplicingCall()
                if as_call == 1: proceed = 'yes'
            else: proceed = 'yes'
            gene = exon_db[probeset].GeneID()
            new_domain_list=[]; new_domain_list2=[]
            if report_type == 'gene' and proceed == 'yes':
                for domain in probeset_aligning_db[probeset]:
                    try: domain_gene_count_db[domain].append(gene)
                    except KeyError: domain_gene_count_db[domain] = [gene]
                    try: gene_protein_ft_db[gene].append(domain)
                    except KeyError: gene_protein_ft_db[gene]=[domain]
            elif proceed == 'yes':
                if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
                    probeset_list = exon_db[probeset].CriticalExons()
                else: probeset_list = [probeset]
                for id in probeset_list:
                    for domain in probeset_aligning_db[probeset]:
                        new_domain_list.append('(direct)'+domain)
                        new_domain_list2.append((domain,'+'))
                    new_domain_list = unique.unique(new_domain_list)
                    new_domain_list_str = string.join(new_domain_list,', ')
                    gene_protein_ft_db[gene,id] = new_domain_list2
                    probeset_aligning_db2[id] = new_domain_list_str
    #print exon_db['107650']
    ### Second pass: domains the probeset aligns to indirectly (same logic, '-' call)
    for probeset in exon_db:
        if probeset in probeset_indirect_aligning_db:
            proceed = 'no'
            if filter_for_AS == 'yes':
                as_call = exon_db[probeset].SplicingCall()
                if as_call == 1: proceed = 'yes'
            else: proceed = 'yes'
            gene = exon_db[probeset].GeneID()
            new_domain_list=[]; new_domain_list2=[]
            if report_type == 'gene' and proceed == 'yes':
                for domain in probeset_indirect_aligning_db[probeset]:
                    try: domain_gene_count_db[domain].append(gene)
                    except KeyError: domain_gene_count_db[domain] = [gene]
                    try: gene_protein_ft_db[gene].append(domain)
                    except KeyError: gene_protein_ft_db[gene]=[domain]
            elif proceed == 'yes':
                if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
                    probeset_list = exon_db[probeset].CriticalExons()
                else: probeset_list = [probeset]
                for id in probeset_list:
                    for domain in probeset_indirect_aligning_db[probeset]:
                        new_domain_list.append('(indirect)'+domain)
                        new_domain_list2.append((domain,'-'))
                    new_domain_list = unique.unique(new_domain_list)
                    new_domain_list_str = string.join(new_domain_list,', ')
                    gene_protein_ft_db[gene,id] = new_domain_list2
                    probeset_aligning_db2[id] = new_domain_list_str
    domain_gene_count_db = eliminate_redundant_dict_values(domain_gene_count_db)
    gene_protein_ft_db = eliminate_redundant_dict_values(gene_protein_ft_db)
    if analysis_method == 'ASPIRE' or analysis_method == 'linearregres':
        clearObjectsFromMemory(exon_db);exon_db=[]
        try: clearObjectsFromMemory(new_exon_db)
        except Exception: null=[]
    probeset_indirect_aligning_db=[]; probeset_aligning_db=[]
    ### Return shape depends on report_type (see docstring)
    if report_type == 'perfect_match':
        gene_protein_ft_db=[];domain_gene_count_db=[];protein_functional_attribute_db=[]
        return probeset_aligning_db2
    elif report_type == 'probeset':
        probeset_aligning_db2=[]
        return gene_protein_ft_db,domain_gene_count_db,protein_functional_attribute_db
    else:
        probeset_aligning_db2=[]; protein_functional_attribute_db=[]; probeset_aligning_db2=[]
        len_gene_protein_ft_db = len(gene_protein_ft_db); gene_protein_ft_db=[]
        return len_gene_protein_ft_db,domain_gene_count_db
def importProbesetProteinCompDomains(exon_db,report_type,comp_type):
    """Import protein-domain and protein-level comparison annotations per probeset.

    comp_type selects which 'probeset-domain-annotations-*' /
    'probeset-protein-annotations-*' table pair to read. Depending on
    report_type, returns:
      - 'probeset': (gene_protein_ft_db, domain_gene_count_db,
        protein_functional_attribute_db) - the latter includes protein
        sequence attributes resolved from the SEQUENCE-protein dbase,
      - otherwise: (len(gene_protein_ft_db), domain_gene_count_db).
    Junction-style probeset-pair keys are resolved against the tables via
    several fallbacks (joined pair, inclusion lookup, either member).
    """
    filename = 'AltDatabase/'+species+'/'+array_type+'/probeset-domain-annotations-'+comp_type+'.txt'
    if (array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type != 'null': filename=getFilteredFilename(filename)
    filename=getExonVersionFilename(filename)
    probeset_aligning_db = importGeneric(filename)
    filename = 'AltDatabase/'+species+'/'+array_type+'/probeset-protein-annotations-'+comp_type+'.txt'
    if (array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type != 'null': filename=getFilteredFilename(filename)
    filename=getExonVersionFilename(filename)
    gene_protein_ft_db={};domain_gene_count_db={}
    for probeset in exon_db:
        initial_proceed = 'no'; original_probeset = probeset
        if probeset in probeset_aligning_db: initial_proceed = 'yes'
        elif array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
            ### For junction analyses exon_db is really regulated_exon_junction_db, containing the inclusion,exclusion probeset tuple and an object as values
            if '|' in probeset[0]: probeset1 = string.split(probeset[0],'|')[0]; probeset = probeset1,probeset[1]
            try: alternate_probeset_id = exon_db[probeset].InclusionLookup(); probeset = alternate_probeset_id,probeset[1]
            except Exception: null=[]
            probeset_joined = string.join(probeset,'|')
            #print [probeset_joined],[probeset]
            if probeset_joined in probeset_aligning_db: initial_proceed = 'yes'; probeset = probeset_joined
            elif probeset[0] in probeset_aligning_db: initial_proceed = 'yes'; probeset = probeset[0]
            elif probeset[1] in probeset_aligning_db: initial_proceed = 'yes'; probeset = probeset[1]
            #else: for i in probeset_aligning_db: print [i];kill
        if initial_proceed == 'yes':
            proceed = 'no'
            if filter_for_AS == 'yes':
                as_call = exon_db[original_probeset].SplicingCall()
                if as_call == 1: proceed = 'yes'
            else: proceed = 'yes'
            new_domain_list = []
            gene = exon_db[original_probeset].GeneID()
            if report_type == 'gene' and proceed == 'yes':
                ### Entries are 'domain|call'; tolerate extra '|' from UniProt annotations
                for domain_data in probeset_aligning_db[probeset]:
                    try:
                        domain,call = string.split(domain_data,'|')
                    except Exception:
                        values = string.split(domain_data,'|')
                        domain = values[0]; call = values[-1] ### occurs when a | exists in the annotations from UniProt
                    try: domain_gene_count_db[domain].append(gene)
                    except KeyError: domain_gene_count_db[domain] = [gene]
                    try: gene_protein_ft_db[gene].append(domain)
                    except KeyError: gene_protein_ft_db[gene]=[domain]
            elif proceed == 'yes':
                for domain_data in probeset_aligning_db[probeset]:
                    domain,call = string.split(domain_data,'|')
                    new_domain_list.append((domain,call))
                    #new_domain_list = string.join(new_domain_list,', ')
                gene_protein_ft_db[gene,original_probeset] = new_domain_list
    domain_gene_count_db = eliminate_redundant_dict_values(domain_gene_count_db)
    probeset_aligning_db=[] ### Clear memory
    probeset_aligning_protein_db = importGeneric(filename)
    probeset_pairs={} ### Store all possible probeset pairs as single probesets for protein-protein associations
    for probeset in exon_db:
        if len(probeset)==2:
            for p in probeset: probeset_pairs[p] = probeset
    if report_type == 'probeset':
        ### Below code was re-written to be more memory efficient by not storing all data in probeset-domain-annotations-*comp*.txt via generic import
        protein_functional_attribute_db={}; probeset_protein_associations={}; protein_db={}
        ### Same probeset-key fallback logic as above, now against the protein table
        for probeset in exon_db:
            initial_proceed = 'no'; original_probeset = probeset
            if probeset in probeset_aligning_protein_db: initial_proceed = 'yes'
            elif array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
                if '|' in probeset[0]: probeset1 = string.split(probeset[0],'|')[0]; probeset = probeset1,probeset[1]
                try: alternate_probeset_id = exon_db[probeset].InclusionLookup(); probeset = alternate_probeset_id,probeset[1]
                except Exception: null=[]
                probeset_joined = string.join(probeset,'|')
                #print [probeset_joined],[probeset]
                if probeset_joined in probeset_aligning_protein_db: initial_proceed = 'yes'; probeset = probeset_joined
                elif probeset[0] in probeset_aligning_protein_db: initial_proceed = 'yes'; probeset = probeset[0]
                elif probeset[1] in probeset_aligning_protein_db: initial_proceed = 'yes'; probeset = probeset[1]
                #else: for i in probeset_aligning_db: print [i];kill
            if initial_proceed == 'yes':
                protein_data_list=probeset_aligning_protein_db[probeset]
                new_protein_list = []
                gene = exon_db[original_probeset].GeneID()
                for protein_data in protein_data_list:
                    protein_info,call = string.split(protein_data,'|')
                    if 'AA:' in protein_info:
                        ### Parse 'AA:(null_protein)...(hit_protein)...' into the protein pair
                        protein_info_r = string.replace(protein_info,')','*')
                        protein_info_r = string.replace(protein_info_r,'(','*')
                        protein_info_r = string.split(protein_info_r,'*')
                        null_protein = protein_info_r[1]; hit_protein = protein_info_r[3]
                        probeset_protein_associations[original_probeset] = null_protein,hit_protein,call
                        protein_db[null_protein] = []; protein_db[hit_protein] = []
                    new_protein_list.append((protein_info,call))
                    #new_protein_list = string.join(new_domain_list,', ')
                protein_functional_attribute_db[gene,original_probeset] = new_protein_list
        filename = 'AltDatabase/'+species+'/'+array_type+'/SEQUENCE-protein-dbase_'+comp_type+'.txt'
        filename=getExonVersionFilename(filename)
        ### Attach the actual protein sequences for each null/hit protein pair
        protein_seq_db = importGenericFiltered(filename,protein_db)
        for key in protein_functional_attribute_db:
            gene,probeset = key
            try:
                null_protein,hit_protein,call = probeset_protein_associations[probeset]
                null_seq = protein_seq_db[null_protein][0]; hit_seq = protein_seq_db[hit_protein][0]
                seq_attr = 'sequence: ' +'('+null_protein+')'+null_seq +' -> '+'('+hit_protein+')'+hit_seq
                protein_functional_attribute_db[key].append((seq_attr,call))
            except KeyError: null=[]
        protein_seq_db=[]; probeset_aligning_protein_db=[]
        return gene_protein_ft_db,domain_gene_count_db,protein_functional_attribute_db
    else:
        probeset_aligning_protein_db=[]; len_gene_protein_ft_db = len(gene_protein_ft_db); gene_protein_ft_db=[]
        return len_gene_protein_ft_db,domain_gene_count_db
class SimpleJunctionData:
    """Lightweight record pairing an inclusion and an exclusion junction probeset.

    Stores the owning gene, the two probeset IDs, the original (possibly
    '|'-agglomerated) inclusion probeset string used for display, and the
    critical exons distinguishing the two junctions. Optional annotations
    (splicing call, gene symbol, inclusion-junction lookup) are attached
    later through the corresponding setters.
    """
    def __init__(self, geneid, probeset1, probeset2, probeset1_display, critical_exon_list):
        self._geneid = geneid
        self._probeset1 = probeset1
        self._probeset2 = probeset2
        self._probeset1_display = probeset1_display
        self._critical_exon_list = critical_exon_list

    def GeneID(self):
        return self._geneid

    def Probeset1(self):
        ### Inclusion junction probeset
        return self._probeset1

    def Probeset2(self):
        ### Exclusion junction probeset
        return self._probeset2

    def InclusionDisplay(self):
        ### Original inclusion string as supplied (may contain '|' separators)
        return self._probeset1_display

    def CriticalExons(self):
        return self._critical_exon_list

    def setSplicingCall(self, splicing_call):
        #self._splicing_call = EvidenceOfAltSplicing(slicing_annot)
        self._splicing_call = splicing_call

    def setSymbol(self, symbol):
        self.symbol = symbol

    def Symbol(self):
        return self.symbol

    def SplicingCall(self):
        return self._splicing_call

    def setInclusionLookup(self, incl_junction_probeset):
        ### Source junction from which an exon-style inclusion ID was derived
        self.incl_junction_probeset = incl_junction_probeset

    def InclusionLookup(self):
        return self.incl_junction_probeset
def formatJunctionData(probesets, affygene, critical_exon_list):
    """Build a (incl_probeset, excl_probeset) key and SimpleJunctionData object.

    probesets is a two-item sequence: [inclusion, exclusion]. Agglomerated
    inclusion probesets are '|'-delimited; only the first is used for the key
    while the full original string is retained for display.
    """
    incl_probeset = probesets[0]
    excl_probeset = probesets[1]
    ### Only return the first inclusion probeset (agglomerated probesets)
    if '|' in incl_probeset:
        incl_probeset = incl_probeset.split('|')[0]
    jd = SimpleJunctionData(affygene, incl_probeset, excl_probeset, probesets[0], critical_exon_list)
    return (incl_probeset, excl_probeset), jd
class JunctionExpressionData:
    """Normalized-intensity (NI) expression summary for one junction probeset.

    baseline_norm_exp / exper_norm_exp hold per-sample log2 ratios of probeset
    expression over constitutive gene expression for the control and
    experimental groups; pval is the group-comparison p-value and ped the
    associated probeset expression-data object.
    """
    def __init__(self, baseline_norm_exp, exper_norm_exp, pval, ped):
        self.baseline_norm_exp = baseline_norm_exp; self.exper_norm_exp = exper_norm_exp; self.pval = pval; self.ped = ped
    def ConNI(self):
        ### Control-group ratios converted out of log2 space
        ls=[]
        for i in self.logConNI():
            ls.append(math.pow(2,i))
        return ls
    def ExpNI(self):
        ### Experimental-group ratios converted out of log2 space
        ls=[]
        for i in self.logExpNI():
            ls.append(math.pow(2,i))
        return ls
    ### Group means in non-log space (project `statistics` module, not stdlib)
    def ConNIAvg(self): return math.pow(2,statistics.avg(self.logConNI()))
    def ExpNIAvg(self): return math.pow(2,statistics.avg(self.logExpNI()))
    def logConNI(self): return self.baseline_norm_exp
    def logExpNI(self): return self.exper_norm_exp
    def Pval(self): return self.pval
    def ProbesetExprData(self): return self.ped
    def __repr__(self):
        ### Bug fix: ConNI()/ExpNI() return lists, so concatenating them with
        ### the '|' string raised TypeError; convert to str before joining.
        return str(self.ConNI())+'|'+str(self.ExpNI())
def calculateAllASPIREScores(p1,p2):
    """Compute per-sample ASPIRE scores for a reciprocal junction pair.

    p1/p2 are JunctionExpressionData objects for the inclusion and exclusion
    probesets. Returns (baseline_scores, exp_scores, aspireP): one score per
    control sample, one per experimental sample, and a comparison p-value
    between the two score lists ('NA' when the statistic fails or equals 1).
    Relies on the project `statistics` module and the module-level
    `probability_statistic` setting.
    """
    ### Group-average non-log ratios used as the fixed reference values
    b1o = p1.ConNIAvg(); b2o = p2.ConNIAvg()
    e1o = p1.ExpNIAvg(); e2o = p2.ExpNIAvg(); original_score = statistics.aspire_stringent(b1o,e1o,b2o,e2o)
    index=0; baseline_scores=[] ### Loop through each control ratio and compare to control ratio mean
    for b1 in p1.ConNI():
        b2 = p2.ConNI()[index]
        score = statistics.aspire_stringent(b2,e2o,b1,e1o); index+=1
        baseline_scores.append(score)
    index=0; exp_scores=[] ### Loop through each experimental ratio and compare to control ratio mean
    for e1 in p1.ExpNI():
        e2 = p2.ExpNI()[index]
        score = statistics.aspire_stringent(b1o,e1,b2o,e2); index+=1
        exp_scores.append(score)
    try:
        aspireP = statistics.runComparisonStatistic(baseline_scores,exp_scores,probability_statistic)
    except Exception: aspireP = 'NA' ### Occurs when analyzing two groups with no variance
    if aspireP == 1: aspireP = 'NA'
    ### The triple-quoted block below is intentionally dead debugging code
    """
    if aspireP<0.05 and oscore>0.2 and statistics.avg(exp_scores)<0:
        index=0
        for e1 in p1.ExpNI():
            e2 = p2.ExpNI()[index]
            score = statistics.aspire_stringent(b1,e1,b2,e2)
            print p1.ExpNI(), p2.ExpNI(); print e1, e2
            print e1o,e2o; print b1, b2; print score, original_score
            print exp_scores, statistics.avg(exp_scores); kill"""
    return baseline_scores, exp_scores, aspireP
def stringListConvert(ls):
    """Return a new list with every element of *ls* converted to its str form."""
    return [str(element) for element in ls]
def analyzeJunctionSplicing(nonlog_NI_db):
    """Score reciprocal-junction alternative-splicing events (ASPIRE or linregres).

    nonlog_NI_db keys are probesets that passed upstream expression filtering.
    For every probeset, per-sample expression ratios normalized to constitutive
    gene expression are computed; then every reciprocal junction event in the
    module-level `alt_junction_db` is scored with either the ASPIRE statistic
    or a linear-regression fold, filtered on p-value/fold cutoffs, and
    collected. Returns (splice_event_list, probeset_comp_db, permute_p_values,
    excluded_probeset_db).

    NOTE(review): this function reads and mutates many module-level globals
    (array_raw_group_values, exon_db, avg_const_exp_db, alt_junction_db,
    probeset_comp_db, permute_lists, analysis_method, thresholds, ...) —
    assumed to be initialized by the surrounding pipeline before this call.
    """
    group_sizes = []; original_array_indices = permute_lists[0] ###p[0] is the original organization of the group samples prior to permutation
    for group in original_array_indices: group_sizes.append(len(group))
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specifc splicing pattern)
    if len(filtered_probeset_db)>0:
        temp_db={}
        for probeset in nonlog_NI_db: temp_db[probeset]=[]
        for probeset in temp_db:
            try: filtered_probeset_db[probeset]
            except KeyError: del nonlog_NI_db[probeset]
    ### Used to the export relative individual adjusted probesets fold changes used for splicing index values
    if export_NI_values == 'yes':
        global NIdata_export
        summary_output = root_dir+'AltResults/RawSpliceData/'+species+'/'+analysis_method+'/'+dataset_name[:-1]+'.txt'
        NIdata_export = export.ExportFile(summary_output)
        title = string.join(['inclusion-probeset','exclusion-probeset']+original_array_names,'\t')+'\n'; NIdata_export.write(title)
    ### Calculate a probeset p-value adjusted for constitutive expression levels (taken from splicing index method)
    xl=0
    probeset_normIntensity_db={}
    for probeset in array_raw_group_values:
        ed = exon_db[probeset]; geneid = ed.GeneID(); xl+=1
        #if geneid in alt_junction_db and geneid in original_avg_const_exp_db: ### Don't want this filter since it causes problems for Trans-splicing
        group_index = 0; si_interim_group_db={}; ge_threshold_count=0; value_count = 0
        ### Prepare normalized expression lists for recipricol-junction algorithms
        if geneid in avg_const_exp_db:
            for group_values in array_raw_group_values[probeset]:
                value_index = 0; ratio_hash=[]
                for value in group_values: ###Calculate normalized ratio's for each condition and save raw values for later permutation
                    exp_val = value;ge_val = avg_const_exp_db[geneid][value_count]; exp_ratio = exp_val-ge_val
                    ratio_hash.append(exp_ratio); value_index +=1; value_count +=1
                si_interim_group_db[group_index] = ratio_hash
                group_index+=1
            group1_ratios = si_interim_group_db[0]; group2_ratios = si_interim_group_db[1]
            ### Calculate and store simple expression summary stats
            data_list1 = array_raw_group_values[probeset][0]; data_list2 = array_raw_group_values[probeset][1]
            baseline_exp = statistics.avg(data_list1); experimental_exp = statistics.avg(data_list2); fold_change = experimental_exp - baseline_exp
            #group_name1 = array_group_list[0]; group_name2 = array_group_list[1]
            try:
                ttest_exp_p = statistics.runComparisonStatistic(data_list1,data_list2,probability_statistic)
            except Exception: ttest_exp_p = 'NA'
            if ttest_exp_p == 1: ttest_exp_p = 'NA'
            adj_fold = statistics.avg(group2_ratios) - statistics.avg(group1_ratios)
            ped = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p, '')
            try:
                try:
                    normIntensityP = statistics.runComparisonStatistic(group1_ratios,group2_ratios,probability_statistic)
                except Exception:
                    #print group1_ratios,group2_ratios,array_raw_group_values[probeset],avg_const_exp_db[geneid];kill
                    normIntensityP = 'NA' ###occurs for constitutive probesets
            except Exception: normIntensityP = 0
            if normIntensityP == 1: normIntensityP = 'NA'
            ji = JunctionExpressionData(group1_ratios, group2_ratios, normIntensityP, ped)
            probeset_normIntensity_db[probeset]=ji ### store and access this below
            #if probeset == 'G6899622@J916374@j_at': print normIntensityP,group1_ratios,group2_ratios;kill
            ###Concatenate the two raw expression groups into a single list for permutation analysis
            ls_concatenated = []
            for group in array_raw_group_values[probeset]:
                for entry in group: ls_concatenated.append(entry)
            if analysis_method == 'linearregres': ###Convert out of log space
                ls_concatenated = statistics.log_fold_conversion_fraction(ls_concatenated)
            ### NOTE: this overwrites the grouped structure with a flat list
            array_raw_group_values[probeset] = ls_concatenated
    s = 0; t = 0; y = ''; denominator_events=0; excluded_probeset_db = {}
    splice_event_list=[]; splice_event_list_mx=[]; splice_event_list_non_mx=[]; event_mx_temp = []; permute_p_values={} #use this to exclude duplicate mx events
    for affygene in alt_junction_db:
        if affygene in original_avg_const_exp_db:
            ### Constitutive (gene-level) expression per group and its fold
            constit_exp1 = original_avg_const_exp_db[affygene][0]
            constit_exp2 = original_avg_const_exp_db[affygene][1]
            ge_fold=constit_exp2-constit_exp1
            for event in alt_junction_db[affygene]:
                if array_type == 'AltMouse':
                    #event = [('ei', 'E16-E17'), ('ex', 'E16-E18')]
                    #critical_exon_db[affygene,tuple(critical_exons)] = [1,'E'+str(e1a),'E'+str(e2b)] --- affygene,tuple(event) == key, 1 indicates both are either up or down together
                    event_call = event[0][0] + '-' + event[1][0]
                    exon_set1 = event[0][1]; exon_set2 = event[1][1]
                    probeset1 = exon_dbase[affygene,exon_set1]
                    probeset2 = exon_dbase[affygene,exon_set2]
                    critical_exon_list = critical_exon_db[affygene,tuple(event)]
                if array_type == 'junction' or array_type == 'RNASeq':
                    event_call = 'ei-ex' ### Below objects from JunctionArrayEnsemblRules - class JunctionInformation
                    probeset1 = event.InclusionProbeset(); probeset2 = event.ExclusionProbeset()
                    exon_set1 = event.InclusionJunction(); exon_set2 = event.ExclusionJunction()
                    try: novel_event = event.NovelEvent()
                    except Exception: novel_event = 'known'
                    critical_exon_list = [1,event.CriticalExonSets()]
                key,jd = formatJunctionData([probeset1,probeset2],affygene,critical_exon_list[1])
                if array_type == 'junction' or array_type == 'RNASeq':
                    try: jd.setSymbol(annotate_db[affygene].Symbol())
                    except Exception:null=[]
                #if '|' in probeset1: print probeset1, key,jd.InclusionDisplay();kill
                probeset_comp_db[key] = jd ### This is used for the permutation analysis and domain/mirBS import
                #print probeset1,probeset2, critical_exon_list,event_call,exon_set1,exon_set2;kill
                if probeset1 in nonlog_NI_db and probeset2 in nonlog_NI_db:
                    denominator_events+=1
                    try: p1 = probeset_normIntensity_db[probeset1]; p2 = probeset_normIntensity_db[probeset2]
                    except Exception:
                        print probeset1, probeset2
                        p1 = probeset_normIntensity_db[probeset1]
                        p2 = probeset_normIntensity_db[probeset2]
                    #if '|' in probeset1: print
                    pp1 = p1.Pval(); pp2 = p2.Pval()
                    baseline_ratio1 = p1.ConNIAvg()
                    experimental_ratio1 = p1.ExpNIAvg()
                    baseline_ratio2 = p2.ConNIAvg()
                    experimental_ratio2 = p2.ExpNIAvg()
                    ped1 = p1.ProbesetExprData()
                    ped2 = p2.ProbesetExprData()
                    Rin = ''; Rex = ''
                    r = 0 ###Variable used to determine if we should take the absolute value of dI for mutually exlcusive events
                    if event_call == 'ei-ex': #means probeset1 is an exon inclusion and probeset2 is an exon exclusion
                        Rin = baseline_ratio1/experimental_ratio1 # Rin=A/C
                        Rex = baseline_ratio2/experimental_ratio2 # Rin=B/D
                        I1=baseline_ratio1/(baseline_ratio1+baseline_ratio2)
                        I2=experimental_ratio1/(experimental_ratio1+experimental_ratio2)
                        ###When Rex is larger, the exp_ratio for exclusion is decreased in comparison to baseline.
                        ###Thus, increased inclusion (when Rin is small, inclusion is big)
                        if (Rin>1 and Rex<1): y = 'downregulated'
                        elif (Rin<1 and Rex>1): y = 'upregulated'
                        elif (Rex<Rin): y = 'downregulated'
                        else: y = 'upregulated'
                    temp_list = []
                    if event_call == 'mx-mx':
                        temp_list.append(exon_set1); temp_list.append(exon_set2);temp_list.sort()
                        if (affygene,temp_list) not in event_mx_temp: #use this logic to prevent mx entries being added more than once
                            event_mx_temp.append((affygene,temp_list))
                            ###Arbitrarily choose which exon-set will be Rin or Rex, does matter for mutually exclusive events
                            Rin = baseline_ratio1/experimental_ratio1 # Rin=A/C
                            Rex = baseline_ratio2/experimental_ratio2 # Rin=B/D
                            I1=baseline_ratio1/(baseline_ratio1+baseline_ratio2)
                            I2=experimental_ratio1/(experimental_ratio1+experimental_ratio2)
                            y = 'mutually-exclusive'; r = 1
                    if analysis_method == 'ASPIRE' and Rex != '':
                        #if affygene == 'ENSMUSG00000000126': print Rin, Rex, probeset1, probeset2
                        if (Rin>1 and Rex<1) or (Rin<1 and Rex>1):
                            s +=1
                            ### ASPIRE dI: change in estimated exon-inclusion between groups
                            in1=((Rex-1.0)*Rin)/(Rex-Rin); in2=(Rex-1.0)/(Rex-Rin)
                            dI = ((in2-in1)+(I2-I1))/2.0 #modified to give propper exon inclusion
                            dI = dI*(-1) ### Reverse the fold to make equivalent to splicing-index and FIRMA scores
                            try: baseline_scores, exp_scores, aspireP = calculateAllASPIREScores(p1,p2)
                            except Exception: baseline_scores = [0]; exp_scores=[dI]; aspireP = 0
                            if export_NI_values == 'yes':
                                baseline_scores = stringListConvert(baseline_scores); exp_scores = stringListConvert(exp_scores)
                                ev = string.join([probeset1,probeset2]+baseline_scores+exp_scores,'\t')+'\n'; NIdata_export.write(ev)
                            if max_replicates >2 or equal_replicates==2:
                                permute_p_values[(probeset1,probeset2)] = [aspireP, 'NA', 'NA', 'NA']
                            if r == 1: dI = abs(dI) ###Occurs when event is mutually exclusive
                            #if abs(dI)>alt_exon_logfold_cutoff: print [dI],pp1,pp2,aspireP;kill
                            #print [affygene,dI,pp1,pp2,aspireP,event.CriticalExonSets(),probeset1,probeset2,alt_exon_logfold_cutoff,p_threshold]
                            if ((pp1<p_threshold or pp2<p_threshold) or pp1==1 or pp1=='NA') and abs(dI) > alt_exon_logfold_cutoff: ###Require that the splice event have a constitutive corrected p less than the user defined threshold
                                ejd = ExonJunctionData(dI,probeset1,probeset2,pp1,pp2,y,event_call,critical_exon_list,affygene,ped1,ped2)
                                """if probeset1 == 'ENSMUSG00000033335:E16.1-E17.1' and probeset2 == 'ENSMUSG00000033335:E16.1-E19.1':
                                    print [dI,pp1,pp2,p_threshold,alt_exon_logfold_cutoff]
                                    print baseline_scores, exp_scores, [aspireP]#;sys.exit()"""
                                ejd.setConstitutiveExpression(constit_exp1); ejd.setConstitutiveFold(ge_fold)
                                if perform_permutation_analysis == 'yes': splice_event_list.append((dI,ejd))
                                elif aspireP < permute_p_threshold or aspireP=='NA': splice_event_list.append((dI,ejd))
                                #if abs(dI)>.2: print probeset1, probeset2, critical_exon_list, [exon_set1], [exon_set2]
                                #if dI>.2 and aspireP<0.05: print baseline_scores,exp_scores,aspireP, statistics.avg(exp_scores), dI
                            elif array_type == 'junction' or array_type == 'RNASeq':
                                excluded_probeset_db[affygene+':'+event.CriticalExonSets()[0]] = probeset1, affygene, dI, 'NA', aspireP
                            if array_type == 'RNASeq':
                                try: ejd.setNovelEvent(novel_event)
                                except Exception: None
                    if analysis_method == 'linearregres' and Rex != '':
                        s+=1
                        log_fold,linregressP,rsqrd_status = getLinearRegressionScores(probeset1,probeset2,group_sizes)
                        log_fold = log_fold ### Reverse the fold to make equivalent to splicing-index and FIRMA scores
                        if max_replicates >2 or equal_replicates==2: permute_p_values[(probeset1,probeset2)] = [linregressP, 'NA', 'NA', 'NA']
                        if rsqrd_status == 'proceed':
                            if ((pp1<p_threshold or pp2<p_threshold) or pp1==1 or pp1=='NA') and abs(log_fold) > alt_exon_logfold_cutoff: ###Require that the splice event have a constitutive corrected p less than the user defined threshold
                                ejd = ExonJunctionData(log_fold,probeset1,probeset2,pp1,pp2,y,event_call,critical_exon_list,affygene,ped1,ped2)
                                ejd.setConstitutiveExpression(constit_exp1); ejd.setConstitutiveFold(ge_fold)
                                if perform_permutation_analysis == 'yes': splice_event_list.append((log_fold,ejd))
                                elif linregressP < permute_p_threshold: splice_event_list.append((log_fold,ejd))
                                #if probeset1 == 'G6990053@762121_762232_at' and probeset2 == 'G6990053@J926254@j_at':
                                #print event_call, critical_exon_list,affygene, Rin, Rex, y, temp_list;kill
                            elif array_type == 'junction' or array_type == 'RNASeq':
                                excluded_probeset_db[affygene+':'+event.CriticalExonSets()[0]] = probeset1, affygene, log_fold, 'NA', linregressP
                            if array_type == 'RNASeq':
                                try: ejd.setNovelEvent(novel_event)
                                except Exception: None
                else: t +=1
    clearObjectsFromMemory(probeset_normIntensity_db)
    probeset_normIntensity_db={}; ### Potentially large memory object containing summary stats for all probesets
    statistics.adjustPermuteStats(permute_p_values)
    summary_data_db['denominator_exp_events']=denominator_events
    print "Number of exon-events analyzed:", s
    print "Number of exon-events excluded:", t
    return splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db
def maxReplicates():
    """Estimate replicate counts from the global array_raw_group_values.

    Returns (max_replicates, equal_replicates): the average number of samples
    per condition, and the common group size when all sampled groups are the
    same size (0 otherwise). Used upstream to decide whether permutation /
    comparison statistics have enough replicates to be meaningful.

    NOTE(review): only the first probeset is inspected (both loops `break`) —
    assumes the replicate structure is identical across probesets.
    """
    replicates=0; greater_than_two=0; greater_than_one=0; group_sizes=[]
    for probeset in array_raw_group_values:
        for group_values in array_raw_group_values[probeset]:
            try:
                replicates+=len(group_values); group_sizes.append(len(group_values))
                if len(group_values)>2: greater_than_two+=1
                elif len(group_values)>1: greater_than_one+=1
            ### Fallback when values are not nested per-group (flat list)
            except Exception: replicates+=len(array_raw_group_values[probeset]); break
        break
    group_sizes = unique.unique(group_sizes)
    ### All groups the same size -> report that size; otherwise flag with 0
    if len(group_sizes) == 1: equal_replicates = group_sizes[0]
    else: equal_replicates = 0
    max_replicates = replicates/float(original_conditions)
    if max_replicates<2.01:
        ### Mixed replicate counts (some >2, some 2) still permit stats
        if greater_than_two>0 and greater_than_one>0: max_replicates=3
    return max_replicates, equal_replicates
def furtherProcessJunctionScores(splice_event_list, probeset_comp_db, permute_p_values):
    """Post-process scored junction events: permutation stats and AS filtering.

    Sorts events by descending score, optionally runs the permutation analysis
    (when enabled and replicates allow), derives the p-value method label,
    filters events for evidence of alternative splicing when `filter_for_AS`
    is 'yes', and attaches splicing-call annotations to the comparison DBs.
    Returns (new_splice_event_list, p_value_call, permute_p_values,
    probeset_comp_db, regulated_exon_junction_db).

    NOTE(review): reads module-level globals (analysis_method, max_replicates,
    equal_replicates, probability_statistic, filter_for_AS, array_type,
    alt_junction_db) — assumed set by the surrounding pipeline.
    """
    splice_event_list.sort(); splice_event_list.reverse()
    print "filtered %s scores:" % analysis_method, len(splice_event_list)
    if perform_permutation_analysis == 'yes':
        ###*********BEGIN PERMUTATION ANALYSIS*********
        if max_replicates >2 or equal_replicates==2:
            splice_event_list, p_value_call, permute_p_values = permuteSplicingScores(splice_event_list)
        else:
            print "WARNING...Not enough replicates to perform permutation analysis."
            p_value_call=''; permute_p_values = {}
    else:
        ### No permutation: label which comparison statistic produced p-values
        if max_replicates >2 or equal_replicates==2:
            if probability_statistic == 'unpaired t-test':
                p_value_call=analysis_method+'-OneWayAnova'
            else:
                p_value_call=analysis_method+'-'+probability_statistic
        else:
            if probability_statistic == 'unpaired t-test':
                p_value_call='OneWayAnova'; permute_p_values = {}
            else:
                p_value_call=probability_statistic; permute_p_values = {}
    print len(splice_event_list), 'alternative events after subsequent filtering (optional)'
    ### Get ExonJunction annotaitons
    junction_splicing_annot_db = getJunctionSplicingAnnotations(probeset_comp_db)
    regulated_exon_junction_db={}; new_splice_event_list=[]
    if filter_for_AS == 'yes': print "Filtering for evidence of Alternative Splicing"
    for (fold,ejd) in splice_event_list:
        proceed = 'no'
        if filter_for_AS == 'yes':
            try:
                ja = junction_splicing_annot_db[ejd.Probeset1(),ejd.Probeset2()]; splicing_call = ja.SplicingCall()
                if splicing_call == 1: proceed = 'yes'
            except KeyError: proceed = 'no'
        else: proceed = 'yes'
        if proceed == 'yes':
            key,jd = formatJunctionData([ejd.Probeset1(),ejd.Probeset2()],ejd.GeneID(),ejd.CriticalExons())
            regulated_exon_junction_db[key] = jd ### This is used for the permutation analysis and domain/mirBS import
            new_splice_event_list.append((fold,ejd))
            ### Add junction probeset lookup for reciprocal junctions composed of an exonid (not in protein database currently)
            if array_type == 'RNASeq' and '-' not in key[0]: ### Thus, it is an exon compared to a junction
                events = alt_junction_db[ejd.GeneID()]
                for ji in events:
                    if (ji.InclusionProbeset(),ji.ExclusionProbeset()) == key:
                        jd.setInclusionLookup(ji.InclusionLookup()) ### This is the source junction from which the exon ID comes from
                        probeset_comp_db[ji.InclusionLookup(),ji.ExclusionProbeset()]=jd
                        #print ji.InclusionProbeset(),ji.ExclusionProbeset(),' ',ji.InclusionLookup()
    if filter_for_AS == 'yes': print len(new_splice_event_list), "remaining after filtering for evidence of Alternative splicing"
    filtered_exon_db = {}
    for junctions in probeset_comp_db:
        rj = probeset_comp_db[junctions] ### Add splicing annotations to the AltMouse junction DBs (needed for permutation analysis statistics and filtering)
        try: ja = junction_splicing_annot_db[junctions]; splicing_call = ja.SplicingCall(); rj.setSplicingCall(ja.SplicingCall())
        except KeyError: rj.setSplicingCall(0)
        if filter_for_AS == 'yes': filtered_exon_db[junctions] = rj
    for junctions in regulated_exon_junction_db:
        rj = regulated_exon_junction_db[junctions]
        try: ja = junction_splicing_annot_db[junctions]; rj.setSplicingCall(ja.SplicingCall())
        except KeyError: rj.setSplicingCall(0)
    if filter_for_AS == 'yes': probeset_comp_db = filtered_exon_db
    ### Free the (large) event DB; ignore failures if already cleared
    try: clearObjectsFromMemory(alt_junction_db)
    except Exception: null=[]
    return new_splice_event_list, p_value_call, permute_p_values, probeset_comp_db, regulated_exon_junction_db
class SplicingScoreData:
    """Base class for scored alternative-splicing events.

    Subclasses populate the underscore attributes in their constructors; this
    class only provides the shared accessor interface. Score-like values are
    returned as strings, ready for tab-delimited report export.
    """
    def Method(self):
        ###e.g. ASPIRE
        return self._method

    def Score(self):
        return str(self._score)

    def Probeset1(self):
        return self._probeset1

    def Probeset2(self):
        return self._probeset2

    def RegulationCall(self):
        return self._regulation_call

    def GeneID(self):
        return self._geneid

    def CriticalExons(self):
        ### Element [0] is a flag; the exon list itself lives at index 1
        return self._critical_exon_list[1]

    def CriticalExonTuple(self):
        return self._critical_exon_list

    def TTestNormalizedRatios(self):
        return self._normIntensityP

    def TTestNormalizedRatios2(self):
        return self._normIntensityP2

    def setConstitutiveFold(self, exp_log_ratio):
        self._exp_log_ratio = exp_log_ratio

    def ConstitutiveFold(self):
        return str(self._exp_log_ratio)

    def setConstitutiveExpression(self, const_baseline):
        self.const_baseline = const_baseline

    def ConstitutiveExpression(self):
        return str(self.const_baseline)

    def setProbesetExpressionData(self, ped):
        self.ped1 = ped

    def ProbesetExprData1(self):
        return self.ped1

    def ProbesetExprData2(self):
        return self.ped2

    def setNovelEvent(self, novel_event):
        self._novel_event = novel_event

    def NovelEvent(self):
        return self._novel_event

    def EventCall(self):
        ###e.g. Exon inclusion (ei) Exon exclusion (ex), ei-ex, reported in that direction
        return self._event_call

    def Report(self):
        ### Compact method|gene|exon1|exon2... summary used by __repr__
        output = self.Method() +'|'+ self.GeneID() +'|'+ string.join(self.CriticalExons(),'|')
        return output

    def __repr__(self):
        return self.Report()
class ExonJunctionData(SplicingScoreData):
    """Scored reciprocal-junction splicing event (ASPIRE or linear regression).

    All accessors are inherited from SplicingScoreData. The scoring method is
    captured from the module-level `analysis_method` global at construction.
    """
    def __init__(self, score, probeset1, probeset2, probeset1_p, probeset2_p,
                 regulation_call, event_call, critical_exon_list, affygene, ped1, ped2):
        self._score = score
        self._probeset1 = probeset1
        self._probeset2 = probeset2
        self._regulation_call = regulation_call
        self._event_call = event_call
        self._critical_exon_list = critical_exon_list
        self._geneid = affygene
        self._method = analysis_method  # module global: active algorithm name
        self._normIntensityP = probeset1_p
        self._normIntensityP2 = probeset2_p
        self.ped1 = ped1
        self.ped2 = ped2
class ExonData(SplicingScoreData):
    """Scored exon-level splicing-index event.

    The regulation call is derived from the sign of the splicing index:
    baseline is the numerator ratio, so a positive index means reduced
    inclusion (downregulated) in the experimental group.
    """
    def __init__(self, splicing_index, probeset, critical_exon_list, geneid,
                 group1_ratios, group2_ratios, normIntensityP, opposite_SI_log_mean):
        self._score = splicing_index
        self._probeset1 = probeset
        self._opposite_SI_log_mean = opposite_SI_log_mean
        self._critical_exon_list = critical_exon_list
        self._geneid = geneid
        self._baseline_ratio1 = group1_ratios
        self._experimental_ratio1 = group2_ratios
        self._normIntensityP = normIntensityP
        self._method = analysis_method  # module global: active algorithm name
        self._event_call = 'exon-inclusion'
        ###Since baseline is the numerator ratio
        self._regulation_call = 'downregulated' if splicing_index > 0 else 'upregulated'

    def OppositeSIRatios(self):
        return self._opposite_SI_log_mean
class ExcludedExonData(ExonData):
    """Minimal ExonData variant for events excluded from the main result set.

    Only the score, gene and normalized-intensity p-value are stored; other
    inherited accessors are unavailable on these instances.
    """
    def __init__(self, splicing_index, geneid, normIntensityP):
        self._score = splicing_index
        self._geneid = geneid
        self._normIntensityP = normIntensityP
def getAllPossibleLinearRegressionScores(probeset1, probeset2, positions, group_sizes):
    """Find the strongest pairwise-group linear-regression fold for two probesets.

    positions is a list of (start, stop) index ranges, one per sample group,
    into the flat expression lists stored in the global array_raw_group_values.
    Every ordered pair of distinct groups is compared; the index pair is
    oriented so each score reports upregulation. Returns the best
    (abs_log_fold, group_index1, group_index2), or zeros when no comparison
    could be made.
    """
    ### Raw expression values for the two probesets
    exp_a = array_raw_group_values[probeset1]
    exp_b = array_raw_group_values[probeset2]
    ### Perform all possible pairwise comparisons between groups
    ### (not sure how this will work for 10+ groups)
    scored_pairs = []
    for idx_x, (start_x, stop_x) in enumerate(positions):
        for idx_y, (start_y, stop_y) in enumerate(positions):
            if start_x == start_y:
                continue  # never compare a group with itself
            slope_fold, rsqrd = performLinearRegression(
                exp_a[start_x:stop_x], exp_b[start_x:stop_x],
                exp_a[start_y:stop_y], exp_b[start_y:stop_y])
            ### Orient indices so all scores indicate upregulation
            if slope_fold < 0:
                ordered = (idx_y, idx_x)
            else:
                ordered = (idx_x, idx_y)
            scored_pairs.append((abs(slope_fold), ordered[0], ordered[1]))
    ### Keep the single strongest comparison; default to zeros if none exist
    try:
        best_fold, best_i1, best_i2 = max(scored_pairs)
    except Exception:
        best_fold = 0; best_i1 = 0; best_i2 = 0
    return best_fold, best_i1, best_i2
def getLinearRegressionScores(probeset1,probeset2,group_sizes):
    """Two-group linear-regression score for a reciprocal probeset pair.

    Splits each probeset's flat expression list (global
    array_raw_group_values) into baseline/experimental slices using
    group_sizes[0], then delegates to getAllLinearRegressionScores.
    Returns (log_fold, linregressP, rsqrd_status).
    """
    ### Get Raw expression values for the two probests
    p1_exp = array_raw_group_values[probeset1]
    p2_exp = array_raw_group_values[probeset2]
    try:
        p1_g1 = p1_exp[:group_sizes[0]]; p1_g2 = p1_exp[group_sizes[0]:]
        p2_g1 = p2_exp[:group_sizes[0]]; p2_g2 = p2_exp[group_sizes[0]:]
    except Exception:
        ### Dump diagnostics, then abort on purpose: `force_kill` is an
        ### undefined name, so this raises NameError to halt the pipeline.
        print probeset1,probeset2
        print p1_exp
        print p2_exp
        print group_sizes
        force_kill
    log_fold, linregressP, rsqrd = getAllLinearRegressionScores(probeset1,probeset2,p1_g1,p2_g1,p1_g2,p2_g2,2)
    return log_fold, linregressP, rsqrd
def getAllLinearRegressionScores(probeset1,probeset2,p1_g1,p2_g1,p1_g2,p2_g2,groups):
    """Linear-regression fold plus a per-sample-derived p-value.

    Computes the overall slope-ratio log fold between the two groups, then
    re-fits each individual sample against the group-1 baseline to build two
    per-sample score lists, which are compared with the configured
    `probability_statistic` (project `statistics` module). On any failure the
    p-value defaults to 0 (i.e. treated as significant downstream). When NI
    export is enabled and exactly two groups are analyzed, the per-sample
    scores are written to the global NIdata_export file.
    Returns (log_fold, linregressP, rsqrd).
    """
    log_fold, rsqrd = performLinearRegression(p1_g1,p2_g1,p1_g2,p2_g2)
    try:
        ### Repeat for each sample versus baselines to calculate a p-value
        index=0; group1_scores=[]
        for p1_g1_sample in p1_g1:
            p2_g1_sample = p2_g1[index]
            log_f, rs = performLinearRegression(p1_g1,p2_g1,[p1_g1_sample],[p2_g1_sample])
            group1_scores.append(log_f); index+=1
        index=0; group2_scores=[]
        for p1_g2_sample in p1_g2:
            p2_g2_sample = p2_g2[index]
            log_f, rs = performLinearRegression(p1_g1,p2_g1,[p1_g2_sample],[p2_g2_sample])
            group2_scores.append(log_f); index+=1
        try:
            linregressP = statistics.runComparisonStatistic(group1_scores,group2_scores,probability_statistic)
        except Exception:
            ### Statistic failed: fall back to p=0 with placeholder scores
            linregressP = 0; group1_scores = [0]; group2_scores = [log_fold]
        if linregressP == 1: linregressP = 0
    except Exception:
        linregressP = 0; group1_scores = [0]; group2_scores = [log_fold]
    if export_NI_values == 'yes' and groups==2:
        group1_scores = stringListConvert(group1_scores)
        group2_scores = stringListConvert(group2_scores)
        ev = string.join([probeset1,probeset2]+group1_scores+group2_scores,'\t')+'\n'; NIdata_export.write(ev)
    return log_fold, linregressP, rsqrd
def performLinearRegression(p1_g1, p2_g1, p1_g2, p2_g2):
    """Regress probeset-1 against probeset-2 expression within each group.

    Fits one slope per group (baseline and experimental) and returns the log
    fold of the slope ratio together with a status flag. The r-squared based
    'proceed'/'hault' filtering is currently disabled, so the status is always
    'proceed'. Uses R's rlm when the module-level `use_R` flag is 'yes',
    otherwise a pure-python least-squares fit (project `statistics` module).
    """
    return_rsqrd = 'no'
    if use_R == 'yes':
        ###Uses the RLM algorithm
        baseline_slope = statistics.LinearRegression(p1_g1, p2_g1, return_rsqrd)
        experimental_slope = statistics.LinearRegression(p1_g2, p2_g2, return_rsqrd)
    else:
        ###Uses a basic least squared method
        baseline_slope = statistics.simpleLinRegress(p1_g1, p2_g1)
        experimental_slope = statistics.simpleLinRegress(p1_g2, p2_g2)
    log_fold = statistics.convert_to_log_fold(experimental_slope / baseline_slope)
    #if g1_rsqrd > 0 and g2_rsqrd > 0: rsqrd = 'proceed'
    #else: rsqrd = 'hault'
    return log_fold, 'proceed'
########### Permutation Analysis Functions ###########
def permuteLinearRegression(probeset1, probeset2, p):
    """Linear-regression log fold for one permutation of the sample labels.

    p is a pair of index lists defining the permuted baseline/experimental
    sample assignment. The original implementation duplicated the body of
    performLinearRegression verbatim (use_R branch, slope fitting, log-fold
    conversion); it now delegates to that function so the two code paths
    cannot drift apart. Behavior is unchanged — the status flag returned by
    performLinearRegression is simply discarded here.
    """
    p1_exp = array_raw_group_values[probeset1]
    p2_exp = array_raw_group_values[probeset2]
    ### Re-partition each probeset's flat value list into the permuted groups
    p1_g1, p1_g2 = permute_samples(p1_exp, p)
    p2_g1, p2_g2 = permute_samples(p2_exp, p)
    log_fold, rsqrd = performLinearRegression(p1_g1, p2_g1, p1_g2, p2_g2)
    return log_fold
def permuteSplicingScores(splice_event_list):
    """Compute permutation p-values for every scored splicing event.

    For each (score, event) in splice_event_list, the score is recomputed for
    every sample permutation in the global `permute_lists` and the observed
    score is ranked against that null distribution via statistics.permute_p.
    Returns (splice_event_list, p_value_call, permute_p_values); for the
    linearregres method the event list is additionally filtered to
    permutation-significant events.
    """
    p_value_call = 'lowest_raw_p'
    permute_p_values = {}; splice_event_list2=[]
    if len(permute_lists) > 0:
        #tuple_data in splice_event_list = dI,probeset1,probeset2,y,event_call,critical_exon_list
        all_samples = []; a = 0
        for (score,x) in splice_event_list:
            ###NOTE: This reference dI differs slightly from the below calculated, since the values are calculated from raw relative ratios rather than the avg
            ###Solution: Use the first calculated dI as the reference
            score = score*(-1) ### Reverse the score to make equivalent to splicing-index and FIRMA scores
            ref_splice_val = score; probeset1 = x.Probeset1(); probeset2 = x.Probeset2(); affygene = x.GeneID()
            y = 0; p_splice_val_dist = []; count = 0; return_rsqrd = 'no'
            for p in permute_lists: ###There are two lists in each entry
                count += 1
                permute = 'yes'
                if analysis_method == 'ASPIRE':
                    p_splice_val = permute_ASPIRE_filtered(affygene, probeset1,probeset2,p,y,ref_splice_val,x)
                elif analysis_method == 'linearregres':
                    slope_ratio = permuteLinearRegression(probeset1,probeset2,p)
                    p_splice_val = slope_ratio
                if p_splice_val != 'null': p_splice_val_dist.append(p_splice_val)
                y+=1
            p_splice_val_dist.sort()
            ### Truncate to 8 characters so the reference matches the permuted scores
            new_ref_splice_val = str(abs(ref_splice_val)); new_ref_splice_val = float(new_ref_splice_val[0:8]) #otherwise won't match up the scores correctly
            if analysis_method == 'linearregres':
                ### Mirror the null distribution when the observed fold is negative
                if ref_splice_val<0:
                    p_splice_val_dist2=[]
                    for val in p_splice_val_dist: p_splice_val_dist2.append(-1*val)
                    p_splice_val_dist=p_splice_val_dist2; p_splice_val_dist.reverse()
            p_val, pos_permute, total_permute, greater_than_true_permute = statistics.permute_p(p_splice_val_dist,new_ref_splice_val,len(permute_lists))
            #print p_val,ref_splice_val, pos_permute, total_permute, greater_than_true_permute,p_splice_val_dist[-3:];kill
            ###When two groups are of equal size, there will be 2 pos_permutes rather than 1
            if len(permute_lists[0][0]) == len(permute_lists[0][1]): greater_than_true_permute = (pos_permute/2) - 1 #size of the two groups are equal
            else:greater_than_true_permute = (pos_permute) - 1
            if analysis_method == 'linearregres': greater_than_true_permute = (pos_permute) - 1 ###since this is a one sided test, unlike ASPIRE
            ###Below equation is fine if the population is large
            permute_p_values[(probeset1,probeset2)] = [p_val, pos_permute, total_permute, greater_than_true_permute]
            ###Remove non-significant linear regression results
            if analysis_method == 'linearregres':
                if p_val <= permute_p_threshold or greater_than_true_permute < 2: splice_event_list2.append((score,x)) ###<= since many p=0.05
    print "Number of permutation p filtered splice event:",len(splice_event_list2)
    if len(permute_p_values)>0: p_value_call = 'permuted_aspire_p-value'
    if analysis_method == 'linearregres': splice_event_list = splice_event_list2
    return splice_event_list, p_value_call, permute_p_values
def permute_ASPIRE_filtered(affygene,probeset1,probeset2,p,y,ref_splice_val,x):
    """ASPIRE score for one permutation of the sample-group assignment.

    Averages the permuted baseline/experimental values for both junction
    probesets and the gene's constitutive expression, then computes the
    absolute ASPIRE statistic on the constitutive-normalized ratios. Returns
    the permuted score (0 when the statistic fails). The bare `kill` names in
    the error paths deliberately raise NameError to halt the pipeline after
    printing diagnostics.
    """
    ### Get raw expression values for each permuted group for the two probesets
    b1,e1 = permute_dI(array_raw_group_values[probeset1],p)
    try: b2,e2 = permute_dI(array_raw_group_values[probeset2],p)
    except IndexError: print probeset2, array_raw_group_values[probeset2],p; kill
    ### Get the average constitutive expression values (averaged per-sample across probesets) for each permuted group
    try: bc,ec = permute_dI(avg_const_exp_db[affygene],p)
    except IndexError: print affygene, avg_const_exp_db[affygene],p; kill
    if factor_out_expression_changes == 'no':
        ### Ignore gene-expression changes: use the baseline constitutive value for both groups
        ec = bc
    ### Analyze the averaged ratio's of junction expression relative to permuted constitutive expression
    try: p_splice_val = abs(statistics.aspire_stringent(b1/bc,e1/ec,b2/bc,e2/ec)) ### This the permuted ASPIRE score
    except Exception: p_splice_val = 0
    #print p_splice_val, ref_splice_val, probeset1, probeset2, affygene; dog
    if y == 0: ###The first permutation is always the real one
        ### Grab the absolute number with small number of decimal places
        try:
            new_ref_splice_val = str(p_splice_val); new_ref_splice_val = float(new_ref_splice_val[0:8])
            ref_splice_val = str(abs(ref_splice_val)); ref_splice_val = float(ref_splice_val[0:8]); y += 1
        except ValueError:
            ###Only get this error if your ref_splice_val is a null
            print y, probeset1, probeset2; print ref_splice_val, new_ref_splice_val, p
            print b1/bc,e1/ec,b2/bc,e2/ec; print (b1/bc)/(e1/ec), (b2/bc)/(e2/ec)
            print x[7],x[8],x[9],x[10]; kill
    return p_splice_val
def permute_samples(a,p):
    """Split the per-sample values in a into two groups via the index lists in p.

    a -- sequence of per-sample expression values
    p -- pair of index lists: p[0] selects baseline samples, p[1] experimental
    Returns (baseline_values, experimental_values).
    """
    baseline = [a[sample_index] for sample_index in p[0]] ###Expression values for each permuted baseline sample
    experimental = [a[sample_index] for sample_index in p[1]]
    return baseline, experimental
def permute_dI(all_samples,p):
    """Average each permuted group of all_samples and convert the averages
    with statistics.log_fold_conversion_fraction (presumably log -> non-log
    fold space - TODO confirm against the statistics module).

    all_samples -- per-sample expression values
    p -- pair of sample-index lists (baseline, experimental)
    Returns (gb, ge): the converted group averages.
    """
    baseline, experimental = permute_samples(all_samples,p)
    #if get_non_log_avg == 'no':
    gb = statistics.avg(baseline); ge = statistics.avg(experimental) ###Group avg baseline, group avg experimental value
    gb = statistics.log_fold_conversion_fraction(gb); ge = statistics.log_fold_conversion_fraction(ge)
    #else:
    #baseline = statistics.log_fold_conversion_fraction(baseline); experimental = statistics.log_fold_conversion_fraction(experimental)
    #gb = statistics.avg(baseline); ge = statistics.avg(experimental) ###Group avg baseline, group avg experimental value
    return gb,ge
def format_exon_functional_attributes(affygene,critical_probeset_list,functional_attribute_db,up_exon_list,down_exon_list,protein_length_list):
    """Annotate the critical exons of a gene with direction-tagged functional
    attributes ('(+)', '(-)' or '(~)' prefixes).

    affygene -- gene ID
    critical_probeset_list -- probesets implicated in the splicing event
    functional_attribute_db -- (gene, probesets) -> [(attribute, call), ...]
        where call is '+', '-' or '~'
    up_exon_list / down_exon_list -- exons called up- or down-regulated
    protein_length_list -- accumulator; 'AA:' attributes append [val1, val2]

    Returns (attribute_str, attribute_tuples, seq_attribute_str,
    protein_length_list). Reads the module globals array_type,
    explicit_data_type, analysis_method, regulated_exon_junction_db, exon_db
    and exclude_protein_details.
    """
    ### Add functional attributes
    functional_attribute_list2=[]
    new_functional_attribute_str=''
    new_seq_attribute_str=''
    new_functional_attribute_list=[]
    if array_type == 'exon' or array_type == 'gene' or explicit_data_type != 'null': critical_probesets = critical_probeset_list[0]
    else: critical_probesets = tuple(critical_probeset_list)
    key = affygene,critical_probesets
    if key in functional_attribute_db:
        ###Grab exon IDs corresponding to the critical probesets
        if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
            try: critical_exons = regulated_exon_junction_db[critical_probesets].CriticalExons() ###For junction arrays
            except Exception: print key, functional_attribute_db[key];kill
        else: critical_exons = [exon_db[critical_probesets].ExonID()] ###For exon arrays
        for exon in critical_exons:
            for entry in functional_attribute_db[key]:
                x = 0
                functional_attribute = entry[0]
                call = entry[1] # +, -, or ~
                if ('AA:' in functional_attribute) or ('ref' in functional_attribute):
                    ### x == 1 marks protein-detail attributes that may be excluded below
                    x = 1
                if exon in up_exon_list:
                    ### design logic to determine whether up or down regulation promotes the functional change (e.g. NMD)
                    if 'ref' in functional_attribute:
                        new_functional_attribute = '(~)'+functional_attribute
                        data_tuple = new_functional_attribute,exon
                    elif call == '+' or call == '~':
                        new_functional_attribute = '(+)'+functional_attribute
                        data_tuple = new_functional_attribute,exon
                    elif call == '-':
                        new_functional_attribute = '(-)'+functional_attribute
                        data_tuple = new_functional_attribute,exon
                    if 'AA:' in functional_attribute and '?' not in functional_attribute:
                        ### Record the before/after protein lengths encoded as "AA:val1(...)->val2(...)"
                        functional_attribute_temp = functional_attribute[3:]
                        if call == '+' or call == '~':
                            val1,val2 = string.split(functional_attribute_temp,'->')
                        else:
                            val2,val1 = string.split(functional_attribute_temp,'->')
                        val1,null = string.split(val1,'(')
                        val2,null = string.split(val2,'(')
                        protein_length_list.append([val1,val2])
                elif exon in down_exon_list:
                    ### Mirror of the up-regulated case with the +/- tags inverted
                    if 'ref' in functional_attribute:
                        new_functional_attribute = '(~)'+functional_attribute
                        data_tuple = new_functional_attribute,exon
                    elif call == '+' or call == '~':
                        new_functional_attribute = '(-)'+functional_attribute
                        data_tuple = new_functional_attribute,exon
                    elif call == '-':
                        new_functional_attribute = '(+)'+functional_attribute
                        data_tuple = new_functional_attribute,exon
                    if 'AA:' in functional_attribute and '?' not in functional_attribute:
                        functional_attribute_temp = functional_attribute[3:]
                        if call == '+' or call == '~':
                            val2,val1 = string.split(functional_attribute_temp,'->')
                        else:
                            val1,val2 = string.split(functional_attribute_temp,'->')
                        val1,null = string.split(val1,'(')
                        val2,null = string.split(val2,'(')
                        protein_length_list.append([val1,val2])
                if x == 0 or (exclude_protein_details != 'yes'):
                    try: new_functional_attribute_list.append(new_functional_attribute)
                    except UnboundLocalError:
                        ### Diagnostic dump then deliberate abort via undefined 'kill'
                        print entry
                        print up_exon_list,down_exon_list
                        print exon, critical_exons
                        print critical_probesets, (key, affygene,critical_probesets)
                        for i in functional_attribute_db:
                            print i, functional_attribute_db[i]; kill
                ###remove protein sequence prediction_data
                if 'sequence' not in data_tuple[0]:
                    if x == 0 or exclude_protein_details == 'no':
                        functional_attribute_list2.append(data_tuple)
    ###Get rid of duplicates, but maintain non-alphabetical order
    new_functional_attribute_list2=[]
    for entry in new_functional_attribute_list:
        if entry not in new_functional_attribute_list2:
            new_functional_attribute_list2.append(entry)
    new_functional_attribute_list = new_functional_attribute_list2
    #new_functional_attribute_list = unique.unique(new_functional_attribute_list)
    #new_functional_attribute_list.sort()
    ### Split the attributes into a sequence string and a functional string
    for entry in new_functional_attribute_list:
        if 'sequence' in entry: new_seq_attribute_str = new_seq_attribute_str + entry + ','
        else: new_functional_attribute_str = new_functional_attribute_str + entry + ','
    new_seq_attribute_str = new_seq_attribute_str[0:-1]
    new_functional_attribute_str = new_functional_attribute_str[0:-1]
    return new_functional_attribute_str, functional_attribute_list2, new_seq_attribute_str,protein_length_list
def grab_summary_dataset_annotations(functional_attribute_db,comparison_db,include_truncation_results_specifically):
    """Tally functional annotations per gene, excluding genes whose comparison
    annotations flag truncation/fragment/NMD events.

    functional_attribute_db -- keys are (affygene, annotation) tuples
    comparison_db -- keys are (affygene, annotation) tuples used as a filter
    include_truncation_results_specifically -- 'yes' to count the truncation
        annotations themselves for otherwise-excluded genes

    Returns (annotation_list, annotation_list_ranked): (annotation, count)
    pairs plus the same data as (count, annotation) sorted high-to-low.
    Annotations containing 'micro' (microRNA) are omitted from both lists.
    """
    ###Index the gene IDs for efficient recall in the next segment of code
    fa_db={}; cp_db={}
    for (affygene,annotation) in functional_attribute_db:
        fa_db.setdefault(affygene,[]).append(annotation)
    for (affygene,annotation) in comparison_db:
        cp_db.setdefault(affygene,[]).append(annotation)
    ###Collect genes whose comparison annotations indicate truncation/fragment/NMD
    functional_attribute_db_exclude = {}
    for affygene in fa_db:
        for annotation2 in cp_db.get(affygene,[]):
            if ('trunc' in annotation2) or ('frag' in annotation2) or ('NMDs' in annotation2):
                functional_attribute_db_exclude.setdefault(affygene,[]).append(annotation2)
    ###Count annotations for genes that survive the filter (or their truncation annotations when requested)
    functional_annotation_db = {}
    for (affygene,annotation) in functional_attribute_db:
        if affygene not in functional_attribute_db_exclude:
            functional_annotation_db[annotation] = functional_annotation_db.get(annotation,0) + 1
        elif include_truncation_results_specifically == 'yes':
            for annotation_val in functional_attribute_db_exclude[affygene]:
                functional_annotation_db[annotation_val] = functional_annotation_db.get(annotation_val,0) + 1
    annotation_list = []
    annotation_list_ranked = []
    for annotation in functional_annotation_db:
        if 'micro' not in annotation:
            count = functional_annotation_db[annotation]
            annotation_list.append((annotation,count))
            annotation_list_ranked.append((count,annotation))
    annotation_list_ranked.sort(); annotation_list_ranked.reverse()
    return annotation_list, annotation_list_ranked
def reorganize_attribute_entries(attribute_db1,build_attribute_direction_databases):
    """Re-key (gene, attribute) -> exon data by gene, optionally also grouping
    attributes by regulation direction for over-representation analysis.

    attribute_db1 -- (affygene, exon_attribute) -> exon list; attributes carry
        a '(+)'/'(-)'/'(~)' style prefix (direction at index 1, name from 3)
    build_attribute_direction_databases -- 'yes' to also return the
        direction-specific lookup tables

    Returns attribute_db2 alone, or a 5-tuple with the inclusion/exclusion
    hit-count and gene membership dictionaries when direction databases are
    requested. Each attribute appears at most once per gene in the input.
    """
    attribute_db2 = {}
    inclusion_attributes_hit_count={}; exclusion_attributes_hit_count={}
    genes_with_inclusion_attributes={}; genes_with_exclusion_attributes={}
    for key in attribute_db1:
        ###Make gene the key; value pairs the attribute with its sorted, de-duplicated exon list
        affygene = key[0]; exon_attribute = key[1]
        exon_list = unique.unique(attribute_db1[key]); exon_list.sort()
        attribute_db2.setdefault(affygene,[]).append((exon_attribute,exon_list)) #e.g. 5'UTR, [E1,E2,E3]
        ###Separate out attribute data by direction for over-representation analysis
        if build_attribute_direction_databases == 'yes':
            direction = exon_attribute[1:2]; unique_gene_attribute = exon_attribute[3:]
            if direction == '+':
                inclusion_attributes_hit_count.setdefault(unique_gene_attribute,[]).append(affygene)
                genes_with_inclusion_attributes[affygene]=[]
            if direction == '-':
                exclusion_attributes_hit_count.setdefault(unique_gene_attribute,[]).append(affygene)
                genes_with_exclusion_attributes[affygene]=[]
    inclusion_attributes_hit_count = eliminate_redundant_dict_values(inclusion_attributes_hit_count)
    exclusion_attributes_hit_count = eliminate_redundant_dict_values(exclusion_attributes_hit_count)
    if build_attribute_direction_databases == 'yes':
        return attribute_db2,inclusion_attributes_hit_count,genes_with_inclusion_attributes,exclusion_attributes_hit_count,genes_with_exclusion_attributes
    else: return attribute_db2
########### Misc. Functions ###########
def eliminate_redundant_dict_values(database):
    """Return a new dict mapping each key of database to a sorted,
    duplicate-free copy of its value list.

    Uses the project's unique.unique helper to drop duplicates; the input
    dictionary is not modified. (Fix: the original bound the de-duplicated
    values to a local named 'list', shadowing the builtin.)
    """
    db1={}
    for key in database:
        values = unique.unique(database[key])
        values.sort()
        db1[key] = values
    return db1
def add_a_space(string):
    """Return the input unchanged, substituting a single space when empty.

    Note: the parameter name 'string' shadows the string module locally but
    is kept for interface compatibility.
    """
    if len(string) < 1:
        return ' '
    return string
def convertToLog2(data_list):
    """Return the base-2 logarithms of the values in data_list (values are
    coerced to float first, so numeric strings are accepted)."""
    return [math.log(float(value), 2) for value in data_list]
def addGlobalFudgeFactor(data_list,data_type):
    """Add the module-level global_addition_factor constant to every value.

    data_type -- 'log' converts each value out of log space (via
        statistics.log_fold_conversion_fraction), adds the factor, then
        re-logs the list with convertToLog2; any other value adds the factor
        to the raw (float-coerced) values.
    Returns the adjusted list.
    """
    if data_type == 'log':
        unlogged = [statistics.log_fold_conversion_fraction(value) for value in data_list]
        shifted = [float(value) + global_addition_factor for value in unlogged]
        return convertToLog2(shifted)
    return [float(value) + global_addition_factor for value in data_list]
def copyDirectoryPDFs(root_dir,AS='AS'):
    """Copy the bundled directory-description PDFs into the results hierarchy.

    root_dir -- output directory root the PDFs are copied into
    AS -- analysis type; AltResults descriptions are skipped unless AS == 'AS'
    Copy failures (e.g. missing documentation files) are silently ignored.
    """
    directories = ['AltResults/AlternativeOutputDirectoryDescription.pdf',
                   'AltResultsDirectoryDescription.pdf',
                   'ClusteringDirectoryDescription.pdf',
                   'ExpressionInputDirectoryDescription.pdf',
                   'ExpressionOutputDirectoryDescription.pdf',
                   'GO-Elite/GO-Elite_resultsDirectoryDescription.pdf',
                   'GO-EliteDirectoryDescription.pdf',
                   'RootDirectoryDescription.pdf']
    import shutil
    for dir in directories:
        if 'AltResult' in dir and AS != 'AS':
            continue ### AltResults descriptions only apply to splicing analyses
        file = dir.split('/')[-1]
        try: shutil.copyfile(filepath('Documentation/DirectoryDescription/'+file), filepath(root_dir+dir))
        except Exception: pass
def restrictProbesets(dataset_name):
    """Load an optional ID-filter list for this dataset.

    Scans /AltDatabaseNoVersion/filtering for a file whose name (minus its
    4-character extension) occurs in dataset_name and imports it with
    importGeneric. Returns a dict of IDs to restrict the analysis to, or an
    empty dict when no matching filter file exists. Reads the module global
    array_type (only to word the status message).
    """
    ### Take a file with probesets and only perform the splicing-analysis on these (e.g. those already identified from a previous run with a specific pattern)
    ### Allows for propper denominator when calculating z-scores for microRNA and protein-domain ORA
    probeset_list_filename = import_dir = '/AltDatabaseNoVersion/filtering'; filtered_probeset_db={}
    if array_type == 'RNASeq': id_name = 'exon/junction IDs'
    else: id_name = 'array IDs'
    try:
        dir_list = read_directory(import_dir)
        fn_dir = filepath(import_dir[1:])
    ### Filtering directory absent - no restriction applies
    except Exception: dir_list=[]; fn_dir=''
    if len(dir_list)>0:
        for file in dir_list:
            if file[:-4] in dataset_name:
                fn = fn_dir+'/'+file; fn = string.replace(fn,'AltDatabase','AltDatabaseNoVersion')
                filtered_probeset_db = importGeneric(fn)
                print len(filtered_probeset_db), id_name,"will be used to restrict analysis..."
    return filtered_probeset_db
def RunAltAnalyze():
    """Top-level driver for the alternative-exon analysis.

    Locates the AltExpression input files for the configured species/array
    type, imports the gene and probeset annotation databases, then runs
    performExpressionAnalysis + splicingAnalysisAlgorithms on every dataset
    file found. Reads and mutates many module globals (annotate_db,
    splice_event_list, dataset_name, constitutive_probeset_db, exon_db,
    filtered_probeset_db, aspire_output_list, ...). Returns
    (summary_results_db, aspire_output_gene_list, number_events_analyzed)
    after at least one successful run, otherwise None. User-facing failures
    are reported via UI.WarningWindow (falling back to print) and badExit().
    """
    #print altanalyze_files
    #print '!!!!!starting to run alt-exon analysis'
    #returnLargeGlobalVars()
    global annotate_db; annotate_db={}; global splice_event_list; splice_event_list=[]; residuals_dirlist=[]
    global dataset_name; global constitutive_probeset_db; global exon_db; dir_list2=[]; import_dir2=''
    ### Choose the platform-specific AltExpression input directory
    if array_type == 'AltMouse': import_dir = root_dir+'AltExpression/'+array_type
    elif array_type == 'exon':
        import_dir = root_dir+'AltExpression/ExonArray/'+species+'/'
    elif array_type == 'gene':
        import_dir = root_dir+'AltExpression/GeneArray/'+species+'/'
    elif array_type == 'junction':
        import_dir = root_dir+'AltExpression/JunctionArray/'+species+'/'
    else:
        import_dir = root_dir+'AltExpression/'+array_type+'/'+species+'/'
    #if analysis_method == 'ASPIRE' or analysis_method == 'linearregres' or analysis_method == 'splicing-index':
    if array_type != 'AltMouse': gene_annotation_file = "AltDatabase/ensembl/"+species+"/"+species+"_Ensembl-annotations.txt"
    else: gene_annotation_file = "AltDatabase/"+species+"/"+array_type+"/"+array_type+"_gene_annotations.txt"
    annotate_db = ExonAnalyze_module.import_annotations(gene_annotation_file,array_type)
    ###Import probe-level associations
    exon_db={}; filtered_arrayids={};filter_status='no'
    try: constitutive_probeset_db,exon_db,genes_being_analyzed = importSplicingAnnotationDatabase(probeset_annotations_file,array_type,filtered_arrayids,filter_status)
    except IOError:
        print_out = 'The annotation database: \n'+probeset_annotations_file+'\nwas not found. Ensure this file was not deleted and that the correct species has been selected.'
        try: UI.WarningWindow(print_out,'Exit'); print print_out
        except Exception: print print_out
        print traceback.format_exc()
        badExit()
    run=0
    ### Occurs when analyzing multiple conditions rather than performing a simple pair-wise comparison
    if run_from_scratch == 'Annotate External Results': import_dir = root_dir
    elif analyze_all_conditions == 'all groups':
        import_dir = string.replace(import_dir,'AltExpression','AltExpression/FullDatasets')
        if array_type == 'AltMouse':
            import_dir = string.replace(import_dir,'FullDatasets/AltMouse','FullDatasets/AltMouse/Mm')
    elif analyze_all_conditions == 'both':
        ### 'both' keeps the pairwise directory AND a FullDatasets directory (import_dir2)
        import_dir2 = string.replace(import_dir,'AltExpression','AltExpression/FullDatasets')
        if array_type == 'AltMouse':
            import_dir2 = string.replace(import_dir2,'FullDatasets/AltMouse','FullDatasets/AltMouse/Mm')
        try: dir_list2 = read_directory(import_dir2) #send a sub_directory to a function to identify all files in a directory
        except Exception:
            try:
                ### Retry with the platform sub-directory stripped from the path
                if array_type == 'exon': array_type_dir = 'ExonArray'
                elif array_type == 'gene': array_type_dir = 'GeneArray'
                elif array_type == 'junction': array_type_dir = 'GeneArray'
                else: array_type_dir = array_type
                import_dir2 = string.replace(import_dir2,'AltExpression/'+array_type_dir+'/'+species+'/','')
                import_dir2 = string.replace(import_dir2,'AltExpression/'+array_type_dir+'/','');
                dir_list2 = read_directory(import_dir2)
            except Exception:
                print_out = 'The expression files were not found. Please make\nsure you selected the correct species and array type.\n\nselected species: '+species+'\nselected array type: '+array_type+'\nselected directory:'+import_dir2
                try: UI.WarningWindow(print_out,'Exit'); print print_out
                except Exception: print print_out
                print traceback.format_exc()
                badExit()
    try: dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
    except Exception:
        try:
            ### Same stripped-path retry as above, for the primary directory
            if array_type == 'exon': array_type_dir = 'ExonArray'
            elif array_type == 'gene': array_type_dir = 'GeneArray'
            elif array_type == 'junction': array_type_dir = 'JunctionArray'
            else: array_type_dir = array_type
            import_dir = string.replace(import_dir,'AltExpression/'+array_type_dir+'/'+species+'/','')
            import_dir = string.replace(import_dir,'AltExpression/'+array_type_dir+'/','');
            try: dir_list = read_directory(import_dir)
            except Exception:
                import_dir = root_dir
                dir_list = read_directory(root_dir) ### Occurs when reading in an AltAnalyze filtered file under certain conditions
        except Exception:
            print_out = 'The expression files were not found. Please make\nsure you selected the correct species and array type.\n\nselected species: '+species+'\nselected array type: '+array_type+'\nselected directory:'+import_dir
            try: UI.WarningWindow(print_out,'Exit')
            except Exception: print print_out
            print traceback.format_exc()
            badExit()
    dir_list+=dir_list2
    ### Capture the corresponding files in the residual dir to make sure these files exist for all comparisons - won't if FIRMA was run on some files
    if analysis_method == 'FIRMA':
        try:
            residual_dir = root_dir+'AltExpression/FIRMA/residuals/'+array_type+'/'+species+'/'
            residuals_dirlist = read_directory(residual_dir)
        except Exception: null=[]
        try:
            residual_dir = root_dir+'AltExpression/FIRMA/FullDatasets/'+array_type+'/'+species+'/'
            residuals_dirlist += read_directory(residual_dir)
        except Exception: null=[]
        dir_list_verified=[]
        for file in residuals_dirlist:
            for filename in dir_list:
                if file[:-4] in filename: dir_list_verified.append(filename)
        dir_list = unique.unique(dir_list_verified)
    junction_biotype = 'no'
    if array_type == 'RNASeq':
        ### Check to see if user data includes junctions or just exons
        for probeset in exon_db:
            if '-' in probeset: junction_biotype = 'yes'; break
    if junction_biotype == 'no' and analysis_method != 'splicing-index' and array_type == 'RNASeq':
        dir_list=[] ### DON'T RUN ALTANALYZE WHEN JUST ANALYZING EXON DATA
        print 'No junction data to summarize... proceeding with exon analysis\n'
    elif len(dir_list)==0:
        print_out = 'No expression files available in the input directory:\n'+root_dir
        try: UI.WarningWindow(print_out,'Exit'); print print_out
        except Exception: print print_out
        badExit()
    dir_list = filterAltExpressionFiles(dir_list,altanalyze_files) ### Looks to see if the AltExpression files are for this run or from an older run
    for altanalyze_input in dir_list: #loop through each file in the directory to output results
        ###Import probe-level associations
        if 'cel_files' in altanalyze_input:
            print_out = 'The AltExpression directory containing the necessary import file(s) is missing. Please verify the correct parameters and input directory were selected. If this error persists, contact us.'
            try: UI.WarningWindow(print_out,'Exit'); print print_out
            except Exception: print print_out
            badExit()
        if run>0: ### Only re-set these databases after the run when batch analysing multiple files
            exon_db={}; filtered_arrayids={};filter_status='no' ###Use this as a means to save memory (import multiple times - only storing different types relevant information)
            constitutive_probeset_db,exon_db,genes_being_analyzed = importSplicingAnnotationDatabase(probeset_annotations_file,array_type,filtered_arrayids,filter_status)
        if altanalyze_input in dir_list2: dataset_dir = import_dir2 +'/'+ altanalyze_input ### Then not a pairwise comparison
        else: dataset_dir = import_dir +'/'+ altanalyze_input
        dataset_name = altanalyze_input[:-4] + '-'
        print "Beginning to process",dataset_name[0:-1]
        ### If the user want's to restrict the analysis to preselected probesets (e.g., limma or FIRMA analysis selected)
        global filtered_probeset_db; filtered_probeset_db={}
        try: filtered_probeset_db = restrictProbesets(dataset_name)
        except Exception: null=[]
        if run_from_scratch != 'Annotate External Results':
            ###Import expression data and stats and filter the expression data based on fold and p-value OR expression threshold
            try: conditions,adj_fold_dbase,nonlog_NI_db,dataset_name,gene_expression_diff_db,midas_db,ex_db,si_db = performExpressionAnalysis(dataset_dir,constitutive_probeset_db,exon_db,annotate_db,dataset_name)
            except IOError:
                #except Exception,exception:
                #print exception
                print traceback.format_exc()
                print_out = 'The AltAnalyze filtered expression file "'+dataset_name+'" is not propperly formatted. Review formatting requirements if this file was created by another application.'
                try: UI.WarningWindow(print_out,'Exit'); print print_out
                except Exception: print print_out
                badExit()
        else:
            ### Annotating pre-existing results: start from empty/default structures
            conditions = 0; adj_fold_dbase={}; nonlog_NI_db={}; gene_expression_diff_db={}; ex_db={}; si_db={}
            defineEmptyExpressionVars(exon_db); adj_fold_dbase = original_fold_dbase
        ###Run Analysis
        summary_results_db, summary_results_db2, aspire_output, aspire_output_gene, number_events_analyzed = splicingAnalysisAlgorithms(nonlog_NI_db,adj_fold_dbase,dataset_name,gene_expression_diff_db,exon_db,ex_db,si_db,dataset_dir)
        aspire_output_list.append(aspire_output); aspire_output_gene_list.append(aspire_output_gene)
        ### Best-effort memory cleanup between dataset files
        try: clearObjectsFromMemory(exon_db); clearObjectsFromMemory(constitutive_probeset_db); constitutive_probeset_db=[]
        except Exception: null=[]
        try: clearObjectsFromMemory(last_exon_region_db);last_exon_region_db=[]
        except Exception: null=[]
        try: clearObjectsFromMemory(adj_fold_dbase);adj_fold_dbase=[]; clearObjectsFromMemory(nonlog_NI_db);nonlog_NI_db=[]
        except Exception: null=[]
        try: clearObjectsFromMemory(gene_expression_diff_db);gene_expression_diff_db=[]; clearObjectsFromMemory(midas_db);midas_db=[]
        except Exception: null=[]
        try: clearObjectsFromMemory(ex_db);ex_db=[]; clearObjectsFromMemory(si_db);si_db=[]
        except Exception: null=[]
        try: run+=1
        except Exception: run = 1
    if run>0: ###run = 0 if no filtered expression data present
        try: return summary_results_db, aspire_output_gene_list, number_events_analyzed
        except Exception:
            ### NOTE(review): the comma-joined assignment below builds a tuple,
            ### not a string - UI.WarningWindow receives a tuple here; confirm intent
            print_out = 'AltAnalyze was unable to find an expression dataset to analyze in:\n',import_dir,'\nor\n',import_dir2,'\nPlease re-run and select a valid input directory.'
            try: UI.WarningWindow(print_out,'Exit'); print print_out
            except Exception: print print_out
            badExit()
    else:
        try: clearObjectsFromMemory(exon_db); clearObjectsFromMemory(constitutive_probeset_db); constitutive_probeset_db=[]
        except Exception: null=[]
        try: clearObjectsFromMemory(last_exon_region_db);last_exon_region_db=[]
        except Exception: null=[]
        return None
def filterAltExpressionFiles(dir_list,current_files):
    """Return the members of dir_list that are also in current_files.

    dir_list -- filenames found in the AltExpression directory
    current_files -- filenames belonging to the current run; when empty the
        full dir_list is returned unchanged
    Any unexpected error (e.g. current_files is None) leaves dir_list as-is,
    preserving the original best-effort behavior.
    """
    try:
        if len(current_files) == 0: current_files = dir_list ###if no filenames input
        dir_list = [altanalyze_input for altanalyze_input in dir_list if altanalyze_input in current_files]
    except Exception: pass ### keep the unfiltered list on any failure
    return dir_list
def defineEmptyExpressionVars(exon_db):
    """Initialize the module-level expression databases to empty defaults.

    Sets fold_dbase (one ('','') entry per probeset in exon_db),
    original_fold_dbase (aliased to fold_dbase), critical_exon_db, midas_db,
    max_replicates and equal_replicates. Returns nothing.
    """
    global fold_dbase; global original_fold_dbase; global critical_exon_db
    global midas_db; global max_replicates; global equal_replicates
    critical_exon_db = {}
    midas_db = {}
    max_replicates = 0
    equal_replicates = 0
    fold_dbase = {}
    for probeset in exon_db:
        fold_dbase[probeset] = ('','')
    original_fold_dbase = fold_dbase
def universalPrintFunction(print_items):
log_report = open(log_file,'a')
for item in print_items:
if commandLineMode == 'no': ### Command-line has it's own log file write method (Logger)
log_report.write(item+'\n')
else: print item
log_report.close()
class StatusWindow:
    """Tkinter window that redirects stdout into a scrolling status display
    while AltAnalyzeMain runs."""
    def __init__(self,root,expr_var,alt_var,goelite_var,additional_var,exp_file_location_db):
        ### Build the scrolled status window, hook stdout, then launch the analysis
        root.title('AltAnalyze version 2.1.0')
        statusVar = StringVar() ### Class method for Tkinter. Description: "Value holder for strings variables."
        self.root = root
        height = 450; width = 500
        ### Larger default window on non-Windows platforms
        if os.name != 'nt': height = 500; width = 600
        self.sf = PmwFreeze.ScrolledFrame(root,
                labelpos = 'n', label_text = 'Results Status Window',
                usehullsize = 1, hull_width = width, hull_height = height)
        self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
        self.frame = self.sf.interior()
        group = PmwFreeze.Group(self.sf.interior(),tag_text = 'Output')
        group.pack(fill = 'both', expand = 1, padx = 10, pady = 0)
        Label(group.interior(),width=190,height=552,justify=LEFT, bg='black', fg = 'white',anchor=NW,padx = 5,pady = 5, textvariable=statusVar).pack(fill=X,expand=Y)
        status = StringVarFile(statusVar,root) ### Likely captures the stdout
        sys.stdout = status
        ### Make each dataset's file-locations object aware of the redirected stdout
        for dataset in exp_file_location_db:
            fl = exp_file_location_db[dataset]; fl.setSTDOUT(sys.stdout)
        root.after(100, AltAnalyzeMain(expr_var, alt_var, goelite_var, additional_var, exp_file_location_db, root))
        try:
            root.protocol("WM_DELETE_WINDOW", self.deleteWindow)
            root.mainloop()
        except Exception: pass
    def deleteWindow(self):
        ### Window-close handler: tear down the root window, ignoring errors
        try: self.root.destroy()
        except Exception: pass
    def quit(self):
        ### Stop the Tk event loop, destroy the window and exit the process
        try:
            self.root.quit()
            self.root.destroy()
        except Exception: pass
        sys.exit()
def exportComparisonSummary(dataset_name,summary_data_dbase,return_type):
    """Format a comparison's key statistics as a list of display lines.

    dataset_name -- comparison name (trailing character stripped for display)
    summary_data_dbase -- stats keyed by 'gene_assayed', 'alt_events', etc.;
        every non-'QC' value is coerced to a string in place
    return_type -- 'log' also appends the lines to the global log_file;
        anything else (e.g. 'print') just returns them

    Returns the list of formatted summary strings, with 'probeset' wording
    adjusted for exon-only or RNASeq analyses. Reads the module globals
    explicit_data_type, array_type and log_file. (Fix: the log file was
    previously opened unconditionally and the handle leaked whenever
    return_type was not 'log'.)
    """
    result_list=[]
    for key in summary_data_dbase:
        if key != 'QC': ### The value is a list of strings
            summary_data_dbase[key] = str(summary_data_dbase[key])
    d = 'Dataset name: '+ dataset_name[:-1]; result_list.append(d+'\n')
    d = summary_data_dbase['gene_assayed']+':\tAll genes examined'; result_list.append(d)
    d = summary_data_dbase['denominator_exp_genes']+':\tExpressed genes examined for AS'; result_list.append(d)
    ### Event wording depends on the platform / analysis mode
    if explicit_data_type == 'exon-only':
        d = summary_data_dbase['alt_events']+':\tAlternatively regulated probesets'; result_list.append(d)
        d = summary_data_dbase['denominator_exp_events']+':\tExpressed probesets examined'; result_list.append(d)
    elif (array_type == 'AltMouse' or array_type == 'junction' or array_type == 'RNASeq') and (explicit_data_type == 'null' or return_type == 'print'):
        d = summary_data_dbase['alt_events']+':\tAlternatively regulated junction-pairs'; result_list.append(d)
        d = summary_data_dbase['denominator_exp_events']+':\tExpressed junction-pairs examined'; result_list.append(d)
    else:
        d = summary_data_dbase['alt_events']+':\tAlternatively regulated probesets'; result_list.append(d)
        d = summary_data_dbase['denominator_exp_events']+':\tExpressed probesets examined'; result_list.append(d)
    d = summary_data_dbase['alt_genes']+':\tAlternatively regulated genes (ARGs)'; result_list.append(d)
    d = summary_data_dbase['direct_domain_genes']+':\tARGs - overlaping with domain/motifs'; result_list.append(d)
    d = summary_data_dbase['miRNA_gene_hits']+':\tARGs - overlaping with microRNA binding sites'; result_list.append(d)
    ### Substitute platform-appropriate terminology in every line
    result_list2=[]
    for d in result_list:
        if explicit_data_type == 'exon-only': d = string.replace(d,'probeset','exon')
        elif array_type == 'RNASeq': d = string.replace(d,'probeset','junction')
        result_list2.append(d)
    result_list = result_list2
    if return_type == 'log':
        ### Only open the log when actually writing, and always close it
        log_report = open(log_file,'a')
        try:
            for d in result_list: log_report.write(d+'\n')
            log_report.write('\n')
        finally:
            log_report.close()
    return result_list
class SummaryResultsWindow:
    def __init__(self,tl,analysis_type,output_dir,dataset_name,output_type,summary_data_dbase):
        """Build the result-summary window: icon, scrolled text with
        clickable links (QC plots, result folders, online help), and the
        action buttons, then enter the Tk main loop.

        tl -- Toplevel window to populate
        analysis_type -- 'AS' enables the splicing-specific sections/buttons
        output_dir -- root results directory (used for links and buttons)
        dataset_name -- comparison name passed to exportComparisonSummary
        output_type -- 'parent' keeps self.output_dir at output_dir
        summary_data_dbase -- summary stats; optional 'QC' entry holds
            (name, file_path) graphic links
        """
        def showLink(event):
            ### Click handler for 'link' tagged text: the second tag is the
            ### index into self.LINKS assigned when the text was inserted
            try:
                idx = int(event.widget.tag_names(CURRENT)[1]) ### This is just the index provided below (e.g., str(0))
                #print [self.LINKS[idx]]
                if 'http://' in self.LINKS[idx]:
                    webbrowser.open(self.LINKS[idx])
                elif self.LINKS[idx][-1] == '/':
                    self.openSuppliedDirectory(self.LINKS[idx])
                else:
                    ### Instead of using this option to open a hyperlink (which is what it should do), we can open another Tk window
                    try: self.viewPNGFile(self.LINKS[idx]) ### ImageTK PNG viewer
                    except Exception:
                        try: self.ShowImageMPL(self.LINKS[idx]) ### MatPlotLib based dispaly
                        except Exception:
                            self.openPNGImage(self.LINKS[idx]) ### Native OS PNG viewer
                            #self.DisplayPlots(self.LINKS[idx]) ### GIF based dispaly
            except Exception:
                null=[] ### anomalous error
        self.emergency_exit = False
        self.LINKS = []
        self.tl = tl
        self.tl.title('AltAnalyze version 2.1.0')
        self.analysis_type = analysis_type
        filename = 'Config/icon.gif'
        fn=filepath(filename); img = PhotoImage(file=fn)
        can = Canvas(tl); can.pack(side='top'); can.config(width=img.width(), height=img.height())
        can.create_image(2, 2, image=img, anchor=NW)
        use_scroll = 'yes'
        ### run_GOElite may not be defined for all run modes
        try: runGOElite = run_GOElite
        except Exception: runGOElite='decide_later'
        if 'QC' in summary_data_dbase:
            graphic_links = summary_data_dbase['QC'] ### contains hyperlinks to QC and Clustering plots
            if len(graphic_links)==0: del summary_data_dbase['QC'] ### This can be added if an analysis fails
        else:
            graphic_links = []
        label_text_str = 'AltAnalyze Result Summary'; height = 150; width = 500
        if analysis_type == 'AS' or 'QC' in summary_data_dbase: height = 330
        if analysis_type == 'AS' and 'QC' in summary_data_dbase: height = 330
        self.sf = PmwFreeze.ScrolledFrame(tl,
            labelpos = 'n', label_text = label_text_str,
            usehullsize = 1, hull_width = width, hull_height = height)
        self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
        self.frame = self.sf.interior()
        txt=Text(self.frame,bg='gray',width=150, height=80)
        txt.pack(expand=True, fill="both")
        #txt.insert(END, 'Primary Analysis Finished....\n')
        txt.insert(END, 'Results saved to:\n'+output_dir+'\n')
        f = Font(family="System", size=12, weight="bold")
        txt.tag_config("font", font=f)
        i=0
        copyDirectoryPDFs(output_dir,AS=analysis_type)
        if analysis_type == 'AS':
            ### Splicing runs get the full comparison summary in the window
            txt.insert(END, '\n')
            result_list = exportComparisonSummary(dataset_name,summary_data_dbase,'print')
            for d in result_list: txt.insert(END, d+'\n')
        if 'QC' in summary_data_dbase and len(graphic_links)>0:
            ### Insert one numbered, clickable entry per QC/clustering plot
            txt.insert(END, '\nQC and Expression Clustering Plots',"font")
            txt.insert(END, '\n\n 1) ')
            for (name,file_dir) in graphic_links:
                txt.insert(END, name, ('link', str(i)))
                if len(graphic_links) > (i+1):
                    txt.insert(END, '\n %s) ' % str(i+2))
                self.LINKS.append(file_dir)
                i+=1
            txt.insert(END, '\n\nView all primary plots in the folder ')
            txt.insert(END, 'DataPlots',('link', str(i))); i+=1
            self.LINKS.append(output_dir+'DataPlots/')
        else:
            ### NOTE(review): self.LINKS becomes a tuple here; the .append
            ### calls below would fail if reached in this branch - confirm
            url = 'http://code.google.com/p/altanalyze/'
            self.LINKS=(url,'')
            txt.insert(END, '\nFor more information see the ')
            txt.insert(END, "AltAnalyze Online Help", ('link', str(0)))
            txt.insert(END, '\n\n')
        if runGOElite == 'run-immediately':
            txt.insert(END, '\n\nView all pathway enrichment results in the folder ')
            txt.insert(END, 'GO-Elite',('link', str(i))); i+=1
            self.LINKS.append(output_dir+'GO-Elite/')
        if analysis_type == 'AS':
            txt.insert(END, '\n\nView all splicing plots in the folder ')
            txt.insert(END, 'ExonPlots',('link', str(i))); i+=1
            try: self.LINKS.append(output_dir+'ExonPlots/')
            except Exception: pass
        txt.tag_config('link', foreground="blue", underline = 1)
        txt.tag_bind('link', '<Button-1>', showLink)
        txt.insert(END, '\n\n')
        open_results_folder = Button(tl, text = 'Results Folder', command = self.openDirectory)
        open_results_folder.pack(side = 'left', padx = 5, pady = 5);
        if analysis_type == 'AS':
            ### Splicing-specific extras: DomainGraph launcher and AS help links
            #self.dg_url = 'http://www.altanalyze.org/domaingraph.htm'
            self.dg_url = 'http://www.altanalyze.org/domaingraph.htm'
            dg_pdf_file = 'Documentation/domain_graph.pdf'; dg_pdf_file = filepath(dg_pdf_file); self.dg_pdf_file = dg_pdf_file
            text_button = Button(tl, text='Start DomainGraph in Cytoscape', command=self.SelectCytoscapeTopLevel)
            text_button.pack(side = 'right', padx = 5, pady = 5)
            self.output_dir = output_dir + "AltResults"
            self.whatNext_url = 'http://code.google.com/p/altanalyze/wiki/AnalyzingASResults' #http://www.altanalyze.org/what_next_altexon.htm'
            whatNext_pdf = 'Documentation/what_next_alt_exon.pdf'; whatNext_pdf = filepath(whatNext_pdf); self.whatNext_pdf = whatNext_pdf
            if output_type == 'parent': self.output_dir = output_dir ###Used for fake datasets
        else:
            if pathway_permutations == 'NA':
                self.output_dir = output_dir + "ExpressionOutput"
            else: self.output_dir = output_dir
            self.whatNext_url = 'http://code.google.com/p/altanalyze/wiki/AnalyzingGEResults' #'http://www.altanalyze.org/what_next_expression.htm'
            whatNext_pdf = 'Documentation/what_next_GE.pdf'; whatNext_pdf = filepath(whatNext_pdf); self.whatNext_pdf = whatNext_pdf
        what_next = Button(tl, text='What Next?', command=self.whatNextlinkout)
        what_next.pack(side = 'right', padx = 5, pady = 5)
        quit_buttonTL = Button(tl,text='Close View', command=self.close)
        quit_buttonTL.pack(side = 'right', padx = 5, pady = 5)
        continue_to_next_win = Button(text = 'Continue', command = self.continue_win)
        continue_to_next_win.pack(side = 'right', padx = 10, pady = 10)
        quit_button = Button(root,text='Quit', command=self.quit)
        quit_button.pack(side = 'right', padx = 5, pady = 5)
        button_text = 'Help'; help_url = 'http://www.altanalyze.org/help_main.htm'; self.help_url = filepath(help_url)
        pdf_help_file = 'Documentation/AltAnalyze-Manual.pdf'; pdf_help_file = filepath(pdf_help_file); self.pdf_help_file = pdf_help_file
        help_button = Button(root, text=button_text, command=self.Helplinkout)
        help_button.pack(side = 'left', padx = 5, pady = 5)
        if self.emergency_exit == False:
            self.tl.protocol("WM_DELETE_WINDOW", self.tldeleteWindow)
            self.tl.mainloop() ###Needed to show graphic
        else:
            """ This shouldn't have to be called, but is when the topLevel window isn't closed first
            specifically if a PNG file is opened. the sys.exitfunc() should work but doesn't.
            work on this more later """
            #AltAnalyzeSetup('no')
            try: self._tls.quit(); self._tls.destroy()
            except Exception: None
            try: self._tlx.quit(); self._tlx.destroy()
            except Exception: None
            try: self._tlx.quit(); self._tlx.destroy()
            except Exception: None
            try: self.tl.quit(); self.tl.destroy()
            except Exception: None
            try: root.quit(); root.destroy()
            except Exception: None
            UI.getUpdatedParameters(array_type,species,'Process Expression file',output_dir)
            sys.exit() ### required when opening PNG files on Windows to continue (not sure why)
            #sys.exitfunc()
def tldeleteWindow(self):
try: self.tl.quit(); self.tl.destroy()
except Exception: self.tl.destroy()
def deleteTLWindow(self):
self.emergency_exit = True
try: self._tls.quit(); self._tls.destroy()
except Exception: None
try: self._tlx.quit(); self._tlx.destroy()
except Exception: None
self.tl.quit()
self.tl.destroy()
sys.exitfunc()
def deleteWindow(self):
self.emergency_exit = True
try: self._tls.quit(); self._tls.destroy()
except Exception: None
try: self._tlx.quit(); self._tlx.destroy()
except Exception: None
try:
self.tl.quit()
self.tl.destroy()
except Exception: None
sys.exitfunc()
    def continue_win(self):
        """'Continue' button: flag an emergency exit, close every open Tk
        window (child toplevels, results toplevel, then root) and fire the
        Python 2 exit hooks."""
        self.emergency_exit = True
        try: self._tls.quit(); self._tls.destroy()
        except Exception: None
        try: self._tlx.quit(); self._tlx.destroy()
        except Exception: None
        try: self.tl.quit(); self.tl.destroy()
        except Exception: pass
        root.quit()
        root.destroy()
        # NOTE(review): these run after destroy(), so they presumably raise
        # and no-op via the excepts — confirm before relying on them.
        try: self.tl.grid_forget()
        except Exception: None
        try: root.grid_forget()
        except Exception: None
        sys.exitfunc()
def openDirectory(self):
if os.name == 'nt':
try: os.startfile('"'+self.output_dir+'"')
except Exception: os.system('open "'+self.output_dir+'"')
elif 'darwin' in sys.platform: os.system('open "'+self.output_dir+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+self.output_dir+'/"')
def openSuppliedDirectory(self,dir):
if os.name == 'nt':
try: os.startfile('"'+self.output_dir+'"')
except Exception: os.system('open "'+dir+'"')
elif 'darwin' in sys.platform: os.system('open "'+dir+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+dir+'/"')
    def DGlinkout(self):
        """Launch the bundled Cytoscape (for DomainGraph), clear any saved
        Cytoscape location so the bundled copy stays the default, then show
        the DomainGraph help window."""
        try:
            altanalyze_path = filepath('') ### Find AltAnalye's path
            altanalyze_path = altanalyze_path[:-1]
        except Exception: null=[]
        # Platform-specific launch names; parent_dir is set but unused below.
        if os.name == 'nt':
            parent_dir = 'C:/Program Files'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape.exe'
        elif 'darwin' in sys.platform:
            parent_dir = '/Applications'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape.app'
        elif 'linux' in sys.platform:
            parent_dir = '/opt'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape'
        try: openCytoscape(altanalyze_path,application_dir,application_name)
        except Exception: null=[]
        try: self._tls.destroy()
        except Exception: None
        try: ###Remove this cytoscape as the default
            file_location_defaults = UI.importDefaultFileLocations()
            del file_location_defaults['CytoscapeDir']
            UI.exportDefaultFileLocations(file_location_defaults)
        except Exception: null=[]
        self.GetHelpTopLevel(self.dg_url,self.dg_pdf_file)
def Helplinkout(self): self.GetHelpTopLevel(self.help_url,self.pdf_help_file)
def whatNextlinkout(self): self.GetHelpTopLevel(self.whatNext_url,self.whatNext_pdf)
def ShowImageMPL(self,file_location):
""" Visualization method using MatPlotLib """
try:
import matplotlib
import matplotlib.pyplot as pylab
except Exception:
#print 'Graphical output mode disabled (requires matplotlib, numpy and scipy)'
None
fig = pylab.figure()
pylab.subplots_adjust(left=0.0, right=1.0, top=1.0, bottom=0.00) ### Fill the plot area left to right
ax = fig.add_subplot(111)
ax.set_xticks([]) ### Hides ticks
ax.set_yticks([])
img= pylab.imread(file_location)
imgplot = pylab.imshow(img)
pylab.show()
def viewPNGFile(self,png_file_dir):
""" View PNG file within a PMW Tkinter frame """
import ImageTk
tlx = Toplevel(); self._tlx = tlx
sf = PmwFreeze.ScrolledFrame(tlx, labelpos = 'n', label_text = '',
usehullsize = 1, hull_width = 800, hull_height = 550)
sf.pack(padx = 0, pady = 0, fill = 'both', expand = 1)
frame = sf.interior()
tlx.title(png_file_dir)
img = ImageTk.PhotoImage(file=png_file_dir)
can = Canvas(frame)
can.pack(fill=BOTH, padx = 0, pady = 0)
w = img.width()
h = height=img.height()
can.config(width=w, height=h)
can.create_image(2, 2, image=img, anchor=NW)
tlx.mainloop()
def openPNGImage(self,png_file_dir):
if os.name == 'nt':
try: os.startfile('"'+png_file_dir+'"')
except Exception: os.system('open "'+png_file_dir+'"')
elif 'darwin' in sys.platform: os.system('open "'+png_file_dir+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+png_file_dir+'"')
    def DisplayPlots(self,file_location):
        """ Native Tkinter method - Displays a gif file in a standard TopLevel window (nothing fancy) """
        # file_location: path to a GIF image (Tk's PhotoImage reads GIF natively)
        tls = Toplevel(); self._tls = tls; nulls = '\t\t\t\t'; tls.title('AltAnalyze Plot Visualization')
        self.sf = PmwFreeze.ScrolledFrame(self._tls,
                labelpos = 'n', label_text = '', usehullsize = 1, hull_width = 520, hull_height = 500)
        self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
        self.frame = self.sf.interior()
        group = PmwFreeze.Group(self.sf.interior(),tag_text = file_location)
        group.pack(fill = 'both', expand = 1, padx = 10, pady = 0)
        img = PhotoImage(file=filepath(file_location))
        can = Canvas(group.interior()); can.pack(side='left',padx = 10, pady = 20); can.config(width=img.width(), height=img.height())
        can.create_image(2, 2, image=img, anchor=NW)
        tls.mainloop() ### Blocks until the plot window's event loop exits
def GetHelpTopLevel(self,url,pdf_file):
try:
config_db = UI.importConfigFile()
ask_for_help = config_db['help'] ### hide_selection_option
except Exception: ask_for_help = 'null'; config_db={}
self.pdf_file = pdf_file; self.url = url
if ask_for_help == 'null':
message = ''; self.message = message; self.online_help = 'Online Documentation'; self.pdf_help = 'Local PDF File'
tls = Toplevel(); self._tls = tls; nulls = '\t\t\t\t'; tls.title('Please select one of the options')
self.sf = PmwFreeze.ScrolledFrame(self._tls,
labelpos = 'n', label_text = '', usehullsize = 1, hull_width = 320, hull_height = 200)
self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
self.frame = self.sf.interior()
group = PmwFreeze.Group(self.sf.interior(),tag_text = 'Options')
group.pack(fill = 'both', expand = 1, padx = 10, pady = 0)
filename = 'Config/icon.gif'; fn=filepath(filename); img = PhotoImage(file=fn)
can = Canvas(group.interior()); can.pack(side='left',padx = 10, pady = 20); can.config(width=img.width(), height=img.height())
can.create_image(2, 2, image=img, anchor=NW)
l1 = Label(group.interior(), text=nulls); l1.pack(side = 'bottom')
text_button2 = Button(group.interior(), text=self.online_help, command=self.openOnlineHelp); text_button2.pack(side = 'top', padx = 5, pady = 5)
try: text_button = Button(group.interior(), text=self.pdf_help, command=self.openPDFHelp); text_button.pack(side = 'top', padx = 5, pady = 5)
except Exception: text_button = Button(group.interior(), text=self.pdf_help, command=self.openPDFHelp); text_button.pack(side = 'top', padx = 5, pady = 5)
text_button3 = Button(group.interior(), text='No Thanks', command=self.skipHelp); text_button3.pack(side = 'top', padx = 5, pady = 5)
c = Checkbutton(group.interior(), text = "Apply these settings each time", command=self.setHelpConfig); c.pack(side = 'bottom', padx = 5, pady = 0)
tls.mainloop()
try: tls.destroy()
except Exception: None
else:
file_location_defaults = UI.importDefaultFileLocations()
try:
help_choice = file_location_defaults['HelpChoice'].Location()
if help_choice == 'PDF': self.openPDFHelp()
elif help_choice == 'http': self.openOnlineHelp()
else: self.skip()
except Exception: self.openPDFHelp() ### Open PDF if there's a problem
    def SelectCytoscapeTopLevel(self):
        """Prompt for which Cytoscape to launch (bundled vs. previously
        installed), or honor a stored preference and open it directly."""
        try:
            config_db = UI.importConfigFile()
            cytoscape_type = config_db['cytoscape'] ### hide_selection_option
        except Exception: cytoscape_type = 'null'; config_db={}
        if cytoscape_type == 'null':
            ### No stored preference: build the chooser dialog
            message = ''; self.message = message
            tls = Toplevel(); self._tls = tls; nulls = '\t\t\t\t'; tls.title('Cytoscape Automatic Start Options')
            self.sf = PmwFreeze.ScrolledFrame(self._tls,
                    labelpos = 'n', label_text = '', usehullsize = 1, hull_width = 420, hull_height = 200)
            self.sf.pack(padx = 5, pady = 1, fill = 'both', expand = 1)
            self.frame = self.sf.interior()
            group = PmwFreeze.Group(self.sf.interior(),tag_text = 'Options')
            group.pack(fill = 'both', expand = 1, padx = 10, pady = 0)
            filename = 'Config/cyto-logo-smaller.gif'; fn=filepath(filename); img = PhotoImage(file=fn)
            can = Canvas(group.interior()); can.pack(side='left',padx = 10, pady = 5); can.config(width=img.width(), height=img.height())
            can.create_image(2, 2, image=img, anchor=NW)
            #"""
            self.local_cytoscape = 'AltAnalyze Bundled Version'; self.custom_cytoscape = 'Previously Installed Version'
            l1 = Label(group.interior(), text=nulls); l1.pack(side = 'bottom')
            l3 = Label(group.interior(), text='Select version of Cytoscape to open:'); l3.pack(side = 'top', pady = 5)
            """
            self.local_cytoscape = ' No '; self.custom_cytoscape = ' Yes '
            l1 = Label(group.interior(), text=nulls); l1.pack(side = 'bottom')
            l2 = Label(group.interior(), text='Note: Cytoscape can take up-to a minute to initalize', fg="red"); l2.pack(side = 'top', padx = 5, pady = 0)
            """
            text_button2 = Button(group.interior(), text=self.local_cytoscape, command=self.DGlinkout); text_button2.pack(padx = 5, pady = 5)
            try: text_button = Button(group.interior(), text=self.custom_cytoscape, command=self.getPath); text_button.pack(padx = 5, pady = 5)
            except Exception: text_button = Button(group.interior(), text=self.custom_cytoscape, command=self.getPath); text_button.pack(padx = 5, pady = 5)
            l2 = Label(group.interior(), text='Note: Cytoscape can take up-to a minute to initalize', fg="blue"); l2.pack(side = 'bottom', padx = 5, pady = 0)
            c = Checkbutton(group.interior(), text = "Apply these settings each time and don't show again", command=self.setCytoscapeConfig); c.pack(side = 'bottom', padx = 5, pady = 0)
            #c2 = Checkbutton(group.interior(), text = "Open PDF of DomainGraph help rather than online help", command=self.setCytoscapeConfig); c2.pack(side = 'bottom', padx = 5, pady = 0)
            tls.mainloop()
            try: tls.destroy()
            except Exception: None
        else:
            ### Stored preference: open the saved Cytoscape directly, falling
            ### back to the bundled copy if the saved location fails
            file_location_defaults = UI.importDefaultFileLocations()
            try: cytoscape_app_dir = file_location_defaults['CytoscapeDir'].Location(); openFile(cytoscape_app_dir)
            except Exception:
                try: altanalyze_path = filepath(''); altanalyze_path = altanalyze_path[:-1]
                except Exception: altanalyze_path=''
                application_dir = 'Cytoscape_v'
                if os.name == 'nt': application_name = 'Cytoscape.exe'
                elif 'darwin' in sys.platform: application_name = 'Cytoscape.app'
                elif 'linux' in sys.platform: application_name = 'Cytoscape'
                try: openCytoscape(altanalyze_path,application_dir,application_name)
                except Exception: null=[]
def setCytoscapeConfig(self):
config_db={}; config_db['cytoscape'] = 'hide_selection_option'
UI.exportConfigFile(config_db)
def setHelpConfig(self):
config_db={}; config_db['help'] = 'hide_selection_option'
UI.exportConfigFile(config_db)
    def getPath(self):
        """Ask the user for a Cytoscape installation directory, launch it,
        and remember the choice in the file-location defaults."""
        file_location_defaults = UI.importDefaultFileLocations()
        # Platform-specific starting points for the directory browser
        if os.name == 'nt': parent_dir = 'C:/Program Files'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape.exe'
        elif 'darwin' in sys.platform: parent_dir = '/Applications'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape.app'
        elif 'linux' in sys.platform: parent_dir = '/opt'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape'
        try:
            ### Start browsing from the previously saved Cytoscape directory
            self.default_dir = file_location_defaults['CytoscapeDir'].Location()
            self.default_dir = string.replace(self.default_dir,'//','/')
            self.default_dir = string.replace(self.default_dir,'\\','/')
            self.default_dir = string.join(string.split(self.default_dir,'/')[:-1],'/')
        except Exception:
            dir = FindDir(parent_dir,application_dir); dir = filepath(parent_dir+'/'+dir)
            self.default_dir = filepath(parent_dir)
        try: dirPath = tkFileDialog.askdirectory(parent=self._tls,initialdir=self.default_dir)
        except Exception:
            ### Retry with progressively less specific arguments
            self.default_dir = ''
            try: dirPath = tkFileDialog.askdirectory(parent=self._tls,initialdir=self.default_dir)
            except Exception:
                try: dirPath = tkFileDialog.askdirectory(parent=self._tls)
                except Exception: dirPath=''
        try:
            #print [dirPath],application_name
            app_dir = dirPath+'/'+application_name
            if 'linux' in sys.platform:
                ### NOTE(review): 'cytoscape_dir' is undefined here; the except masks the NameError — confirm intended argument
                try: createCytoscapeDesktop(cytoscape_dir)
                except Exception: null=[]
                dir_list = unique.read_directory('/usr/bin/') ### Check to see that JAVA is installed
                if 'java' not in dir_list: print 'Java not referenced in "usr/bin/. If not installed,\nplease install and re-try opening Cytoscape'
                try:
                    ### Launch Cytoscape directly through its jar
                    jar_path = dirPath+'/cytoscape.jar'
                    main_path = dirPath+'/cytoscape.CyMain'
                    plugins_path = dirPath+'/plugins'
                    os.system('java -Dswing.aatext=true -Xss5M -Xmx512M -jar '+jar_path+' '+main_path+' -p '+plugins_path+' &')
                    print 'Cytoscape jar opened:',jar_path
                except Exception:
                    print 'OS command to open Java failed.'
                    ### NOTE(review): 'app_dir2' is undefined; this branch always falls through to openFile(app_dir)
                    try: openFile(app_dir2); print 'Cytoscape opened:',app_dir2
                    except Exception: openFile(app_dir)
            else: openFile(app_dir)
            ### Save the selected location as the new default
            try: file_location_defaults['CytoscapeDir'].SetLocation(app_dir)
            except Exception:
                fl = UI.FileLocationData('', app_dir, 'all')
                file_location_defaults['CytoscapeDir'] = fl
            UI.exportDefaultFileLocations(file_location_defaults)
        except Exception: null=[]
        try: self._tls.destroy()
        except Exception: None
        self.GetHelpTopLevel(self.dg_url,self.dg_pdf_file)
def openOnlineHelp(self):
file_location_defaults = UI.importDefaultFileLocations()
try:file_location_defaults['HelpChoice'].SetLocation('http')
except Exception:
fl = UI.FileLocationData('', 'http', 'all')
file_location_defaults['HelpChoice'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
webbrowser.open(self.url)
#except Exception: null=[]
try: self._tls.destroy()
except Exception: None
def skipHelp(self):
file_location_defaults = UI.importDefaultFileLocations()
try: file_location_defaults['HelpChoice'].SetLocation('skip')
except Exception:
fl = UI.FileLocationData('', 'skip', 'all')
file_location_defaults['HelpChoice'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
try: self._tls.destroy()
except Exception: None
def openPDFHelp(self):
file_location_defaults = UI.importDefaultFileLocations()
try:file_location_defaults['HelpChoice'].SetLocation('PDF')
except Exception:
fl = UI.FileLocationData('', 'PDF', 'all')
file_location_defaults['HelpChoice'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
if os.name == 'nt':
try: os.startfile('"'+self.pdf_file+'"')
except Exception: os.system('open "'+self.pdf_file+'"')
elif 'darwin' in sys.platform: os.system('open "'+self.pdf_file+'"')
elif 'linux' in sys.platform: os.system('xdg-open "'+self.pdf_file+'"')
try: self._tls.destroy()
except Exception: None
    def quit(self):
        """Quit button: stop the Tk event loop, destroy the root window and
        terminate the process."""
        root.quit()
        root.destroy()
        sys.exit()
    def close(self):
        """Close the results toplevel without touching the root event loop."""
        #self.tl.quit() #### This was causing multiple errors in 2.0.7 - evaluate more!
        self.tl.destroy()
class StringVarFile:
    """File-like adapter that mirrors written text into a Tk StringVar.

    Instances are used to redirect stdout so status messages appear in the
    GUI status window while also being appended to the session log file.
    """
    def __init__(self, stringVar, window):
        self._newline = 0          # pending "start a new line" flag
        self._stringvar = stringVar
        self._window = window
    def write(self, s):
        """Append *s* to the log file and to the StringVar buffer."""
        try:
            # Mirror every message into the session log file.
            log_report = open(log_file, 'a')
            log_report.write(s)
            log_report.close()
            buffered = self._stringvar.get()
            for ch in s:
                #if ch == '\n': self._newline = 1
                # '\k' never occurs in real text, so the buffer grows as a
                # continuous feed instead of replacing a single line (the
                # original '\n' trigger is kept above, disabled).
                if ch == '\k':
                    self._newline = 1
                else:
                    if self._newline:
                        buffered = ""
                        self._newline = 0
                    buffered = buffered + ch
            self.set(buffered)
        except Exception:
            pass  # never let GUI logging break the analysis
    def set(self, s):
        """Push *s* into the StringVar and refresh the window."""
        try:
            self._stringvar.set(s)
            self._window.update()
        except Exception:
            pass
    def get(self):
        """Return the current StringVar contents (None on failure)."""
        try:
            return self._stringvar.get()
        except Exception:
            pass
    def flush(self):
        """No-op; present so the object satisfies the file protocol."""
        pass
def timestamp():
    """Return a 'YYYYMMDD-HHMMSS' stamp used to name log files/output dirs.

    Replaces the deprecated Python 2 string-module calls with str methods.
    time.ctime() pads single-digit days with an extra space; split() without
    arguments collapses any run of whitespace, so the time-of-day field is
    always at index 3 (the original relied on a fragile double-space replace).
    """
    import datetime
    today = str(datetime.date.today()).replace('-', '')
    fields = time.ctime().replace(':', '').split()  ###Use a time-stamp as the output dir (minus the day)
    return today + '-' + fields[3]
def callWXPython():
    """Launch the wxPython-based AltAnalyze results viewer in-process."""
    import wx
    import AltAnalyzeViewer
    wx_app = wx.App(False)
    AltAnalyzeViewer.remoteViewer(wx_app)
def AltAnalyzeSetup(skip_intro):
    """Gather run parameters and bootstrap an AltAnalyze analysis.

    skip_intro -- 'yes'/'no' controls the UI intro screen; the special value
    'remoteViewer' instead launches the standalone results viewer and exits.
    Sets module globals (apt_location, root_dir, log_file, ...) consumed by
    the rest of the pipeline, then runs via the Tk status window or directly.
    """
    global apt_location; global root_dir;global log_file; global summary_data_db; summary_data_db={}; reload(UI)
    global probability_statistic; global commandLineMode; commandLineMode = 'no'
    if 'remoteViewer' == skip_intro:
        if os.name == 'nt':
            callWXPython()
        elif os.name == 'ntX':
            ### NOTE(review): os.name is never 'ntX', so this branch appears deliberately disabled
            package_path = filepath('python')
            win_package_path = string.replace(package_path,'python','AltAnalyzeViewer.exe')
            import subprocess
            subprocess.call([win_package_path]);sys.exit()
        elif os.name == 'posix':
            package_path = filepath('python')
            #mac_package_path = string.replace(package_path,'python','AltAnalyze.app/Contents/MacOS/python')
            #os.system(mac_package_path+' RemoteViewer.py');sys.exit()
            mac_package_path = string.replace(package_path,'python','AltAnalyzeViewer.app/Contents/MacOS/AltAnalyzeViewer')
            import subprocess
            subprocess.call([mac_package_path]);sys.exit()
        """
        import threading
        import wx
        app = wx.PySimpleApp()
        t = threading.Thread(target=callWXPython)
        t.setDaemon(1)
        t.start()
        s = 1
        queue = mlp.Queue()
        proc = mlp.Process(target=callWXPython) ### passing sys.stdout unfortunately doesn't work to pass the Tk string
        proc.start()
        sys.exit()
        """
    reload(UI)
    expr_var, alt_var, additional_var, goelite_var, exp_file_location_db = UI.getUserParameters(skip_intro,Multi=mlp)
    """except Exception:
    if 'SystemExit' not in str(traceback.format_exc()):
        expr_var, alt_var, additional_var, goelite_var, exp_file_location_db = UI.getUserParameters('yes')
    else: sys.exit()"""
    for dataset in exp_file_location_db:
        fl = exp_file_location_db[dataset]
        apt_location = fl.APTLocation()
        root_dir = fl.RootDir()
        try: probability_statistic = fl.ProbabilityStatistic()
        except Exception: probability_statistic = 'unpaired t-test'
    ### Create a fresh time-stamped log file for this run
    time_stamp = timestamp()
    log_file = filepath(root_dir+'AltAnalyze_report-'+time_stamp+'.log')
    log_report = open(log_file,'w'); log_report.close()
    if use_Tkinter == 'yes' and debug_mode == 'no':
        try:
            global root; root = Tk()
            StatusWindow(root,expr_var, alt_var, goelite_var, additional_var, exp_file_location_db)
            root.destroy()
        except Exception, exception:
            try:
                print traceback.format_exc()
                badExit()
            except Exception: sys.exit()
    else: AltAnalyzeMain(expr_var, alt_var, goelite_var, additional_var, exp_file_location_db,'')
def badExit():
    """Report an unexpected fatal error — open the log file and, in GUI mode,
    a warning dialog — then terminate the process."""
    print "\n...exiting AltAnalyze due to unexpected error"
    try:
        time_stamp = timestamp()
        print_out = "Unknown error encountered during data processing.\nPlease see logfile in:\n\n"+log_file+"\nand report to altanalyze@gmail.com."
        try:
            if len(log_file)>0:
                if commandLineMode == 'no':
                    ### Open the log with the OS default text application
                    if os.name == 'nt':
                        try: os.startfile('"'+log_file+'"')
                        except Exception: os.system('open "'+log_file+'"')
                    elif 'darwin' in sys.platform: os.system('open "'+log_file+'"')
                    elif 'linux' in sys.platform: os.system('xdg-open "'+log_file+'"')
            if commandLineMode == 'no':
                try: UI.WarningWindow(print_out,'Error Encountered!'); root.destroy()
                except Exception: print print_out
        except Exception: sys.exit()
    except Exception: sys.exit()
    sys.exit()
    kill ### NOTE(review): unreachable after sys.exit(); undefined name presumably kept as a debug hard-stop — confirm before removing
def AltAnalyzeMain(expr_var,alt_var,goelite_var,additional_var,exp_file_location_db,root):
### Hard-coded defaults
w = 'Agilent'; x = 'Affymetrix'; y = 'Ensembl'; z = 'any'; data_source = y; constitutive_source = z; manufacturer = x ### Constitutive source, is only really paid attention to if Ensembl, otherwise Affymetrix is used (even if default)
### Get default options for ExpressionBuilder and AltAnalyze
start_time = time.time()
test_goelite = 'no'; test_results_pannel = 'no'
global species; global array_type; global expression_data_format; global use_R; use_R = 'no'
global analysis_method; global p_threshold; global filter_probeset_types
global permute_p_threshold; global perform_permutation_analysis; global export_NI_values
global run_MiDAS; global analyze_functional_attributes; global microRNA_prediction_method
global calculate_normIntensity_p; global pathway_permutations; global avg_all_for_ss; global analyze_all_conditions
global remove_intronic_junctions
global agglomerate_inclusion_probesets; global expression_threshold; global factor_out_expression_changes
global only_include_constitutive_containing_genes; global remove_transcriptional_regulated_genes; global add_exons_to_annotations
global exclude_protein_details; global filter_for_AS; global use_direct_domain_alignments_only; global run_from_scratch
global explicit_data_type; explicit_data_type = 'null'
global altanalyze_files; altanalyze_files = []
species,array_type,manufacturer,constitutive_source,dabg_p,raw_expression_threshold,avg_all_for_ss,expression_data_format,include_raw_data, run_from_scratch, perform_alt_analysis = expr_var
analysis_method,p_threshold,filter_probeset_types,alt_exon_fold_variable,gene_expression_cutoff,remove_intronic_junctions,permute_p_threshold,perform_permutation_analysis, export_NI_values, analyze_all_conditions = alt_var
calculate_normIntensity_p, run_MiDAS, use_direct_domain_alignments_only, microRNA_prediction_method, filter_for_AS, additional_algorithms = additional_var
ge_fold_cutoffs,ge_pvalue_cutoffs,ge_ptype,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,pathway_permutations,mod,returnPathways = goelite_var
original_remove_intronic_junctions = remove_intronic_junctions
if run_from_scratch == 'Annotate External Results': analysis_method = 'external'
if returnPathways == 'no' or returnPathways == 'None':
returnPathways = None
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]
try: exon_exp_threshold = fl.ExonExpThreshold()
except Exception: exon_exp_threshold = 'NA'
try: gene_exp_threshold = fl.GeneExpThreshold()
except Exception: gene_exp_threshold = 'NA'
try: exon_rpkm_threshold = fl.ExonRPKMThreshold()
except Exception: exon_rpkm_threshold = 'NA'
try: rpkm_threshold = fl.RPKMThreshold() ### Gene-Level
except Exception: rpkm_threshold = 'NA'
fl.setJunctionExpThreshold(raw_expression_threshold) ### For RNA-Seq, this specifically applies to exon-junctions
try: predictGroups = fl.predictGroups()
except Exception: predictGroups = False
try:
if fl.excludeLowExpressionExons(): excludeLowExpExons = 'yes'
else: excludeLowExpExons = 'no'
except Exception: excludeLowExpExons = 'no'
if test_goelite == 'yes': ### It can be difficult to get error warnings from GO-Elite, unless run here
results_dir = filepath(fl.RootDir())
elite_input_dirs = ['AltExonConfirmed','AltExon','regulated','upregulated','downregulated'] ### Run GO-Elite multiple times to ensure heatmaps are useful and to better organize results
for elite_dir in elite_input_dirs:
file_dirs = results_dir+'GO-Elite/'+elite_dir,results_dir+'GO-Elite/denominator',results_dir+'GO-Elite/'+elite_dir
variables = species,mod,pathway_permutations,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,returnPathways,file_dirs,root
GO_Elite.remoteAnalysis(variables,'non-UI',Multi=mlp)
global perform_element_permutation_analysis; global permutations
perform_element_permutation_analysis = 'yes'; permutations = 2000
analyze_functional_attributes = 'yes' ### Do this by default (shouldn't substantially increase runtime)
if run_from_scratch != 'Annotate External Results' and (array_type != "3'array" and array_type!='RNASeq'):
if run_from_scratch !='Process AltAnalyze filtered':
try: raw_expression_threshold = float(raw_expression_threshold)
except Exception: raw_expression_threshold = 1
if raw_expression_threshold<1:
raw_expression_threshold = 1
print "Expression threshold < 1, forcing to be a minimum of 1."
try: dabg_p = float(dabg_p)
except Exception: dabg_p = 0
if dabg_p == 0 or dabg_p > 1:
print "Invalid dabg-p value threshold entered,(",dabg_p,") setting to default of 0.05"
dabg_p = 0.05
if use_direct_domain_alignments_only == 'direct-alignment': use_direct_domain_alignments_only = 'yes'
if run_from_scratch == 'Process CEL files': expression_data_format = 'log'
print "Beginning AltAnalyze Analysis... Format:", expression_data_format
if array_type == 'RNASeq': id_name = 'exon/junction IDs'
else: id_name = 'array IDs'
print_items=[]; #print [permute_p_threshold]; sys.exit()
print_items.append("AltAnalyze version 2.1.0 - Expression Analysis Parameters Being Used...")
print_items.append('\t'+'database'+': '+unique.getCurrentGeneDatabaseVersion())
print_items.append('\t'+'species'+': '+species)
print_items.append('\t'+'method'+': '+array_type)
print_items.append('\t'+'manufacturer'+': '+manufacturer)
print_items.append('\t'+'probability_statistic'+': '+probability_statistic)
print_items.append('\t'+'constitutive_source'+': '+constitutive_source)
print_items.append('\t'+'dabg_p'+': '+str(dabg_p))
if array_type == 'RNASeq':
print_items.append('\t'+'junction expression threshold'+': '+str(raw_expression_threshold))
print_items.append('\t'+'exon_exp_threshold'+': '+str(exon_exp_threshold))
print_items.append('\t'+'gene_exp_threshold'+': '+str(gene_exp_threshold))
print_items.append('\t'+'exon_rpkm_threshold'+': '+str(exon_rpkm_threshold))
print_items.append('\t'+'gene_rpkm_threshold'+': '+str(rpkm_threshold))
print_items.append('\t'+'exclude low expressing exons for RPKM'+': '+excludeLowExpExons)
else:
print_items.append('\t'+'raw_expression_threshold'+': '+str(raw_expression_threshold))
print_items.append('\t'+'avg_all_for_ss'+': '+avg_all_for_ss)
print_items.append('\t'+'expression_data_format'+': '+expression_data_format)
print_items.append('\t'+'include_raw_data'+': '+include_raw_data)
print_items.append('\t'+'run_from_scratch'+': '+run_from_scratch)
print_items.append('\t'+'perform_alt_analysis'+': '+perform_alt_analysis)
if avg_all_for_ss == 'yes': cs_type = 'core'
else: cs_type = 'constitutive'
print_items.append('\t'+'calculate_gene_expression_using'+': '+cs_type)
print_items.append("Alternative Exon Analysis Parameters Being Used..." )
print_items.append('\t'+'analysis_method'+': '+analysis_method)
print_items.append('\t'+'p_threshold'+': '+str(p_threshold))
print_items.append('\t'+'filter_data_types'+': '+filter_probeset_types)
print_items.append('\t'+'alt_exon_fold_variable'+': '+str(alt_exon_fold_variable))
print_items.append('\t'+'gene_expression_cutoff'+': '+str(gene_expression_cutoff))
print_items.append('\t'+'remove_intronic_junctions'+': '+remove_intronic_junctions)
print_items.append('\t'+'avg_all_for_ss'+': '+avg_all_for_ss)
print_items.append('\t'+'permute_p_threshold'+': '+str(permute_p_threshold))
print_items.append('\t'+'perform_permutation_analysis'+': '+perform_permutation_analysis)
print_items.append('\t'+'export_NI_values'+': '+export_NI_values)
print_items.append('\t'+'run_MiDAS'+': '+run_MiDAS)
print_items.append('\t'+'use_direct_domain_alignments_only'+': '+use_direct_domain_alignments_only)
print_items.append('\t'+'microRNA_prediction_method'+': '+microRNA_prediction_method)
print_items.append('\t'+'analyze_all_conditions'+': '+analyze_all_conditions)
print_items.append('\t'+'filter_for_AS'+': '+filter_for_AS)
if pathway_permutations == 'NA': run_GOElite = 'decide_later'
else: run_GOElite = 'run-immediately'
print_items.append('\t'+'run_GOElite'+': '+ run_GOElite)
universalPrintFunction(print_items)
if commandLineMode == 'yes': print 'Running command line mode:',commandLineMode
summary_data_db['gene_assayed'] = 0
summary_data_db['denominator_exp_genes']=0
summary_data_db['alt_events'] = 0
summary_data_db['denominator_exp_events'] = 0
summary_data_db['alt_genes'] = 0
summary_data_db['direct_domain_genes'] = 0
summary_data_db['miRNA_gene_denom'] = 0
summary_data_db['miRNA_gene_hits'] = 0
if test_results_pannel == 'yes': ### It can be difficult to get error warnings from GO-Elite, unless run here
graphic_links = []
graphic_links.append(['test','Config/AltAnalyze_structure-RNASeq.jpg'])
summary_data_db['QC']=graphic_links
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
dataset = 'test'; results_dir=''
print "Analysis Complete\n";
if root !='' and root !=None:
UI.InfoWindow(print_out,'Analysis Completed!')
tl = Toplevel(); SummaryResultsWindow(tl,'GE',results_dir,dataset,'parent',summary_data_db)
root.destroy(); sys.exit()
global export_go_annotations; global aspire_output_list; global aspire_output_gene_list
global filter_probesets_by; global global_addition_factor; global onlyAnalyzeJunctions
global log_fold_cutoff; global aspire_cutoff; global annotation_system; global alt_exon_logfold_cutoff
"""dabg_p = 0.75; data_type = 'expression' ###used for expression analysis when dealing with AltMouse arrays
a = "3'array"; b = "exon"; c = "AltMouse"; e = "custom"; array_type = c
l = 'log'; n = 'non-log'; expression_data_format = l
hs = 'Hs'; mm = 'Mm'; dr = 'Dr'; rn = 'Rn'; species = mm
include_raw_data = 'yes'; expression_threshold = 70 ### Based on suggestion from BMC Genomics. 2006 Dec 27;7:325. PMID: 17192196, for hu-exon 1.0 st array
avg_all_for_ss = 'no' ###Default is 'no' since we don't want all probes averaged for the exon arrays"""
###### Run ExpressionBuilder ######
"""ExpressionBuilder is used to:
(1) extract out gene expression values, provide gene annotations, and calculate summary gene statistics
(2) filter probesets based DABG p-values and export to pair-wise comparison files
(3) build array annotations files matched to gene structure features (e.g. exons, introns) using chromosomal coordinates
options 1-2 are executed in remoteExpressionBuilder and option 3 is by running ExonArrayEnsembl rules"""
try:
additional_algorithm = additional_algorithms.Algorithm()
additional_score = additional_algorithms.Score()
except Exception: additional_algorithm = 'null'; additional_score = 'null'
if analysis_method == 'FIRMA': analyze_metaprobesets = 'yes'
elif additional_algorithm == 'FIRMA': analyze_metaprobesets = 'yes'
else: analyze_metaprobesets = 'no'
### Check to see if this is a real or FAKE (used for demonstration purposes) dataset
if run_from_scratch == 'Process CEL files' or 'Feature Extraction' in run_from_scratch:
for dataset in exp_file_location_db:
if run_from_scratch == 'Process CEL files':
fl = exp_file_location_db[dataset]
pgf_file=fl.InputCDFFile()
results_dir = filepath(fl.RootDir())
if '_demo' in pgf_file: ### Thus we are running demo CEL files and want to quit immediately
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
try:
print "Analysis Complete\n";
if root !='' and root !=None:
UI.InfoWindow(print_out,'Analysis Completed!')
tl = Toplevel(); SummaryResultsWindow(tl,'AS',results_dir,dataset,'parent',summary_data_db)
except Exception: null=[]
skip_intro = 'yes'
if pathway_permutations == 'NA' and run_from_scratch != 'Annotate External Results':
reload(UI)
UI.getUpdatedParameters(array_type,species,run_from_scratch,results_dir)
try: AltAnalyzeSetup('no')
except Exception: sys.exit()
if 'CEL files' in run_from_scratch:
import APT
try:
try:
APT.probesetSummarize(exp_file_location_db,analyze_metaprobesets,filter_probeset_types,species,root)
if analyze_metaprobesets == 'yes':
analyze_metaprobesets = 'no' ### Re-run the APT analysis to obtain probeset rather than gene-level results (only the residuals are needed from a metaprobeset run)
APT.probesetSummarize(exp_file_location_db,analyze_metaprobesets,filter_probeset_types,species,root)
except Exception:
import platform
print "Trying to change APT binary access privileges"
for dataset in exp_file_location_db: ### Instance of the Class ExpressionFileLocationData
fl = exp_file_location_db[dataset]; apt_dir =fl.APTLocation()
if '/bin' in apt_dir: apt_file = apt_dir +'/apt-probeset-summarize' ### if the user selects an APT directory
elif os.name == 'nt': apt_file = apt_dir + '/PC/'+platform.architecture()[0]+'/apt-probeset-summarize.exe'
elif 'darwin' in sys.platform: apt_file = apt_dir + '/Mac/apt-probeset-summarize'
elif 'linux' in sys.platform:
if '32bit' in platform.architecture(): apt_file = apt_dir + '/Linux/32bit/apt-probeset-summarize'
elif '64bit' in platform.architecture(): apt_file = apt_dir + '/Linux/64bit/apt-probeset-summarize'
apt_file = filepath(apt_file)
os.chmod(apt_file,0777)
midas_dir = string.replace(apt_file,'apt-probeset-summarize','apt-midas')
os.chmod(midas_dir,0777)
APT.probesetSummarize(exp_file_location_db,analysis_method,filter_probeset_types,species,root)
except Exception:
print_out = 'AltAnalyze encountered an un-expected error while running Affymetrix\n'
print_out += 'Power Tools (APT). Additional information may be found in the directory\n'
print_out += '"ExpressionInput/APT" in the output directory. You may also encounter issues\n'
print_out += 'if you are logged into an account with restricted priveledges.\n\n'
print_out += 'If this issue can not be resolved, contact AltAnalyze help or run RMA outside\n'
print_out += 'of AltAnalyze and import the results using the analysis option "expression file".\n'
print traceback.format_exc()
try:
UI.WarningWindow(print_out,'Exit')
root.destroy(); sys.exit()
except Exception:
print print_out; sys.exit()
elif 'Feature Extraction' in run_from_scratch:
import ProcessAgilentArrays
try: ProcessAgilentArrays.agilentSummarize(exp_file_location_db)
except Exception:
print_out = 'Agilent array import and processing failed... see error log for details...'
print traceback.format_exc()
try:
UI.WarningWindow(print_out,'Exit')
root.destroy(); sys.exit()
except Exception:
print print_out; sys.exit()
reload(ProcessAgilentArrays)
if run_from_scratch == 'Process RNA-seq reads' or run_from_scratch == 'buildExonExportFiles':
import RNASeq; reload(RNASeq); import RNASeq
for dataset in exp_file_location_db: fl = exp_file_location_db[dataset]
### The below function aligns splice-junction coordinates to Ensembl exons from BED Files and
### exports AltAnalyze specific databases that are unique to this dataset to the output directory
try: fastq_folder = fl.RunKallisto()
except Exception: print traceback.format_exc()
if len(fastq_folder)>0:
try:
RNASeq.runKallisto(species,dataset,root_dir,fastq_folder,returnSampleNames=False)
biotypes = 'ran'
except Exception: biotypes='failed'
else:
analyzeBAMs = False; bedFilesPresent = False
dir_list = unique.read_directory(fl.BEDFileDir())
for file in dir_list:
if '.bam' in string.lower(file):
analyzeBAMs=True
if '.bed' in string.lower(file):
bedFilesPresent=True
if analyzeBAMs and bedFilesPresent==False:
import multiBAMtoBED
bam_dir = fl.BEDFileDir()
refExonCoordinateFile = filepath('AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_exon.txt')
outputExonCoordinateRefBEDfile = bam_dir+'/BedRef/'+species+'_'+string.replace(dataset,'exp.','')
analysisType = ['exon','junction','reference']
#analysisType = ['junction']
multiBAMtoBED.parallelBAMProcessing(bam_dir,refExonCoordinateFile,outputExonCoordinateRefBEDfile,analysisType=analysisType,useMultiProcessing=fl.multiThreading(),MLP=mlp,root=root)
biotypes = RNASeq.alignExonsAndJunctionsToEnsembl(species,exp_file_location_db,dataset,Multi=mlp)
if biotypes == 'failed':
print_out = 'No valid chromosomal positions in the input BED or BioScope files. Exiting AltAnalyze.'
if len(fastq_folder)>0:
if 'FTP' in traceback.format_exc():
print_out = 'AltAnlayze was unable to retreive a transcript fasta sequence file from the Ensembl website. '
print_out += 'Ensure you are connected to the internet and that the website http://ensembl.org is live.'
else:
print_out = 'An unexplained error was encountered with Kallisto analysis:\n'
print_out += traceback.format_exc()
try:
UI.WarningWindow(print_out,'Exit')
root.destroy(); sys.exit()
except Exception:
print print_out; sys.exit()
reload(RNASeq)
if root_dir in biotypes:
print_out = 'Exon-level BED coordinate predictions exported to:\n'+biotypes
print_out+= '\n\nAfter obtaining exon expression estimates, rename exon BED files to\n'
print_out+= 'match the junction name (e.g., Sample1__exon.bed and Sample1__junction.bed)\n'
print_out+= 'and re-run AltAnalyze (see tutorials at http://altanalyze.org for help).'
UI.InfoWindow(print_out,'Export Complete')
try: root.destroy(); sys.exit()
except Exception: sys.exit()
if predictGroups == True:
expFile = fl.ExpFile()
if array_type == 'RNASeq':
exp_threshold=100; rpkm_threshold=10
else:
exp_threshold=200; rpkm_threshold=8
RNASeq.singleCellRNASeqWorkflow(species, array_type, expFile, mlp, exp_threshold=exp_threshold, rpkm_threshold=rpkm_threshold)
goelite_run = False
if run_from_scratch == 'Process Expression file' or run_from_scratch == 'Process CEL files' or run_from_scratch == 'Process RNA-seq reads' or 'Feature Extraction' in run_from_scratch:
if (fl.NormMatrix()=='quantile' or fl.NormMatrix()=='group') and 'Feature Extraction' not in run_from_scratch:
import NormalizeDataset
try: NormalizeDataset.normalizeDataset(fl.ExpFile(),normalization=fl.NormMatrix(),platform=array_type)
except Exception: print "Normalization failed for unknown reasons..."
#"""
status = ExpressionBuilder.remoteExpressionBuilder(species,array_type,
dabg_p,raw_expression_threshold,avg_all_for_ss,expression_data_format,
manufacturer,constitutive_source,data_source,include_raw_data,
perform_alt_analysis,ge_fold_cutoffs,ge_pvalue_cutoffs,ge_ptype,
exp_file_location_db,root)
reload(ExpressionBuilder) ### Clears Memory
#"""
graphics=[]
if fl.MarkerFinder() == 'yes':
### Identify putative condition-specific marker genees
import markerFinder
fl.setOutputDir(root_dir) ### This needs to be set here
exp_file = fl.ExpFile()
if array_type != "3'array": exp_file = string.replace(exp_file,'.txt','-steady-state.txt')
markerFinder_inputs = [exp_file,fl.DatasetFile()] ### Output a replicate and non-replicate version
markerFinder_inputs = [exp_file] ### Only considers the replicate and not mean analysis (recommended)
for input_exp_file in markerFinder_inputs:
### This applies to an ExpressionOutput DATASET file compoosed of gene expression values (averages already present)
try:
output_dir = markerFinder.getAverageExpressionValues(input_exp_file,array_type) ### Either way, make an average annotated file from the DATASET file
except Exception:
print "Unknown MarkerFinder failure (possible filename issue or data incompatibility)..."
print traceback.format_exc()
continue
if 'DATASET' in input_exp_file:
group_exp_file = string.replace(input_exp_file,'DATASET','AVERAGE')
else:
group_exp_file = (input_exp_file,output_dir) ### still analyze the primary sample
compendiumType = 'protein_coding'
if expression_data_format == 'non-log': logTransform = True
else: logTransform = False
try: markerFinder.analyzeData(group_exp_file,species,array_type,compendiumType,AdditionalParameters=fl,logTransform=logTransform)
except Exception: None
### Generate heatmaps (unclustered - order by markerFinder)
try: graphics = markerFinder.generateMarkerHeatMaps(fl,array_type,graphics=graphics,Species=species)
except Exception: print traceback.format_exc()
remove_intronic_junctions = original_remove_intronic_junctions ### This var gets reset when running FilterDABG
try:
summary_data_db['QC'] = fl.GraphicLinks()+graphics ### provides links for displaying QC and clustering plots
except Exception:
null=[] ### Visualization support through matplotlib either not present or visualization options excluded
#print '!!!!!finished expression builder'
#returnLargeGlobalVars()
expression_data_format = 'log' ### This variable is set from non-log in FilterDABG when present (version 1.16)
try:
parent_dir = fl.RootDir()+'/GO-Elite/regulated/'
dir_list = read_directory(parent_dir)
for file in dir_list:
input_file_dir = parent_dir+'/'+file
inputType = 'IDs'
interactionDirs = ['WikiPathways','KEGG','BioGRID','TFTargets']
output_dir = parent_dir
degrees = 'direct'
input_exp_file = input_file_dir
gsp = UI.GeneSelectionParameters(species,array_type,manufacturer)
gsp.setGeneSet('None Selected')
gsp.setPathwaySelect('')
gsp.setGeneSelection('')
gsp.setOntologyID('')
gsp.setIncludeExpIDs(True)
UI.networkBuilder(input_file_dir,inputType,output_dir,interactionDirs,degrees,input_exp_file,gsp,'')
except Exception:
print traceback.format_exc()
if status == 'stop':
### See if the array and species are compatible with GO-Elite analysis
system_codes = UI.getSystemInfo()
go_elite_analysis_supported = 'yes'
species_names = UI.getSpeciesInfo()
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]; results_dir = filepath(fl.RootDir())
### Perform GO-Elite Analysis
if pathway_permutations != 'NA':
try:
print '\nBeginning to run GO-Elite analysis on alternative exon results'
elite_input_dirs = ['AltExonConfirmed','AltExon','regulated','upregulated','downregulated'] ### Run GO-Elite multiple times to ensure heatmaps are useful and to better organize results
for elite_dir in elite_input_dirs:
file_dirs = results_dir+'GO-Elite/'+elite_dir,results_dir+'GO-Elite/denominator',results_dir+'GO-Elite/'+elite_dir
input_dir = results_dir+'GO-Elite/'+elite_dir
variables = species,mod,pathway_permutations,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,returnPathways,file_dirs,root
try: input_files = read_directory(input_dir) ### Are there any files to analyze?
except Exception: input_files=[]
if len(input_files)>0:
try: GO_Elite.remoteAnalysis(variables,'non-UI',Multi=mlp); goelite_run = True
except Exception,e:
print e
print "GO-Elite analysis failed"
try: GO_Elite.moveMAPPFinderFiles(file_dirs[0])
except Exception: print 'Input GO-Elite files could NOT be moved.'
try: GO_Elite.moveMAPPFinderFiles(file_dirs[1])
except Exception: print 'Input GO-Elite files could NOT be moved.'
except Exception: pass
if goelite_run == False:
print 'No GO-Elite input files to analyze (check your criterion).'
print_out = 'Analysis complete. Gene expression\nsummary exported to "ExpressionOutput".'
try:
if use_Tkinter == 'yes':
print "Analysis Complete\n"; UI.InfoWindow(print_out,'Analysis Completed!')
tl = Toplevel(); SummaryResultsWindow(tl,'GE',results_dir,dataset,'parent',summary_data_db)
if pathway_permutations == 'NA' and run_from_scratch != 'Annotate External Results':
if go_elite_analysis_supported == 'yes':
UI.getUpdatedParameters(array_type,species,run_from_scratch,file_dirs)
try: AltAnalyzeSetup('no')
except Exception:
print traceback.format_exc()
sys.exit()
else: print '\n'+print_out; sys.exit()
except Exception:
#print 'Failed to report status through GUI.'
sys.exit()
else: altanalyze_files = status[1] ### These files are the comparison files to analyze
elif run_from_scratch == 'update DBs':
null=[] ###Add link to new module here (possibly)
#updateDBs(species,array_type)
sys.exit()
if perform_alt_analysis != 'expression': ###Thus perform_alt_analysis = 'both' or 'alt' (default when skipping expression summary step)
###### Run AltAnalyze ######
global dataset_name; global summary_results_db; global summary_results_db2
summary_results_db={}; summary_results_db2={}; aspire_output_list=[]; aspire_output_gene_list=[]
onlyAnalyzeJunctions = 'no'; agglomerate_inclusion_probesets = 'no'; filter_probesets_by = 'NA'
if array_type == 'AltMouse' or ((array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
if filter_probeset_types == 'junctions-only': onlyAnalyzeJunctions = 'yes'
elif filter_probeset_types == 'combined-junctions': agglomerate_inclusion_probesets = 'yes'; onlyAnalyzeJunctions = 'yes'
elif filter_probeset_types == 'exons-only': analysis_method = 'splicing-index'; filter_probesets_by = 'exon'
if filter_probeset_types == 'combined-junctions' and array_type == 'junction' or array_type == 'RNASeq': filter_probesets_by = 'all'
else: filter_probesets_by = filter_probeset_types
c = 'Ensembl'; d = 'Entrez Gene'
annotation_system = c
expression_threshold = 0 ###This is different than the raw_expression_threshold (probably shouldn't filter so set to 0)
if analysis_method == 'linearregres-rlm': analysis_method = 'linearregres';use_R = 'yes'
if gene_expression_cutoff<1:
gene_expression_cutoff = 2 ### A number less than one is invalid
print "WARNING!!!! Invalid gene expression fold cutoff entered,\nusing the default value of 2, must be greater than 1."
log_fold_cutoff = math.log(float(gene_expression_cutoff),2)
if analysis_method != 'ASPIRE' and analysis_method != 'none':
if p_threshold <= 0 or p_threshold >1:
p_threshold = 0.05 ### A number less than one is invalid
print "WARNING!!!! Invalid alternative exon p-value threshold entered,\nusing the default value of 0.05."
if alt_exon_fold_variable<1:
alt_exon_fold_variable = 1 ### A number less than one is invalid
print "WARNING!!!! Invalid alternative exon fold cutoff entered,\nusing the default value of 2, must be greater than 1."
try: alt_exon_logfold_cutoff = math.log(float(alt_exon_fold_variable),2)
except Exception: alt_exon_logfold_cutoff = 1
else: alt_exon_logfold_cutoff = float(alt_exon_fold_variable)
global_addition_factor = 0
export_junction_comparisons = 'no' ### No longer accessed in this module - only in update mode through a different module
factor_out_expression_changes = 'yes' ### Use 'no' if data is normalized already or no expression normalization for ASPIRE desired
only_include_constitutive_containing_genes = 'yes'
remove_transcriptional_regulated_genes = 'yes'
add_exons_to_annotations = 'no'
exclude_protein_details = 'no'
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method: annotation_system = d
if 'linear' in analysis_method: analysis_method = 'linearregres'
if 'aspire' in analysis_method: analysis_method = 'ASPIRE'
if array_type == 'AltMouse': species = 'Mm'
#if export_NI_values == 'yes': remove_transcriptional_regulated_genes = 'no'
###Saves run-time while testing the software (global variable stored)
#import_dir = '/AltDatabase/affymetrix/'+species
#dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
### Get Ensembl-GO and pathway annotations from GO-Elite files
universalPrintFunction(["Importing GO-Elite pathway/GO annotations"])
global go_annotations; go_annotations={}
import BuildAffymetrixAssociations
go_annotations = BuildAffymetrixAssociations.getEnsemblAnnotationsFromGOElite(species)
global probeset_annotations_file
if array_type == 'RNASeq': probeset_annotations_file = root_dir+'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_junctions.txt'
elif array_type == 'AltMouse': probeset_annotations_file = 'AltDatabase/'+species+'/'+array_type+'/'+'MASTER-probeset-transcript.txt'
else: probeset_annotations_file = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_probesets.txt'
#"""
if analysis_method != 'none':
analysis_summary = RunAltAnalyze() ### Only run if analysis methods is specified (only available for RNA-Seq and junction analyses)
else: analysis_summary = None
if analysis_summary != None:
summary_results_db, aspire_output_gene_list, number_events_analyzed = analysis_summary
summary_data_db2 = copy.deepcopy(summary_data_db)
for i in summary_data_db2: del summary_data_db[i] ### If we reset the variable it violates it's global declaration... do this instead
#universalPrintFunction(['Alternative Exon Results for Junction Comparisons:'])
#for i in summary_data_db: universalPrintFunction([i+' '+ str(summary_data_db[i])])
exportSummaryResults(summary_results_db,analysis_method,aspire_output_list,aspire_output_gene_list,annotate_db,array_type,number_events_analyzed,root_dir)
else:
### Occurs for RNASeq when no junctions are present
summary_data_db2={}
if array_type == 'junction' or array_type == 'RNASeq':
#Reanalyze junction array data separately for individual probests rather than recipricol junctions
if array_type == 'junction': explicit_data_type = 'exon'
elif array_type == 'RNASeq': explicit_data_type = 'junction'
else: report_single_probeset_results = 'no'
### Obtain exon analysis defaults
expr_defaults, alt_exon_defaults, functional_analysis_defaults, goelite_defaults = UI.importDefaults('exon',species)
analysis_method, null, filter_probeset_types, null, null, alt_exon_fold_variable, null, null, null, null, null, null, null, calculate_normIntensity_p, null = alt_exon_defaults
filter_probesets_by = filter_probeset_types
if additional_algorithm == 'splicing-index' or additional_algorithm == 'FIRMA':
analysis_method = additional_algorithm
#print [analysis_method], [filter_probeset_types], [p_threshold], [alt_exon_fold_variable]
try: alt_exon_logfold_cutoff = math.log(float(additional_score),2)
except Exception: alt_exon_logfold_cutoff = 1
agglomerate_inclusion_probesets = 'no'
try:
summary_results_db, aspire_output_gene_list, number_events_analyzed = RunAltAnalyze()
exportSummaryResults(summary_results_db,analysis_method,aspire_output_list,aspire_output_gene_list,annotate_db,'exon',number_events_analyzed,root_dir)
if len(summary_data_db2)==0: summary_data_db2 = summary_data_db; explicit_data_type = 'exon-only'
#universalPrintFunction(['Alternative Exon Results for Individual Probeset Analyses:'])
#for i in summary_data_db: universalPrintFunction([i+' '+ str(summary_data_db[i])])
except Exception:
print traceback.format_exc()
None
#"""
### Perform dPSI Analysis
try:
if 'counts.' in fl.CountsFile(): pass
else:
dir_list = read_directory(fl.RootDir()+'ExpressionInput')
for file in dir_list:
if 'exp.' in file and 'steady-state' not in file:
fl.setExpFile(fl.RootDir()+'ExpressionInput/'+file)
#print [fl.RootDir()+'ExpressionInput/'+file]
except Exception:
search_dir = fl.RootDir()+'/ExpressionInput'
files = unique.read_directory(fl.RootDir()+'/ExpressionInput')
for file in files:
if 'exp.' in file and 'steady-state.txt' not in file:
fl.setExpFile(search_dir+'/'+file)
try:
#"""
try:
graphic_links2,cluster_input_file=ExpressionBuilder.unbiasedComparisonSpliceProfiles(fl.RootDir(),
species,array_type,expFile=fl.CountsFile(),min_events=0,med_events=1)
except Exception: pass
#"""
inputpsi = fl.RootDir()+'AltResults/AlternativeOutput/'+species+'_'+array_type+'_top_alt_junctions-PSI-clust.txt'
### Calculate ANOVA p-value stats based on groups
if array_type !='gene' and array_type != 'exon':
matrix,compared_groups,original_data = statistics.matrixImport(inputpsi)
matrix_pvalues=statistics.runANOVA(inputpsi,matrix,compared_groups)
anovaFilteredDir = statistics.returnANOVAFiltered(inputpsi,original_data,matrix_pvalues)
graphic_link1 = ExpressionBuilder.exportHeatmap(anovaFilteredDir)
try: summary_data_db2['QC']+=graphic_link1
except Exception: summary_data_db2['QC']=graphic_link1
except Exception: print traceback.format_exc()
import RNASeq
try:
graphic_link = RNASeq.compareExonAndJunctionResults(species,array_type,summary_results_db,root_dir)
try: summary_data_db2['QC']+=graphic_link
except Exception: summary_data_db2['QC']=graphic_link
except Exception:
print traceback.format_exc()
#"""
### Export the top 15 spliced genes
try:
altresult_dir = fl.RootDir()+'/AltResults/'
splicing_results_root = altresult_dir+'/Clustering/'
dir_list = read_directory(splicing_results_root)
gene_string=''
altanalyze_results_folder = altresult_dir+'/RawSpliceData/'+species
### Lookup the raw expression dir
expression_results_folder = string.replace(altresult_dir,'AltResults','ExpressionInput')
expression_dir = UI.getValidExpFile(expression_results_folder)
show_introns=False
try: altresult_dir = UI.getValidSplicingScoreFile(altanalyze_results_folder)
except Exception,e:
print traceback.format_exc()
analysisType='plot'
for file in dir_list:
if 'AltExonConfirmed' in file:
gene_dir = splicing_results_root+'/'+file
genes = UI.importGeneList(gene_dir,limit=50) ### list of gene IDs or symbols
gene_string = gene_string+','+genes
print 'Imported genes from',file,'\n'
analysisType='plot'
for file in dir_list:
if 'Combined-junction-exon-evidence' in file and 'top' not in file:
gene_dir = splicing_results_root+'/'+file
try: isoform_dir = UI.exportJunctionList(gene_dir,limit=50) ### list of gene IDs or symbols
except Exception: print traceback.format_exc()
UI.altExonViewer(species,array_type,expression_dir, gene_string, show_introns, analysisType, None); print 'completed'
UI.altExonViewer(species,array_type,altresult_dir, gene_string, show_introns, analysisType, None); print 'completed'
except Exception:
print traceback.format_exc()
if array_type != 'exon' and array_type != 'gene':
### SashimiPlot Visualization
try:
top_PSI_junction = inputpsi[:-4]+'-ANOVA.txt'
isoform_dir2 = UI.exportJunctionList(top_PSI_junction,limit=50) ### list of gene IDs or symbols
except Exception:
print traceback.format_exc()
try:
analyzeBAMs = False
dir_list = unique.read_directory(fl.RootDir())
for file in dir_list:
if '.bam' in string.lower(file):
analyzeBAMs=True
if analyzeBAMs:
### Create sashimi plot index
import SashimiIndex
SashimiIndex.remoteIndexing(species,fl)
import SashimiPlot
print 'Exporting Sashimi Plots for the top-predicted splicing events... be patient'
try: SashimiPlot.remoteSashimiPlot(species,fl,fl.RootDir(),isoform_dir) ### assuming the bam files are in the root-dir
except Exception: pass # print traceback.format_exc()
print 'completed'
try: SashimiPlot.remoteSashimiPlot(species,fl,fl.RootDir(),isoform_dir2) ### assuming the bam files are in the root-dir
except Exception: pass #print traceback.format_exc()
print 'completed'
### Try again, in case the symbol conversion failed
SashimiPlot.justConvertFilenames(species,fl.RootDir()+'/SashimiPlots')
else:
print 'No BAM files present in the root directory... skipping SashimiPlot analysis...'
except Exception:
print traceback.format_exc()
try:
clearObjectsFromMemory(exon_db); clearObjectsFromMemory(constitutive_probeset_db)
clearObjectsFromMemory(go_annotations); clearObjectsFromMemory(original_microRNA_z_score_data)
clearObjectsFromMemory(last_exon_region_db)
"""
print 'local vars'
all = [var for var in locals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(locals()[var])>500: print var, len(locals()[var])
except Exception: null=[]
"""
except Exception: null=[]
#print '!!!!!finished'
#returnLargeGlobalVars()
end_time = time.time(); time_diff = int(end_time-start_time)
universalPrintFunction(["Analyses finished in %d seconds" % time_diff])
#universalPrintFunction(["Hit Enter/Return to exit AltAnalyze"])
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]; results_dir = filepath(fl.RootDir())
### Perform GO-Elite Analysis
if pathway_permutations != 'NA':
goelite_run = False
print '\nBeginning to run GO-Elite analysis on alternative exon results'
elite_input_dirs = ['AltExonConfirmed','AltExon','regulated','upregulated','downregulated'] ### Run GO-Elite multiple times to ensure heatmaps are useful and to better organize results
for elite_dir in elite_input_dirs:
file_dirs = results_dir+'GO-Elite/'+elite_dir,results_dir+'GO-Elite/denominator',results_dir+'GO-Elite/'+elite_dir
input_dir = results_dir+'GO-Elite/'+elite_dir
try: input_files = read_directory(input_dir) ### Are there any files to analyze?
except Exception: input_files = []
if len(input_files)>0:
variables = species,mod,pathway_permutations,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,returnPathways,file_dirs,root
try: GO_Elite.remoteAnalysis(variables,'non-UI',Multi=mlp); goelite_run = True
except Exception,e:
print e
print "GO-Elite analysis failed"
try: GO_Elite.moveMAPPFinderFiles(file_dirs[0])
except Exception: print 'Input GO-Elite files could NOT be moved.'
try: GO_Elite.moveMAPPFinderFiles(file_dirs[1])
except Exception: print 'Input GO-Elite files could NOT be moved.'
if goelite_run == False:
print 'No GO-Elite input files to analyze (check your criterion).'
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
try:
if root !='' and root !=None:
print "Analysis Complete\n";
UI.InfoWindow(print_out,'Analysis Completed!')
tl = Toplevel(); SummaryResultsWindow(tl,'AS',results_dir,dataset_name,'specific',summary_data_db2)
except Exception:
print traceback.format_exc()
pass #print 'Failed to open GUI.'
skip_intro = 'yes'
if root !='' and root !=None:
if pathway_permutations == 'NA' and run_from_scratch != 'Annotate External Results':
try: UI.getUpdatedParameters(array_type,species,run_from_scratch,file_dirs)
except Exception: pass
try: AltAnalyzeSetup('no')
except Exception: sys.exit()
def exportSummaryResults(summary_results_db,analysis_method,aspire_output_list,aspire_output_gene_list,annotate_db,array_type,number_events_analyzed,root_dir):
    """Write the alternative-exon summary tables for one analysis run.

    Delegates to ResultsExport_module to (1) export the per-method summary of
    summary_results_db and (2) produce the probeset-level and gene-level
    comparison reports for the ASPIRE/linearregres output file lists.

    summary_results_db -- per-comparison summary statistics to export
    analysis_method -- e.g. 'ASPIRE', 'splicing-index', 'linearregres'
    aspire_output_list / aspire_output_gene_list -- result file paths to compare
    annotate_db -- gene annotation lookup used when building the reports
    array_type -- platform (e.g. 'exon', 'junction', 'RNASeq')
    number_events_analyzed -- event count reported in the probeset-level summary
    root_dir -- output directory root
    Returns None; prints a notice instead of raising when there is nothing to
    summarize.
    """
    try:
        ResultsExport_module.outputSummaryResults(summary_results_db,'',analysis_method,root_dir)
        #ResultsExport_module.outputSummaryResults(summary_results_db2,'-uniprot_attributes',analysis_method)
        ResultsExport_module.compareAltAnalyzeResults(aspire_output_list,annotate_db,number_events_analyzed,'no',analysis_method,array_type,root_dir)
        ResultsExport_module.compareAltAnalyzeResults(aspire_output_gene_list,annotate_db,'','yes',analysis_method,array_type,root_dir)
    except UnboundLocalError: print "...No results to summarize" ###Occurs if there is a problem parsing these files
def checkGOEliteProbesets(fn,species):
    """Check whether any probeset in an Affymetrix CSV annotation file overlaps
    the locally installed GO-Elite gene-association files for this species.

    fn -- path to an Affymetrix annotation CSV (quoted, comma-delimited; header
          row contains 'Probe Set ID' or 'probeset_id')
    species -- species code used to look up the GO-Elite association files
    Returns 'yes' as soon as one probeset from the CSV is found among the
    Ensembl- or EntrezGene-linked probesets, otherwise 'no'.
    """
    ### Get all probesets in GO-Elite files
    mod_source = 'Ensembl'+'-'+'Affymetrix'
    import gene_associations
    try: ensembl_to_probeset_id = gene_associations.getGeneToUid(species,mod_source)
    except Exception: ensembl_to_probeset_id={}
    mod_source = 'EntrezGene'+'-'+'Affymetrix'
    try: entrez_to_probeset_id = gene_associations.getGeneToUid(species,mod_source)
    except Exception: entrez_to_probeset_id={}
    probeset_db={}
    for gene in ensembl_to_probeset_id:
        for probeset in ensembl_to_probeset_id[gene]: probeset_db[probeset]=[]
    for gene in entrez_to_probeset_id:
        for probeset in entrez_to_probeset_id[gene]: probeset_db[probeset]=[]
    ###Import an Affymetrix array annotation file (from http://www.affymetrix.com) and parse out annotations
    csv_probesets = {}; x=0
    fn=filepath(fn); status = 'no'
    for line in open(fn,'r').readlines():
        probeset_data = string.replace(line,'\n','') #remove endline
        probeset_data = string.replace(probeset_data,'---','')
        affy_data = string.split(probeset_data[1:-1],'","') #strip outer quotes, split quoted fields
        if x==0 and line[0]!='#':
            x=1; affy_headers = affy_data
            ### Locate the probeset-ID column with a single scan. (The original
            ### wrapped this scan in a redundant 'for header in affy_headers'
            ### loop, repeating identical work once per header column.)
            y = 0
            while y < len(affy_headers):
                if 'Probe Set ID' in affy_headers[y] or 'probeset_id' in affy_headers[y]: ps = y
                y+=1
        elif x == 1:
            ### 'ps' may be undefined (no ID column found) or the row may be
            ### short; either way skip the row rather than abort
            try: probeset = affy_data[ps]; csv_probesets[probeset]=[]
            except Exception: null=[]
    for probeset in csv_probesets:
        if probeset in probeset_db: status = 'yes';break
    return status
class SpeciesData:
    """Record of one supported species: two-letter code (e.g. 'Hs'), full
    species name, compatible annotation systems (MODs) and NCBI taxonomy ID."""
    def __init__(self, abrev, species, systems, taxid):
        self._abrev = abrev; self._species = species; self._systems = systems; self._taxid = taxid
    def SpeciesCode(self): return self._abrev
    def SpeciesName(self): return self._species
    def Systems(self): return self._systems
    def TaxID(self): return self._taxid
    def __repr__(self):
        ### BUG FIX: the original read the bare name 'SpeciesName', raising a
        ### NameError whenever an instance was repr()'d; call the accessor on self
        return self.SpeciesCode()+'|'+self.SpeciesName()
def getSpeciesInfo():
    ### Used by AltAnalyze
    """Load species metadata and return (species_codes, species_names), where
    species_names maps each species code (e.g. 'Hs') to its full name.

    NOTE(review): species_codes appears to be a module-level global populated
    by UI.importSpeciesInfo() -- confirm against the rest of the module.
    """
    UI.importSpeciesInfo()
    species_names = {}
    for full_name in species_codes:
        code = species_codes[full_name].SpeciesCode()
        species_names[code] = full_name
    return species_codes, species_names
def importGOEliteSpeciesInfo():
    """Parse Config/goelite_species.txt (tab-delimited, one header row) into a
    {species_name: SpeciesData} dictionary; the pipe-delimited last column is
    split into the list of compatible MOD systems."""
    fn = filepath('Config/goelite_species.txt')
    species_codes = {}
    header_seen = False
    for line in open(fn,'rU').readlines():
        data = cleanUpLine(line)
        # split every row (including the header) exactly as before
        abrev, species, taxid, compatible_mods = string.split(data,'\t')
        if header_seen:
            mods = string.split(compatible_mods,'|')
            species_codes[species] = SpeciesData(abrev, species, mods, taxid)
        else:
            header_seen = True  # skip the column-name row
    return species_codes
def exportGOEliteSpeciesInfo(species_codes):
    """Write Config/goelite_species.txt from a {species_name: SpeciesData}
    dictionary, excluding composite entries ('other', 'all-')."""
    out = open(filepath('Config/goelite_species.txt'),'w')
    out.write(string.join(['species_code','species_name','tax_id','compatible_algorithms'],'\t')+'\n')
    for species_name in species_codes:
        if 'other' in species_name or 'all-' in species_name:
            continue  # skip aggregate pseudo-species entries
        sd = species_codes[species_name]
        mods = string.join(sd.Systems(),'|')
        row = [sd.SpeciesCode(), sd.SpeciesName(), sd.TaxID(), mods]
        out.write(string.join(row,'\t')+'\n')
    out.close()
def TimeStamp():
    """Return the current local date as a zero-padded 'YYYYMMDD' string."""
    ### strftime performs the zero-padding the original did by hand with
    ### explicit len()<2 checks on the month/day strings
    return time.strftime('%Y%m%d', time.localtime())
def verifyFile(filename):
    """Return 'found' if the file exists and contains at least one line,
    otherwise 'not found' (any open/read error is treated as not found)."""
    status = 'not found'
    try:
        fn=filepath(filename)
        ### Iterate the file object directly: xreadlines() is deprecated and
        ### direct iteration is equally lazy (we stop after the first line)
        for line in open(fn,'rU'): status = 'found';break
    except Exception: status = 'not found'
    return status
def verifyFileLength(filename):
    """Return the number of lines in the file, capped at 10; returns 0 when the
    file is missing or unreadable (deliberate best-effort check)."""
    count = 0
    try:
        fn=filepath(filename)
        ### Iterate the file object directly: xreadlines() is deprecated and
        ### direct iteration is equally lazy
        for line in open(fn,'rU'):
            count+=1
            if count>9: break  # 10 lines is enough to prove the file is non-trivial
    except Exception: pass ### missing/unreadable file -> count stays 0
    return count
def verifyGroupFileFormat(filename):
    """Return True if any line of the file splits into exactly three
    tab-delimited fields (the expected groups-file layout), else False;
    a missing or unreadable file also yields False."""
    correct_format = False
    try:
        fn=filepath(filename)
        ### Iterate the file object directly: xreadlines() is deprecated and
        ### direct iteration is equally lazy
        for line in open(fn,'rU'):
            data = cleanUpLine(line)
            if len(string.split(data,'\t'))==3:
                correct_format = True
                break  # one well-formed line is sufficient evidence
    except Exception: correct_format = False
    return correct_format
def displayHelp():
    """Print the bundled command-line usage documentation
    (Documentation/commandline.txt) to stdout, then exit the program."""
    fn=filepath('Documentation/commandline.txt')
    print '\n################################################\nAltAnalyze Command-Line Help'
    for line in open(fn,'rU').readlines():
        print cleanUpLine(line)  # cleanUpLine strips the trailing newline; print re-adds one
    print '\n################################################ - END HELP'
    sys.exit()
def searchDirectory(directory,var):
    """Return a single-element list containing the path (relative to the
    current gene-database version folder) of the first file in 'directory'
    whose name contains 'var'.

    Returns None implicitly when no filename matches -- callers must handle
    that case. (Removed the unreachable 'break' that followed the return in
    the original.)
    """
    directory = unique.filepath(directory)
    files = unique.read_directory(directory)
    version = unique.getCurrentGeneDatabaseVersion()
    for file in files:
        if var in file:
            ### Drop everything up to and including the version string, plus the
            ### following path separator, so the result is version-relative
            location = string.split(directory+'/'+file,version)[1][1:]
            return [location]
###### Command Line Functions (AKA Headless Mode) ######
def commandLineRun():
print 'Running commandline options'
import getopt
#/hd3/home/nsalomonis/normalization/mir1 - boxer
#python AltAnalyze.py --species Mm --arraytype "3'array" --celdir "C:/CEL" --output "C:/CEL" --expname miR1_column --runGOElite yes --GEelitepval 0.01
#python AltAnalyze.py --species Hs --arraytype "3'array" --FEdir "C:/FEfiles" --output "C:/FEfiles" --channel_to_extract "green/red ratio" --expname cancer --runGOElite yes --GEelitepval 0.01
#python AltAnalyze.py --celdir "C:/CEL" --output "C:/CEL" --expname miR1_column
#open ./AltAnalyze.app --celdir "/Users/nsalomonis/Desktop" --output "/Users/nsalomonis/Desktop" --expname test
#python AltAnalyze.py --species Mm --arraytype "3'array" --expdir "C:/CEL/ExpressionInput/exp.miR1_column.txt" --output "C:/CEL" --runGOElite yes --GEelitepval 0.01
#python AltAnalyze.py --species Mm --platform RNASeq --bedDir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles" --groupdir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles/ExpressionInput/groups.test.txt" --compdir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles/ExpressionInput/comps.test.txt" --output "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles" --expname "test"
#python AltAnalyze.py --species Mm --platform RNASeq --filterdir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles/" --output "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles"
#python AltAnalyze.py --expdir "/Users/nsalomonis/Desktop/Nathan/ExpressionInput/exp.test.txt" --exonMapFile "/Users/nsalomonis/Desktop/Nathan/hgu133_probe.txt" --species Hs --platform "3'array" --output "/Users/nsalomonis/Desktop/Nathan"
#python AltAnalyze.py --species Hs --platform "3'array" --expname test --channelToExtract green --FEdir /Users/saljh8/Downloads/AgllentTest/ --output /Users/saljh8/Downloads/AgllentTest/
global apt_location; global root_dir; global probability_statistic; global log_file; global summary_data_db; summary_data_db={}
###required
marker_finder='no'
manufacturer='Affymetrix'
constitutive_source='Ensembl'
ensembl_version = 'current'
species_code = None
species = None
main_input_folder = None
output_dir = None
array_type = None
input_annotation_file = None
groups_file = None
comps_file = None
input_cdf_file = None
exp_name = None
run_GOElite = 'yes'
visualize_qc_results = 'yes'
run_lineage_profiler = 'yes'
input_exp_file = ''
cel_file_dir = ''
input_stats_file = ''
input_filtered_dir = ''
external_annotation_dir = ''
xhyb_remove = 'no'
update_method = []
update_dbs = 'no'
analyze_all_conditions = 'no'
return_all = 'no'
additional_array_types = []
remove_intronic_junctions = 'no'
ignore_built_species = 'no'
build_exon_bedfile = 'no'
compendiumType = 'protein_coding'
probability_statistic = 'unpaired t-test'
specific_array_type = None
additional_resources = [None]
wpid = None
mod = 'Ensembl'
transpose = False
input_file_dir = None
denom_file_dir = None
image_export = []
selected_species = ['Hs','Mm','Rn'] ### These are the species that additional array types are currently supported
selected_platforms = ['AltMouse','exon','gene','junction']
returnPathways = 'no'
compendiumPlatform = 'gene'
exonMapFile = None
platformType = None ### This option is used to store the original platform type
perform_alt_analysis = 'no'
mappedExonAnalysis = False ### Map the original IDs to the RNA-Seq exon database (when True)
microRNA_prediction_method = None
pipelineAnalysis = True
OntologyID=''
PathwaySelection=''
GeneSetSelection=''
interactionDirs=[]
inputType='ID list'
Genes=''
degrees='direct'
includeExpIDs=True
update_interactions=False
data_type = 'raw expression'
batch_effects = 'no'
channel_to_extract = None
normalization = False
justShowTheseIDs = ''
display=False
accessoryAnalysis=''
modelSize=None
geneModel=False
run_from_scratch = None
systemToUse = None ### For other IDs
custom_reference = False
multiThreading = True
genesToReport = 60
correlateAll = True
expression_data_format='log'
runICGS=False
IDtype=None
runKallisto = False
original_arguments = sys.argv
arguments=[]
for arg in original_arguments:
arg = string.replace(arg,'\xe2\x80\x9c','') ### These are non-standard forward quotes
arg = string.replace(arg,'\xe2\x80\x9d','') ### These are non-standard reverse quotes
arg = string.replace(arg,'\xe2\x80\x93','-') ### These are non-standard dashes
arg = string.replace(arg,'\x96','-') ### These are non-standard dashes
arg = string.replace(arg,'\x93','') ### These are non-standard forward quotes
arg = string.replace(arg,'\x94','') ### These are non-standard reverse quotes
arguments.append(arg)
print '\nArguments input:',arguments,'\n'
if '--help' in arguments[1:] or '--h' in arguments[1:]:
try: displayHelp() ### Print out a help file and quit
except Exception: print 'See: http://www.altanalyze.org for documentation and command-line help';sys.exit()
if 'AltAnalyze' in arguments[1]:
arguments = arguments[1:] ### Occurs on Ubuntu with the location of AltAnalyze being added to sys.argv (exclude this since no argument provided for this var)
try:
options, remainder = getopt.getopt(arguments[1:],'', ['species=', 'mod=','elitepval=', 'elitepermut=',
'method=','zscore=','pval=','num=',
'runGOElite=','denom=','output=','arraytype=',
'celdir=','expdir=','output=','statdir=',
'filterdir=','cdfdir=','csvdir=','expname=',
'dabgp=','rawexp=','avgallss=','logexp=',
'inclraw=','runalt=','altmethod=','altp=',
'probetype=','altscore=','GEcutoff=',
'exportnormexp=','calcNIp=','runMiDAS=',
'GEcutoff=','GEelitepval=','mirmethod=','ASfilter=',
'vendor=','GEelitefold=','update=','version=',
'analyzeAllGroups=','GEeliteptype=','force=',
'resources_to_analyze=', 'dataToAnalyze=','returnAll=',
'groupdir=','compdir=','annotatedir=','additionalScore=',
'additionalAlgorithm=','noxhyb=','platform=','bedDir=',
'altpermutep=','altpermute=','removeIntronOnlyJunctions=',
'normCounts=','buildExonExportFile=','groupStat=',
'compendiumPlatform=','rpkm=','exonExp=','specificArray=',
'ignoreBuiltSpecies=','ORAstat=','outputQCPlots=',
'runLineageProfiler=','input=','image=', 'wpid=',
'additional=','row_method=','column_method=',
'row_metric=','column_metric=','color_gradient=',
'transpose=','returnPathways=','compendiumType=',
'exonMapFile=','geneExp=','labels=','contrast=',
'plotType=','geneRPKM=','exonRPKM=','runMarkerFinder=',
'update_interactions=','includeExpIDs=','degrees=',
'genes=','inputType=','interactionDirs=','GeneSetSelection=',
'PathwaySelection=','OntologyID=','dataType=','combat=',
'channelToExtract=','showIntrons=','display=','join=',
'uniqueOnly=','accessoryAnalysis=','inputIDType=','outputIDType=',
'FEdir=','channelToExtract=','AltResultsDir=','geneFileDir=',
'AltResultsDir=','modelSize=','geneModel=','reference=',
'multiThreading=','multiProcessing=','genesToReport=',
'correlateAll=','normalization=','justShowTheseIDs=',
'direction=','analysisType=','algorithm=','rho=',
'clusterGOElite=','geneSetName=','runICGS=','IDtype=',
'CountsCutoff=','FoldDiff=','SamplesDiffering=','removeOutliers='
'featurestoEvaluate=','restrictBy=','ExpressionCutoff=',
'excludeCellCycle=','runKallisto=','fastq_dir=','FDR='])
except Exception:
print traceback.format_exc()
print "There is an error in the supplied command-line arguments (each flag requires an argument)"; sys.exit()
for opt, arg in options:
#print [opt, arg]
if opt == '--species': species=arg
elif opt == '--arraytype':
if array_type != None: additional_array_types.append(arg)
else: array_type=arg; platform = array_type
if specific_array_type == None: specific_array_type = platform
elif opt == '--exonMapFile':
perform_alt_analysis = 'yes' ### Perform alternative exon analysis
exonMapFile = arg
elif opt == '--specificArray': specific_array_type = arg ### e.g., hGlue
elif opt == '--celdir':
arg = verifyPath(arg)
cel_file_dir=arg
elif opt == '--bedDir':
arg = verifyPath(arg)
cel_file_dir=arg
elif opt == '--FEdir':
arg = verifyPath(arg)
cel_file_dir = arg
elif opt == '--expdir':
arg = verifyPath(arg)
input_exp_file=arg
elif opt == '--statdir':
arg = verifyPath(arg)
input_stats_file=arg
elif opt == '--filterdir':
arg = verifyPath(arg)
input_filtered_dir=arg
elif opt == '--groupdir':
arg = verifyPath(arg)
groups_file=arg
elif opt == '--compdir':
arg = verifyPath(arg)
comps_file=arg
elif opt == '--cdfdir':
arg = verifyPath(arg)
input_cdf_file=arg
elif opt == '--csvdir':
arg = verifyPath(arg)
input_annotation_file=arg
elif opt == '--expname': exp_name=arg
elif opt == '--output':
arg = verifyPath(arg)
output_dir=arg
elif opt == '--vendor': manufacturer=arg
elif opt == '--runICGS': runICGS=True
elif opt == '--IDtype': IDtype=arg
elif opt == '--ignoreBuiltSpecies': ignore_built_species=arg
elif opt == '--platform':
if array_type != None: additional_array_types.append(arg)
else: array_type=arg; platform = array_type
if specific_array_type == None: specific_array_type = platform
elif opt == '--update': update_dbs='yes'; update_method.append(arg)
elif opt == '--version': ensembl_version = arg
elif opt == '--compendiumPlatform': compendiumPlatform=arg ### platform for which the LineageProfiler compendium is built on
elif opt == '--force': force=arg
elif opt == '--input':
arg = verifyPath(arg)
input_file_dir=arg; pipelineAnalysis = False ### If this option is entered, only perform the indicated analysis
elif opt == '--image': image_export.append(arg)
elif opt == '--wpid': wpid=arg
elif opt == '--mod': mod=arg
elif opt == '--runKallisto':
if arg == 'yes' or string.lower(arg) == 'true':
runKallisto = True
elif opt == '--fastq_dir':
input_fastq_dir = arg
elif opt == '--additional':
if additional_resources[0] == None:
additional_resources=[]
additional_resources.append(arg)
else:
additional_resources.append(arg)
elif opt == '--transpose':
if arg == 'True': transpose = True
elif opt == '--runLineageProfiler': ###Variable declared here and later (independent analysis here or pipelined with other analyses later)
run_lineage_profiler=arg
elif opt == '--compendiumType': ### protein-coding, ncRNA, or exon
compendiumType=arg
elif opt == '--denom':
denom_file_dir=arg ### Indicates that GO-Elite is run independent from AltAnalyze itself
elif opt == '--accessoryAnalysis':
accessoryAnalysis = arg
elif opt == '--channelToExtract': channel_to_extract=arg
elif opt == '--genesToReport': genesToReport = int(arg)
elif opt == '--correlateAll': correlateAll = True
elif opt == '--direction': direction = arg
elif opt == '--logexp': expression_data_format=arg
elif opt == '--geneRPKM': rpkm_threshold=arg
elif opt == '--multiThreading' or opt == '--multiProcessing':
multiThreading=arg
if multiThreading == 'yes': multiThreading = True
elif 'rue' in multiThreading: multiThreading = True
else: multiThreading = False
if 'other' in manufacturer or 'Other' in manufacturer:
### For other IDs
systemToUse = array_type
if array_type == None:
print 'Please indicate a ID type as --platform when setting vendor equal to "Other IDs"'; sys.exit()
array_type = "3'array"
if array_type == 'RNASeq': manufacturer = array_type
if platformType == None: platformType = array_type
if perform_alt_analysis == 'yes':
if platform == "3'array":
mappedExonAnalysis = True
cel_file_dir = input_exp_file
exp_name = export.findFilename(input_exp_file)
exp_name = string.replace(exp_name,'.txt','')
exp_name = string.replace(exp_name,'exp.','')
input_exp_file = ''
### To perform alternative exon analyses for platforms without a dedicated database, must supply appropriate mapping info or array type data
### (will need to perform downstream testing for unsupported Affymetrix exon, gene and junction arrays)
if exonMapFile == None and specific_array_type == None and cel_file_dir == '':
print_out = "\nUnable to run!!! Please designate either a specific platfrom (e.g., --specificArray hgU133_2), select CEL files, or an "
print_out += "exon-level mapping file location (--exonMapFile C:/mapping.txt) to perform alternative exon analyses for this platform."
### Will need to check here to see if the platform is supported (local or online files) OR wait until an error is encountered later
""" Check to see if a database is already installed """
try: current_species_dirs = unique.read_directory('/AltDatabase')
except Exception: current_species_dirs=[]
if len(current_species_dirs)==0 and update_dbs != 'yes':
print "Please install a database before running AltAnalyze. Please note, AltAnalyze may need to install additional files later for RNASeq and LineageProfiler for some species, automatically. Make sure to list your platform as RNASeq if analyzing RNA-Seq data (--platform RNASeq)."
print "Example:\n"
print 'python AltAnalyze.py --species Hs --update Official --version EnsMart72';sys.exit()
######## Perform analyses independent from AltAnalyze database centric analyses that require additional parameters
if len(image_export) > 0 or len(accessoryAnalysis)>0 or runICGS:
if runICGS:
#python AltAnalyze.py --runICGS yes --expdir "/Users/saljh8/Desktop/demo/Myoblast/ExpressionInput/exp.myoblast.txt" --platform "3'array" --species Hs --GeneSetSelection BioMarkers --PathwaySelection Heart --column_method hopach --rho 0.4 --ExpressionCutoff 200 --justShowTheseIDs "NKX2-5 T TBX5" --FoldDiff 10 --SamplesDiffering 3 --excludeCellCycle conservative
try: species = species
except Exception: 'Please designate a species before continuing (e.g., --species Hs)'
try: array_type = array_type
except Exception: 'Please designate a species before continuing (e.g., --species Hs)'
if len(cel_file_dir)>0:
values = species,exp_file_location_db,dataset,mlp_instance
StatusWindow(values,'preProcessRNASeq') ### proceed to run the full discovery analysis here!!!
else:
if len(input_exp_file) > 0: pass
else: 'Please indicate a source folder or expression file (e.g., --expdir /dataset/singleCells.txt)'
if array_type == 'Other' or 'Other' in array_type:
if ':' in array_type:
array_type, IDtype = string.split(array_type)
array_type == "3'array"
if IDtype == None: IDtype = manufacturer
row_method = 'weighted'
column_method = 'average'
row_metric = 'cosine'
column_metric = 'cosine'
color_gradient = 'yellow_black_blue'
contrast=3
vendor = manufacturer
GeneSelection = ''
PathwaySelection = ''
GeneSetSelection = 'None Selected'
excludeCellCycle = True
rho_cutoff = 0.4
restrictBy = 'protein_coding'
featurestoEvaluate = 'Genes'
ExpressionCutoff = 1
CountsCutoff = 1
FoldDiff = 2
SamplesDiffering = 3
JustShowTheseIDs=''
removeOutliers = False
PathwaySelection=[]
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--row_method':
row_method=arg
if row_method == 'None': row_method = None
elif opt == '--column_method':
column_method=arg
if column_method == 'None': column_method = None
elif opt == '--row_metric': row_metric=arg
elif opt == '--column_metric': column_metric=arg
elif opt == '--color_gradient': color_gradient=arg
elif opt == '--GeneSetSelection': GeneSetSelection=arg
elif opt == '--PathwaySelection': PathwaySelection.append(arg)
elif opt == '--genes': GeneSelection=arg
elif opt == '--ExpressionCutoff': ExpressionCutoff=arg
elif opt == '--normalization': normalization=arg
elif opt == '--justShowTheseIDs': justShowTheseIDs=arg
elif opt == '--rho': rho_cutoff=float(arg)
elif opt == '--clusterGOElite':clusterGOElite=float(arg)
elif opt == '--CountsCutoff':CountsCutoff=int(float(arg))
elif opt == '--FoldDiff':FoldDiff=int(float(arg))
elif opt == '--SamplesDiffering':SamplesDiffering=int(float(arg))
elif opt == '--removeOutliers':removeOutliers=arg
elif opt == '--featurestoEvaluate':featurestoEvaluate=arg
elif opt == '--restrictBy':restrictBy=arg
elif opt == '--excludeCellCycle':
excludeCellCycle=arg
if excludeCellCycle == 'False' or excludeCellCycle == 'no': excludeCellCycle = False
elif excludeCellCycle == 'True' or excludeCellCycle == 'yes' or excludeCellCycle == 'conservative': excludeCellCycle = True
elif opt == '--contrast':
try: contrast=float(arg)
except Exception: print '--contrast not a valid float';sys.exit()
elif opt == '--vendor': vendor=arg
elif opt == '--display':
if arg=='yes':
display=True
elif arg=='True':
display=True
else:
display=False
if len(PathwaySelection)==0: PathwaySelection=''
if len(GeneSetSelection)>0 or GeneSelection != '':
gsp = UI.GeneSelectionParameters(species,array_type,vendor)
gsp.setGeneSet(GeneSetSelection)
gsp.setPathwaySelect(PathwaySelection)
gsp.setGeneSelection(GeneSelection)
gsp.setJustShowTheseIDs(JustShowTheseIDs)
gsp.setNormalize('median')
gsp.setSampleDiscoveryParameters(ExpressionCutoff,CountsCutoff,FoldDiff,SamplesDiffering,
removeOutliers,featurestoEvaluate,restrictBy,excludeCellCycle,column_metric,column_method,rho_cutoff)
import RNASeq
mlp_instance = mlp
if cel_file_dir != '':
expFile = cel_file_dir + '/ExpressionInput/'+ 'exp.'+exp_name+'.txt'
elif input_exp_file !='':
if 'ExpressionInput' in input_exp_file: expFile = input_exp_file
else:
### Copy over expression file to ExpressionInput
expdir2 = string.replace(input_exp_file,'exp.','')
root_dir = export.findParentDir(expFile)
expFile = root_dir+'/ExpressionInput/exp.'+export.findFilename(expdir2)
export.copyFile(input_exp_file, expFile)
global log_file
root_dir = export.findParentDir(expFile)
root_dir = string.replace(root_dir,'/ExpressionInput','')
time_stamp = timestamp()
log_file = filepath(root_dir+'AltAnalyze_report-'+time_stamp+'.log')
log_report = open(log_file,'w'); log_report.close()
sys.stdout = Logger('')
count = verifyFileLength(expFile[:-4]+'-steady-state.txt')
if count>1:
expFile = expFile[:-4]+'-steady-state.txt'
elif array_type=='RNASeq':
### Indicates that the steady-state file doesn't exist. The exp. file may exist but could be junction-only, so it needs to be re-built from the bed files here
values = species,exp_file_location_db,dataset,mlp_instance
StatusWindow(values,'preProcessRNASeq') ### proceed to run the full discovery analysis here!!!
expFile = expFile[:-4]+'-steady-state.txt'
print [excludeCellCycle]
UI.RemotePredictSampleExpGroups(expFile, mlp_instance, gsp,(species,array_type)) ### proceed to run the full discovery analysis here!!!
sys.exit()
if 'WikiPathways' in image_export:
#python AltAnalyze.py --input /Users/test/input/criterion1.txt --image WikiPathways --mod Ensembl --species Hs --wpid WP536
if wpid==None:
print 'Please provide a valid WikiPathways ID (e.g., WP1234)';sys.exit()
if species==None:
print 'Please provide a valid species ID for an installed database (to install: --update Official --species Hs --version EnsMart72Plus)';sys.exit()
if input_file_dir==None:
print 'Please provide a valid file location for your input IDs (also needs to inlcude system code and value column)';sys.exit()
import WikiPathways_webservice
try:
print 'Attempting to output a WikiPathways colored image from user data'
print 'mod:',mod
print 'species_code:',species
print 'wpid:',wpid
print 'input GO-Elite ID file:',input_file_dir
graphic_link = WikiPathways_webservice.visualizePathwayAssociations(input_file_dir,species,mod,wpid)
except Exception,e:
if 'force_no_matching_error' in traceback.format_exc():
print '\nUnable to run!!! None of the input IDs mapped to this pathway\n'
elif 'IndexError' in traceback.format_exc():
print '\nUnable to run!!! Input ID file does not have at least 3 columns, with the second column being system code\n'
elif 'ValueError' in traceback.format_exc():
print '\nUnable to run!!! Input ID file error. Please check that you do not have extra rows with no data\n'
elif 'source_data' in traceback.format_exc():
print '\nUnable to run!!! Input ID file does not contain a valid system code\n'
elif 'goelite' in traceback.format_exc():
print '\nUnable to run!!! A valid species database needs to first be installed. For example, run:'
print 'python AltAnalyze.py --update Official --species Hs --version EnsMart72\n'
else:
print traceback.format_exc()
print '\nError generating the pathway "%s"' % wpid,'\n'
try:
printout = 'Finished exporting visualized pathway to:',graphic_link['WP']
print printout,'\n'
except Exception: None
sys.exit()
if 'MergeFiles' in accessoryAnalysis:
#python AltAnalyze.py --accessoryAnalysis MergeFiles --input "C:\file1.txt" --input "C:\file2.txt" --output "C:\tables"
files_to_merge=[]
join_option='Intersection'
uniqueOnly=False
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--input':
arg = verifyPath(arg)
files_to_merge.append(arg)
if opt == '--join': join_option = arg
if opt == '--uniqueOnly': unique_only = arg
if len(files_to_merge)<2:
print 'Please designate two or more files to merge (--input)';sys.exit()
UI.MergeFiles(files_to_merge, join_option, uniqueOnly, output_dir, None)
sys.exit()
if 'IDTranslation' in accessoryAnalysis:
#python AltAnalyze.py --accessoryAnalysis IDTranslation --inputIDType Symbol --outputIDType RefSeq --input "C:\file1.txt" --species Hs
inputIDType=None
outputIDType=None
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--inputIDType': inputIDType = arg
if opt == '--outputIDType': outputIDType = arg
if inputIDType==None or outputIDType==None:
print 'Please designate an input ID type and and output ID type (--inputIDType Ensembl --outputIDType Symbol)'; sys.exit()
if species==None:
print "Please enter a valide species (--species)"; sys.exit()
UI.IDconverter(input_file_dir, species, inputIDType, outputIDType, None)
sys.exit()
if 'hierarchical' in image_export:
#python AltAnalyze.py --input "/Users/test/pluri.txt" --image hierarchical --row_method average --column_method single --row_metric cosine --column_metric euclidean --color_gradient red_white_blue --transpose False --PathwaySelection Apoptosis:WP254 --GeneSetSelection WikiPathways --species Hs --platform exon --display false
if input_file_dir==None:
print 'Please provide a valid file location for your input data matrix (must have an annotation row and an annotation column)';sys.exit()
row_method = 'weighted'
column_method = 'average'
row_metric = 'cosine'
column_metric = 'cosine'
color_gradient = 'red_black_sky'
contrast=2.5
vendor = 'Affymetrix'
GeneSelection = ''
PathwaySelection = ''
GeneSetSelection = 'None Selected'
rho = None
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--row_method':
row_method=arg
if row_method == 'None': row_method = None
elif opt == '--column_method':
column_method=arg
if column_method == 'None': column_method = None
elif opt == '--row_metric': row_metric=arg
elif opt == '--column_metric': column_metric=arg
elif opt == '--color_gradient': color_gradient=arg
elif opt == '--GeneSetSelection': GeneSetSelection=arg
elif opt == '--PathwaySelection': PathwaySelection=arg
elif opt == '--genes': GeneSelection=arg
elif opt == '--OntologyID': OntologyID=arg
elif opt == '--normalization': normalization=arg
elif opt == '--justShowTheseIDs': justShowTheseIDs=arg
elif opt == '--rho': rho=arg
elif opt == '--clusterGOElite':clusterGOElite=arg
elif opt == '--contrast':
try: contrast=float(arg)
except Exception: print '--contrast not a valid float';sys.exit()
elif opt == '--vendor': vendor=arg
elif opt == '--display':
if arg=='yes':
display=True
elif arg=='True':
display=True
else:
display=False
if len(GeneSetSelection)>0 or GeneSelection != '':
gsp = UI.GeneSelectionParameters(species,array_type,vendor)
gsp.setGeneSet(GeneSetSelection)
gsp.setPathwaySelect(PathwaySelection)
gsp.setGeneSelection(GeneSelection)
gsp.setOntologyID(OntologyID)
gsp.setTranspose(transpose)
gsp.setNormalize(normalization)
gsp.setJustShowTheseIDs(justShowTheseIDs)
try: gsp.setClusterGOElite(clusterGOElite)
except Exception: pass
if rho!=None:
try:
float(rho)
gsp.setRhoCutoff(rho)
except Exception: print 'Must enter a valid Pearson correlation cutoff (float)'
transpose = gsp ### this allows methods that don't transmit this object to also work
if row_method == 'no': row_method = None
if column_method == 'no': column_method = None
if len(GeneSetSelection)>0:
if species == None:
print "Please enter a valide species (--species)"; sys.exit()
try:
files = unique.read_directory(input_file_dir+'/')
dir = input_file_dir
for file in files:
filename = dir+'/'+file
UI.createHeatMap(filename, row_method, row_metric, column_method, column_metric, color_gradient, transpose, contrast, None, display=display)
except Exception:
UI.createHeatMap(input_file_dir, row_method, row_metric, column_method, column_metric, color_gradient, transpose, contrast, None, display=display)
#import clustering; clustering.outputClusters([input_file_dir],[])
sys.exit()
if 'PCA' in image_export:
#AltAnalyze.py --input "/Users/nsalomonis/Desktop/folds.txt" --image PCA --plotType 3D --display True --labels yes
#--algorithm "t-SNE"
include_labels = 'yes'
plotType = '2D'
pca_algorithm = 'SVD'
geneSetName = None
zscore = True
colorByGene=None
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--labels':
include_labels=arg
if include_labels == 'True' or include_labels == 'yes':
include_labels = 'yes'
else:
include_labels = 'no'
if opt == '--plotType': plotType=arg
if opt == '--algorithm': pca_algorithm=arg
if opt == '--geneSetName': geneSetName=arg
if opt == '--genes': colorByGene=arg
if opt == '--zscore':
if arg=='yes' or arg=='True' or arg == 'true':
zscore=True
else:
zscore=False
if opt == '--display':
if arg=='yes' or arg=='True' or arg == 'true':
display=True
if input_file_dir==None:
print 'Please provide a valid file location for your input data matrix (must have an annotation row and an annotation column)';sys.exit()
UI.performPCA(input_file_dir, include_labels, pca_algorithm, transpose, None,
plotType=plotType, display=display, geneSetName=geneSetName, species=species, zscore=zscore, colorByGene=colorByGene)
sys.exit()
if 'VennDiagram' in image_export:
# AltAnalyze.py --image "VennDiagram" --input "C:\file1.txt" --input "C:\file2.txt" --output "C:\graphs"
files_to_merge=[]
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--input':
arg = verifyPath(arg)
files_to_merge.append(arg)
if opt == '--display':
if arg=='yes' or arg=='True' or arg == 'true':
display=True
if len(files_to_merge)<2:
print 'Please designate two or more files to compare (--input)';sys.exit()
UI.vennDiagram(files_to_merge, output_dir, None, display=display)
sys.exit()
if 'AltExonViewer' in image_export:
#python AltAnalyze.py --image AltExonViewer --AltResultsDir "C:\CP-hESC" --genes "ANXA7 FYN TCF3 NAV2 ETS2 MYLK ATP2A2" --species Hs --platform exon --dataType "splicing-index"
genes=[]
show_introns='no'
geneFileDir=''
analysisType='plot'
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--genes':genes=arg
elif opt == '--dataType': data_type = arg
elif opt == '--showIntrons': show_introns = arg
elif opt == '--AltResultsDir': altresult_dir = arg
elif opt == '--geneFileDir': geneFileDir = arg
elif opt == '--analysisType': analysisType=arg
if altresult_dir == None:
print 'Please include the location of the AltResults directory (--AltResultsDir)'; sys.exit()
if len(genes)==0 and len(geneFileDir)==0:
print "Please indicate the genes (--genes) or gene file location (--geneFileDir) for AltExonViewer";sys.exit()
if species == None:
print "Please enter a valide species (--species)"; sys.exit()
if array_type == None:
print "Please enter a valide platform (--platform)"; sys.exit()
if 'AltResults' not in altresult_dir:
altresult_dir+='/AltResults/'
if 'Sashimi' in analysisType:
#python AltAnalyze.py --image AltExonViewer --AltResultsDir "/Users/saljh8/Desktop/Grimes/GEC14074/AltResults/" --genes "Dgat1 Dgat2 Tcf7l1" --species Mm --platform RNASeq --analysisType SashimiPlot
analysisType = 'Sashimi-Plot'
altresult_dir = string.split(altresult_dir,'AltResults')[0]
if len(geneFileDir)>0: genes = geneFileDir
geneFileDir=''
elif 'raw' in data_type: ### Switch directories if expression
altanalyze_results_folder = string.replace(altresult_dir,'AltResults','ExpressionInput')
altresult_dir = UI.getValidExpFile(altanalyze_results_folder)
if len(altresult_dir)==0:
print 'No valid expression input file (e.g., exp.MyExperiment.txt) found in',altanalyze_results_folder;sys.exit()
else:
altanalyze_results_folder = altresult_dir+'/RawSpliceData/'+species
try: altresult_dir = UI.getValidSplicingScoreFile(altanalyze_results_folder)
except Exception,e:
print "No files found in: "+altanalyze_results_folder; sys.exit()
if len(geneFileDir)>0:
try:
genes = UI.importGeneList(geneFileDir) ### list of gene IDs or symbols
except Exception:
### Can occur if a directory of files is selected
try:
files = unique.read_directory(geneFileDir+'/')
gene_string=''
for file in files:
if '.txt' in file:
filename = geneFileDir+'/'+file
genes = UI.importGeneList(filename) ### list of gene IDs or symbols
gene_string = gene_string+','+genes
print 'Imported genes from',file,'\n'
#print [altresult_dir];sys.exit()
UI.altExonViewer(species,platform,altresult_dir, gene_string, show_introns, analysisType, False)
except Exception: pass
sys.exit()
if len(genes)==0:
print 'Please list one or more genes (--genes "ANXA7 FYN TCF3 NAV2 ETS2 MYLK ATP2A2")'; sys.exit()
try: UI.altExonViewer(species,platform,altresult_dir, genes, show_introns, analysisType, False)
except Exception:
print traceback.format_exc()
sys.exit()
if 'network' in image_export:
#AltAnalyze.py --image network --species Hs --output "C:\GSE9440_RAW" --PathwaySelection Apoptosis:WP254 --GeneSetSelection WikiPathways
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--update_interactions': update_interactions=arg
elif opt == '--includeExpIDs': includeExpIDs=arg
elif opt == '--degrees': degrees=arg
elif opt == '--genes':
Genes=arg
inputType = 'IDs'
elif opt == '--inputType': inputType=arg
elif opt == '--interactionDirs': interactionDirs.append(arg)
elif opt == '--GeneSetSelection': GeneSetSelection=arg
elif opt == '--PathwaySelection': PathwaySelection=arg
elif opt == '--OntologyID': OntologyID=arg
elif opt == '--display': display=arg
if update_interactions == 'yes': update_interactions = True
else: update_interactions = False
if input_file_dir == None: pass
elif len(input_file_dir) == 0: input_file_dir = None
if len(input_exp_file) == 0: input_exp_file = None
if len(interactionDirs) == 0: interactionDirs=['WikiPathways']
if interactionDirs == ['all']:
interactionDirs = ['WikiPathways','KEGG','BioGRID','TFTargets','common-microRNATargets','all-microRNATargets','common-DrugBank','all-DrugBank']
if interactionDirs == ['main']:
interactionDirs = ['WikiPathways','KEGG','BioGRID','TFTargets']
if interactionDirs == ['confident']:
interactionDirs = ['WikiPathways','KEGG','TFTargets']
if len(Genes) == 0: Genes = None
if output_dir == None: pass
elif len(output_dir) == 0: output_dir = None
if len(GeneSetSelection) == 'None Selected': GeneSetSelection = None
if includeExpIDs=='yes': includeExpIDs = True
else: includeExpIDs = False
gsp = UI.GeneSelectionParameters(species,array_type,manufacturer)
gsp.setGeneSet(GeneSetSelection)
gsp.setPathwaySelect(PathwaySelection)
gsp.setGeneSelection(Genes)
gsp.setOntologyID(OntologyID)
gsp.setIncludeExpIDs(includeExpIDs)
root = ''
if species == None:
print 'Please designate a species (--species).'; sys.exit()
if output_dir == None:
print 'Please designate an ouput directory (--output)'; sys.exit()
if input_file_dir !=None:
if '.txt' in input_file_dir or '.sif' in input_file_dir:
UI.networkBuilder(input_file_dir,inputType,output_dir,interactionDirs,degrees,input_exp_file,gsp,root)
else:
parent_dir = input_file_dir
dir_list = read_directory(parent_dir)
for file in dir_list:
input_file_dir = parent_dir+'/'+file
try:
UI.networkBuilder(input_file_dir,inputType,output_dir,interactionDirs,degrees,input_exp_file,gsp,root)
except Exception:
print file, 'failed to produce network'
else:
UI.networkBuilder(None,inputType,output_dir,interactionDirs,degrees,input_exp_file,gsp,root)
sys.exit()
########## Begin database dependent AltAnalyze workflows
if ensembl_version != 'current' and 'markers' not in update_method:
dbversion = string.replace(ensembl_version,'EnsMart','')
UI.exportDBversion('EnsMart'+dbversion)
gene_database = unique.getCurrentGeneDatabaseVersion()
print 'Current database version:',gene_database
if array_type == None and update_dbs != 'yes' and denom_file_dir == None:
print "Please specify an array or data type (e.g., RNASeq, exon, gene, junction, AltMouse, 3'array)."; sys.exit()
if 'archive' in update_method:
###
print 'Archiving databases', ensembl_version
try: archive_dir = 'ArchiveDBs/EnsMart'+ensembl_version+'/archive'; export.createDirPath(filepath(archive_dir))
except Exception: null = [] ### directory already exists
dirs = unique.read_directory('/ArchiveDBs/EnsMart'+ensembl_version)
print len(dirs), dirs
import shutil
for species_dir in dirs:
try:
#print '/ArchiveDBs/EnsMart'+ensembl_version+'/'+species_dir+'/'+species_dir+'_RNASeq.zip'
src = filepath('ArchiveDBs/EnsMart'+ensembl_version+'/'+species_dir+'/'+species_dir+'_RNASeq.zip')
dstn = filepath('ArchiveDBs/EnsMart'+ensembl_version+'/archive/'+species_dir+'_RNASeq.zip')
#export.copyFile(src, dstn)
shutil.move(src, dstn)
try:
srcj = string.replace(src,'RNASeq.','junction.'); dstnj = string.replace(dstn,'RNASeq.','junction.')
shutil.move(srcj, dstnj)
except Exception: null=[]
try:
src = string.replace(src,'_RNASeq.','.'); dstn = string.replace(dstn,'_RNASeq.','.')
shutil.move(src, dstn)
except Exception: null=[]
except Exception: null=[]
sys.exit()
if update_dbs == 'yes' and 'Official' not in update_method:
if 'cleanup' in update_method:
existing_species_dirs = unique.read_directory('/AltDatabase/ensembl')
print 'Deleting EnsemblSQL directory for all species, ensembl version',ensembl_version
for species in existing_species_dirs:
export.deleteFolder('AltDatabase/ensembl/'+species+'/EnsemblSQL')
existing_species_dirs = unique.read_directory('/AltDatabase')
print 'Deleting SequenceData directory for all species, ensembl version',ensembl_version
for species in existing_species_dirs:
export.deleteFolder('AltDatabase/'+species+'/SequenceData')
print 'Finished...exiting'
sys.exit()
if 'package' not in update_method and 'markers' not in update_method:
### Example:
### python AltAnalyze.py --species all --arraytype all --update all --version 60
### tr -d \\r < AltAnalyze.py > AltAnalyze_new.py
### chmod +x AltAnalyze_new.py
### nohup ./AltAnalyze.py --update all --species Mm --arraytype gene --arraytype exon --version 60 2>&1 > nohup_v60_Mm.txt
if array_type == 'all' and (species == 'Mm' or species == 'all'): array_type = ['AltMouse','exon','gene','junction','RNASeq']
elif array_type == 'all' and (species == 'Hs' or species == 'Rn'): array_type = ['exon','gene','junction','RNASeq']
else: array_type = [array_type]+additional_array_types
if species == 'all' and 'RNASeq' not in array_type: species = selected_species ### just analyze the species for which multiple platforms are supported
if species == 'selected': species = selected_species ### just analyze the species for which multiple platforms are supported
elif species == 'all':
all_supported_names = {}; all_species_names={}
species_names = UI.getSpeciesInfo()
for species in species_names: all_supported_names[species_names[species]]=species
import EnsemblSQL
child_dirs, ensembl_species, ensembl_versions = EnsemblSQL.getCurrentEnsemblSpecies('release-'+ensembl_version)
for ens_species in ensembl_species:
ens_species = string.replace(ens_species,'_',' ')
if ens_species in all_supported_names:
all_species_names[all_supported_names[ens_species]]=[]
del all_species_names['Hs']
del all_species_names['Mm']
del all_species_names['Rn']
"""
del all_species_names['Go']
del all_species_names['Bt']
del all_species_names['Sc']
del all_species_names['Ss']
del all_species_names['Pv']
del all_species_names['Pt']
del all_species_names['La']
del all_species_names['Tt']
del all_species_names['Tr']
del all_species_names['Ts']
del all_species_names['Pb']
del all_species_names['Pc']
del all_species_names['Ec']
del all_species_names['Tb']
del all_species_names['Tg']
del all_species_names['Dn']
del all_species_names['Do']
del all_species_names['Tn']
del all_species_names['Dm']
del all_species_names['Oc']
del all_species_names['Og']
del all_species_names['Fc']
del all_species_names['Dr']
del all_species_names['Me']
del all_species_names['Cp']
del all_species_names['Tt']
del all_species_names['La']
del all_species_names['Tr']
del all_species_names['Ts']
del all_species_names['Et'] ### No alternative isoforms?
del all_species_names['Pc']
del all_species_names['Tb']
del all_species_names['Fc']
del all_species_names['Sc']
del all_species_names['Do']
del all_species_names['Dn']
del all_species_names['Og']
del all_species_names['Ga']
del all_species_names['Me']
del all_species_names['Ml']
del all_species_names['Mi']
del all_species_names['St']
del all_species_names['Sa']
del all_species_names['Cs']
del all_species_names['Vp']
del all_species_names['Ch']
del all_species_names['Ee']
del all_species_names['Ac']"""
sx=[]; all_species_names2=[] ### Ensure that the core selected species are run first
for species in selected_species:
if species in all_species_names: sx.append(species)
for species in all_species_names:
if species not in selected_species: all_species_names2.append(species)
all_species_names = sx+all_species_names2
species = all_species_names
else: species = [species]
### Translate the --update tokens into individual yes/no build flags.
### Defaults: rebuild nothing except the miRNA sequence import.
update_uniprot='no'; update_ensembl='no'; update_probeset_to_ensembl='no'; update_domain='no'; update_miRs = 'no'; genomic_build = 'new'; update_miR_seq = 'yes'
if 'all' in update_method:
update_uniprot='yes'; update_ensembl='yes'; update_probeset_to_ensembl='yes'; update_domain='yes'; update_miRs = 'yes'
if 'UniProt' in update_method: update_uniprot = 'yes'
if 'Ensembl' in update_method: update_ensembl = 'yes'
if 'Probeset' in update_method or 'ExonAnnotations' in update_method: update_probeset_to_ensembl = 'yes'
if 'Domain' in update_method:
update_domain = 'yes'
### Domain updates require BioPython (Bio.Entrez); fail fast if missing
try: from Bio import Entrez #test this
except Exception: print 'The dependent module Bio is not installed or not accessible through the default python interpretter. Existing AltAnalyze.'; sys.exit()
if 'miRBs' in update_method or 'miRBS' in update_method: update_miRs = 'yes'
### NOTE(review): genomic_build already defaults to 'new' above, so this
### branch is currently a no-op - confirm whether another default was intended
if 'NewGenomeBuild' in update_method: genomic_build = 'new'
if 'current' in ensembl_version: print "Please specify an Ensembl version number (e.g., 60) before proceeding with the update.";sys.exit()
try: force = force ### Variable is not declared otherwise
except Exception: force = 'yes'; print 'force:',force
existing_species_dirs={}
update_all = 'no' ### We don't pass this as yes, in order to skip certain steps when multiple array types are analyzed (others are specified above)
try: print "Updating AltDatabase the following array_types",string.join(array_type),"for the species",string.join(species)
except Exception: print 'Please designate a valid platform/array_type (e.g., exon) and species code (e.g., Mm).'
for specific_species in species:
for platform_name in array_type:
if platform_name == 'AltMouse' and specific_species == 'Mm': proceed = 'yes'
elif platform_name == 'exon' or platform_name == 'gene':
import ExonArrayEnsemblRules
#### Check to see if the probeset.csv file is present
#try: probeset_transcript_file = ExonArrayEnsemblRules.getDirectoryFiles('/AltDatabase/'+specific_species+'/'+platform_name)
#except Exception: print "Affymetrix probeset.csv anotation file is not found. You must save this to",'/AltDatabase/'+specific_species+'/'+platform_name,'before updating (unzipped).'; sys.exit()
proceed = 'yes'
elif platform_name == 'junction' and (specific_species == 'Hs' or specific_species == 'Mm'): proceed = 'yes'
elif platform_name == 'RNASeq': proceed = 'yes'
else: proceed = 'no'
if proceed == 'yes':
print "Analyzing", specific_species, platform_name
if (platform_name != array_type[0]) and len(species)==1:
update_uniprot = 'no'; update_ensembl = 'no'; update_miR_seq = 'no' ### Don't need to do this twice in a row
print 'Skipping ensembl, uniprot and mir-sequence file import updates since already completed for this species',array_type,platform_name
if ignore_built_species == 'yes': ### Useful for when building all species for a new database build
existing_species_dirs = unique.read_directory('/AltDatabase/ensembl') ### call this here to update with every species - if running multiple instances
if specific_array_type != None and specific_array_type != platform_name: platform_name+='|'+specific_array_type ### For the hGlue vs. JAY arrays
if specific_species not in existing_species_dirs: ### Useful when running multiple instances of AltAnalyze to build all species
print 'update_ensembl',update_ensembl
print 'update_uniprot',update_uniprot
print 'update_probeset_to_ensembl',update_probeset_to_ensembl
print 'update_domain',update_domain
print 'update_miRs',update_miRs
update.executeParameters(specific_species,platform_name,force,genomic_build,update_uniprot,update_ensembl,update_probeset_to_ensembl,update_domain,update_miRs,update_all,update_miR_seq,ensembl_version)
else: print 'ignoring',specific_species
sys.exit()
if 'package' in update_method:
### Example: python AltAnalyze.py --update package --species all --platform all --version 65
if ensembl_version == 'current': print '\nPlease specify version of the database to package (e.g., --version 60).'; sys.exit()
ensembl_version = 'EnsMart'+ensembl_version
### Get all possible species
species_names = UI.getSpeciesInfo(); possible_species={}
possible_species = species_names
possible_arrays = ['exon','gene','junction','AltMouse','RNASeq']
try:
if species == 'all': possible_species = possible_species
elif species == 'selected': possible_species = selected_species
else: possible_species = [species]
except Exception: species = possible_species
if array_type == None or array_type == 'all': possible_arrays = possible_arrays
else: possible_arrays = [array_type]+additional_array_types
species_to_package={}
dirs = unique.read_directory('/AltDatabase/'+ensembl_version)
#print possible_arrays, possible_species; sys.exit()
### Map each installed species directory to the platforms ('array types')
### present for it under this database version, restricted to the requested
### species/platform subsets computed above.
for species_code in dirs:
    if species_code in possible_species:
        array_types = unique.read_directory('/AltDatabase/'+ensembl_version+'/'+species_code)
        for arraytype in array_types:
            if arraytype in possible_arrays:
                ### NOTE(review): the original repeated the species membership
                ### test and re-read the directory here; both were redundant
                ### no-ops (the re-read result was never used), so they are removed.
                try: species_to_package[species_code].append(arraytype)
                except Exception: species_to_package[species_code] = [arraytype]
species_to_package = eliminate_redundant_dict_values(species_to_package)
for species in species_to_package:
files_to_copy =[species+'_Ensembl_domain_aligning_probesets.txt']
files_to_copy+=[species+'_Ensembl_indirect_domain_aligning_probesets.txt']
files_to_copy+=[species+'_Ensembl_probesets.txt']
files_to_copy+=[species+'_Ensembl_exons.txt']
#files_to_copy+=[species+'_Ensembl_junctions.txt']
files_to_copy+=[species+'_exon_core.mps']
files_to_copy+=[species+'_exon_extended.mps']
files_to_copy+=[species+'_exon_full.mps']
files_to_copy+=[species+'_gene_core.mps']
files_to_copy+=[species+'_gene_extended.mps']
files_to_copy+=[species+'_gene_full.mps']
files_to_copy+=[species+'_gene-exon_probesets.txt']
files_to_copy+=[species+'_probes_to_remove.txt']
files_to_copy+=[species+'_probeset-probes.txt']
files_to_copy+=[species+'_probeset_microRNAs_any.txt']
files_to_copy+=[species+'_probeset_microRNAs_multiple.txt']
files_to_copy+=['probeset-domain-annotations-exoncomp.txt']
files_to_copy+=['probeset-protein-annotations-exoncomp.txt']
#files_to_copy+=['probeset-protein-dbase_exoncomp.txt']
files_to_copy+=['SEQUENCE-protein-dbase_exoncomp.txt']
files_to_copy+=[species+'_Ensembl_junction_probesets.txt']
files_to_copy+=[species+'_Ensembl_AltMouse_probesets.txt']
files_to_copy+=[species+'_RNASeq-exon_probesets.txt']
files_to_copy+=[species+'_junction-exon_probesets.txt']
files_to_copy+=[species+'_junction_all.mps']
files_to_copy+=['platform.txt'] ### Indicates the specific platform for an array type (e.g., HJAY for junction or hGlue for junction)
files_to_copy+=[species+'_junction_comps_updated.txt']
files_to_copy+=['MASTER-probeset-transcript.txt']
files_to_copy+=['AltMouse-Ensembl.txt']
files_to_copy+=['AltMouse_junction-comparisons.txt']
files_to_copy+=['AltMouse_gene_annotations.txt']
files_to_copy+=['AltMouse_annotations.txt']
common_to_copy =['uniprot/'+species+'/custom_annotations.txt']
common_to_copy+=['ensembl/'+species+'/'+species+'_Ensembl-annotations_simple.txt']
common_to_copy+=['ensembl/'+species+'/'+species+'_Ensembl-annotations.txt']
common_to_copy+=['ensembl/'+species+'/'+species+'_microRNA-Ensembl.txt']
common_to_copy+=['ensembl/'+species+'/'+species+'_Ensembl_transcript-biotypes.txt']
common_to_copy+=['ensembl/'+species+'/'+species+'_Ensembl_transcript-annotations.txt']
common_to_copy+= searchDirectory("AltDatabase/ensembl/"+species+"/",'Ensembl_Protein')
common_to_copy+= searchDirectory("AltDatabase/ensembl/"+species+"/",'ProteinFeatures')
common_to_copy+= searchDirectory("AltDatabase/ensembl/"+species+"/",'ProteinCoordinates')
common_to_copy+= searchDirectory("AltDatabase/uniprot/"+species+"/",'FeatureCoordinate')
supported_arrays_present = 'no'
for arraytype in selected_platforms:
if arraytype in species_to_package[species]: supported_arrays_present = 'yes' #Hence a non-RNASeq platform is present
if supported_arrays_present == 'yes':
for file in common_to_copy:
ir = 'AltDatabase/'+ensembl_version+'/'
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+ensembl_version+'/'
export.copyFile(ir+file, er+file)
if 'RNASeq' in species_to_package[species]:
common_to_copy+=['ensembl/'+species+'/'+species+'_Ensembl_junction.txt']
common_to_copy+=['ensembl/'+species+'/'+species+'_Ensembl_exon.txt']
for file in common_to_copy:
ir = 'AltDatabase/'+ensembl_version+'/'
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+ensembl_version+'/'
if species in selected_species:
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/RNASeq/'+ensembl_version+'/' ### This allows us to build the package archive in a separate directory for selected species, so separate but overlapping content can be packaged
export.copyFile(ir+file, er+file)
for array_type in species_to_package[species]:
ir = 'AltDatabase/'+ensembl_version+'/'+species+'/'+array_type+'/'
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+ensembl_version+'/'+species+'/'+array_type+'/'
if array_type == 'junction':
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+array_type+'/'
if array_type == 'RNASeq' and species in selected_species:
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/RNASeq/'+ensembl_version+'/'+species+'/'+array_type+'/'
### Copy each platform file into the archive, preferring the '-filtered'
### variant; AltMouse/probes_ files additionally get cleaned after copy.
for file in files_to_copy:
    if array_type == 'RNASeq': file=string.replace(file,'_updated.txt','.txt')
    ### BUG FIX: reset per file (consistent with the junction/RNASeq copy loops
    ### below) - previously export_path was undefined on a first-file copy
    ### failure (NameError) and stale on later failures (re-cleaned old path).
    export_path=[]
    filt_file = string.replace(file ,'.txt','-filtered.txt')
    try: export.copyFile(ir+filt_file, er+filt_file); export_path = er+filt_file
    except Exception:
        try: export.copyFile(ir+file, er+file); export_path = er+file
        except Exception: null = [] ### File not found in directory
    if len(export_path)>0:
        if 'AltMouse' in export_path or 'probes_' in export_path:
            export.cleanFile(export_path)
if array_type == 'junction':
subdir = '/exon/'
ir = 'AltDatabase/'+ensembl_version+'/'+species+'/'+array_type+subdir
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+array_type+subdir
for file in files_to_copy:
export_path=[]
filt_file = string.replace(file ,'.txt','-filtered.txt')
try: export.copyFile(ir+filt_file, er+filt_file); export_path = er+filt_file
except Exception:
try: export.copyFile(ir+file, er+file); export_path = er+file
except Exception: null = [] ### File not found in directory
if array_type == 'RNASeq':
subdir = '/junction/'
ir = 'AltDatabase/'+ensembl_version+'/'+species+'/'+array_type+subdir
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+ensembl_version+'/'+species+'/'+array_type+subdir
if species in selected_species:
er = 'ArchiveDBs/'+ensembl_version+'/'+species+'/RNASeq/'+ensembl_version+'/'+species+'/'+array_type+subdir
for file in files_to_copy:
if 'SEQUENCE-protein-dbase' not in file and 'domain_aligning' not in file: ### This data is now combined into the main file
export_path=[]
filt_file = string.replace(file ,'.txt','-filtered.txt')
try: export.copyFile(ir+filt_file, er+filt_file); export_path = er+filt_file
except Exception:
try: export.copyFile(ir+file, er+file); export_path = er+file
except Exception: null = [] ### File not found in directory
if 'RNASeq' in species_to_package[species]:
src = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+ensembl_version
dst = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+species+'_RNASeq.zip'
if species in selected_species:
src = 'ArchiveDBs/'+ensembl_version+'/'+species+'/RNASeq/'+ensembl_version
update.zipDirectory(src); print 'Zipping',species, array_type, dst
os.rename(src+'.zip', dst)
if supported_arrays_present == 'yes':
src = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+ensembl_version
dst = 'ArchiveDBs/'+ensembl_version+'/'+species+'/'+species+'.zip'
update.zipDirectory(src); print 'Zipping',species, array_type, dst
os.rename(src+'.zip', dst)
if 'junction' in species_to_package[species]:
src = 'ArchiveDBs/'+ensembl_version+'/'+species+'/junction'
dst = string.replace(src,'junction',species+'_junction.zip')
update.zipDirectory(src); print 'Zipping',species+'_junction'
os.rename(src+'.zip', dst)
sys.exit()
if 'markers' in update_method:
if species == None or platform == None:
print "WARNING! A species and platform (e.g., exon, junction, 3'array or RNASeq) must be defined to identify markers.";sys.exit()
elif input_exp_file == '':
print "WARNING! A input expression file must be supplied (e.g., ExpressionOutput/DATASET.YourExperimentName.txt) for this analysis.";sys.exit()
else:
#python AltAnalyze.py --update markers --platform gene --expdir "/home/socr/c/users2/salomoni/other/boxer/normalization/Mm_Gene-TissueAtlas/ExpressionInput/exp.meta.txt"
#python AltAnalyze.py --update markers --platform gene --expdir "/home/socr/c/users2/salomoni/other/boxer/normalization/Mm_Gene-TissueAtlas/AltResults/RawSpliceData/Mm/splicing-index/meta.txt"
#python AltAnalyze.py --update markers --platform "3'array" --expdir "/home/socr/c/users2/salomoni/other/boxer/normalization/U133/ExpressionOutput/DATASET-meta.txt"
#python AltAnalyze.py --update markers --compendiumType ncRNA --platform "exon" --expdir "/home/socr/c/users2/salomoni/conklin/nsalomonis/normalization/Hs_Exon-TissueAtlas/ExpressionOutput/DATASET-meta.txt"
#python AltAnalyze.py --update markers --platform RNASeq --species Mm --geneRPKM 1 --expdir /Users/saljh8/Desktop/Grimes/MergedRSEM/DN-Analysis/ExpressionInput/exp.DN.txt --genesToReport 200
"""The markerFinder module:
1) takes an input ExpressionOutput file (DATASET.YourExperimentName.txt)
2) extracts group average expression and saves to AVERAGE.YourExperimentName.txt to the ExpressionOutput directory
3) re-imports AVERAGE.YourExperimentName.txt
4) correlates the average expression of each gene to an idealized profile to derive a Pearson correlation coefficient
5) identifies optimal markers based on these correlations for each tissue
6) exports an expression file with just these marker genes and tissues
This module can peform these analyses on protein coding or ncRNAs and can segregate the cell/tissue groups into clusters
when a group notation is present in the sample name (e.g., 0~Heart, 0~Brain, 1~Stem Cell)"""
import markerFinder
if 'AltResults' in input_exp_file and 'Clustering' not in input_exp_file:
### This applies to a file compoosed of exon-level normalized intensities (calculae average group expression)
markerFinder.getAverageExonExpression(species,platform,input_exp_file)
if 'Raw' in input_exp_file:
group_exp_file = string.replace(input_exp_file,'Raw','AVERAGE')
else:
group_exp_file = string.replace(input_exp_file,'FullDatasets','AVERAGE-FullDatasets')
altexon_correlation_file = markerFinder.analyzeData(group_exp_file,species,platform,compendiumType,geneToReport=genesToReport,correlateAll=correlateAll,AdditionalParameters=fl)
markerFinder.getExprValsForNICorrelations(platform,altexon_correlation_file,group_exp_file)
else:
### This applies to an ExpressionOutput DATASET file compoosed of gene expression values (averages already present)
import collections
try: test_ordereddict=collections.OrderedDict()
except Exception:
try: import ordereddict
except Exception:
### This is needed to re-order the average file so that the groups are sequentially ordered when analyzing clustered groups (0~)
print 'Warning!!!! To run markerFinder correctly call python version 2.7x or greater (python 3.x not supported)'
print 'Requires ordereddict (also can install the library ordereddict). To call 2.7: /usr/bin/python2.7'
sys.exit()
try:
output_dir = markerFinder.getAverageExpressionValues(input_exp_file,platform) ### Either way, make an average annotated file from the DATASET file
if 'DATASET' in input_exp_file:
group_exp_file = string.replace(input_exp_file,'DATASET','AVERAGE')
else:
group_exp_file = (input_exp_file,output_dir) ### still analyze the primary sample
except Exception:
### Work around when performing this analysis on an alternative exon input cluster file
group_exp_file = input_exp_file
fl = UI.ExpressionFileLocationData(input_exp_file,'','',''); fl.setOutputDir(export.findParentDir(export.findParentDir(input_exp_file)[:-1]))
try: fl.setSpecies(species); fl.setVendor(vendor)
except Exception: pass
try:
rpkm_threshold = float(rpkm_threshold) ### If supplied, for any platform, use it
fl.setRPKMThreshold(rpkm_threshold)
except Exception: pass
if platform=='RNASeq':
try: rpkm_threshold = float(rpkm_threshold)
except Exception: rpkm_threshold = 1.0
fl.setRPKMThreshold(rpkm_threshold)
try: correlationDirection = direction ### correlate to a positive or inverse negative in silico artificial pattern
except Exception: correlationDirection = 'up'
fl.setCorrelationDirection(correlationDirection)
if expression_data_format == 'non-log': logTransform = True
else: logTransform = False
if 'topSplice' in input_exp_file:
markerFinder.filterRNASeqSpliceEvents(species,platform,fl,input_exp_file)
sys.exit()
if 'stats.' in input_exp_file:
markerFinder.filterDetectionPvalues(species,platform,fl,input_exp_file)
sys.exit()
else:
markerFinder.analyzeData(group_exp_file,species,platform,compendiumType,geneToReport=genesToReport,correlateAll=correlateAll,AdditionalParameters=fl,logTransform=logTransform)
try: fl.setVendor(manufacturer)
except Exception:
print '--vendor not indicated by user... assuming Affymetrix'
fl.setVendor('Affymetrix')
try: markerFinder.generateMarkerHeatMaps(fl,array_type,convertNonLogToLog=logTransform,Species=species)
except Exception: print traceback.format_exc()
print 'Cell/Tissue marker classification analysis finished';sys.exit()
if 'EnsMart' in ensembl_version:
UI.exportDBversion(ensembl_version)
annotation_found = verifyFile(input_annotation_file)
proceed = 'no'
if 'Official' not in update_method and denom_file_dir == None: ### If running GO-Elite independent of AltAnalyze (see below GO_Elite call)
try:
time_stamp = timestamp()
if len(cel_file_dir)>0:
if output_dir == None:
output_dir = cel_file_dir
print "Setting output directory to the input path:", output_dir
### Fall back to the filtered-input directory as the output path when none
### was given. BUG FIX: the original tested 'input_filtered_dir>0', comparing
### a string to an int (always True in Python 2, even for ''); the intended
### non-empty test - as written at the later 'len(input_filtered_dir)>0'
### check - is used here.
if output_dir == None and len(input_filtered_dir)>0:
    output_dir = input_filtered_dir
if '/' == output_dir[-1] or '\\' in output_dir[-2]: null=[]
else: output_dir +='/'
log_file = filepath(output_dir+'AltAnalyze_report-'+time_stamp+'.log')
log_report = open(log_file,'w'); log_report.close()
sys.stdout = Logger('')
except Exception,e:
print e
print 'Please designate an output directory before proceeding (e.g., --output "C:\RNASeq)';sys.exit()
if mappedExonAnalysis:
array_type = 'RNASeq' ### Although this is not the actual platform, the resulting data will be treated as RNA-Seq with parameters most suitable for arrays
if len(external_annotation_dir)>0:
run_from_scratch = 'Annotate External Results'
if channel_to_extract != None:
run_from_scratch = 'Process Feature Extraction files' ### Agilent Feature Extraction files as input for normalization
manufacturer = 'Agilent'
constitutive_source = 'Agilent'
expression_threshold = 'NA'
perform_alt_analysis = 'NA'
if len(input_filtered_dir)>0:
run_from_scratch ='Process AltAnalyze filtered'; proceed='yes'
if len(input_exp_file)>0:
run_from_scratch = 'Process Expression file'; proceed='yes'
input_exp_file = string.replace(input_exp_file,'\\','/') ### Windows convention is \ rather than /, but works with /
ief_list = string.split(input_exp_file,'/')
if len(output_dir)>0: parent_dir = output_dir
else: parent_dir = string.join(ief_list[:-1],'/')
exp_name = ief_list[-1]
if len(cel_file_dir)>0 or runKallisto == True:
# python AltAnalyze.py --species Mm --platform RNASeq --runKallisto yes --expname test
if exp_name == None:
print "No experiment name defined. Please sumbit a name (e.g., --expname CancerComp) before proceeding."; sys.exit()
else:
dataset_name = 'exp.'+exp_name+'.txt'; exp_file_dir = filepath(output_dir+'/ExpressionInput/'+dataset_name)
if runKallisto:
    ### BUG FIX: the original used '==' (a no-op comparison that discarded its
    ### result), so the workflow mode was never switched; the parallel 'elif'
    ### branch below assigns its mode with '=', confirming assignment was intended.
    run_from_scratch = 'Process RNA-seq reads'
elif run_from_scratch!= 'Process Feature Extraction files':
run_from_scratch = 'Process CEL files'; proceed='yes'
if array_type == 'RNASeq': file_ext = '.BED'
else: file_ext = '.CEL'
try: cel_files,cel_files_fn = UI.identifyCELfiles(cel_file_dir,array_type,manufacturer)
except Exception,e:
print e
if mappedExonAnalysis: pass
else: print "No",file_ext,"files found in the directory:",cel_file_dir;sys.exit()
if array_type != 'RNASeq': cel_file_list_dir = UI.exportCELFileList(cel_files_fn,cel_file_dir)
if groups_file != None and comps_file != None:
try: export.copyFile(groups_file, string.replace(exp_file_dir,'exp.','groups.'))
except Exception: print 'Groups file already present in target location OR bad input path.'
try: export.copyFile(comps_file, string.replace(exp_file_dir,'exp.','comps.'))
except Exception: print 'Comparison file already present in target location OR bad input path.'
groups_file = string.replace(exp_file_dir,'exp.','groups.')
comps_file = string.replace(exp_file_dir,'exp.','comps.')
if verifyGroupFileFormat(groups_file) == False:
print "\nWarning! The format of your groups file is not correct. For details, see:\nhttp://code.google.com/p/altanalyze/wiki/ManualGroupsCompsCreation\n"
sys.exit()
if array_type != 'RNASeq' and manufacturer!= 'Agilent':
"""Determine if Library and Annotations for the array exist, if not, download or prompt for selection"""
try:
### For the HGLUE and HJAY arrays, this step is critical in order to have the commond-line AltAnalyze downloadthe appropriate junction database (determined from specific_array_type)
specific_array_types,specific_array_type = UI.identifyArrayType(cel_files_fn)
num_array_types = len(specific_array_types)
except Exception:
null=[]; num_array_types=1; specific_array_type=None
if array_type == 'exon':
if species == 'Hs': specific_array_type = 'HuEx-1_0-st-v2'
if species == 'Mm': specific_array_type = 'MoEx-1_0-st-v2'
if species == 'Rn': specific_array_type = 'RaEx-1_0-st-v2'
elif array_type == 'gene':
if species == 'Hs': specific_array_type = 'HuGene-1_0-st-v1'
if species == 'Mm': specific_array_type = 'MoGene-1_0-st-v1'
if species == 'Rn': specific_array_type = 'RaGene-1_0-st-v1'
elif array_type == 'AltMouse': specific_array_type = 'altMouseA'
"""
elif array_type == 'junction':
if species == 'Mm': specific_array_type = 'MJAY'
if species == 'Hs': specific_array_type = 'HJAY'
"""
supproted_array_db = UI.importSupportedArrayInfo()
if specific_array_type in supproted_array_db and input_cdf_file == None and input_annotation_file == None:
sa = supproted_array_db[specific_array_type]; species = sa.Species(); array_type = sa.ArrayType()
input_cdf_file, input_annotation_file, bgp_file, clf_file = UI.getAffyFilesRemote(specific_array_type,array_type,species)
else: array_type = "3'array"
cdf_found = verifyFile(input_cdf_file)
annotation_found = verifyFile(input_annotation_file)
if input_cdf_file == None:
print [specific_array_type], 'not currently supported... Please provide CDF to AltAnalyze (commandline or GUI) or manually add to AltDatabase/affymetrix/LibraryFiles'; sys.exit()
if cdf_found != "found":
### Copy valid Library files to a local AltAnalyze database directory
input_cdf_file_lower = string.lower(input_cdf_file)
if array_type == "3'array":
if '.cdf' in input_cdf_file_lower:
clf_file='';bgp_file=''; assinged = 'yes'
###Thus the CDF or PDF file was confirmed, so copy it over to AltDatabase
icf_list = string.split(input_cdf_file,'/'); cdf_short = icf_list[-1]
destination_parent = 'AltDatabase/affymetrix/LibraryFiles/'
destination_parent = osfilepath(destination_parent+cdf_short)
info_list = input_cdf_file,destination_parent; UI.StatusWindow(info_list,'copy')
else: print "Valid CDF file not found. Exiting program.";sys.exit()
else:
if '.pgf' in input_cdf_file_lower:
###Check to see if the clf and bgp files are present in this directory
icf_list = string.split(input_cdf_file,'/'); parent_dir = string.join(icf_list[:-1],'/'); cdf_short = icf_list[-1]
clf_short = string.replace(cdf_short,'.pgf','.clf')
kil_short = string.replace(cdf_short,'.pgf','.kil') ### Only applies to the Glue array
if array_type == 'exon' or array_type == 'junction': bgp_short = string.replace(cdf_short,'.pgf','.antigenomic.bgp')
else: bgp_short = string.replace(cdf_short,'.pgf','.bgp')
dir_list = read_directory(parent_dir)
if clf_short in dir_list and bgp_short in dir_list:
pgf_file = input_cdf_file
clf_file = string.replace(pgf_file,'.pgf','.clf')
kil_file = string.replace(pgf_file,'.pgf','.kil') ### Only applies to the Glue array
if array_type == 'exon' or array_type == 'junction': bgp_file = string.replace(pgf_file,'.pgf','.antigenomic.bgp')
else: bgp_file = string.replace(pgf_file,'.pgf','.bgp')
assinged = 'yes'
###Thus the CDF or PDF file was confirmed, so copy it over to AltDatabase
destination_parent = 'AltDatabase/affymetrix/LibraryFiles/'
info_list = input_cdf_file,osfilepath(destination_parent+cdf_short); UI.StatusWindow(info_list,'copy')
info_list = clf_file,osfilepath(destination_parent+clf_short); UI.StatusWindow(info_list,'copy')
info_list = bgp_file,osfilepath(destination_parent+bgp_short); UI.StatusWindow(info_list,'copy')
if 'Glue' in pgf_file:
info_list = kil_file,osfilepath(destination_parent+kil_short); UI.StatusWindow(info_list,'copy')
if annotation_found != "found" and update_dbs == 'no' and array_type != 'RNASeq' and denom_file_dir == None and manufacturer != 'Agilent':
### Copy valid Annotation files to a local AltAnalyze database directory
try:
input_annotation_lower = string.lower(input_annotation_file)
if '.csv' in input_annotation_lower:
assinged = 'yes'
###Thus the CDF or PDF file was confirmed, so copy it over to AltDatabase
icf_list = string.split(input_annotation_file,'/'); csv_short = icf_list[-1]
destination_parent = 'AltDatabase/affymetrix/'+species+'/'
info_list = input_annotation_file,filepath(destination_parent+csv_short); UI.StatusWindow(info_list,'copy')
except Exception: print "No Affymetrix annotation file provided. AltAnalyze will use any .csv annotations files in AltDatabase/Affymetrix/"+species
if 'Official' in update_method and species != None:
proceed = 'yes'
elif array_type != None and species != None:
expr_defaults, alt_exon_defaults, functional_analysis_defaults, goelite_defaults = UI.importDefaults(array_type,species)
ge_fold_cutoffs, ge_pvalue_cutoffs, ge_ptype, filter_method, z_threshold, p_val_threshold, change_threshold, ORA_algorithm, resources_to_analyze, goelite_permutations, mod, returnPathways, NA = goelite_defaults
use_direct_domain_alignments_only,microRNA_prediction_method = functional_analysis_defaults
analysis_method, additional_algorithms, filter_probeset_types, analyze_all_conditions, p_threshold, alt_exon_fold_variable, additional_score, permute_p_threshold, gene_expression_cutoff, remove_intronic_junctions, perform_permutation_analysis, export_NI_values, run_MiDAS, calculate_normIntensity_p, filter_for_AS = alt_exon_defaults
dabg_p, rpkm_threshold, gene_exp_threshold, exon_exp_threshold, exon_rpkm_threshold, expression_threshold, perform_alt_analysis, analyze_as_groups, expression_data_format, normalize_feature_exp, normalize_gene_data, avg_all_for_ss, include_raw_data, probability_statistic, FDR_statistic, batch_effects, marker_finder, visualize_qc_results, run_lineage_profiler, null = expr_defaults
elif denom_file_dir != None and species != None:
proceed = 'yes' ### Only run GO-Elite
expr_defaults, alt_exon_defaults, functional_analysis_defaults, goelite_defaults = UI.importDefaults('RNASeq',species) ### platform not relevant
ge_fold_cutoffs, ge_pvalue_cutoffs, ge_ptype, filter_method, z_threshold, p_val_threshold, change_threshold, ORA_algorithm, resources_to_analyze, goelite_permutations, mod, returnPathways, NA = goelite_defaults
else:
print 'No species defined. Please include the species code (e.g., "--species Hs") and array type (e.g., "--arraytype exon") before proceeding.'
print '\nAlso check the printed arguments above to see if there are formatting errors, such as bad quotes.'; sys.exit()
array_type_original = array_type
#if array_type == 'gene': array_type = "3'array"
for opt, arg in options:
if opt == '--runGOElite': run_GOElite=arg
elif opt == '--outputQCPlots': visualize_qc_results=arg
elif opt == '--runLineageProfiler': run_lineage_profiler=arg
elif opt == '--elitepermut': goelite_permutations=arg
elif opt == '--method': filter_method=arg
elif opt == '--zscore': z_threshold=arg
elif opt == '--elitepval': p_val_threshold=arg
elif opt == '--num': change_threshold=arg
elif opt == '--dataToAnalyze': resources_to_analyze=arg
elif opt == '--GEelitepval': ge_pvalue_cutoffs=arg
elif opt == '--GEelitefold': ge_fold_cutoffs=arg
elif opt == '--GEeliteptype': ge_ptype=arg
elif opt == '--ORAstat': ORA_algorithm=arg
elif opt == '--returnPathways': returnPathways=arg
elif opt == '--FDR': FDR_statistic=arg
elif opt == '--dabgp': dabg_p=arg
elif opt == '--rawexp': expression_threshold=arg
elif opt == '--geneRPKM': rpkm_threshold=arg
elif opt == '--exonRPKM': exon_rpkm_threshold=arg
elif opt == '--geneExp': gene_exp_threshold=arg
elif opt == '--exonExp': exon_exp_threshold=arg
elif opt == '--groupStat': probability_statistic=arg
elif opt == '--avgallss': avg_all_for_ss=arg
elif opt == '--logexp': expression_data_format=arg
elif opt == '--inclraw': include_raw_data=arg
elif opt == '--combat': batch_effects=arg
elif opt == '--runalt': perform_alt_analysis=arg
elif opt == '--altmethod': analysis_method=arg
elif opt == '--altp': p_threshold=arg
elif opt == '--probetype': filter_probeset_types=arg
elif opt == '--altscore': alt_exon_fold_variable=arg
elif opt == '--GEcutoff': gene_expression_cutoff=arg
elif opt == '--removeIntronOnlyJunctions': remove_intronic_junctions=arg
elif opt == '--normCounts': normalize_feature_exp=arg
elif opt == '--normMatrix': normalize_gene_data=arg
elif opt == '--altpermutep': permute_p_threshold=arg
elif opt == '--altpermute': perform_permutation_analysis=arg
elif opt == '--exportnormexp': export_NI_values=arg
elif opt == '--buildExonExportFile': build_exon_bedfile = 'yes'
elif opt == '--runMarkerFinder': marker_finder = arg
elif opt == '--calcNIp': calculate_normIntensity_p=arg
elif opt == '--runMiDAS': run_MiDAS=arg
elif opt == '--analyzeAllGroups':
analyze_all_conditions=arg
if analyze_all_conditions == 'yes': analyze_all_conditions = 'all groups'
elif opt == '--GEcutoff': use_direct_domain_alignments_only=arg
elif opt == '--mirmethod': microRNA_prediction_method=arg
elif opt == '--ASfilter': filter_for_AS=arg
elif opt == '--noxhyb': xhyb_remove=arg
elif opt == '--returnAll': return_all=arg
elif opt == '--annotatedir': external_annotation_dir=arg
elif opt == '--additionalScore': additional_score=arg
elif opt == '--additionalAlgorithm': additional_algorithms=arg
elif opt == '--modelSize':
modelSize=arg
try: modelSize = int(modelSize)
except Exception: modelSize = None
elif opt == '--geneModel':
geneModel=arg # file location
if geneModel == 'no' or 'alse' in geneModel:
geneModel = False
elif opt == '--reference':
custom_reference = arg
if run_from_scratch == 'Process Feature Extraction files': ### Agilent Feature Extraction files as input for normalization
normalize_gene_data = 'quantile' ### required for Agilent
proceed = 'yes'
if returnPathways == 'no' or returnPathways == 'None':
returnPathways = None
if pipelineAnalysis == False:
proceed = 'yes'
if proceed == 'yes':
species_codes = UI.remoteSpeciesInfo()
### Update Ensembl Databases
if 'Official' in update_method:
file_location_defaults = UI.importDefaultFileLocations()
db_versions_vendors,db_versions = UI.remoteOnlineDatabaseVersions()
array_codes = UI.remoteArrayInfo()
UI.getOnlineDBConfig(file_location_defaults,'')
if len(species)==2:
species_names = UI.getSpeciesInfo()
species_full = species_names[species]
else: species_full = species
print 'Species name to update:',species_full
db_version_list=[]
for version in db_versions: db_version_list.append(version)
db_version_list.sort(); db_version_list.reverse(); select_version = db_version_list[0]
db_versions[select_version].sort()
print 'Ensembl version',ensembl_version
if ensembl_version != 'current':
if len(ensembl_version) < 4: ensembl_version = 'EnsMart'+ensembl_version
if ensembl_version not in db_versions:
try: UI.getOnlineEliteDatabase(file_location_defaults,ensembl_version,[species],'no',''); sys.exit()
except Exception:
### This is only for database that aren't officially released yet for prototyping
print ensembl_version, 'is not a valid version of Ensembl, while',select_version, 'is.'; sys.exit()
else: select_version = ensembl_version
### Export basic species information
sc = species; db_version = ensembl_version
if sc != None:
for ad in db_versions_vendors[db_version]:
if ad.SpeciesCodes() == species_full:
for array_system in array_codes:
ac = array_codes[array_system]
compatible_species = ac.SpeciesCodes()
if ac.Manufacturer() in ad.Manufacturer() and ('expression' in ac.ArrayName() or 'RNASeq' in ac.ArrayName() or 'RNA-seq' in ac.ArrayName()):
if sc not in compatible_species: compatible_species.append(sc)
ac.setSpeciesCodes(compatible_species)
UI.exportArrayInfo(array_codes)
if species_full not in db_versions[select_version]:
print db_versions[select_version]
print species_full, ': This species is not available for this version %s of the Official database.' % select_version
else:
update_goelite_resources = 'no' ### This is handled separately below
UI.getOnlineEliteDatabase(file_location_defaults,ensembl_version,[species],update_goelite_resources,'');
### Attempt to download additional Ontologies and GeneSets
if additional_resources[0] != None: ### Indicates that the user requested the download of addition GO-Elite resources
try:
import GeneSetDownloader
print 'Adding supplemental GeneSet and Ontology Collections'
if 'all' in additional_resources:
additionalResources = UI.importResourceList() ### Get's all additional possible resources
else: additionalResources = additional_resources
GeneSetDownloader.buildAccessoryPathwayDatabases([species],additionalResources,'yes')
print 'Finished adding additional analysis resources.'
except Exception:
print 'Download error encountered for additional Ontologies and GeneSets...\nplease try again later.'
status = UI.verifyLineageProfilerDatabases(species,'command-line')
if status == False:
print 'Please note: LineageProfiler not currently supported for this species...'
if array_type == 'junction' or array_type == 'RNASeq': ### Download junction databases
try: UI.checkForLocalArraySupport(species,array_type,specific_array_type,'command-line')
except Exception:
print 'Please install a valid gene database before proceeding.\n'
print 'For example: python AltAnalyze.py --species Hs --update Official --version EnsMart72';sys.exit()
status = UI.verifyLineageProfilerDatabases(species,'command-line')
print "Finished adding database"
sys.exit()
try:
#print ge_fold_cutoffs,ge_pvalue_cutoffs, change_threshold, resources_to_analyze, goelite_permutations, p_val_threshold, z_threshold
change_threshold = int(change_threshold)-1
goelite_permutations = int(goelite_permutations);change_threshold = change_threshold
p_val_threshold = float(p_val_threshold); z_threshold = float(z_threshold)
if ORA_algorithm == 'Fisher Exact Test':
goelite_permutations = 'FisherExactTest'
except Exception,e:
print e
print 'One of the GO-Elite input values is inapporpriate. Please review and correct.';sys.exit()
if run_GOElite == None or run_GOElite == 'no': goelite_permutations = 'NA' ### This haults GO-Elite from running
else:
if output_dir == None:
print "\nPlease specify an output directory using the flag --output"; sys.exit()
try: expression_threshold = float(expression_threshold)
except Exception: expression_threshold = 1
try: dabg_p = float(dabg_p)
except Exception: dabg_p = 1 ### Occurs for RNASeq
if microRNA_prediction_method == 'two or more': microRNA_prediction_method = 'multiple'
else: microRNA_prediction_method = 'any'
### Run GO-Elite directly from user supplied input and denominator ID folders (outside of the normal workflows)
if run_GOElite == 'yes' and pipelineAnalysis == False and '--runGOElite' in arguments:# and denom_file_dir != None:
#python AltAnalyze.py --input "/Users/nsalomonis/Desktop/Mm_sample/input_list_small" --runGOElite yes --denom "/Users/nsalomonis/Desktop/Mm_sample/denominator" --mod Ensembl --species Mm
"""if denom_file_dir == None:
print 'Please include a folder containing a valid denominator ID list for the input ID sets.'; sys.exit()"""
try:
if output_dir==None:
### Set output to the same directory or parent if none selected
i = -1 ### 1 directory up
output_dir = string.join(string.split(input_file_dir,'/')[:i],'/')
file_dirs = input_file_dir, denom_file_dir, output_dir
import GO_Elite
if ORA_algorithm == 'Fisher Exact Test':
goelite_permutations = 'FisherExactTest'
goelite_var = species,mod,goelite_permutations,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,returnPathways,file_dirs,''
GO_Elite.remoteAnalysis(goelite_var,'non-UI',Multi=mlp)
sys.exit()
except Exception:
print traceback.format_exc()
print "Unexpected error encountered. Please see log file."; sys.exit()
if run_lineage_profiler == 'yes':
status = UI.verifyLineageProfilerDatabases(species,'command-line')
if status == False:
print 'Please note: LineageProfiler not currently supported for this species...'
if run_lineage_profiler == 'yes' and input_file_dir != None and pipelineAnalysis == False and '--runLineageProfiler' in arguments:
#python AltAnalyze.py --input "/Users/arrays/test.txt" --runLineageProfiler yes --vendor Affymetrix --platform "3'array" --species Mm --output "/Users/nsalomonis/Merrill"
#python AltAnalyze.py --input "/Users/qPCR/samples.txt" --runLineageProfiler yes --geneModel "/Users/qPCR/models.txt"
if array_type==None:
print "Please include a platform name (e.g., --platform RNASeq)";sys.exit()
if species==None:
print "Please include a species name (e.g., --species Hs)";sys.exit()
try:
status = UI.verifyLineageProfilerDatabases(species,'command-line')
except ValueError:
### Occurs due to if int(gene_database[-2:]) < 65: - ValueError: invalid literal for int() with base 10: ''
print '\nPlease install a valid gene database before proceeding.\n'
print 'For example: python AltAnalyze.py --species Hs --update Official --version EnsMart72\n';sys.exit()
if status == False:
print 'Please note: LineageProfiler not currently supported for this species...';sys.exit()
try:
fl = UI.ExpressionFileLocationData('','','','')
fl.setSpecies(species)
fl.setVendor(manufacturer)
fl.setPlatformType(array_type)
fl.setCompendiumType('protein_coding')
#fl.setCompendiumType('AltExon')
fl.setCompendiumPlatform(array_type)
try: expr_input_dir
except Exception: expr_input_dir = input_file_dir
UI.remoteLP(fl, expr_input_dir, manufacturer, custom_reference, geneModel, None, modelSize=modelSize)
#graphic_links = ExpressionBuilder.remoteLineageProfiler(fl,input_file_dir,array_type,species,manufacturer)
print_out = 'Lineage profiles and images saved to the folder "DataPlots" in the input file folder.'
print print_out
except Exception:
print traceback.format_exc()
print_out = 'Analysis error occured...\nplease see warning printouts.'
print print_out
sys.exit()
if array_type == 'junction' or array_type == 'RNASeq': ### Download junction databases
try: UI.checkForLocalArraySupport(species,array_type,specific_array_type,'command-line')
except Exception:
print 'Please install a valid gene database before proceeding.\n'
print 'For example: python AltAnalyze.py --species Hs --update Official --version EnsMart72';sys.exit()
probeset_types = ['full','core','extended']
if return_all == 'yes': ### Perform no alternative exon filtering when annotating existing FIRMA or MADS results
dabg_p = 1; expression_threshold = 1; p_threshold = 1; alt_exon_fold_variable = 1
gene_expression_cutoff = 10000; filter_probeset_types = 'full'; exon_exp_threshold = 1; rpkm_threshold = 0
gene_exp_threshold = 1; exon_rpkm_threshold = 0
if array_type == 'RNASeq':
gene_exp_threshold = 0
else:
if array_type != "3'array":
try:
p_threshold = float(p_threshold); alt_exon_fold_variable = float(alt_exon_fold_variable)
expression_threshold = float(expression_threshold); gene_expression_cutoff = float(gene_expression_cutoff)
dabg_p = float(dabg_p); additional_score = float(additional_score)
gene_expression_cutoff = float(gene_expression_cutoff)
except Exception:
try: gene_expression_cutoff = float(gene_expression_cutoff)
except Exception: gene_expression_cutoff = 0
try: rpkm_threshold = float(rpkm_threshold)
except Exception: rpkm_threshold = -1
try: exon_exp_threshold = float(exon_exp_threshold)
except Exception: exon_exp_threshold = 0
try: gene_exp_threshold = float(gene_exp_threshold)
except Exception: gene_exp_threshold = 0
try: exon_rpkm_threshold = float(exon_rpkm_threshold)
except Exception: exon_rpkm_threshold = 0
if filter_probeset_types not in probeset_types and array_type == 'exon':
print "Invalid probeset-type entered:",filter_probeset_types,'. Must be "full", "extended" or "core"'; sys.exit()
elif array_type == 'gene' and filter_probeset_types == 'NA': filter_probeset_types = 'core'
if dabg_p > 1 or dabg_p <= 0:
print "Invalid DABG p-value entered:",dabg_p,'. Must be > 0 and <= 1'; sys.exit()
if expression_threshold <1:
print "Invalid expression threshold entered:",expression_threshold,'. Must be > 1'; sys.exit()
if p_threshold > 1 or p_threshold <= 0:
print "Invalid alternative exon p-value entered:",p_threshold,'. Must be > 0 and <= 1'; sys.exit()
if alt_exon_fold_variable < 1 and analysis_method != 'ASPIRE':
print "Invalid alternative exon threshold entered:",alt_exon_fold_variable,'. Must be > 1'; sys.exit()
if gene_expression_cutoff < 1:
print "Invalid gene expression threshold entered:",gene_expression_cutoff,'. Must be > 1'; sys.exit()
if additional_score < 1:
print "Invalid additional score threshold entered:",additional_score,'. Must be > 1'; sys.exit()
if array_type == 'RNASeq':
if rpkm_threshold < 0:
print "Invalid gene RPKM threshold entered:",rpkm_threshold,'. Must be >= 0'; sys.exit()
if exon_exp_threshold < 1:
print "Invalid exon expression threshold entered:",exon_exp_threshold,'. Must be > 1'; sys.exit()
if exon_rpkm_threshold < 0:
print "Invalid exon RPKM threshold entered:",exon_rpkm_threshold,'. Must be >= 0'; sys.exit()
if gene_exp_threshold < 1:
print "Invalid gene expression threshold entered:",gene_exp_threshold,'. Must be > 1'; sys.exit()
if 'FIRMA' in additional_algorithms and array_type == 'RNASeq':
print 'FIRMA is not an available option for RNASeq... Changing this to splicing-index.'
additional_algorithms = 'splicing-index'
additional_algorithms = UI.AdditionalAlgorithms(additional_algorithms); additional_algorithms.setScore(additional_score)
if array_type == 'RNASeq':
manufacturer = 'RNASeq'
if 'CEL' in run_from_scratch: run_from_scratch = 'Process RNA-seq reads'
if build_exon_bedfile == 'yes': run_from_scratch = 'buildExonExportFiles'
if run_from_scratch == 'Process AltAnalyze filtered': expression_data_format = 'log' ### This is switched to log no matter what, after initial import and analysis of CEL or BED files
### These variables are modified from the defaults in the module UI as below
excludeNonExpExons = True
if avg_all_for_ss == 'yes': avg_all_for_ss = 'yes'
elif 'all exon aligning' in avg_all_for_ss or 'known exons' in avg_all_for_ss or 'expressed exons' in avg_all_for_ss:
if 'known exons' in avg_all_for_ss and array_type == 'RNASeq': excludeNonExpExons = False
avg_all_for_ss = 'yes'
else: avg_all_for_ss = 'no'
if run_MiDAS == 'NA': run_MiDAS = 'no'
if perform_alt_analysis == 'yes': perform_alt_analysis = 'yes'
elif perform_alt_analysis == 'expression': perform_alt_analysis = 'expression'
elif perform_alt_analysis == 'just expression': perform_alt_analysis = 'expression'
elif perform_alt_analysis == 'no': perform_alt_analysis = 'expression'
elif platform != "3'array": perform_alt_analysis = 'both'
if systemToUse != None: array_type = systemToUse
try: permute_p_threshold = float(permute_p_threshold)
except Exception: permute_p_threshold = permute_p_threshold
### Store variables for AltAnalyzeMain
expr_var = species,array_type,manufacturer,constitutive_source,dabg_p,expression_threshold,avg_all_for_ss,expression_data_format,include_raw_data,run_from_scratch,perform_alt_analysis
alt_var = analysis_method,p_threshold,filter_probeset_types,alt_exon_fold_variable,gene_expression_cutoff,remove_intronic_junctions,permute_p_threshold,perform_permutation_analysis, export_NI_values, analyze_all_conditions
additional_var = calculate_normIntensity_p, run_MiDAS, use_direct_domain_alignments_only, microRNA_prediction_method, filter_for_AS, additional_algorithms
goelite_var = ge_fold_cutoffs,ge_pvalue_cutoffs,ge_ptype,filter_method,z_threshold,p_val_threshold,change_threshold,resources_to_analyze,goelite_permutations,mod,returnPathways
if run_from_scratch == 'buildExonExportFiles':
fl = UI.ExpressionFileLocationData('','','',''); fl.setExonBedBuildStatus('yes'); fl.setFeatureNormalization('none')
fl.setCELFileDir(cel_file_dir); fl.setArrayType(array_type); fl.setOutputDir(output_dir)
fl.setMultiThreading(multiThreading)
exp_file_location_db={}; exp_file_location_db[dataset_name]=fl; parent_dir = output_dir
perform_alt_analysis = 'expression'
if run_from_scratch == 'Process Expression file':
if len(input_exp_file)>0:
if groups_file != None and comps_file != None:
if 'exp.' in input_exp_file: new_exp_file = input_exp_file
else:
new_exp_file = export.findParentDir(input_exp_file)+'exp.'+export.findFilename(input_exp_file)
if 'ExpressionInput' not in new_exp_file:
### This expression file is not currently used (could make it the default after copying to this location)
if output_dir[-1] != '/' and output_dir[-1] != '\\':
output_dir += '/'
new_exp_file = output_dir+'ExpressionInput/'+export.findFilename(new_exp_file)
try: export.copyFile(input_exp_file, new_exp_file)
except Exception: print 'Expression file already present in target location.'
try: export.copyFile(groups_file, string.replace(new_exp_file,'exp.','groups.'))
except Exception: print 'Groups file already present in target location OR bad input path.'
try: export.copyFile(comps_file, string.replace(new_exp_file,'exp.','comps.'))
except Exception: print 'Comparison file already present in target location OR bad input path.'
groups_file = string.replace(new_exp_file,'exp.','groups.')
comps_file = string.replace(new_exp_file,'exp.','comps.')
input_exp_file = new_exp_file
if verifyGroupFileFormat(groups_file) == False:
print "\nWarning! The format of your groups file is not correct. For details, see:\nhttp://code.google.com/p/altanalyze/wiki/ManualGroupsCompsCreation\n"
sys.exit()
try:
cel_files, array_linker_db = ExpressionBuilder.getArrayHeaders(input_exp_file)
if len(input_stats_file)>1: ###Make sure the files have the same arrays and order first
cel_files2, array_linker_db2 = ExpressionBuilder.getArrayHeaders(input_stats_file)
if cel_files2 != cel_files:
print "The probe set p-value file:\n"+input_stats_file+"\ndoes not have the same array order as the\nexpression file. Correct before proceeding."; sys.exit()
except Exception: print '\nWARNING...Expression file not found: "'+input_exp_file+'"\n\n'; sys.exit()
exp_name = string.replace(exp_name,'exp.',''); dataset_name = exp_name; exp_name = string.replace(exp_name,'.txt','')
groups_name = 'ExpressionInput/groups.'+dataset_name; comps_name = 'ExpressionInput/comps.'+dataset_name
groups_file_dir = output_dir+'/'+groups_name; comps_file_dir = output_dir+'/'+comps_name
groups_found = verifyFile(groups_file_dir)
comps_found = verifyFile(comps_file_dir)
if ((groups_found != 'found' or comps_found != 'found') and analyze_all_conditions != 'all groups') or (analyze_all_conditions == 'all groups' and groups_found != 'found'):
files_exported = UI.predictGroupsAndComps(cel_files,output_dir,exp_name)
if files_exported == 'yes': print "AltAnalyze inferred a groups and comps file from the CEL file names."
elif run_lineage_profiler == 'yes' and input_file_dir != None and pipelineAnalysis == False and '--runLineageProfiler' in arguments: pass
else: print '...groups and comps files not found. Create before running AltAnalyze in command line mode.';sys.exit()
fl = UI.ExpressionFileLocationData(input_exp_file,input_stats_file,groups_file_dir,comps_file_dir)
dataset_name = exp_name
if analyze_all_conditions == "all groups":
try: array_group_list,group_db = UI.importArrayGroupsSimple(groups_file_dir,cel_files)
except Exception:
print '...groups and comps files not found. Create before running AltAnalyze in command line mode.';sys.exit()
print len(group_db), 'groups found'
if len(group_db) == 2: analyze_all_conditions = 'pairwise'
exp_file_location_db={}; exp_file_location_db[exp_name]=fl
elif run_from_scratch == 'Process CEL files' or run_from_scratch == 'Process RNA-seq reads' or run_from_scratch == 'Process Feature Extraction files':
if groups_file != None and comps_file != None:
try: shutil.copyfile(groups_file, string.replace(exp_file_dir,'exp.','groups.'))
except Exception: print 'Groups file already present in target location OR bad input path.'
try: shutil.copyfile(comps_file, string.replace(exp_file_dir,'exp.','comps.'))
except Exception: print 'Comparison file already present in target location OR bad input path.'
stats_file_dir = string.replace(exp_file_dir,'exp.','stats.')
groups_file_dir = string.replace(exp_file_dir,'exp.','groups.')
comps_file_dir = string.replace(exp_file_dir,'exp.','comps.')
groups_found = verifyFile(groups_file_dir)
comps_found = verifyFile(comps_file_dir)
if ((groups_found != 'found' or comps_found != 'found') and analyze_all_conditions != 'all groups') or (analyze_all_conditions == 'all groups' and groups_found != 'found'):
if mappedExonAnalysis: pass
else:
files_exported = UI.predictGroupsAndComps(cel_files,output_dir,exp_name)
if files_exported == 'yes': print "AltAnalyze inferred a groups and comps file from the CEL file names."
#else: print '...groups and comps files not found. Create before running AltAnalyze in command line mode.';sys.exit()
fl = UI.ExpressionFileLocationData(exp_file_dir,stats_file_dir,groups_file_dir,comps_file_dir)
exp_file_location_db={}; exp_file_location_db[dataset_name]=fl
parent_dir = output_dir ### interchangable terms (parent_dir used with expression file import)
if analyze_all_conditions == "all groups":
array_group_list,group_db = UI.importArrayGroupsSimple(groups_file_dir,cel_files)
UI.exportGroups(exp_file_location_db,array_group_list)
print len(group_db), 'groups found'
if len(group_db) == 2: analyze_all_conditions = 'pairwise'
try: fl.setRunKallisto(input_fastq_dir)
except Exception: pass
elif run_from_scratch == 'Process AltAnalyze filtered':
if '.txt' in input_filtered_dir: ### Occurs if the user tries to load a specific file
dirs = string.split(input_filtered_dir,'/')
input_filtered_dir = string.join(dirs[:-1],'/')
fl = UI.ExpressionFileLocationData('','','',''); dataset_name = 'filtered-exp_dir'
dirs = string.split(input_filtered_dir,'AltExpression'); parent_dir = dirs[0]
exp_file_location_db={}; exp_file_location_db[dataset_name]=fl
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset_name]
file_location_defaults = UI.importDefaultFileLocations()
apt_location = UI.getAPTLocations(file_location_defaults,run_from_scratch,run_MiDAS)
fl.setAPTLocation(apt_location)
if run_from_scratch == 'Process CEL files':
if xhyb_remove == 'yes' and (array_type == 'gene' or array_type == 'junction'): xhyb_remove = 'no' ### This is set when the user mistakenly selects exon array, initially
fl.setInputCDFFile(input_cdf_file); fl.setCLFFile(clf_file); fl.setBGPFile(bgp_file); fl.setXHybRemoval(xhyb_remove)
fl.setCELFileDir(cel_file_dir); fl.setArrayType(array_type_original); fl.setOutputDir(output_dir)
elif run_from_scratch == 'Process RNA-seq reads':
fl.setCELFileDir(cel_file_dir); fl.setOutputDir(output_dir)
elif run_from_scratch == 'Process Feature Extraction files':
fl.setCELFileDir(cel_file_dir); fl.setOutputDir(output_dir)
fl = exp_file_location_db[dataset]; fl.setRootDir(parent_dir)
try: apt_location = fl.APTLocation()
except Exception: apt_location = ''
root_dir = fl.RootDir(); fl.setExonBedBuildStatus(build_exon_bedfile)
fl.setMarkerFinder(marker_finder)
fl.setFeatureNormalization(normalize_feature_exp)
fl.setNormMatrix(normalize_gene_data)
fl.setProbabilityStatistic(probability_statistic)
fl.setProducePlots(visualize_qc_results)
fl.setPerformLineageProfiler(run_lineage_profiler)
fl.setCompendiumType(compendiumType)
fl.setCompendiumPlatform(compendiumPlatform)
fl.setVendor(manufacturer)
try: fl.setFDRStatistic(FDR_statistic)
except Exception: pass
fl.setAnalysisMode('commandline')
fl.setBatchEffectRemoval(batch_effects)
fl.setChannelToExtract(channel_to_extract)
fl.setMultiThreading(multiThreading)
try: fl.setExcludeLowExpressionExons(excludeNonExpExons)
except Exception: fl.setExcludeLowExpressionExons(True)
if 'other' in manufacturer or 'Other' in manufacturer:
### For data without a primary array ID key
manufacturer = "other:3'array"
fl.setVendor(manufacturer)
if array_type == 'RNASeq': ### Post version 2.0, add variables in fl rather than below
fl.setRPKMThreshold(rpkm_threshold)
fl.setExonExpThreshold(exon_exp_threshold)
fl.setGeneExpThreshold(gene_exp_threshold)
fl.setExonRPKMThreshold(exon_rpkm_threshold)
fl.setJunctionExpThreshold(expression_threshold)
fl.setExonMapFile(exonMapFile)
fl.setPlatformType(platformType)
### Verify database presence
try: dirs = unique.read_directory('/AltDatabase')
except Exception: dirs=[]
if species not in dirs:
print '\n'+species,'species not yet installed. Please install before proceeding (e.g., "python AltAnalyze.py --update Official --species',species,'--version EnsMart75").'
global commandLineMode; commandLineMode = 'yes'
AltAnalyzeMain(expr_var, alt_var, goelite_var, additional_var, exp_file_location_db,None)
else:
print 'Insufficient Flags entered (requires --species and --output)'
def cleanUpCommandArguments():
### Needed on PC
command_args = string.join(sys.argv,' ')
arguments = string.split(command_args,' --')
for argument in arguments:
"""
argument_list = string.split(argument,' ')
if len(argument_list)>2:
filename = string.join(argument_list[1:],' ')
argument = argument_list[0]+' '+string.replace(filename,' ','$$$')
"""
argument_list = string.split(argument,' ')
#argument = string.join(re.findall(r"\w",argument),'')
if ':' in argument: ### Windows OS
z = string.find(argument_list[1],':')
if z!= -1 and z!=1: ### Hence, it is in the argument but not at the second position
print 'Illegal parentheses found. Please re-type these and re-run.'; sys.exit()
def runCommandLineVersion():
### This code had to be moved to a separate function to prevent iterative runs upon AltAnalyze.py re-import
command_args = string.join(sys.argv,' ')
#try: cleanUpCommandArguments()
#except Exception: null=[]
print 3,[sys.argv],
if len(sys.argv[1:])>0 and '--' in command_args:
if '--GUI' in command_args:
### Hard-restart of AltAnalyze while preserving the prior parameters
command_arguments = string.split(command_args,' --')
if len(command_arguments)>2:
command_arguments = map(lambda x: string.split(x,' '),command_arguments)
command_arguments = map(lambda (x,y): (x,string.replace(y,'__',' ')),command_arguments[2:])
selected_parameters = [command_arguments[0][1]]
user_variables={}
for (o,v) in command_arguments: user_variables[o]=v
AltAnalyzeSetup((selected_parameters,user_variables))
else:
AltAnalyzeSetup('no') ### a trick to get back to the main page of the GUI (if AltAnalyze has Tkinter conflict)
try:
commandLineRun()
except Exception:
print traceback.format_exc()
###### Determine Command Line versus GUI Control ######
### When several "-" flags are present, command-line mode is assumed and the
### Tkinter GUI stack is never imported; otherwise load the GUI modules here.
command_args = string.join(sys.argv,' ')
if len(sys.argv[1:])>1 and '-' in command_args: null=[]  # command-line mode: skip GUI imports
else:
    try:
        import Tkinter
        from Tkinter import *
        import PmwFreeze
        import tkFileDialog
        from tkFont import Font
        use_Tkinter = 'yes'
    ### NOTE(review): use_Tkinter is set to 'yes' even when the import fails,
    ### so the GUI launch below may still be attempted — presumably deliberate
    ### (lets a later Tkinter error surface there); confirm before changing.
    except ImportError: use_Tkinter = 'yes'; print "\nPmw or Tkinter not found... Tkinter print out not available";
def testResultsPanel():
import QC
file = "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/3'Array/Merrill/ExpressionInput/exp.test.txt"
#QC.outputArrayQC(file)
global root; root = Tk()
global pathway_permutations; pathway_permutations = 'NA'
global log_file; log_file = 'null.txt'
global array_type; global explicit_data_type
global run_GOElite; run_GOElite = 'run-immediately'
explicit_data_type = 'exon-only'
array_type = 'RNASeq'
fl = UI.ExpressionFileLocationData('','','','')
graphic_links = []
graphic_links.append(['PCA','PCA.png'])
graphic_links.append(['HC','HC.png'])
graphic_links.append(['PCA1','PCA.png'])
graphic_links.append(['HC1','HC.png'])
graphic_links.append(['PCA2','PCA.png'])
graphic_links.append(['HC2','HC.png'])
graphic_links.append(['PCA3','PCA.png'])
graphic_links.append(['HC3','HC.png'])
graphic_links.append(['PCA4','PCA.png'])
graphic_links.append(['HC4','HC.png'])
summary_db={}
summary_db['QC'] = graphic_links
#summary_db={}
fl.setGraphicLinks(graphic_links)
summary_db['gene_assayed'] = 1
summary_db['denominator_exp_genes'] = 1
summary_db['alt_events'] = 1
summary_db['denominator_exp_events'] = 1
summary_db['alt_events'] = 1
summary_db['denominator_exp_events'] = 1
summary_db['alt_events'] = 1
summary_db['denominator_exp_events'] = 1
summary_db['alt_genes'] = 1
summary_db['direct_domain_genes'] = 1
summary_db['miRNA_gene_hits'] = 1
#summary_db={}
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
dataset = 'test'; results_dir=''
print "Analysis Complete\n";
if root !='' and root !=None:
UI.InfoWindow(print_out,'Analysis Completed!')
tl = Toplevel(); SummaryResultsWindow(tl,'GE',results_dir,dataset,'parent',summary_db)
print 'here'
#sys.exit()
class Logger(object):
    ### Tee object assigned to sys.stdout: echoes every message to the real
    ### terminal and appends it to the global log_file path.
    def __init__(self,null):
        # `null` is unused but kept for caller compatibility
        self.terminal = sys.stdout
        ### Open once with "w" to truncate any previous log, then close the
        ### handle immediately — the original left it dangling until the
        ### first write() rebound self.log
        self.log = open(log_file, "w")
        self.log.close()
    def write(self, message):
        ### Re-opening in append mode per write keeps the file flushed even
        ### if the process dies without a clean shutdown
        self.terminal.write(message)
        self.log = open(log_file, "a")
        self.log.write(message)
        self.log.close()
    def flush(self): pass
def verifyPath(filename):
    ### Return a CWD-anchored path when the file exists in the current
    ### working directory; otherwise hand back the filename unchanged.
    ### Any lookup failure is deliberately ignored (best-effort resolution).
    resolved = filename
    try:
        cwd = os.getcwd()
        if filename in unique.read_directory(cwd):
            resolved = cwd+'/'+resolved
    except Exception:
        pass
    return resolved
def dependencyCheck():
### Make sure core dependencies for AltAnalyze are met and if not report back
from pkgutil import iter_modules
modules = set(x[1] for x in iter_modules()) ### all installed modules
dependent_modules = ['string','csv','base64','getpass','requests']
dependent_modules += ['warnings','sklearn','os','webbrowser']
dependent_modules += ['scipy','numpy','matplotlib','igraph','pandas','patsy']
dependent_modules += ['ImageTk','PIL','cairo','wx','fastcluster','pysam', 'Tkinter']
print ''
count=0
for module in dependent_modules:
if module not in modules:
print 'AltAnalyze depedency not met for:',module
if 'fastcluster' == module:
print '...Faster hierarchical cluster not supported without fastcluster'
if 'pysam' == module:
print '...BAM file access not supported without pysam'
if 'scipy' == module:
print '...Many required statistical routines not supported without scipy'
if 'numpy' == module:
print '...Many required statistical routines not supported without numpy'
if 'matplotlib' == module:
print '...Core graphical outputs not supported without matplotlib'
if 'requests' == module:
print '...Wikipathways visualization not supported without requests'
if 'lxml' == module:
print '...Wikipathways visualization not supported without lxml'
if 'wx' == module:
print '...The AltAnalyze Results Viewer requires wx'
if 'ImageTk' == module or 'PIL' == module:
print '...Some graphical results displays require ImageTk and PIL'
if 'Tkinter' == module:
print '...AltAnalyze graphical user interface mode requires Tkinter'
if 'igraph' == module or 'cairo' == module:
print '...Network visualization requires igraph and cairo'
if 'sklearn' == module:
print '...t-SNE analysis requires sklearn'
if 'pandas' == module or 'patsy' == module:
print '...Combat batch effects correction requires pandas and patsy'
count+=1
if count>0:
print '\nWARNING!!!! Some dependencies are not currently met.'
print "This may impact AltAnalyze's performance\n"
if __name__ == '__main__':
    ### Entry point: support multiprocessing in frozen builds, run any
    ### command-line analysis, report missing dependencies, then fall back
    ### to the Tkinter GUI when available.
    try: mlp.freeze_support()  # required for multiprocessing in frozen (py2exe/py2app) builds
    except Exception: pass
    #testResultsPanel()
    skip_intro = 'yes'; #sys.exit()
    #skip_intro = 'remoteViewer'
    runCommandLineVersion()
    dependencyCheck()
    if use_Tkinter == 'yes': AltAnalyzeSetup(skip_intro)  # launch the GUI, skipping the intro screen
""" To do list:
1) RNA-Seq and LineageProfiler: threshold based RPKM expression filtering for binary absent present gene and exon calls
3) SQLite for gene-set databases prior to clustering and network visualization
5) (explored - not good) Optional algorithm type of PCA
7) (partially) Integrate splicing factor enrichment analysis (separate module?)
11) Update fields in summary combined alt.exon files (key by probeset)
12) Check field names for junction, exon, RNA-Seq in summary alt.exon report
14) Proper FDR p-value for alt.exon analyses (include all computed p-values)
15) Add all major clustering and LineageProfiler options to UI along with stats filtering by default
17) Support R check (and response that they need it) along with GUI gcrma, agilent array, hopach, combat
18) Probe-level annotations from Ensembl (partial code in place) and probe-level RMA in R (or possibly APT) - google pgf for U133 array
19) Update the software from within the software (self-update)
Advantages of this tool kit:
0) Easiest to use, hands down
1) Established and novel functionality for transcriptome/proteomics analysis built in
2) Independent and cooperative options for RNA-Seq and array analysis (splicing and gene expression)
3) Superior functional analyses (TF-target, splicing-factor target, lineage markers, WikiPathway visualization)
4) Options for different levels of users with different integration options (multiple statistical method options, option R support)
5) Built in secondary analysis options for already processed data (graphing, clustering, biomarker discovery, pathway analysis, network visualization)
6) Incorporates highly validated alternative exon identification methods, independent and jointly
Primary Engineer Work:
0) C-library calls and/or multithreading where applicable to improve performance.
1) MySQL or equivalent transition for all large database queries (e.g., HuEx 2.1 on-the-fly coordinate mapping).
3) Isoform-domain network visualization and WP overlays.
4) Webservice calls to in silico protein translation, domain prediction, splicing factor regulation.
### 2.0.9
monocle integration
generic and cell classification machine learning
PCR primer design (gene centric after file selection)
BAM->BED (local SAMTools)
updated APT
""" | {
"repo_name": "wuxue/altanalyze",
"path": "AltAnalyze.py",
"copies": "1",
"size": "493280",
"license": "apache-2.0",
"hash": -4702367625643405000,
"line_mean": 60.0949962844,
"line_max": 423,
"alpha_frac": 0.6119952157,
"autogenerated": false,
"ratio": 3.733236460509188,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4845231676209188,
"avg_score": null,
"num_lines": null
} |
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys, string
import os.path
import unique
import platform
import export
def filepath(filename):
    """Resolve *filename* to a usable path via the project's unique module."""
    return unique.filepath(filename)
def read_directory(sub_dir):
    """Return the file names found in *sub_dir* (delegates to the unique module)."""
    return unique.read_directory(sub_dir)
def eliminate_redundant_dict_values(database):
    """Return a copy of *database* with each value list deduplicated and sorted."""
    deduped = {}
    for key in database:
        values = unique.unique(database[key])  # project helper: remove duplicates
        values.sort()
        deduped[key] = values
    return deduped
class GrabFiles:
def setdirectory(self,value):
self.data = value
def display(self):
print self.data
def searchdirectory(self,search_term):
#self is an instance while self.data is the value of the instance
file_dir,file = getDirectoryFiles(self.data,str(search_term))
if len(file)<1: print search_term,'not found'
return file_dir,file
def getDirectoryFiles(import_dir, search_term):
    """Scan *import_dir* for a file whose full path contains *search_term*.

    Returns (full_path, filename); both are '' when nothing matches. If
    several files match, the last one wins.
    """
    exact_file = ''
    exact_file_dir = ''
    dir_list = read_directory(import_dir)  # every entry in the directory
    import_dir = filepath(import_dir)
    for entry in dir_list:
        # Windows drive paths (':'), Mac home paths, and Linux keep the path
        # as-is; otherwise a leading character is stripped -- presumably a
        # stray '/' prefix convention in this codebase (TODO confirm).
        if (':' in import_dir) or ('/Users/' == import_dir[:7]) or ('Linux' in platform.system()):
            candidate = import_dir + '/' + entry
        else:
            candidate = import_dir[1:] + '/' + entry
        if search_term in candidate:
            exact_file_dir = candidate
            exact_file = entry
    return exact_file_dir, exact_file
def makeUnique(item):
    """Return a sorted list of the distinct entries in *item*.

    Unhashable entries (e.g. lists) are keyed by their tuple form and
    converted back to lists in the result.
    """
    seen = {}
    had_unhashable = False
    for entry in item:
        try:
            seen[entry] = []
        except TypeError:
            seen[tuple(entry)] = []
            had_unhashable = True
    if had_unhashable:
        result = [list(entry) for entry in seen]
    else:
        result = [entry for entry in seen]
    return sorted(result)
def cleanUpLine(line):
    """Strip line terminators, literal backslash-c sequences, and double quotes.

    Rewritten with str methods: the original used the Python-2-only
    ``string.replace`` function (removed in Python 3) and the invalid escape
    ``'\c'`` (which only happens to mean backslash-c because the escape is
    unrecognized). str.replace behaves identically under Python 2.
    """
    for unwanted in ('\n', '\\c', '\r', '"'):
        line = line.replace(unwanted, '')
    return line
def importPGF(dir,species,filename):
    """Parse an Affymetrix PGF library file and export two mapping tables.

    Writes <species>_probeset-psr.txt (probeset/PSR ID -> last column of each
    probeset-level row; used for HTA PSR-to-probeset mapping) and
    <species>_probeset-probes.txt (probeset ID -> individual probe IDs).
    Relies on the module-level global `array_type` set in __main__.
    """
    fn=filepath(filename); probe_db = {}; x=0
    psr_file = dir+'/'+species+'/'+array_type+'/'+species+'_probeset-psr.txt'
    psr_file = string.replace(psr_file,'affymetrix/LibraryFiles/','')
    # If the first path form fails, retry with the leading character stripped.
    try: eo = export.ExportFile(filepath(psr_file))
    except Exception: eo = export.ExportFile(filepath(psr_file[1:]))
    for line in open(fn,'rU').xreadlines():
        if line[0] != '#':  # skip PGF header comment lines
            data = cleanUpLine(line); x+=1
            t = string.split(data,'\t')
            if len(t)==2 or len(t)==3:
                if len(t[0])>0:
                    # Probeset-level row: first field holds the probeset/PSR ID.
                    probeset = t[0]; type = t[1]
                    eo.write(probeset+'\t'+t[-1]+'\n') ### Used for HTA array where we need to have PSR to probeset IDs
                else:
                    # Probe-level row (leading field empty): third field is the
                    # probe ID, grouped under the most recent probeset seen.
                    try:
                        probe = t[2]
                        #if probeset == '10701621': print probe
                        try: probe_db[probeset].append(probe)
                        except KeyError: probe_db[probeset] = [probe]
                    except Exception: null=[]  # malformed row: ignore
    eo.close()
    new_file = dir+'/'+species+'/'+array_type+'/'+species+'_probeset-probes.txt'
    new_file = string.replace(new_file,'affymetrix/LibraryFiles/','')
    headers = 'probeset\t' + 'probe\n'; n=0
    try: data = export.ExportFile(filepath(new_file))
    except Exception: data = export.ExportFile(filepath(new_file[1:]))
    data.write(headers)
    for probeset in probe_db:
        for probe in probe_db[probeset]:
            data.write(probeset+'\t'+probe+'\n'); n+=1
    data.close()
    print n, 'Entries exported for', new_file
if __name__ == '__main__':
    skip_intro = 'yes'
    array_type = 'gene'
    #array_type = 'exon'
    # NOTE(review): overrides the 'gene' assignment above; 'junction' is the
    # value actually used.
    array_type = 'junction'
    species = 'Mm'
    parent_dir = 'AltDatabase/'+species+'/'+array_type+'/library'
    # Overrides the species-specific path above; that value is unused.
    parent_dir = '/AltDatabase/affymetrix/LibraryFiles'
    e = GrabFiles(); e.setdirectory(parent_dir)
    # Locate the MTA 1.0 PGF library file and export its probe mappings.
    pgf_dir,pgf_file = e.searchdirectory('MTA-1_0.r3.pgf')
    importPGF(parent_dir,species,pgf_dir)
| {
"repo_name": "wuxue/altanalyze",
"path": "ParsePGF.py",
"copies": "1",
"size": "5014",
"license": "apache-2.0",
"hash": 2885942311884667000,
"line_mean": 39.435483871,
"line_max": 134,
"alpha_frac": 0.6414040686,
"autogenerated": false,
"ratio": 3.4843641417651146,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.949308889947644,
"avg_score": 0.02653586217773494,
"num_lines": 124
} |
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import platform
def filepath(filename):
    """Identity pass-through kept for API parity with the main codebase."""
    #fn = unique.filepath(filename)
    return filename
def read_directory(sub_dir):
    """Return the names of the entries in *sub_dir*."""
    return os.listdir(sub_dir)
def eliminate_redundant_dict_values(database):
    """Return a copy of *database* whose value lists are deduplicated and sorted."""
    return dict((key, sorted(set(database[key]))) for key in database)
class GrabFiles:
def setdirectory(self,value):
self.data = value
def display(self):
print self.data
def searchdirectory(self,search_term):
#self is an instance while self.data is the value of the instance
file_dir,file = getDirectoryFiles(self.data,str(search_term))
if len(file)<1: print search_term,'not found'
return file_dir,file
def getDirectoryFiles(import_dir, search_term):
    """Scan *import_dir* for a file whose full path contains *search_term*.

    Returns (full_path, filename); both are '' when nothing matches. If
    several files match, the last one wins.
    """
    exact_file = ''
    exact_file_dir = ''
    dir_list = read_directory(import_dir)  # every entry in the directory
    import_dir = filepath(import_dir)
    for entry in dir_list:
        # Windows drive paths (':'), Mac home paths, and Linux keep the path
        # as-is; otherwise a leading character is stripped -- presumably a
        # stray '/' prefix convention in this codebase (TODO confirm).
        if (':' in import_dir) or ('/Users/' == import_dir[:7]) or ('Linux' in platform.system()):
            candidate = import_dir + '/' + entry
        else:
            candidate = import_dir[1:] + '/' + entry
        if search_term in candidate:
            exact_file_dir = candidate
            exact_file = entry
    return exact_file_dir, exact_file
def makeUnique(item):
    """Return a sorted list of the distinct entries in *item*.

    Unhashable entries (e.g. lists) are keyed by their tuple form and
    converted back to lists in the result.
    """
    seen = {}
    had_unhashable = False
    for entry in item:
        try:
            seen[entry] = []
        except TypeError:
            seen[tuple(entry)] = []
            had_unhashable = True
    if had_unhashable:
        result = [list(entry) for entry in seen]
    else:
        result = [entry for entry in seen]
    return sorted(result)
def cleanUpLine(line):
    """Strip line terminators, literal backslash-c sequences, and double quotes.

    Rewritten with str methods: the original used the Python-2-only
    ``string.replace`` function (removed in Python 3) and the invalid escape
    ``'\c'`` (which only happens to mean backslash-c because the escape is
    unrecognized). str.replace behaves identically under Python 2.
    """
    for unwanted in ('\n', '\\c', '\r', '"'):
        line = line.replace(unwanted, '')
    return line
def importPGF(dir,species,filename):
    """Parse an Affymetrix PGF library file and export two mapping tables.

    Writes <species>_probeset-psr.txt (probeset/PSR ID -> last column of each
    probeset-level row; used for HTA PSR-to-probeset mapping) and
    <species>_probeset-probes.txt (probeset ID -> individual probe IDs).
    Relies on the module-level global `array_type` set in __main__.
    """
    fn=filepath(filename); probe_db = {}; x=0
    psr_file = dir+'/'+species+'/'+array_type+'/'+species+'_probeset-psr.txt'
    psr_file = string.replace(psr_file,'affymetrix/LibraryFiles/','')
    # If the first path form fails, retry with the leading character stripped.
    try: eo = open(filepath(psr_file),'w')
    except Exception: eo = open(filepath(psr_file[1:]),'w')
    for line in open(fn,'rU').xreadlines():
        if line[0] != '#':  # skip PGF header comment lines
            data = cleanUpLine(line); x+=1
            t = string.split(data,'\t')
            if len(t)==2 or len(t)==3:
                if len(t[0])>0:
                    # Probeset-level row: first field holds the probeset/PSR ID.
                    probeset = t[0]; type = t[1]
                    eo.write(probeset+'\t'+t[-1]+'\n') ### Used for HTA array where we need to have PSR to probeset IDs
                else:
                    # Probe-level row (leading field empty): third field is the
                    # probe ID, grouped under the most recent probeset seen.
                    try:
                        probe = t[2]
                        #if probeset == '10701621': print probe
                        try: probe_db[probeset].append(probe)
                        except KeyError: probe_db[probeset] = [probe]
                    except Exception: null=[]  # malformed row: ignore
    eo.close()
    new_file = dir+'/'+species+'/'+array_type+'/'+species+'_probeset-probes.txt'
    new_file = string.replace(new_file,'affymetrix/LibraryFiles/','')
    headers = 'probeset\t' + 'probe\n'; n=0
    try: data = open(filepath(new_file),'w')
    except Exception: data = open(filepath(new_file[1:]),'w')
    data.write(headers)
    for probeset in probe_db:
        for probe in probe_db[probeset]:
            data.write(probeset+'\t'+probe+'\n'); n+=1
    data.close()
    print n, 'Entries exported for', new_file
if __name__ == '__main__':
    skip_intro = 'yes'
    array_type = 'gene'
    #array_type = 'exon'
    #array_type = 'junction'
    # NOTE(review): redundant re-assignment; array_type is already 'gene'.
    array_type = 'gene'
    species = 'Mm'
    parent_dir = 'AltDatabase/'+species+'/'+array_type+'/library'
    # Overrides the species-specific path above; that value is unused.
    parent_dir = '/AltDatabase/affymetrix/LibraryFiles'
    e = GrabFiles(); e.setdirectory(parent_dir)
    # Locate the MoGene 2.0 ST PGF library file and export its probe mappings.
    pgf_dir,pgf_file = e.searchdirectory('MoGene-2_0-st.pgf')
    importPGF(parent_dir,species,pgf_dir)
| {
"repo_name": "nsalomonis/AltAnalyze",
"path": "build_scripts/ParsePGF.py",
"copies": "1",
"size": "5177",
"license": "apache-2.0",
"hash": 2827505809345064400,
"line_mean": 39.75,
"line_max": 134,
"alpha_frac": 0.6200502221,
"autogenerated": false,
"ratio": 3.5074525745257454,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9485179739837066,
"avg_score": 0.02846461135773592,
"num_lines": 124
} |
"""Alter and add many columns in DetachedAwardProcurement and AwardProcurement
Revision ID: d45dde2ba15b
Revises: 001758a1ab82
Create Date: 2018-03-09 14:08:13.058669
"""
# revision identifiers, used by Alembic.
revision = 'd45dde2ba15b'
down_revision = '001758a1ab82'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine (upgrade_<engine_name>)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine (downgrade_<engine_name>)."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_data_broker():
    """Rename several law-reference columns and add new columns on the
    award_procurement and detached_award_procurement tables.
    (Commands originally auto generated by Alembic.)
    """
    # award_procurement: renames first, then new nullable text columns.
    for old, new in [
            ('walsh_healey_act', 'materials_supplies_article'),
            ('service_contract_act', 'labor_standards'),
            ('davis_bacon_act', 'construction_wage_rate_req'),
            ('government_furnished_equip', 'government_furnished_prope')]:
        op.execute("ALTER TABLE award_procurement RENAME COLUMN %s TO %s" % (old, new))
    for column in ('cage_code', 'inherently_government_func', 'organizational_type',
                   'number_of_employees', 'annual_revenue', 'total_obligated_amount'):
        op.add_column('award_procurement', sa.Column(column, sa.Text(), nullable=True))
    # detached_award_procurement: same pattern, including description columns.
    for old, new in [
            ('walsh_healey_act', 'materials_supplies_article'),
            ('walsh_healey_act_descrip', 'materials_supplies_descrip'),
            ('service_contract_act', 'labor_standards'),
            ('service_contract_act_desc', 'labor_standards_descrip'),
            ('davis_bacon_act', 'construction_wage_rate_req'),
            ('davis_bacon_act_descrip', 'construction_wage_rat_desc'),
            ('government_furnished_equip', 'government_furnished_prope')]:
        op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN %s TO %s" % (old, new))
    for column in ('cage_code', 'inherently_government_func', 'organizational_type'):
        op.add_column('detached_award_procurement', sa.Column(column, sa.Text(), nullable=True))
def downgrade_data_broker():
    """Reverse upgrade_data_broker: restore the original column names and drop
    the added columns (column data is lost).
    (Commands originally auto generated by Alembic.)
    """
    for old, new in [
            ('materials_supplies_article', 'walsh_healey_act'),
            ('labor_standards', 'service_contract_act'),
            ('construction_wage_rate_req', 'davis_bacon_act'),
            ('government_furnished_prope', 'government_furnished_equip')]:
        op.execute("ALTER TABLE award_procurement RENAME COLUMN %s TO %s" % (old, new))
    for column in ('cage_code', 'inherently_government_func', 'organizational_type',
                   'number_of_employees', 'annual_revenue', 'total_obligated_amount'):
        op.drop_column('award_procurement', column)
    for old, new in [
            ('materials_supplies_article', 'walsh_healey_act'),
            ('materials_supplies_descrip', 'walsh_healey_act_descrip'),
            ('labor_standards', 'service_contract_act'),
            ('labor_standards_descrip', 'service_contract_act_desc'),
            ('construction_wage_rate_req', 'davis_bacon_act'),
            ('construction_wage_rat_desc', 'davis_bacon_act_descrip'),
            ('government_furnished_prope', 'government_furnished_equip')]:
        op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN %s TO %s" % (old, new))
    for column in ('cage_code', 'inherently_government_func', 'organizational_type'):
        op.drop_column('detached_award_procurement', column)
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/d45dde2ba15b_alter_detached_regular_award_procurement.py",
"copies": "1",
"size": "4837",
"license": "cc0-1.0",
"hash": -8356240148092870000,
"line_mean": 59.4625,
"line_max": 127,
"alpha_frac": 0.7614223692,
"autogenerated": false,
"ratio": 3.408738548273432,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4670160917473432,
"avg_score": null,
"num_lines": null
} |
"""alter biobank dv table
Revision ID: 534d805d5dcf
Revises: dc971fc16861
Create Date: 2019-03-18 13:23:40.194824
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = "534d805d5dcf"
down_revision = "dc971fc16861"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine (upgrade_<engine_name>)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine (downgrade_<engine_name>)."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Add a version counter column to biobank_dv_order.
    op.add_column("biobank_dv_order", sa.Column("version", sa.Integer(), nullable=False))
    # Tighten `modified` to NOT NULL, keeping the existing auto-update default.
    op.alter_column(
        "biobank_dv_order",
        "modified",
        existing_type=mysql.DATETIME(fsp=6),
        nullable=False,
        existing_server_default=sa.text("CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6)"),
    )
    # NOTE(review): constraint name is None, so the dialect auto-generates it.
    op.create_unique_constraint(None, "biobank_dv_order", ["biobank_order_id"])
    op.drop_column("biobank_dv_order", "biobank_reference")
    # ### end Alembic commands ###
def downgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Restore the dropped biobank_reference column (its data is not recovered).
    op.add_column("biobank_dv_order", sa.Column("biobank_reference", mysql.VARCHAR(length=80), nullable=True))
    # NOTE(review): constraint name is None; relies on the dialect's naming
    # convention to locate the auto-generated unique constraint.
    op.drop_constraint(None, "biobank_dv_order", type_="unique")
    # Relax `modified` back to nullable, keeping the auto-update default.
    op.alter_column(
        "biobank_dv_order",
        "modified",
        existing_type=mysql.DATETIME(fsp=6),
        nullable=True,
        existing_server_default=sa.text("CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6)"),
    )
    op.drop_column("biobank_dv_order", "version")
    # ### end Alembic commands ###
def upgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No changes to the metrics database in this revision.
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No changes to the metrics database in this revision.
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/534d805d5dcf_alter_biobank_dv_table.py",
"copies": "1",
"size": "1944",
"license": "bsd-3-clause",
"hash": -5627673043598841000,
"line_mean": 28.4545454545,
"line_max": 110,
"alpha_frac": 0.6594650206,
"autogenerated": false,
"ratio": 3.452930728241563,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9609293214489029,
"avg_score": 0.0006205068705068705,
"num_lines": 66
} |
"""Alter Constraints
Revision ID: 3a37e844b277
Revises: b237b9f6a2ce
Create Date: 2016-05-30 15:57:56.017519
"""
# revision identifiers, used by Alembic.
revision = '3a37e844b277'
down_revision = 'b237b9f6a2ce'
from alembic import op
import sqlalchemy as sa
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Drop the split first/last name columns from users.
    op.drop_column('users', 'last_name')
    op.drop_column('users', 'first_name')
    ### end Alembic commands ###
    # Recreate the author foreign keys with ON DELETE CASCADE so notes and
    # notebooks are removed together with their owner. batch_alter_table is
    # used for the drops -- presumably for SQLite compatibility (TODO confirm).
    with op.batch_alter_table("notes") as batch_op:
        batch_op.drop_constraint(
            "notes_author_id_fkey", type_="foreignkey")
    with op.batch_alter_table("notebooks") as batch_op:
        batch_op.drop_constraint(
            "notebooks_author_id_fkey", type_="foreignkey")
    op.create_foreign_key(
        "notes_author_id_fkey", "notes", "users",
        ["author_id"], ["id"], ondelete="CASCADE")
    op.create_foreign_key(
        "notebooks_author_id_fkey", "notebooks", "users",
        ["author_id"], ["id"], ondelete="CASCADE")
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Re-add the name columns removed by upgrade() (their contents are lost).
    op.add_column('users', sa.Column('first_name', sa.VARCHAR(length=200), autoincrement=False, nullable=True))
    op.add_column('users', sa.Column('last_name', sa.VARCHAR(length=200), autoincrement=False, nullable=True))
    ### end Alembic commands ###
    # NOTE(review): the CASCADE foreign keys created in upgrade() are not
    # reverted here; downgrading leaves them in place.
| {
"repo_name": "levlaz/braindump",
"path": "migrations/versions/3a37e844b277_alter_constraints.py",
"copies": "1",
"size": "1358",
"license": "mit",
"hash": -2444169952049946000,
"line_mean": 32.975,
"line_max": 111,
"alpha_frac": 0.6524300442,
"autogenerated": false,
"ratio": 3.288135593220339,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44405656374203395,
"avg_score": null,
"num_lines": null
} |
"""alter database for mysql compatibility
Revision ID: 9be372ec38bc
Revises: 4328f2c08f05
Create Date: 2020-02-16 15:43:35.276655
"""
from alembic import op
import sqlalchemy as sa
from docassemble.webapp.database import dbtableprefix, dbprefix, daconfig
import sys
# revision identifiers, used by Alembic.
revision = '9be372ec38bc'
down_revision = '4328f2c08f05'
branch_labels = None
depends_on = None
def upgrade():
    """Convert unbounded TEXT columns to bounded VARCHAR for MySQL compatibility."""
    # PostgreSQL indexes TEXT columns fine, so the conversion is skipped there
    # unless explicitly forced via configuration.
    if dbprefix.startswith('postgresql') and not daconfig.get('force text to varchar upgrade', False):
        sys.stderr.write("Not changing text type to varchar type because underlying database is PostgreSQL\n")
    else:
        text_to_varchar = [
            ('userdict', 'filename', 255),
            ('userdictkeys', 'filename', 255),
            ('chatlog', 'filename', 255),
            ('uploads', 'filename', 255),
            ('uploads', 'yamlfile', 255),
            ('objectstorage', 'key', 1024),
            ('speaklist', 'filename', 255),
            ('shortener', 'filename', 255),
            ('shortener', 'key', 255),
            ('machinelearning', 'key', 1024),
            ('machinelearning', 'group_id', 1024),
            ('globalobjectstorage', 'key', 1024),
        ]
        for table, column, size in text_to_varchar:
            op.alter_column(table_name=table, column_name=column, type_=sa.String(size))
    op.create_index(dbtableprefix + 'ix_uploads_yamlfile', 'uploads', ['yamlfile'])
def downgrade():
    """Revert every VARCHAR column back to unbounded TEXT and drop the index."""
    varchar_to_text = [
        ('userdict', 'filename'),
        ('userdictkeys', 'filename'),
        ('chatlog', 'filename'),
        ('uploads', 'filename'),
        ('uploads', 'yamlfile'),
        ('objectstorage', 'key'),
        ('speaklist', 'filename'),
        ('shortener', 'filename'),
        ('shortener', 'key'),
        ('machinelearning', 'key'),
        ('machinelearning', 'group_id'),
        ('globalobjectstorage', 'key'),
    ]
    for table, column in varchar_to_text:
        op.alter_column(table_name=table, column_name=column, type_=sa.Text())
    op.drop_index(dbtableprefix + 'ix_uploads_yamlfile', table_name='uploads')
| {
"repo_name": "jhpyle/docassemble",
"path": "docassemble_webapp/docassemble/webapp/alembic/versions/9be372ec38bc_alter_database_for_mysql_compatibility.py",
"copies": "1",
"size": "3895",
"license": "mit",
"hash": 3524080116712008700,
"line_mean": 25.6780821918,
"line_max": 110,
"alpha_frac": 0.5406931964,
"autogenerated": false,
"ratio": 3.8111545988258317,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9846501469717055,
"avg_score": 0.0010692651017553422,
"num_lines": 146
} |
"""Alter meeting id columns from integer to string
Revision ID: 54b91da358e
Revises: 40d44f5e7b69
Create Date: 2014-09-26 15:02:36.192223
"""
# revision identifiers, used by Alembic.
revision = '54b91da358e'
down_revision = '40d44f5e7b69'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Convert the meeting id columns from INTEGER to TEXT."""
    for column, nullable in (('parliament_id', False),
                             ('session_id', True),
                             ('sitting_id', True)):
        op.alter_column('meeting', column,
                        existing_type=sa.INTEGER(),
                        type_=sa.Text(),
                        existing_nullable=nullable)
def downgrade():
    """Revert the meeting id columns from TEXT back to INTEGER."""
    for column, nullable in (('sitting_id', True),
                             ('session_id', True),
                             ('parliament_id', False)):
        op.alter_column('meeting', column,
                        existing_type=sa.Text(),
                        type_=sa.INTEGER(),
                        existing_nullable=nullable)
| {
"repo_name": "teampopong/pokr.kr",
"path": "alembic/versions/54b91da358e_.py",
"copies": "1",
"size": "1331",
"license": "apache-2.0",
"hash": 5222914808648766000,
"line_mean": 28.5777777778,
"line_max": 50,
"alpha_frac": 0.5800150263,
"autogenerated": false,
"ratio": 3.717877094972067,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4797892121272067,
"avg_score": null,
"num_lines": null
} |
"""alter metricsRaceCache table and create indexes
Revision ID: bf7f784daca9
Revises: 93d831aa6fb4
Create Date: 2019-01-31 16:53:34.008379
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = "bf7f784daca9"
down_revision = "93d831aa6fb4"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine (upgrade_<engine_name>)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine (downgrade_<engine_name>)."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_index("calendar_idx", "calendar", ["day"], unique=False)
    # Replace the one-row-per-race layout of metrics_race_cache with one
    # integer column per race category.
    op.add_column("metrics_race_cache", sa.Column("american_indian_alaska_native", sa.Integer(), nullable=False))
    op.add_column("metrics_race_cache", sa.Column("asian", sa.Integer(), nullable=False))
    op.add_column("metrics_race_cache", sa.Column("black_african_american", sa.Integer(), nullable=False))
    op.add_column("metrics_race_cache", sa.Column("middle_eastern_north_african", sa.Integer(), nullable=False))
    op.add_column(
        "metrics_race_cache", sa.Column("native_hawaiian_other_pacific_islander", sa.Integer(), nullable=False)
    )
    op.add_column("metrics_race_cache", sa.Column("white", sa.Integer(), nullable=False))
    op.add_column("metrics_race_cache", sa.Column("hispanic_latino_spanish", sa.Integer(), nullable=False))
    op.add_column("metrics_race_cache", sa.Column("none_of_these_fully_describe_me", sa.Integer(), nullable=False))
    op.add_column("metrics_race_cache", sa.Column("prefer_not_to_answer", sa.Integer(), nullable=False))
    op.add_column("metrics_race_cache", sa.Column("multi_ancestry", sa.Integer(), nullable=False))
    op.add_column("metrics_race_cache", sa.Column("no_ancestry_checked", sa.Integer(), nullable=False))
    # Rebuild the primary key without the dropped race_name column.
    op.execute("ALTER TABLE metrics_race_cache DROP PRIMARY KEY")
    op.drop_column("metrics_race_cache", "race_count")
    op.drop_column("metrics_race_cache", "race_name")
    op.execute("ALTER TABLE metrics_race_cache ADD PRIMARY KEY (`date_inserted`,`hpo_id`,`hpo_name`,`date`)")
    # Replace several single-column indexes with one composite participant index.
    op.create_index(
        "participant_withdrawl_sign_up_hpo",
        "participant",
        ["participant_id", "withdrawal_status", "sign_up_time", "hpo_id", "is_ghost_id"],
        unique=False,
    )
    op.drop_index("participant_sign_up_time", table_name="participant")
    op.drop_index("participant_summary_core_ordered_time", table_name="participant_summary")
    op.drop_index("participant_summary_core_stored_time", table_name="participant_summary")
    op.drop_index("participant_summary_member_time", table_name="participant_summary")
    # ### end Alembic commands ###
def downgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Restore the single-column indexes dropped by upgrade_rdr().
    op.create_index("participant_summary_sign_up_time", "participant_summary", ["sign_up_time"], unique=False)
    op.create_index(
        "participant_summary_member_time", "participant_summary", ["enrollment_status_member_time"], unique=False
    )
    op.create_index(
        "participant_summary_core_stored_time",
        "participant_summary",
        ["enrollment_status_core_stored_sample_time"],
        unique=False,
    )
    op.create_index(
        "participant_summary_core_ordered_time",
        "participant_summary",
        ["enrollment_status_core_ordered_sample_time"],
        unique=False,
    )
    op.create_index("participant_sign_up_time", "participant", ["sign_up_time"], unique=False)
    op.drop_index("participant_withdrawl_sign_up_hpo", table_name="participant")
    # Restore the one-row-per-race columns and drop the per-category columns
    # (cached values are lost; the cache is presumably rebuilt by the
    # application -- TODO confirm).
    op.add_column("metrics_race_cache", sa.Column("race_name", mysql.VARCHAR(length=255), nullable=False))
    op.add_column(
        "metrics_race_cache",
        sa.Column("race_count", mysql.INTEGER(display_width=11), autoincrement=False, nullable=False),
    )
    op.drop_column("metrics_race_cache", "white")
    op.drop_column("metrics_race_cache", "prefer_not_to_answer")
    op.drop_column("metrics_race_cache", "none_of_these_fully_describe_me")
    op.drop_column("metrics_race_cache", "no_ancestry_checked")
    op.drop_column("metrics_race_cache", "native_hawaiian_other_pacific_islander")
    op.drop_column("metrics_race_cache", "multi_ancestry")
    op.drop_column("metrics_race_cache", "middle_eastern_north_african")
    op.drop_column("metrics_race_cache", "hispanic_latino_spanish")
    op.drop_column("metrics_race_cache", "black_african_american")
    op.drop_column("metrics_race_cache", "asian")
    op.drop_column("metrics_race_cache", "american_indian_alaska_native")
    op.drop_index("calendar_idx", table_name="calendar")
    # ### end Alembic commands ###
def upgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No changes to the metrics database in this revision.
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No changes to the metrics database in this revision.
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/bf7f784daca9_alter_metricsracecache_table_and_create_.py",
"copies": "1",
"size": "5059",
"license": "bsd-3-clause",
"hash": 3000098823851273700,
"line_mean": 44.9909090909,
"line_max": 115,
"alpha_frac": 0.6870923107,
"autogenerated": false,
"ratio": 3.341479524438573,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4528571835138573,
"avg_score": null,
"num_lines": null
} |
# Alternate formulation using decorators
import types
class multimethod:
    """Decorator implementing per-argument-type method dispatch.

    The decorated function becomes the fallback implementation; extra
    implementations are registered with ``@name.match(type1, type2, ...)``
    and selected at call time by the runtime types of the arguments after
    ``self``.
    """
    def __init__(self, func):
        self._methods = {}
        self.__name__ = func.__name__
        self._default = func

    def match(self, *argtypes):
        """Return a decorator registering an implementation for *argtypes*."""
        def register(func):
            ndefaults = len(func.__defaults__) if func.__defaults__ else 0
            # Each trailing default argument also lets the implementation
            # match the correspondingly shorter type signature.
            for omitted in range(ndefaults + 1):
                self._methods[argtypes[:len(argtypes) - omitted]] = func
            return self
        return register

    def __call__(self, *args):
        signature = tuple(type(value) for value in args[1:])
        handler = self._methods.get(signature)
        if handler is None:
            return self._default(*args)
        return handler(*args)

    def __get__(self, instance, cls):
        # Bind like an ordinary function so attribute access on an instance
        # yields a bound method.
        if instance is None:
            return self
        return types.MethodType(self, instance)
# Example use
class Spam:
    """Example class using the ``multimethod`` descriptor for
    type-based multiple dispatch on ``bar``."""

    @multimethod
    def bar(self, *args):
        # Default method called if no match
        raise TypeError('No matching method for bar')

    @bar.match(int, int)
    def bar(self, x, y):
        print('Bar 1:', x, y)

    @bar.match(str, int)
    def bar(self, s, n = 0):
        # Because n has a default, this is registered for both
        # (str,) and (str, int) signatures.
        print('Bar 2:', s, n)
if __name__ == '__main__':
    # Demonstration of type-based dispatch.
    s = Spam()
    s.bar(2, 3)        # matches (int, int)
    s.bar('hello')     # matches (str,) thanks to the default for n
    s.bar('hello', 5)  # matches (str, int)
    try:
        s.bar(2, 'hello')  # no registered signature -> default raises
    except TypeError as e:
        print(e)
| {
"repo_name": "tuanavu/python-cookbook-3rd",
"path": "src/9/multiple_dispatch_with_function_annotations/example2.py",
"copies": "2",
"size": "1398",
"license": "mit",
"hash": 2615664088114171400,
"line_mean": 23.9642857143,
"line_max": 74,
"alpha_frac": 0.5236051502,
"autogenerated": false,
"ratio": 3.6984126984126986,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5222017848612699,
"avg_score": null,
"num_lines": null
} |
# Alternate formulation using function attributes directly
from functools import wraps
import logging
def logged(level, name=None, message=None):
    """Decorator factory that adds logging to a function.

    ``level`` is the logging level, ``name`` the logger name and
    ``message`` the log message.  When ``name`` or ``message`` is not
    specified they default to the function's module and name.  The
    returned wrapper exposes the adjustable attributes ``level``,
    ``logmsg`` and ``log``.
    """
    def decorate(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Read through the wrapper attributes so that callers can
            # tweak level/logmsg/log after decoration.
            wrapper.log.log(wrapper.level, wrapper.logmsg)
            return func(*args, **kwargs)

        # Attach adjustable attributes, filling in the defaults.
        wrapper.level = level
        wrapper.logmsg = message if message else func.__name__
        wrapper.log = logging.getLogger(name if name else func.__module__)
        return wrapper
    return decorate
# Example use
@logged(logging.DEBUG)
def add(x, y):
    # Logs at DEBUG level with the default message 'add'.
    return x + y

@logged(logging.CRITICAL, 'example')
def spam():
    # Logs to the 'example' logger at CRITICAL level.
    print('Spam!')

if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.DEBUG)
    print(add(2, 3))
    # Change the log message
    add.logmsg = 'Add called'
    print(add(2, 3))
    # Change the log level
    add.level = logging.WARNING
    print(add(2, 3))
| {
"repo_name": "tuanavu/python-cookbook-3rd",
"path": "src/9/defining_a_decorator_with_user_adjustable_attributes/example2.py",
"copies": "2",
"size": "1298",
"license": "mit",
"hash": 6282091073412531000,
"line_mean": 24.4509803922,
"line_max": 58,
"alpha_frac": 0.6340523883,
"autogenerated": false,
"ratio": 3.957317073170732,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0025543058364882747,
"num_lines": 51
} |
# ## Alternates ##
# This animation alternates colours on every other pixel and then animates them flipping between the default
# colours White and Off.
#
# ## Usage ##
# Alternates has 3 optional properties
#
# * max_led - int the number of pixels you want used
# * color1 - (int, int, int) the color you want the odd pixels to be
# * color2 - (int, int, int) the color you want the even pixels to be
#
# In code:
#
# from Alternates import Alternates
# ...
# anim = Alternates(led, max_led=10, color1=(255, 0, 0), color2=(0, 0, 255))
#
# Best run in the region of 5-10 FPS
from bibliopixel.animation.strip import Strip
class Alternates(Strip):
    # Paints odd and even pixels in two colours and swaps the two
    # colours on every animation step.
    COLOR_DEFAULTS = ('color1', (255, 255, 255)), ('color2', (0, 0, 0))

    def __init__(self, layout, max_led=-1, **kwds):
        super().__init__(layout, 0, -1, **kwds)
        self._current = 0
        self._minLed = 0
        self._maxLed = max_led
        # Fall back to the whole strip when no valid upper bound was given.
        if self._maxLed < 0 or self._maxLed < self._minLed:
            self._maxLed = self.layout.numLEDs - 1
        self._positive = True

    def pre_run(self):
        self._step = 0

    def step(self, amt=1):
        pos = self._current
        while pos < self._maxLed:
            # Pixels whose parity matches the current phase get color1.
            matches_phase = bool(pos % 2) == self._positive
            self.layout.fill(self.palette(matches_phase), pos, pos)
            pos += amt
        # Rewind for the next frame and flip the phase.
        self._current = self._minLed
        self._positive = not self._positive
| {
"repo_name": "rec/BiblioPixelAnimations",
"path": "BiblioPixelAnimations/strip/Alternates.py",
"copies": "2",
"size": "1448",
"license": "mit",
"hash": 8334293209812063000,
"line_mean": 29.8085106383,
"line_max": 108,
"alpha_frac": 0.604281768,
"autogenerated": false,
"ratio": 3.2834467120181405,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4887728480018141,
"avg_score": null,
"num_lines": null
} |
"""Alternating Direction method of Multipliers (ADMM) method variants."""
from __future__ import division
from builtins import range
from odl.operator import Operator, OpDomainError
__all__ = ('admm_linearized',)
def admm_linearized(x, f, g, L, tau, sigma, niter, **kwargs):
    r"""Generic linearized ADMM method for convex problems.

    ADMM stands for "Alternating Direction Method of Multipliers" and
    is a popular convex optimization method. This variant solves problems
    of the form ::

        min_x [ f(x) + g(Lx) ]

    with convex ``f`` and ``g``, and a linear operator ``L``. See Section
    4.4 of `[PB2014] <http://web.stanford.edu/~boyd/papers/prox_algs.html>`_
    and the Notes for more mathematical details.

    Parameters
    ----------
    x : ``L.domain`` element
        Starting point of the iteration, updated in-place.
    f, g : `Functional`
        The functions ``f`` and ``g`` in the problem definition. They
        need to implement the ``proximal`` method.
    L : linear `Operator`
        The linear operator that is composed with ``g`` in the problem
        definition. It must fulfill ``L.domain == f.domain`` and
        ``L.range == g.domain``.
    tau, sigma : positive float
        Step size parameters for the update of the variables.
    niter : non-negative int
        Number of iterations.

    Other Parameters
    ----------------
    callback : callable, optional
        Function called with the current iterate after each iteration.

    Notes
    -----
    Given :math:`x^{(0)}` (the provided ``x``) and
    :math:`u^{(0)} = z^{(0)} = 0`, linearized ADMM applies the following
    iteration:

    .. math::
        x^{(k+1)} &= \mathrm{prox}_{\tau f} \left[
            x^{(k)} - \sigma^{-1}\tau L^*\big(
                L x^{(k)} - z^{(k)} + u^{(k)}
            \big)
        \right]

        z^{(k+1)} &= \mathrm{prox}_{\sigma g}\left(
            L x^{(k+1)} + u^{(k)}
        \right)

        u^{(k+1)} &= u^{(k)} + L x^{(k+1)} - z^{(k+1)}

    The step size parameters :math:`\tau` and :math:`\sigma` must satisfy

    .. math::
        0 < \tau < \frac{\sigma}{\|L\|^2}

    to guarantee convergence.

    The name "linearized ADMM" comes from the fact that in the
    minimization subproblem for the :math:`x` variable, this variant
    uses a linearization of a quadratic term in the augmented Lagrangian
    of the generic ADMM, in order to make the step expressible with
    the proximal operator of :math:`f`.

    Another name for this algorithm is *split inexact Uzawa method*.

    References
    ----------
    [PB2014] Parikh, N and Boyd, S. *Proximal Algorithms*. Foundations and
    Trends in Optimization, 1(3) (2014), pp 123-231.
    """
    if not isinstance(L, Operator):
        # NOTE: the message previously referred to the parameter as `op`.
        raise TypeError('`L` {!r} is not an `Operator` instance'
                        ''.format(L))

    if x not in L.domain:
        raise OpDomainError('`x` {!r} is not in `L.domain` {!r}'
                            ''.format(x, L.domain))

    # Validate scalar parameters, keeping the raw inputs for error text.
    tau, tau_in = float(tau), tau
    if tau <= 0:
        raise ValueError('`tau` must be positive, got {}'.format(tau_in))

    sigma, sigma_in = float(sigma), sigma
    if sigma <= 0:
        raise ValueError('`sigma` must be positive, got {}'.format(sigma_in))

    niter, niter_in = int(niter), niter
    if niter < 0 or niter != niter_in:
        raise ValueError('`niter` must be a non-negative integer, got {}'
                         ''.format(niter_in))

    # Callback object
    callback = kwargs.pop('callback', None)
    if callback is not None and not callable(callback):
        raise TypeError('`callback` {} is not callable'.format(callback))

    # Initialize range variables
    z = L.range.zero()
    u = L.range.zero()

    # Temporary for Lx + u [- z]
    tmp_ran = L(x)
    # Temporary for L^*(Lx + u - z)
    tmp_dom = L.domain.element()

    # Store proximals since their initialization may involve computation
    prox_tau_f = f.proximal(tau)
    prox_sigma_g = g.proximal(sigma)

    for _ in range(niter):
        # tmp_ran has value Lx^k here
        # tmp_dom <- L^*(Lx^k + u^k - z^k)
        tmp_ran += u
        tmp_ran -= z
        L.adjoint(tmp_ran, out=tmp_dom)

        # x <- x^k - (tau/sigma) L^*(Lx^k + u^k - z^k)
        x.lincomb(1, x, -tau / sigma, tmp_dom)
        # x^(k+1) <- prox[tau*f](x)
        prox_tau_f(x, out=x)

        # tmp_ran <- Lx^(k+1)
        L(x, out=tmp_ran)
        # z^(k+1) <- prox[sigma*g](Lx^(k+1) + u^k)
        prox_sigma_g(tmp_ran + u, out=z)  # 1 copy here

        # u^(k+1) = u^k + Lx^(k+1) - z^(k+1)
        u += tmp_ran
        u -= z

        if callback is not None:
            callback(x)
def admm_linearized_simple(x, f, g, L, tau, sigma, niter, **kwargs):
    """Non-optimized version of ``admm_linearized``.

    This function is intended for debugging. It makes a lot of copies and
    performs no error checking.
    """
    callback = kwargs.pop('callback', None)
    z = L.range.zero()
    u = L.range.zero()
    for _ in range(niter):
        # Linearized x-update, then z- and u-updates (see admm_linearized).
        residual = L(x) + u - z
        x[:] = f.proximal(tau)(x - tau / sigma * L.adjoint(residual))
        z = g.proximal(sigma)(L(x) + u)
        u = L(x) + u - z
        if callback is not None:
            callback(x)
| {
"repo_name": "aringh/odl",
"path": "odl/solvers/nonsmooth/admm.py",
"copies": "1",
"size": "5250",
"license": "mpl-2.0",
"hash": -3276875268114099000,
"line_mean": 31.2085889571,
"line_max": 77,
"alpha_frac": 0.5643809524,
"autogenerated": false,
"ratio": 3.337571519389701,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9397089677668065,
"avg_score": 0.0009725588243273821,
"num_lines": 163
} |
""" Alternating Least Squares for Collaborative Filtering
"""
# Author: Vladimir Larin <vladimir@vlarine.ru>
# License: MIT
import numpy as np
import scipy.sparse as sp
import six
# Flag recording whether the optional numba-accelerated polara backend
# could be imported; the 'polara' method is only available when True.
GOT_NUMBA = True
try:
    from pyrecsys._polara.lib.hosvd import tucker_als
except ImportError:
    GOT_NUMBA = False

__all__ = ['ALS', ]
###############################################################################
def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params: dict
The dictionary to pretty print
offset: int
The offset in characters to add at the begin of each line.
printer:
The function to convert entries to strings, typically
the builtin str or repr
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ',\n' + (1 + offset // 2) * ' '
for i, (k, v) in enumerate(sorted(six.iteritems(params))):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = '%s=%s' % (k, str(v))
else:
# use repr of the rest
this_repr = '%s=%s' % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + '...' + this_repr[-100:]
if i > 0:
if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(', ')
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = ''.join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
return lines
###############################################################################
class ALS():
    """ Alternating Least Squares for Collaborative Filtering

    For now supports implicit ALS only.

    Parameters
    ----------
    n_components: int, optional, default: 15
        The number of components for factorisation.
    lambda_: float, optional, default: 0.01
        The regularisation parameter in ALS.
    alpha: int, optional, default: 15
        The parameter associated with the confidence matrix
        in the implicit ALS algorithm.
    n_iter: int, optional, default: 20
        The number of iterations of the ALS algorithm.
    method: 'implicit' | 'explicit' | 'polara', default: 'implicit'
        The ALS method. For now supports implicit ALS only.
    rank: int, optional, default: 5
        Polara-specific. Base rating rank.
    growth_tol: float, optional, default: 0.0001
        Polara-specific. Threshold for early stopping.
    mlrank: (int, int, int), optional, default: (13, 10, 2)
        Polara-specific. Tuple of model ranks.
    n_jobs: int, optional, default: 1
        The number of jobs to use for computation.
        For now supports 1 job only.
    random_state: int seed or None (default)
        Random number generator seed.
    verbose: int, optional (default=0)
        Controls the verbosity of the model building process.

    References
    ----------
    Collaborative Filtering for Implicit Feedback Datasets.
    Yifan Hu. AT&T Labs – Research. Florham Park, NJ 07932.
    Yehuda Koren. Yahoo! Research.
    http://yifanhu.net/PUB/cf.pdf

    Ben Frederickson. Fast Python Collaborative Filtering
    for Implicit Datasets.
    https://github.com/benfred/implicit

    Evgeny Frolov, Ivan Oseledets. Fifty Shades of Ratings: How to Benefit
    from a Negative Feedback in Top-N Recommendations Tasks.
    https://arxiv.org/abs/1607.04228
    https://github.com/Evfro/polara
    """

    def __init__(self, n_components=15, lambda_=0.01, alpha=15, n_iter=20,
                 method='implicit', n_jobs=1, rank=5, growth_tol=0.0001,
                 mlrank=(13, 10, 2), random_state=None, verbose=0):
        self.n_components = n_components
        self.lambda_ = lambda_
        self.alpha = alpha
        self.n_iter = n_iter
        self.method = method
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.rank = rank
        self.mlrank = mlrank
        self.growth_tol = growth_tol
        self.verbose = verbose
        self._eps = 0.0000001  # Small value to avoid a division by zero

    def fit(self, X):
        """Learn an ALS model.

        Parameters
        ----------
        X: {array-like, sparse matrix}, shape (n_rows, n_columns)
            Data matrix to learn a model.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        if self.method == 'implicit':
            # Implicit feedback: scale the data into a confidence matrix.
            self._als(X * self.alpha)
        elif self.method == 'explicit':
            self._als(X)
        elif self.method == 'polara':
            if GOT_NUMBA:
                self._polara_als(X)
            else:
                raise ImportError('Numba is not installed')
        else:
            raise NotImplementedError('Method {} is not implemented.'.format(self.method))
        return self

    def predict(self, X):
        """Predict scores for row/column index pairs with the fitted model.

        Parameters
        ----------
        X: iterable with two integers
            Pairs of row index, column index to predict.

        Returns
        -------
        pred : array
            Returns array of predictions.
        """
        pred = []
        if self.method == 'polara':
            u, v, w, c = self.rows_, self.columns_, self.feedback_factors_, self.core_
            for item in X:
                i = item[0]
                j = item[1]
                if i < u.shape[0] and j < v.shape[0]:
                    p = v[j, :].dot(c.T.dot(u[i, :]).T).dot(w.T).argmax()
                else:
                    # Unseen row/column: fall back to the middle rating rank.
                    p = (self.rank - 1) / 2
                # Map the rank back onto the original rating scale.
                p = p * (self.x_max_ - self.x_min_) / (self.rank + self._eps) + self.x_min_
                pred.append(p)
        else:
            for item in X:
                i = item[0]
                j = item[1]
                pred.append(self.rows_[i, :].dot(self.columns_[j, :]))
        return np.array(pred)

    def _nonzeros(self, m, row):
        """ returns the non zeroes of a row in csr_matrix """
        for index in range(m.indptr[row], m.indptr[row+1]):
            yield m.indices[index], m.data[index]

    def _als(self, Cui):
        """Run alternating least squares on the (confidence) matrix Cui."""
        dtype = np.float64
        self.n_rows_, self.n_columns_ = Cui.shape
        if self.random_state is not None:
            np.random.seed(self.random_state)
        # Small random initialization of both factor matrices.
        self.rows_ = np.random.rand(self.n_rows_, self.n_components).astype(dtype) * 0.01
        self.columns_ = np.random.rand(self.n_columns_, self.n_components).astype(dtype) * 0.01
        Cui, Ciu = Cui.tocsr(), Cui.T.tocsr()
        if self.method == 'implicit':
            solver = self._implicit_least_squares
        elif self.method == 'explicit':
            solver = self._explicit_least_squares
        else:
            raise NotImplementedError('Method {} is not implemented.'.format(self.method))
        for iteration in range(self.n_iter):
            # Alternate: fix columns to solve for rows, then vice versa.
            solver(Cui, self.rows_, self.columns_, self.lambda_)
            solver(Ciu, self.columns_, self.rows_, self.lambda_)

    def _polara_als(self, Cui):
        """Fit the polara (Tucker decomposition) model; requires numba."""
        Cui = sp.coo_matrix(Cui)
        self.x_min_ = Cui.data.min()
        self.x_max_ = Cui.data.max()
        # Rescale ratings into [0, rank) integer bins.
        Cui.data -= self.x_min_
        if self.x_max_ > self.x_min_:
            Cui.data /= (self.x_max_ - self.x_min_)
        Cui.data *= (self.rank - self._eps)
        Cui = np.ascontiguousarray(np.transpose(np.array((Cui.row, Cui.col, Cui.data), dtype=np.int64)))
        shp = tuple(Cui.max(axis=0) + 1)
        val = np.ascontiguousarray(np.ones(Cui.shape[0], ))
        users_factors, items_factors, feedback_factors, core = \
            tucker_als(Cui, val, shp, self.mlrank,
                       growth_tol=self.growth_tol,
                       iters=self.n_iter,
                       batch_run=False if self.verbose else True)
        self.rows_ = users_factors
        self.columns_ = items_factors
        self.feedback_factors_ = feedback_factors
        self.core_ = core

    def _explicit_least_squares(self, Cui, X, Y, regularization):
        """One half-sweep of explicit-feedback least squares, updating X."""
        users, factors = X.shape
        YtY = Y.T.dot(Y)
        for u in range(users):
            # accumulate YtCuY + regularization*I in A
            A = YtY + regularization * np.eye(factors)
            # accumulate YtCuPu in b
            b = np.zeros(factors)
            for i, confidence in self._nonzeros(Cui, u):
                factor = Y[i]
                b += confidence * factor
            X[u] = np.linalg.solve(A, b)

    def _implicit_least_squares_(self, Cui, X, Y, regularization):
        """Alternative implicit solver (appears unused; the public
        _implicit_least_squares variant below is the one dispatched).

        NOTE(review): the ``regularization`` parameter is ignored here;
        ``self.lambda_`` is used directly instead.
        """
        users, factors = X.shape
        YtY = Y.T.dot(Y)
        for u in range(users):
            indexes = [x[0] for x in self._nonzeros(Cui, u)]
            if len(indexes) > 0:
                Hix = Y[indexes, :]
                # Regularize the diagonal only. The previous expression
                # np.diag(self.lambda_ * np.eye(factors)) produced a 1-D
                # vector that broadcast lambda_ onto *every* entry of M
                # instead of just the diagonal.
                M = YtY + self.alpha * Hix.T.dot(Hix) + self.lambda_ * np.eye(factors)
                X[u] = np.dot(np.linalg.inv(M), (1 + self.alpha) * Hix.sum(axis=0))
            else:
                X[u] = np.zeros(factors)

    def _implicit_least_squares(self, Cui, X, Y, regularization):
        """ For each user in Cui, calculate factors Xu for them
        using least squares on Y.
        """
        users, factors = X.shape
        YtY = Y.T.dot(Y)
        for u in range(users):
            # accumulate YtCuY + regularization*I in A
            A = YtY + regularization * np.eye(factors)
            # accumulate YtCuPu in b
            b = np.zeros(factors)
            for i, confidence in self._nonzeros(Cui, u):
                factor = Y[i]
                A += (confidence - 1) * np.outer(factor, factor)
                b += confidence * factor
            # Xu = (YtCuY + regularization * I)^-1 (YtCuPu)
            X[u] = np.linalg.solve(A, b)

    def get_params(self):
        """Get parameters for this model.

        Returns
        -------
        params : mapping of string to any
            Parameter names mapped to their values.
        """
        out = dict()
        names = ['n_components', 'lambda_', 'alpha', 'n_iter',
                 'method', 'n_jobs', 'random_state', 'verbose']
        if self.method == 'polara':
            names += ['rank', 'mlrank', 'growth_tol']
        for key in names:
            out[key] = getattr(self, key, None)
        return out

    def __repr__(self):
        class_name = self.__class__.__name__
        return '%s(%s)' % (class_name, _pprint(self.get_params(),
                                               offset=len(class_name),),)
| {
"repo_name": "vlarine/pyrecsys",
"path": "pyrecsys/collaborative_filtering.py",
"copies": "1",
"size": "11093",
"license": "mit",
"hash": -6014215116501120000,
"line_mean": 31.2412790698,
"line_max": 104,
"alpha_frac": 0.5402578667,
"autogenerated": false,
"ratio": 3.677387267904509,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9698493604274923,
"avg_score": 0.0038303060659173435,
"num_lines": 344
} |
# Alternative deferred-based API
# TODO: If we use this module do we still need Process?
# TODO: errbacks are not actually used. Isn't that weird?
# TODO: Using this, the operations are scheduled before entering "yield"
# (before process gives up control), whereas with Process runner, the
# operations are scheduled after entering "yield" (after process gives up
# control). Which one is better?
from twisted.internet.defer import Deferred, returnValue, inlineCallbacks
from csp.impl import dispatch
from csp.impl.channels import ManyToManyChannel as Channel, CLOSED
from csp.impl.channels import take_then_callback, put_then_callback
from csp.impl.select import do_alts
def put(channel, value):
    """Put *value* onto *channel*; return a Deferred firing when accepted."""
    result = Deferred()
    put_then_callback(channel, value, result.callback)
    return result
def take(channel):
    """Take from *channel*; return a Deferred firing with the value."""
    result = Deferred()
    take_then_callback(channel, result.callback)
    return result
def alts(operations, priority=False, default=None):
    """Attempt *operations*; return a Deferred firing with the outcome."""
    result = Deferred()
    do_alts(operations, result.callback, priority=priority, default=default)
    return result
def sleep(seconds):
    """Return a Deferred that fires with None after *seconds*."""
    result = Deferred()
    dispatch.queue_delay(lambda: result.callback(None), seconds)
    return result
def stop(value=None):
    # Terminate the enclosing inlineCallbacks process, yielding *value*.
    returnValue(value)
def go_channel(f, *args, **kwargs):
    """Run generator function *f* as a process; return a channel that
    delivers its final value and is then closed (closed with no value
    when the result is CLOSED)."""
    deferred = inlineCallbacks(f)(*args, **kwargs)
    channel = Channel(1)

    def deliver(value):
        # Forward the process result onto the channel, then close it.
        if value == CLOSED:
            channel.close()
        else:
            put_then_callback(channel, value, lambda ok: channel.close())

    deferred.addBoth(deliver)
    return channel
def go_deferred(f, *args, **kwargs):
    """Run generator function *f* via inlineCallbacks; return its Deferred."""
    coroutine = inlineCallbacks(f)
    return coroutine(*args, **kwargs)
go = go_deferred  # default flavour of `go`

# Decorators
def process_channel(f):
    # Wrap f so each call runs it as a process returning a channel.
    def returning_channel(*args, **kwargs):
        return go_channel(f, *args, **kwargs)
    return returning_channel

process_deferred = inlineCallbacks  # decorator: calls return a Deferred
process = process_deferred
| {
"repo_name": "ubolonton/twisted-csp",
"path": "csp/defer.py",
"copies": "1",
"size": "1867",
"license": "epl-1.0",
"hash": -2028937607251300400,
"line_mean": 22.3375,
"line_max": 73,
"alpha_frac": 0.6946973755,
"autogenerated": false,
"ratio": 3.5766283524904217,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4771325727990422,
"avg_score": null,
"num_lines": null
} |
"""Alternative implementation of Beancount's Inventory."""
from typing import Any
from typing import Callable
from typing import Dict
from typing import Optional
from typing import Tuple
from beancount.core.amount import Amount
from beancount.core.number import Decimal
from beancount.core.number import ZERO
from beancount.core.position import Cost
from beancount.core.position import Position
InventoryKey = Tuple[str, Optional[Cost]]
class SimpleCounterInventory(Dict[str, Decimal]):
    """A simple inventory mapping just strings to numbers."""

    def is_empty(self) -> bool:
        """Check if the inventory is empty."""
        return len(self) == 0

    def add(self, key: str, number: Decimal) -> None:
        """Add a number to key."""
        total = self.get(key, ZERO) + number
        # Drop entries that net out to zero so the mapping stays minimal.
        if total == ZERO:
            self.pop(key, None)
        else:
            self[key] = total
class CounterInventory(Dict[InventoryKey, Decimal]):
    """A lightweight inventory.

    This is intended as a faster alternative to Beancount's Inventory class.
    Due to not using a list, for inventories with a lot of different positions,
    inserting is much faster.

    The keys should be tuples ``(currency, cost)``.
    """

    def is_empty(self) -> bool:
        """Check if the inventory is empty."""
        return len(self) == 0

    def add(self, key: InventoryKey, number: Decimal) -> None:
        """Add a number to key."""
        total = self.get(key, ZERO) + number
        # Drop entries that net out to zero so the mapping stays minimal.
        if total == ZERO:
            self.pop(key, None)
        else:
            self[key] = total

    def reduce(
        self, reducer: Callable[..., Amount], *args: Any
    ) -> SimpleCounterInventory:
        """Reduce inventory.

        Note that this returns a simple :class:`CounterInventory` with just
        currencies as keys.
        """
        reduced = SimpleCounterInventory()
        for (currency, cost), number in self.items():
            amount = reducer(Position(Amount(number, currency), cost), *args)
            assert amount.number is not None
            reduced.add(amount.currency, amount.number)
        return reduced

    def add_amount(self, amount: Amount, cost: Optional[Cost] = None) -> None:
        """Add an Amount to the inventory."""
        assert amount.number is not None
        self.add((amount.currency, cost), amount.number)

    def add_position(self, pos: Position) -> None:
        """Add a Position or Posting to the inventory."""
        self.add_amount(pos.units, pos.cost)

    def __neg__(self) -> "CounterInventory":
        negated = CounterInventory()
        for key, num in self.items():
            negated[key] = -num
        return negated

    def __add__(self, other: "CounterInventory") -> "CounterInventory":
        result = CounterInventory(self)
        result.add_inventory(other)
        return result

    def add_inventory(self, counter: "CounterInventory") -> None:
        """Add another :class:`CounterInventory`."""
        if not self:
            self.update(counter)
            return
        get = self.get
        for key, num in counter.items():
            total = get(key, ZERO) + num
            if total == ZERO:
                self.pop(key, None)
            else:
                self[key] = total
| {
"repo_name": "yagebu/fava",
"path": "src/fava/core/inventory.py",
"copies": "2",
"size": "3325",
"license": "mit",
"hash": 2657367274862171000,
"line_mean": 31.9207920792,
"line_max": 79,
"alpha_frac": 0.6108270677,
"autogenerated": false,
"ratio": 4.182389937106918,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5793217004806919,
"avg_score": null,
"num_lines": null
} |
"""Alternative implementation of Beancount's Inventory."""
from typing import Dict
from typing import Optional
from typing import Tuple
from beancount.core.amount import Amount
from beancount.core.number import Decimal
from beancount.core.number import ZERO
from beancount.core.position import Position
InventoryKey = Tuple[str, Optional[str]]
class CounterInventory(Dict[InventoryKey, Decimal]):
    """A lightweight inventory.

    This is intended as a faster alternative to Beancount's Inventory class.
    Due to not using a list, for inventories with a lot of different positions,
    inserting is much faster.

    The keys should be tuples ``(currency, cost)``.
    """

    # False positive due to use of the typing.Dict base instead of dict
    # pylint: disable=no-member,unsupported-assignment-operation

    def is_empty(self) -> bool:
        """Check if the inventory is empty."""
        return len(self) == 0

    def add(self, key: InventoryKey, number: Decimal) -> None:
        """Add a number to key."""
        total = self.get(key, ZERO) + number
        # Drop entries that net out to zero so the mapping stays minimal.
        if total == ZERO:
            self.pop(key, None)
        else:
            self[key] = total

    def reduce(self, reducer, *args) -> "CounterInventory":
        """Reduce inventory.

        Note that this returns a simple :class:`CounterInventory` with just
        currencies as keys.
        """
        reduced = CounterInventory()
        for (currency, cost), number in self.items():
            amount = reducer(Position(Amount(number, currency), cost), *args)
            reduced.add(amount.currency, amount.number)
        return reduced

    def add_amount(self, amount, cost=None):
        """Add an Amount to the inventory."""
        self.add((amount.currency, cost), amount.number)

    def add_position(self, pos):
        """Add a Position or Posting to the inventory."""
        self.add_amount(pos.units, pos.cost)

    def __neg__(self) -> "CounterInventory":
        negated = CounterInventory()
        for key, num in self.items():
            negated[key] = -num
        return negated

    def __add__(self, other) -> "CounterInventory":
        result = CounterInventory(self)
        result.add_inventory(other)
        return result

    def add_inventory(self, counter: "CounterInventory") -> None:
        """Add another :class:`CounterInventory`."""
        if not self:
            self.update(counter)
            return
        get = self.get
        for key, num in counter.items():
            total = get(key, ZERO) + num
            if total == ZERO:
                self.pop(key, None)
            else:
                self[key] = total
| {
"repo_name": "aumayr/beancount-web",
"path": "src/fava/core/inventory.py",
"copies": "1",
"size": "2693",
"license": "mit",
"hash": -4273038728112002000,
"line_mean": 32.2469135802,
"line_max": 79,
"alpha_frac": 0.6167842555,
"autogenerated": false,
"ratio": 4.201248049921997,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5318032305421997,
"avg_score": null,
"num_lines": null
} |
"Alternative methods of calculating moving window statistics."
import warnings
import numpy as np
__all__ = [
"move_sum",
"move_mean",
"move_std",
"move_var",
"move_min",
"move_max",
"move_argmin",
"move_argmax",
"move_median",
"move_rank",
]
def move_sum(a, window, min_count=None, axis=-1):
    "Slow move_sum for unaccelerated dtype"
    # Thin wrapper: delegate to the generic python-loop driver.
    return move_func(np.nansum, a, window, min_count, axis=axis)


def move_mean(a, window, min_count=None, axis=-1):
    "Slow move_mean for unaccelerated dtype"
    return move_func(np.nanmean, a, window, min_count, axis=axis)


def move_std(a, window, min_count=None, axis=-1, ddof=0):
    "Slow move_std for unaccelerated dtype"
    return move_func(np.nanstd, a, window, min_count, axis=axis, ddof=ddof)


def move_var(a, window, min_count=None, axis=-1, ddof=0):
    "Slow move_var for unaccelerated dtype"
    return move_func(np.nanvar, a, window, min_count, axis=axis, ddof=ddof)


def move_min(a, window, min_count=None, axis=-1):
    "Slow move_min for unaccelerated dtype"
    return move_func(np.nanmin, a, window, min_count, axis=axis)


def move_max(a, window, min_count=None, axis=-1):
    "Slow move_max for unaccelerated dtype"
    return move_func(np.nanmax, a, window, min_count, axis=axis)
def move_argmin(a, window, min_count=None, axis=-1):
    "Slow move_argmin for unaccelerated dtype"

    def argmin(a, axis):
        """Index of the minimum along *axis*, NaN for all-NaN slices.

        The axis is flipped first so that ties resolve to the rightmost
        (most recent) element within the window.
        """
        # np.asarray is the NumPy-2-safe spelling of np.array(a, copy=False).
        a = np.asarray(a)
        flip = [slice(None)] * a.ndim
        flip[axis] = slice(None, None, -1)
        # Indexing must use a tuple: indexing with a list of slices was
        # deprecated in NumPy 1.15 and removed in later releases.
        a = a[tuple(flip)]  # if tie, pick index of rightmost tie
        try:
            idx = np.nanargmin(a, axis=axis)
        except ValueError:
            # an all nan slice encountered
            a = a.copy()
            mask = np.isnan(a)
            np.copyto(a, np.inf, where=mask)
            idx = np.argmin(a, axis=axis).astype(np.float64)
            if idx.ndim == 0:
                idx = np.nan
            else:
                mask = np.all(mask, axis=axis)
                idx[mask] = np.nan
        return idx

    return move_func(argmin, a, window, min_count, axis=axis)
def move_argmax(a, window, min_count=None, axis=-1):
    "Slow move_argmax for unaccelerated dtype"

    def argmax(a, axis):
        """Index of the maximum along *axis*, NaN for all-NaN slices.

        The axis is flipped first so that ties resolve to the rightmost
        (most recent) element within the window.
        """
        # np.asarray is the NumPy-2-safe spelling of np.array(a, copy=False).
        a = np.asarray(a)
        flip = [slice(None)] * a.ndim
        flip[axis] = slice(None, None, -1)
        # Indexing must use a tuple: indexing with a list of slices was
        # deprecated in NumPy 1.15 and removed in later releases.
        a = a[tuple(flip)]  # if tie, pick index of rightmost tie
        try:
            idx = np.nanargmax(a, axis=axis)
        except ValueError:
            # an all nan slice encountered
            a = a.copy()
            mask = np.isnan(a)
            np.copyto(a, -np.inf, where=mask)
            idx = np.argmax(a, axis=axis).astype(np.float64)
            if idx.ndim == 0:
                idx = np.nan
            else:
                mask = np.all(mask, axis=axis)
                idx[mask] = np.nan
        return idx

    return move_func(argmax, a, window, min_count, axis=axis)
def move_median(a, window, min_count=None, axis=-1):
    "Slow move_median for unaccelerated dtype"
    # Thin wrapper: delegate to the generic python-loop driver.
    return move_func(np.nanmedian, a, window, min_count, axis=axis)


def move_rank(a, window, min_count=None, axis=-1):
    "Slow move_rank for unaccelerated dtype"
    # Rank of the last element of each window, via lastrank (defined below).
    return move_func(lastrank, a, window, min_count, axis=axis)
# magic utility functions ---------------------------------------------------
def move_func(func, a, window, min_count=None, axis=-1, **kwargs):
    """Generic moving window function implemented with a python loop.

    Applies *func* to each trailing window of length *window* along
    *axis*; positions whose window holds fewer than *min_count* non-NaN
    values are set to NaN.
    """
    # np.asarray is the NumPy-2-safe spelling of np.array(a, copy=False).
    a = np.asarray(a)
    if min_count is None:
        mc = window
    else:
        mc = min_count
        if mc > window:
            msg = "min_count (%d) cannot be greater than window (%d)"
            raise ValueError(msg % (mc, window))
        elif mc <= 0:
            raise ValueError("`min_count` must be greater than zero.")
    if a.ndim == 0:
        raise ValueError("moving window functions require ndim > 0")
    if axis is None:
        raise ValueError("An `axis` value of None is not supported.")
    if window < 1:
        raise ValueError("`window` must be at least 1.")
    if window > a.shape[axis]:
        raise ValueError("`window` is too long.")
    # Integer input gets a float output so that NaN can be written below.
    if issubclass(a.dtype.type, np.inexact):
        y = np.empty_like(a)
    else:
        y = np.empty(a.shape)
    idx1 = [slice(None)] * a.ndim
    idx2 = list(idx1)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        for i in range(a.shape[axis]):
            # Trailing window ending at i (shorter near the start).
            win = min(window, i + 1)
            idx1[axis] = slice(i + 1 - win, i + 1)
            idx2[axis] = i
            y[tuple(idx2)] = func(a[tuple(idx1)], axis=axis, **kwargs)
    idx = _mask(a, window, mc, axis)
    y[idx] = np.nan
    return y
def _mask(a, window, min_count, axis):
n = (a == a).cumsum(axis)
idx1 = [slice(None)] * a.ndim
idx2 = [slice(None)] * a.ndim
idx3 = [slice(None)] * a.ndim
idx1[axis] = slice(window, None)
idx2[axis] = slice(None, -window)
idx3[axis] = slice(None, window)
idx1 = tuple(idx1)
idx2 = tuple(idx2)
idx3 = tuple(idx3)
nidx1 = n[idx1]
nidx1 = nidx1 - n[idx2]
idx = np.empty(a.shape, dtype=np.bool)
idx[idx1] = nidx1 < min_count
idx[idx3] = n[idx3] < min_count
return idx
# ---------------------------------------------------------------------------
def lastrank(a, axis=-1):
    """
    The ranking of the last element along the axis, ignoring NaNs.
    The ranking is normalized to be between -1 and 1 instead of the more
    common 1 and N. The results are adjusted for ties.
    Parameters
    ----------
    a : ndarray
        Input array. If `a` is not an array, a conversion is attempted.
    axis : int, optional
        The axis over which to rank. By default (axis=-1) the ranking
        (and reducing) is performed over the last axis.
    Returns
    -------
    d : array
        In the case of, for example, a 2d array of shape (n, m) and
        axis=1, the output will contain the rank (normalized to be between
        -1 and 1 and adjusted for ties) of the last element of each row.
        The output in this example will have shape (n,).
    Examples
    --------
    >>> import numpy as np
    >>> lastrank(np.array([1, 2, 3]))
    1.0
    >>> lastrank(np.array([3, 2, 1]))
    -1.0
    >>> lastrank(np.array([1, 3, 4, 5, 2]))
    -0.5
    """
    # np.array(a, copy=False) raises in NumPy 2.0 when a copy is needed;
    # np.asarray preserves the original copy-on-demand behavior.
    a = np.asarray(a)
    ndim = a.ndim
    if a.size == 0:
        # At least one dimension has length 0: nothing to rank.
        shape = list(a.shape)
        shape.pop(axis)
        # NOTE(review): fill(np.nan) assumes a float-compatible dtype; an
        # empty integer input would raise here -- confirm callers pass floats.
        r = np.empty(shape, dtype=a.dtype)
        r.fill(np.nan)
        if (r.ndim == 0) and (r.size == 1):
            r = np.nan
        return r
    indlast = [slice(None)] * ndim
    indlast[axis] = slice(-1, None)   # keeps the ranked axis (length 1)
    indlast = tuple(indlast)
    indlast2 = [slice(None)] * ndim
    indlast2[axis] = -1               # drops the ranked axis
    indlast2 = tuple(indlast2)
    n = (~np.isnan(a)).sum(axis)      # number of non-NaN entries per lane
    a_indlast = a[indlast]
    g = (a_indlast > a).sum(axis)     # elements strictly below the last one
    e = (a_indlast == a).sum(axis)    # ties, including the element itself
    # Midrank for ties (zero-based), then rescaled from [0, n-1] to [-1, 1]
    r = (g + g + e - 1.0) / 2.0
    r = r / (n - 1.0)
    r = 2.0 * (r - 0.5)
    if ndim == 1:
        if n == 1:
            r = 0
        if np.isnan(a[indlast2]):  # applied after n == 1 so NaN wins
            r = np.nan
    else:
        np.putmask(r, n == 1, 0)
        np.putmask(r, np.isnan(a[indlast2]), np.nan)
    return r
| {
"repo_name": "kwgoodman/bottleneck",
"path": "bottleneck/slow/move.py",
"copies": "1",
"size": "7756",
"license": "bsd-2-clause",
"hash": -1871236977593894400,
"line_mean": 28.7164750958,
"line_max": 77,
"alpha_frac": 0.5613718412,
"autogenerated": false,
"ratio": 3.223607647547797,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9284979488747798,
"avg_score": 0,
"num_lines": 261
} |
# Alternative:
# $ inotifywait -e CLOSE_WRITE -m /tmp
# Setting up watches.
# Watches established.
# /tmp/ CLOSE_WRITE,CLOSE ok
# /tmp/ CLOSE_WRITE,CLOSE ok
# /tmp/ CLOSE_WRITE,CLOSE ok
import logging
import argparse
import os
import signal
import sys
import inotify.adapters
def handler(signum, frame):
    """SIGUSR1 handler: terminate the process cleanly with status 0."""
    raise SystemExit(0)
_DEFAULT_LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
_LOGGER = logging.getLogger(__name__)
def _configure_logging():
    """Attach a stderr stream handler with the default format and
    enable DEBUG-level output on the module logger."""
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(_DEFAULT_LOG_FORMAT))
    _LOGGER.setLevel(logging.DEBUG)
    _LOGGER.addHandler(stream_handler)
def _main():
    """Watch --outdir for close-after-write events and log them forever."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--outdir", default=os.path.expanduser("~/run2"))
    args = parser.parse_args()
    # SIGUSR1 gives an external way to stop the otherwise endless loop
    signal.signal(signal.SIGUSR1, handler)
    # inotify.adapters expects the watched path as bytes
    s = args.outdir.encode("UTF-8")
    i = inotify.adapters.Inotify()
    # mask=8 is IN_CLOSE_WRITE: fire when a file opened for writing is closed
    i.add_watch(s, mask=8)
    try:
        # event_gen() yields None on timeout ticks; skip those
        for event in i.event_gen():
            if event is not None:
                (header, type_names, watch_path, filename) = event
                _LOGGER.info("WD=(%d) MASK=(%d) COOKIE=(%d) LEN=(%d) MASK->NAMES=%s "
                             "WATCH-PATH=[%s] FILENAME=[%s]",
                             header.wd, header.mask, header.cookie, header.len, type_names,
                             watch_path.decode('utf-8'), filename.decode('utf-8'))
    finally:
        # Always release the watch, even when the loop is interrupted
        i.remove_watch(s)
if __name__ == '__main__':
    # Configure logging first so the watch loop's output is visible
    _configure_logging()
    _main()
| {
"repo_name": "danblick/robocar",
"path": "scripts/inotify_example.py",
"copies": "1",
"size": "1529",
"license": "mit",
"hash": -2761533047885023000,
"line_mean": 23.6612903226,
"line_max": 91,
"alpha_frac": 0.6030085023,
"autogenerated": false,
"ratio": 3.2881720430107526,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4391180545310752,
"avg_score": null,
"num_lines": null
} |
"""Alternative output generators for ELC endpoints."""
def type_bibjson(data):
    """Format BibJSON return from list of standard JSON objects.

    Each input record is mapped onto one BibJSON entry with basic
    reference info, a journal block, locator identifiers and an
    author list.
    """
    formatted = []
    for record in data:
        entry = {
            'type': record.get('kind'),
            'year': record.get('year'),
            'title': record.get('title'),
            'citation': record.get('cite'),
            # Journal/book specific details
            'journal': [{'name': record.get('journal'),
                         'volume': record.get('vol'),
                         'pages': record.get('pages'),
                         'editor': record.get('editor')}],
            # Locator IDs: DOI plus the internal database index
            'identifier': [{'type': 'doi',
                            'id': record.get('doi')},
                           {'type': 'db_index',
                            'id': record.get('ref_id')}],
            'author': [{'name': name} for name in record.get('authors')],
        }
        formatted.append(entry)
    return formatted
def type_ris(data):
    """Format RIS return from list of standard JSON objects.

    Emits one "XX - value" line per populated field; every record opens
    with a TY line (mapped through ris_mapping.yaml) and closes with ER.
    """
    import yaml

    with open('swagger_server/lookup/ris_mapping.yaml') as f:
        ris_type = yaml.safe_load(f)

    lines = list()
    for rec in data:
        kind = rec.get('kind')
        # Every record must open with TY, even if the kind is unknown
        if kind:
            lines.append('TY - {0:s}\n'.format(ris_type.get(kind.lower(), '')))
        else:
            lines.append('TY - \n')
        for author in rec.get('authors') or []:
            lines.append('AU - {0:s}\n'.format(author))
        if rec.get('year'):
            lines.append('YR - {0:s}//\n'.format(str(rec['year'])))
        for key, code in (('title', 'TI'), ('journal', 'JF'), ('vol_no', 'VL')):
            if rec.get(key):
                lines.append('{0:s} - {1:s}\n'.format(code, rec[key]))
        if rec.get('page_range'):
            # "12-34" becomes SP/EP; a single page gets SP only
            bounds = [p.strip() for p in rec['page_range'].split('-')]
            lines.append('SP - {0:s}\n'.format(str(bounds[0])))
            if len(bounds) == 2:
                lines.append('EP - {0:s}\n'.format(str(bounds[1])))
        if rec.get('editor'):
            lines.append('ED - {0:s}\n'.format(rec['editor']))
        # Publisher and place are coerced to str before formatting
        for key, code in (('publisher', 'PB'), ('place', 'PP')):
            if rec.get(key):
                lines.append('{0:s} - {1:s}\n'.format(code, str(rec[key])))
        for key, code in (('doi', 'DO'), ('cite', 'CP'), ('ref_id', 'ID')):
            if rec.get(key):
                lines.append('{0:s} - {1:s}\n'.format(code, rec[key]))
        lines.append('ER -\n\n')
    return lines
def type_csv(data):
    """Format CSV return from list of standard JSON objects.

    Returns a list of CSV-encoded strings: a header row (sorted keys of
    the first record) followed by one row per record. Author lists are
    flattened in place to a comma-joined string.
    """
    import io
    import csv

    def _encode_row(values):
        # csv handles quoting/escaping; one in-memory writer per row
        buf = io.StringIO()
        csv.writer(buf).writerow(values)
        return buf.getvalue()

    columns = sorted(data[0].keys())
    encoded = [_encode_row([col for col in columns])]
    for record in data:
        if 'authors' in record:
            # NOTE: mutates the input record, as the original did
            record.update(authors=', '.join(record['authors']))
        encoded.append(_encode_row([record[col] for col in columns]))
    return encoded
| {
"repo_name": "EarthLifeConsortium/elc_api",
"path": "swagger_server/elc/formatter.py",
"copies": "1",
"size": "3668",
"license": "apache-2.0",
"hash": 7015047831977816000,
"line_mean": 26.7878787879,
"line_max": 70,
"alpha_frac": 0.4923664122,
"autogenerated": false,
"ratio": 3.5508228460793805,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9543189258279381,
"avg_score": 0,
"num_lines": 132
} |
"""Alternative "tab node creator thingy" for The Foundry's Nuke
homepage: https://github.com/dbr/tabtabtab-nuke
license: http://unlicense.org/
"""
__version__ = "1.8"
import os
import sys
try:
from PySide2 import QtCore, QtGui, QtWidgets
from PySide2.QtCore import Qt
except ImportError:
try:
from PySide import QtCore, QtGui, QtGui as QtWidgets
from PySide.QtCore import Qt
except ImportError:
import sip
for mod in ("QDate", "QDateTime", "QString", "QTextStream", "QTime", "QUrl", "QVariant"):
sip.setapi(mod, 2)
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import Qt
QtCore.Signal = QtCore.pyqtSignal
def find_menu_items(menu, _path = None):
    """Recursively collect the items of a Nuke menu.

    Returns a list of dicts with 'menuobj' (the nuke.MenuItem) and
    'menupath' (slash-separated path). Divider lines (empty names),
    hidden "@;..." shortcut items and the ToolSets delete commands
    are all skipped.
    """
    import nuke
    collected = []
    for entry in menu.items():
        if isinstance(entry, nuke.Menu):
            # Sub-menu: build its path and recurse into it
            clean_name = entry.name().replace("&", "")
            child_path = "/".join(p for p in (_path, clean_name) if p is not None)
            if "ToolSets/Delete" in child_path:
                # Never offer the destructive ToolSets delete commands
                continue
            collected.extend(find_menu_items(menu = entry, _path = child_path))
        elif isinstance(entry, nuke.MenuItem):
            item_name = entry.name()
            # Dividers have empty names; "@;..." entries are hidden shortcuts
            if item_name == "" or item_name.startswith("@;"):
                continue
            item_path = "/".join(p for p in (_path, item_name) if p is not None)
            collected.append({'menuobj': entry, 'menupath': item_path})
    return collected
def nonconsec_find(needle, haystack, anchored = False):
    """checks if each character of "needle" can be found in order (but not
    necessarily consecutivly) in haystack.
    For example, "mm" can be found in "matchmove", but not "move2d"
    "m2" can be found in "move2d", but not "matchmove"
    >>> nonconsec_find("m2", "move2d")
    True
    >>> nonconsec_find("m2", "matchmove")
    False
    Anchored ensures the first letter matches
    >>> nonconsec_find("atch", "matchmove", anchored = False)
    True
    >>> nonconsec_find("atch", "matchmove", anchored = True)
    False
    >>> nonconsec_find("match", "matchmove", anchored = True)
    True
    If needle starts with a space, non-consecutive searching is disabled:
    >>> nonconsec_find(" mt", "matchmove", anchored = True)
    False
    >>> nonconsec_find(" ma", "matchmove", anchored = True)
    True
    >>> nonconsec_find(" oe", "matchmove", anchored = False)
    False
    >>> nonconsec_find(" ov", "matchmove", anchored = False)
    True
    """
    if "[" not in needle:
        # Search only the node-name part of "Name [Menu/Path]" strings.
        # BUG FIX: str.rpartition returns an empty head when the
        # separator is absent, which used to blank out bracketless
        # haystacks (e.g. "move2d") and made the doctests above fail.
        # Only strip when a " [" separator is actually present.
        head = haystack.rpartition(" [")[0]
        if head:
            haystack = head
    if len(haystack) == 0 and len(needle) > 0:
        # "a" is not in ""
        return False
    elif len(needle) == 0 and len(haystack) > 0:
        # "" is in "blah"
        return True
    elif len(needle) == 0 and len(haystack) == 0:
        # ..?
        return True
    # Turn haystack into list of characters (as strings are immutable)
    haystack = [hay for hay in str(haystack)]
    if needle.startswith(" "):
        # "[space]abc" does consecutive search for "abc" in "abcdef"
        if anchored:
            if "".join(haystack).startswith(needle.lstrip(" ")):
                return True
        else:
            if needle.lstrip(" ") in "".join(haystack):
                return True
    if anchored:
        if needle[0] != haystack[0]:
            return False
        else:
            # First letter matches, remove it for further matches
            needle = needle[1:]
            del haystack[0]
    for needle_atom in needle:
        try:
            needle_pos = haystack.index(needle_atom)
        except ValueError:
            return False
        else:
            # Dont find string in same pos or backwards again
            del haystack[:needle_pos + 1]
    return True
class NodeWeights(object):
    """Persistent per-item usage counts, stored as JSON in `fname`.

    get() returns the count normalized by the current maximum, so values
    always fall in [0, 1]. All file errors are printed and swallowed so
    a broken weights file never breaks the UI.

    Fixed for Python 3 hosts (PySide2 builds of Nuke): the original used
    Python-2-only `print` statements and `except OSError, e` syntax,
    which are SyntaxErrors on Python 3; the forms below parse on both.
    """

    def __init__(self, fname = None):
        self.fname = fname
        self._weights = {}
        # Only overwrite the weights file after a successful load
        self._successful_load = False

    def load(self):
        """Load weights from the JSON file, ignoring all errors."""
        if self.fname is None:
            return

        def _load_internal():
            import json
            if not os.path.isfile(self.fname):
                print("Weight file does not exist")
                return
            f = open(self.fname)
            self._weights = json.load(f)
            f.close()

        # Catch any errors, print traceback and continue
        try:
            _load_internal()
            self._successful_load = True
        except Exception:
            print("Error loading node weights")
            import traceback
            traceback.print_exc()
            self._successful_load = False

    def save(self):
        """Write weights back to the JSON file, ignoring all errors."""
        if self.fname is None:
            print("Not saving node weights, no file specified")
            return

        if not self._successful_load:
            # Avoid clobbering existing weights file on load error
            print("Not writing weights file because %r previously failed to load" % (
                self.fname))
            return

        def _save_internal():
            import json
            ndir = os.path.dirname(self.fname)
            if not os.path.isdir(ndir):
                try:
                    os.makedirs(ndir)
                except OSError as e:
                    if e.errno != 17: # errno 17 is "already exists"
                        raise
            f = open(self.fname, "w")
            # TODO: Limit number of saved items to some sane number
            json.dump(self._weights, fp = f)
            f.close()

        # Catch any errors, print traceback and continue
        try:
            _save_internal()
        except Exception:
            print("Error saving node weights")
            import traceback
            traceback.print_exc()

    def get(self, k, default = 0):
        """Return the weight for `k`, normalized by the current maximum."""
        if len(self._weights.values()) == 0:
            maxval = 1.0
        else:
            maxval = max(self._weights.values())
        maxval = max(1, maxval)
        maxval = float(maxval)
        return self._weights.get(k, default) / maxval

    def increment(self, key):
        """Bump the usage count for `key` by one."""
        self._weights.setdefault(key, 0)
        self._weights[key] += 1
class NodeModel(QtCore.QAbstractListModel):
    """List model of menu items filtered by the typed text, ordered by
    usage weight (descending) then alphabetically."""
    def __init__(self, mlist, weights, num_items = 15, filtertext = ""):
        super(NodeModel, self).__init__()
        self.weights = weights
        # Maximum number of rows exposed to the attached view
        self.num_items = num_items
        self._all = mlist
        self._filtertext = filtertext
        # _items is the list of objects to be shown, update sets this
        self._items = []
        self.update()
    def set_filter(self, filtertext):
        # Store the new filter text and rebuild the visible list
        self._filtertext = filtertext
        self.update()
    def update(self):
        """Rebuild the scored, filtered item list from the filter text."""
        filtertext = self._filtertext.lower()
        # Two spaces as a shortcut for [
        filtertext = filtertext.replace(" ", "[")
        scored = []
        for n in self._all:
            # Turn "3D/Shader/Phong" into "Phong [3D/Shader]"
            menupath = n['menupath'].replace("&", "")
            uiname = "%s [%s]" % (menupath.rpartition("/")[2], menupath.rpartition("/")[0])
            if nonconsec_find(filtertext, uiname.lower(), anchored=True):
                # Matches, get weighting and add to list of stuff
                score = self.weights.get(n['menupath'])
                scored.append({
                    'text': uiname,
                    'menupath': n['menupath'],
                    'menuobj': n['menuobj'],
                    'score': score})
        # Store based on scores (descending), then alphabetically
        s = sorted(scored, key = lambda k: (-k['score'], k['text']))
        self._items = s
        self.modelReset.emit()
    def rowCount(self, parent = QtCore.QModelIndex()):
        # Never show more than num_items rows
        return min(self.num_items, len(self._items))
    def data(self, index, role = Qt.DisplayRole):
        """Qt data accessor: display text plus a weight-tinted swatch."""
        if role == Qt.DisplayRole:
            # Return text to display
            raw = self._items[index.row()]['text']
            return raw
        elif role == Qt.DecorationRole:
            # Small pixmap whose saturation encodes the item's weight
            weight = self._items[index.row()]['score']
            hue = 0.4
            sat = weight
            if index.row() % 2 == 0:
                col = QtGui.QColor.fromHsvF(hue, sat, 0.9)
            else:
                col = QtGui.QColor.fromHsvF(hue, sat, 0.8)
            pix = QtGui.QPixmap(6, 12)
            pix.fill(col)
            return pix
        elif role == Qt.BackgroundRole:
            # Row background tinting is deliberately disabled: the bare
            # return makes everything below unreachable (kept for reference)
            return
            weight = self._items[index.row()]['score']
            hue = 0.4
            sat = weight ** 2 # gamma saturation to make faster falloff
            sat = min(1.0, sat)
            if index.row() % 2 == 0:
                return QtGui.QColor.fromHsvF(hue, sat, 0.9)
            else:
                return QtGui.QColor.fromHsvF(hue, sat, 0.8)
        else:
            # Ignore other roles
            return None
    def getorig(self, selected):
        """Map the first selected (or first) index back to its item dict."""
        # TODO: Is there a way to get this via data()? There's no
        # Qt.DataRole or something (only DisplayRole)
        if len(selected) > 0:
            # Get first selected index
            selected = selected[0]
        else:
            # Nothing selected, get first index
            selected = self.index(0)
        # TODO: Maybe check for IndexError?
        selected_data = self._items[selected.row()]
        return selected_data
class TabyLineEdit(QtWidgets.QLineEdit):
    """Line edit that turns tab/arrow/escape key presses into signals."""
    pressed_arrow = QtCore.Signal(str)  # emits "up" or "down"
    cancelled = QtCore.Signal()         # emitted when escape is pressed
    def event(self, event):
        """Make tab trigger returnPressed
        Also emit signals for the up/down arrows, and escape.
        """
        is_keypress = event.type() == QtCore.QEvent.KeyPress
        if is_keypress and event.key() == QtCore.Qt.Key_Tab:
            # Can't access tab key in keyPressedEvent
            self.returnPressed.emit()
            return True
        elif is_keypress and event.key() == QtCore.Qt.Key_Up:
            # These could be done in keyPressedEvent, but.. this is already here
            self.pressed_arrow.emit("up")
            return True
        elif is_keypress and event.key() == QtCore.Qt.Key_Down:
            self.pressed_arrow.emit("down")
            return True
        elif is_keypress and event.key() == QtCore.Qt.Key_Escape:
            self.cancelled.emit()
            return True
        else:
            # Everything else gets default QLineEdit handling
            return super(TabyLineEdit, self).event(event)
class TabTabTabWidget(QtWidgets.QDialog):
    """Popup node-creation dialog: a filter box over a weighted list of
    Nuke menu items, shown under the mouse cursor."""
    def __init__(self, on_create = None, parent = None, winflags = None):
        super(TabTabTabWidget, self).__init__(parent = parent)
        if winflags is not None:
            self.setWindowFlags(winflags)
        # Fixed-size popup
        self.setMinimumSize(200, 300)
        self.setMaximumSize(200, 300)
        # Store callback
        self.cb_on_create = on_create
        # Input box
        self.input = TabyLineEdit()
        # Node weighting
        self.weights = NodeWeights(os.path.expanduser("~/.nuke/tabtabtab_weights.json"))
        self.weights.load() # weights.save() called in close method
        import nuke
        nodes = find_menu_items(nuke.menu("Nodes")) + find_menu_items(nuke.menu("Nuke"))
        # List of stuff, and associated model
        self.things_model = NodeModel(nodes, weights = self.weights)
        self.things = QtWidgets.QListView()
        self.things.setModel(self.things_model)
        # Add input and items to layout
        layout = QtWidgets.QVBoxLayout()
        layout.addWidget(self.input)
        layout.addWidget(self.things)
        # Remove margins
        layout.setContentsMargins(0, 0, 0, 0)
        self.setLayout(layout)
        # Update on text change
        self.input.textChanged.connect(self.update)
        # Reset selection on text change
        self.input.textChanged.connect(lambda: self.move_selection(where="first"))
        self.move_selection(where = "first") # Set initial selection
        # Create node when enter/tab is pressed, or item is clicked
        self.input.returnPressed.connect(self.create)
        self.things.clicked.connect(self.create)
        # When esc pressed, close
        self.input.cancelled.connect(self.close)
        # Up and down arrow handling
        self.input.pressed_arrow.connect(self.move_selection)
    def under_cursor(self):
        """Move the dialog so the text input sits just under the cursor,
        clamped to the current screen's geometry."""
        def clamp(val, mi, ma):
            return max(min(val, ma), mi)
        # Get cursor position, and screen dimensions on active screen
        cursor = QtGui.QCursor().pos()
        screen = QtWidgets.QDesktopWidget().screenGeometry(cursor)
        # Get window position so cursor is just over text input
        # NOTE(review): self.width()/2 is a float on Python 3; confirm
        # QWidget.move accepts it on the targeted bindings.
        xpos = cursor.x() - (self.width()/2)
        ypos = cursor.y() - 13
        # Clamp window location to prevent it going offscreen
        xpos = clamp(xpos, screen.left(), screen.right() - self.width())
        ypos = clamp(ypos, screen.top(), screen.bottom() - (self.height()-13))
        # Move window
        self.move(xpos, ypos)
    def move_selection(self, where):
        """Move the list selection to "first", or wrap-around "up"/"down"."""
        if where not in ["first", "up", "down"]:
            raise ValueError("where should be either 'first', 'up', 'down', not %r" % (
                where))
        first = where == "first"
        up = where == "up"
        down = where == "down"
        if first:
            self.things.setCurrentIndex(self.things_model.index(0))
            return
        cur = self.things.currentIndex()
        if up:
            # Wrap from the first row to the last
            new = cur.row() - 1
            if new < 0:
                new = self.things_model.rowCount() - 1
        elif down:
            # Wrap from the last row to the first
            new = cur.row() + 1
            count = self.things_model.rowCount()
            if new > count-1:
                new = 0
        self.things.setCurrentIndex(self.things_model.index(new))
    def event(self, event):
        """Close when window becomes inactive (click outside of window)
        """
        if event.type() == QtCore.QEvent.WindowDeactivate:
            self.close()
            return True
        else:
            return super(TabTabTabWidget, self).event(event)
    def update(self, text):
        """On text change, selects first item and updates filter text
        """
        self.things.setCurrentIndex(self.things_model.index(0))
        self.things_model.set_filter(text)
    def show(self):
        """Select all the text in the input (which persists between
        show()'s)
        Allows typing over previously created text, and [tab][tab] to
        create previously created node (instead of the most popular)
        """
        # Load the weights everytime the panel is shown, to prevent
        # overwritting weights from other Nuke instances
        self.weights.load()
        # Select all text to allow overwriting
        self.input.selectAll()
        self.input.setFocus()
        super(TabTabTabWidget, self).show()
    def close(self):
        """Save weights when closing
        """
        self.weights.save()
        super(TabTabTabWidget, self).close()
    def create(self):
        """Create the selected node via the callback, bump its weight,
        remember its name for the next [tab], then close."""
        # Get selected item
        selected = self.things.selectedIndexes()
        if len(selected) == 0:
            return
        thing = self.things_model.getorig(selected)
        # Store the full UI name of the created node, so it is the
        # active node on the next [tab]. Prefix it with space,
        # to disable substring matching
        if thing['text'].startswith(" "):
            prev_string = thing['text']
        else:
            prev_string = " %s" % thing['text']
        self.input.setText(prev_string)
        # Create node, increment weight and close
        self.cb_on_create(thing = thing)
        self.weights.increment(thing['menupath'])
        self.close()
_tabtabtab_instance = None
def main():
    """Show the tabtabtab popup under the cursor (shared single instance)."""
    global _tabtabtab_instance
    if _tabtabtab_instance is not None:
        # Reuse the one live widget: a function-local widget would go
        # out of scope (and vanish) when this function returns, so a
        # single shared instance is repositioned and re-shown instead.
        _tabtabtab_instance.under_cursor()
        _tabtabtab_instance.show()
        _tabtabtab_instance.raise_()
        return

    def on_create(thing):
        # Invoke the stored Nuke menu item to actually create the node
        try:
            thing['menuobj'].invoke()
        except ImportError:
            # print() form parses on both Python 2 and 3 (the file's
            # PySide2 branch targets Python 3 hosts); the original
            # Python-2-only print statement was a SyntaxError there.
            print("Error creating %s" % thing)

    t = TabTabTabWidget(on_create = on_create, winflags = Qt.FramelessWindowHint)
    # Make dialog appear under cursor, as Nuke's builtin one does
    t.under_cursor()
    # Show, and make front-most window (mostly for OS X)
    t.show()
    t.raise_()
    # Keep the TabTabTabWidget alive, but don't keep an extra
    # reference to it, otherwise Nuke segfaults on exit. Hacky.
    # https://github.com/dbr/tabtabtab-nuke/issues/4
    import weakref
    _tabtabtab_instance = weakref.proxy(t)
if __name__ == '__main__':
    try:
        import nuke
        # Inside Nuke: bind the popup to the Tab key via the Edit menu
        m_edit = nuke.menu("Nuke").findItem("Edit")
        m_edit.addCommand("Tabtabtab", main, "Tab")
    except ImportError:
        # For testing outside Nuke
        # NOTE(review): under the PySide2 branch QApplication lives in
        # QtWidgets, not QtGui -- confirm this fallback is only exercised
        # with PySide/PyQt4.
        app = QtGui.QApplication(sys.argv)
        main()
        app.exec_()
| {
"repo_name": "dbr/tabtabtab-nuke",
"path": "tabtabtab.py",
"copies": "1",
"size": "17610",
"license": "unlicense",
"hash": 6407847452067056000,
"line_mean": 29.6260869565,
"line_max": 97,
"alpha_frac": 0.5658716638,
"autogenerated": false,
"ratio": 3.9805605786618443,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5046432242461845,
"avg_score": null,
"num_lines": null
} |
"""Alternative to reload().
This works by executing the module in a scratch namespace, and then
patching classes, methods and functions in place. This avoids the
need to patch instances. New objects are copied into the target
namespace.
Some of the many limitiations include:
- Global mutable objects other than classes are simply replaced, not patched
- Code using metaclasses is not handled correctly
- Code creating global singletons is not handled correctly
- Functions and methods using decorators (other than classmethod and
staticmethod) is not handled correctly
- Renamings are not handled correctly
- Dependent modules are not reloaded
- When a dependent module contains 'from foo import bar', and
reloading foo deletes foo.bar, the dependent module continues to use
the old foo.bar object rather than failing
- Frozen modules and modules loaded from zip files aren't handled
correctly
- Classes involving __slots__ are not handled correctly
"""
import imp
import marshal
import sys
import types
def xreload(mod):
    """Reload a module in place, updating classes, methods and functions.
    Args:
      mod: a module object
    Returns:
      The (updated) input object itself.

    Note: relies on the Python 2 `imp` API and the builtin `reload()`.
    """
    # Get the module name, e.g. 'foo.bar.whatever'
    modname = mod.__name__
    # Get the module namespace (dict) early; this is part of the type check
    modns = mod.__dict__
    # Parse it into package name and module name, e.g. 'foo.bar' and 'whatever'
    i = modname.rfind(".")
    if i >= 0:
        pkgname, modname = modname[:i], modname[i+1:]
    else:
        pkgname = None
    # Compute the search path
    if pkgname:
        # We're not reloading the package, only the module in it
        pkg = sys.modules[pkgname]
        path = pkg.__path__ # Search inside the package
    else:
        # Search the top-level module path
        pkg = None
        path = None # Make find_module() uses the default search path
    # Find the module; may raise ImportError
    (stream, filename, (suffix, mode, kind)) = imp.find_module(modname, path)
    # Turn it into a code object
    try:
        # Is it Python source code or byte code read from a file?
        if kind not in (imp.PY_COMPILED, imp.PY_SOURCE):
            # Fall back to built-in reload()
            return reload(mod)
        if kind == imp.PY_SOURCE:
            source = stream.read()
            code = compile(source, filename, "exec")
        else:
            # Compiled .pyc: the payload is a marshalled code object
            code = marshal.load(stream)
    finally:
        if stream:
            stream.close()
    # Execute the code. We copy the module dict to a temporary; then
    # clear the module dict; then execute the new code in the module
    # dict; then swap things back and around. This trick (due to
    # Glyph Lefkowitz) ensures that the (readonly) __globals__
    # attribute of methods and functions is set to the correct dict
    # object.
    tmpns = modns.copy()
    modns.clear()
    modns["__name__"] = tmpns["__name__"]
    exec(code, modns)
    # Now we get to the hard part
    oldnames = set(tmpns)
    newnames = set(modns)
    # Update attributes in place
    for name in oldnames & newnames:
        # Names only in newnames are already fresh; names only in
        # oldnames were deleted by the clear() above.
        modns[name] = _update(tmpns[name], modns[name])
    # Done!
    return mod
def _update(oldobj, newobj):
    """Update oldobj, if possible in place, with newobj.
    If oldobj is immutable, this simply returns newobj.
    Args:
      oldobj: the object to be updated
      newobj: the object used as the source for the update
    Returns:
      either oldobj, updated in place, or newobj.
    """
    if oldobj is newobj:
        # Probably something imported
        return newobj
    if type(oldobj) is not type(newobj):
        # Cop-out: if the type changed, give up
        return newobj
    if hasattr(newobj, "__reload_update__"):
        # Provide a hook for updating
        return newobj.__reload_update__(oldobj)
    # NOTE(review): types.ClassType (old-style classes) exists only on
    # Python 2; a Python 3 port would dispatch on `type` instead.
    if isinstance(newobj, types.ClassType):
        return _update_class(oldobj, newobj)
    if isinstance(newobj, types.FunctionType):
        return _update_function(oldobj, newobj)
    if isinstance(newobj, types.MethodType):
        return _update_method(oldobj, newobj)
    if isinstance(newobj, classmethod):
        return _update_classmethod(oldobj, newobj)
    if isinstance(newobj, staticmethod):
        return _update_staticmethod(oldobj, newobj)
    # Not something we recognize, just give up
    return newobj
# All of the following functions have the same signature as _update()
def _update_function(oldfunc, newfunc):
"""Update a function object."""
oldfunc.__doc__ = newfunc.__doc__
oldfunc.__dict__.update(newfunc.__dict__)
oldfunc.__code__ = newfunc.__code__
oldfunc.__defaults__ = newfunc.__defaults__
return oldfunc
def _update_method(oldmeth, newmeth):
    """Update a method object."""
    # XXX What if im_func is not a function?
    # NOTE(review): im_func is the Python 2 name for the wrapped
    # function (Python 3 renamed it to __func__).
    _update(oldmeth.im_func, newmeth.im_func)
    return oldmeth
def _update_class(oldclass, newclass):
    """Patch *oldclass* in place with the attributes of *newclass*."""
    old_attrs = set(oldclass.__dict__)
    new_attrs = set(newclass.__dict__)
    # Attributes that only exist on the new class are copied over
    for attr in new_attrs - old_attrs:
        setattr(oldclass, attr, newclass.__dict__[attr])
    # Attributes removed from the new class are deleted
    for attr in old_attrs - new_attrs:
        delattr(oldclass, attr)
    # Attributes present on both are updated recursively, skipping the
    # per-class bookkeeping entries
    for attr in (old_attrs & new_attrs) - {"__dict__", "__doc__"}:
        setattr(oldclass, attr,
                _update(oldclass.__dict__[attr], newclass.__dict__[attr]))
    return oldclass
def _update_classmethod(oldcm, newcm):
    """Update a classmethod update."""
    # classmethod objects expose no mutable attributes, so patch the
    # wrapped function instead: __get__() hands back a bound method
    # whose underlying function can be updated in place. The class is
    # not available here, but any object except None works as the
    # __get__() argument.
    _update(oldcm.__get__(0), newcm.__get__(0))
    return newcm
def _update_staticmethod(oldsm, newsm):
    """Update a staticmethod update."""
    # staticmethod objects expose no mutable attributes, so extract the
    # underlying function via __get__() (which returns it directly) and
    # update that in place. The class is not available here, but any
    # object except None works as the __get__() argument.
    _update(oldsm.__get__(0), newsm.__get__(0))
    return newsm
| {
"repo_name": "wwezhuimeng/switch",
"path": "switchy/xreload.py",
"copies": "2",
"size": "6369",
"license": "mpl-2.0",
"hash": -1547599373216131300,
"line_mean": 32.5210526316,
"line_max": 79,
"alpha_frac": 0.6575600565,
"autogenerated": false,
"ratio": 3.9314814814814816,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 190
} |
#alternative tree subsampling algorithm. Define contours eminating out from the root of the reference tree. At each contour, take just one descendant sequence --- ideally, a somewhat average one. Maybe avoids the problem of picking weird taxa because of their long branches.
from ete3 import Tree
from operator import itemgetter
import os, re
import numpy as np
def get_mean_root_to_tip(tree):
    """Mean branch-length distance from the root to each leaf of *tree*."""
    root = tree.get_tree_root()
    return np.mean([tree.get_distance(root, leaf) for leaf in tree])
def contour_node(root, node, contour): #check whether a node is immediately descendent of a contour
    # True when the contour line crosses the branch leading into `node`:
    # the node lies at or beyond the contour distance from the root while
    # its parent (node.up) lies at or within it.
    # NOTE(review): reads the module-level `tree` rather than using the
    # `root` argument's own tree -- fine for this script, not reusable as-is.
    if (tree.get_distance(root, node) >= contour) and (tree.get_distance(root, node.up) <= contour):
        return True
    else:
        return False
def pick_average_tip(node): #given a node, pick the most average of its descendants (in terms of branch length for now...)
    """Return the descendant leaf of `node` whose branch-length distance
    from `node` is the median of all its leaves (ties broken by sort order).

    NOTE(review): reads the module-level `tree` for distances -- fine for
    this script, not reusable as-is.
    """
    dists = {}
    for leaf in node:
        d = tree.get_distance(node, leaf)
        dists[leaf] = d
    sorted_dists = sorted(dists.items(), key=itemgetter(1))
    # Floor division: len(...)/2 would be a float index on Python 3;
    # // yields the identical integer result on Python 2.
    middle_node = sorted_dists[len(sorted_dists) // 2][0]
    return middle_node
# Demo driver (Python 2: bare print statements throughout).
tree = Tree("((A:0.1,B:0.2):0.3,(C:0.5,D:0.1):0.05);") #or read in from a file
print tree
mean_root_to_tip = get_mean_root_to_tip(tree)
#divide mean distance into some number of contours
# NOTE(review): contours are mean/2, mean/3, ... mean/(num_contours+1),
# i.e. harmonically spaced, not evenly spaced -- confirm this is intended.
num_contours = 4
contours = []
for i in range(num_contours):
    print i+2
    contours.append(mean_root_to_tip/float(i+2))
print contours
#for each contour, print num of nodes for which one descendant will be picked
root = tree.get_tree_root()
for c in contours:
    to_keep = []
    for node in tree.traverse():
        if contour_node(root, node, c):
            # One representative ("most average") leaf per crossed branch
            node_to_keep = pick_average_tip(node)
            to_keep.append(node_to_keep)
    print "Contour at " + str(c) + ", " + str(len(to_keep)) + " in total."
    for taxon in to_keep:
        print taxon.name
| {
"repo_name": "Tancata/phylo",
"path": "subsample_tree_by_contours.py",
"copies": "1",
"size": "2074",
"license": "mit",
"hash": 8570352448152033000,
"line_mean": 34.7586206897,
"line_max": 274,
"alpha_frac": 0.6629701061,
"autogenerated": false,
"ratio": 3.334405144694534,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44973752507945336,
"avg_score": null,
"num_lines": null
} |
# Alternative solution
#
# For the task description, see seq_sum.py
#####################################################################
# Numbers parsed from a single whitespace-separated input line.
nums = list(map(int, input().split()))

length = len(nums)
run_sum = 0    # sum of the current non-decreasing run ("sum" would shadow the builtin)
run_count = 0  # number of elements in the current run
best_sum = 0
best_count = 0

# Walk adjacent pairs; a run of non-decreasing values breaks whenever
# the current number is smaller than the previous one.
for i in range(1, length):
    run_sum += nums[i - 1]
    run_count += 1
    if nums[i] < nums[i - 1]:
        # Run ended at nums[i-1]: keep it if it is longer than the best,
        # or equally long with a larger sum.
        if (run_count > best_count) or (run_count == best_count and run_sum > best_sum):
            best_count = run_count
            best_sum = run_sum
        run_sum = 0
        run_count = 0

# BUG FIX: the original never scored the trailing run -- the loop only
# closes a run when a decrease is seen, so the final run (which always
# contains the last element) was silently dropped (e.g. "1 2 3" printed
# 0 and 0). Close it here.
if length > 0:
    run_sum += nums[-1]
    run_count += 1
    if (run_count > best_count) or (run_count == best_count and run_sum > best_sum):
        best_count = run_count
        best_sum = run_sum

print(best_sum)
print(best_count)
| {
"repo_name": "malja/cvut-python",
"path": "cviceni01/seq_sum1.py",
"copies": "1",
"size": "1081",
"license": "mit",
"hash": 991012712567642900,
"line_mean": 21.7555555556,
"line_max": 90,
"alpha_frac": 0.53515625,
"autogenerated": false,
"ratio": 2.4323040380047507,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.34674602880047506,
"avg_score": null,
"num_lines": null
} |
"""Alter OAuth2Token.token_type to Enum
Revision ID: 82184d7d1e88
Revises: 5e2954a2af18
Create Date: 2016-11-10 21:14:33.787194
"""
# revision identifiers, used by Alembic.
revision = '82184d7d1e88'
down_revision = '5e2954a2af18'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the PostgreSQL `tokentypes` ENUM and convert
    oauth2_token.token_type from VARCHAR(40) to it."""
    connection = op.get_bind()
    with op.batch_alter_table('oauth2_token') as batch_op:
        # The ENUM type must exist in the database before the column
        # can be cast to it (see postgresql_using below).
        tokentypes = sa.dialects.postgresql.ENUM('Bearer', name='tokentypes')
        tokentypes.create(connection)
        batch_op.alter_column('token_type',
                              existing_type=sa.VARCHAR(length=40),
                              type_=sa.Enum('Bearer', name='tokentypes'),
                              existing_nullable=False,
                              postgresql_using='token_type::tokentypes')
def downgrade():
    """Revert oauth2_token.token_type to VARCHAR(40) and drop the enum type."""
    connection = op.get_bind()
    with op.batch_alter_table('oauth2_token') as batch_op:
        # Move the column back to VARCHAR first ...
        batch_op.alter_column('token_type',
                              existing_type=sa.Enum('Bearer', name='tokentypes'),
                              type_=sa.VARCHAR(length=40),
                              existing_nullable=False)
        # ... then drop the enum type, which must no longer be referenced.
        tokentypes = sa.dialects.postgresql.ENUM('Bearer', name='tokentypes')
        tokentypes.drop(connection)
| {
"repo_name": "frol/flask-restplus-server-example",
"path": "migrations/versions/82184d7d1e88_altered-OAuth2Token-token_type-to-Enum.py",
"copies": "1",
"size": "1172",
"license": "mit",
"hash": -2411431640758936000,
"line_mean": 26.9047619048,
"line_max": 77,
"alpha_frac": 0.6527303754,
"autogenerated": false,
"ratio": 3.4069767441860463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45597071195860467,
"avg_score": null,
"num_lines": null
} |
"""Alters meal.scheduled_for WITH TIMEZONE, adds NOT NULL constraint to \
users.first_name and users.last_name
Revision ID: 3e4b230c5582
Revises: ddd00fbe2758
Create Date: 2017-07-01 10:47:47.789316
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '3e4b230c5582'
down_revision = 'ddd00fbe2758'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # meal.scheduled_for: naive DATETIME -> timezone-aware, and NOT NULL.
    op.alter_column('meal', 'scheduled_for', existing_type=sa.DateTime(),
                    type_=sa.DateTime(timezone=True), nullable=False)
    # users.first_name / users.last_name become mandatory.
    op.alter_column('users', 'first_name',
                    existing_type=sa.VARCHAR(length=30),
                    nullable=False)
    op.alter_column('users', 'last_name',
                    existing_type=sa.VARCHAR(length=30),
                    nullable=False)
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Make the name columns optional again.
    op.alter_column('users', 'last_name',
                    existing_type=sa.VARCHAR(length=30),
                    nullable=True)
    op.alter_column('users', 'first_name',
                    existing_type=sa.VARCHAR(length=30),
                    nullable=True)
    # Revert meal.scheduled_for to a timezone-naive DATETIME.  The
    # original downgrade passed type_=sa.DateTime(timezone=True) -- the
    # *upgraded* type -- so downgrading never actually removed the
    # timezone; type_ must mirror the upgrade's existing_type.
    op.alter_column('meal', 'scheduled_for',
                    existing_type=sa.DateTime(timezone=True),
                    type_=sa.DateTime(), nullable=False)
    # ### end Alembic commands ###
| {
"repo_name": "Rdbaker/Mealbound",
"path": "migrations/versions/3e4b230c5582_.py",
"copies": "1",
"size": "1449",
"license": "bsd-3-clause",
"hash": 3814632638363907000,
"line_mean": 32.6976744186,
"line_max": 73,
"alpha_frac": 0.6266390614,
"autogenerated": false,
"ratio": 3.6134663341645887,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4740105395564589,
"avg_score": null,
"num_lines": null
} |
# Although this approach has a time complexity of O(n),
# it has a space complexity of O(n). And it turns out to be slow.
# See main2.py for a better approach
from collections import defaultdict
from functools import reduce
import heapq
class Solution:
    def maximumSwap(self, num: int) -> int:
        """Return the largest value obtainable by swapping at most two digits.

        Greedy: scan from the most significant digit; at the first
        position whose digit can be beaten by a strictly larger digit
        occurring further right, swap with the *rightmost* occurrence of
        the largest such digit (rightmost keeps the more significant
        positions intact).  O(n) time, O(1) extra space (the lookup table
        holds at most 10 entries).

        The previous implementation was incorrect: it rebuilt the number
        from the least-significant-first digit list without reversing it,
        and let smaller digits popped later from the heap overwrite the
        chosen swap indices -- e.g. it returned 2376 for 2736 instead of
        7236.
        """
        digits = list(str(num))
        # Rightmost index of every digit value present.
        last = {int(d): i for i, d in enumerate(digits)}
        for i, d in enumerate(digits):
            # Try candidate digits from 9 down to just above d.
            for candidate in range(9, int(d), -1):
                if last.get(candidate, -1) > i:
                    j = last[candidate]
                    digits[i], digits[j] = digits[j], digits[i]
                    return int("".join(digits))
        return num  # already maximal; no swap helps
| {
"repo_name": "y-usuzumi/survive-the-course",
"path": "leetcode/670.Maximum_Swap/python/main.py",
"copies": "1",
"size": "1198",
"license": "bsd-3-clause",
"hash": -2835579132482555000,
"line_mean": 35.303030303,
"line_max": 111,
"alpha_frac": 0.5843071786,
"autogenerated": false,
"ratio": 3.8770226537216828,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49613298323216826,
"avg_score": null,
"num_lines": null
} |
"""altiumdb_frontend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from partdb.views import *
from django.conf.urls.static import static
# Route table for the part-database frontend.
urlpatterns = [
    # NOTE(review): pattern has no trailing slash or '$' anchor, so it
    # also matches paths such as '/administrator'; Django convention is
    # r'^admin/' -- confirm before changing, existing links may rely on it.
    url(r'^admin', admin.site.urls),
    url(r'^$', about, name='partdb-home'),
    url(r'^about$', about, name='partdb-about'),
    url(r'^submit$', submit_part, name='partdb-submit-part'),
    url(r'^list$', list_parts, name='partdb-list-parts'),
    url(r'^login$', user_login, name='partdb-login'),
    url(r'^logout$', user_logout, name='partdb-logout'),
    url(r'^register$', register, name='partdb-register'),
] | {
"repo_name": "avistel/cyckotron",
"path": "cyckotron/urls.py",
"copies": "1",
"size": "1288",
"license": "bsd-3-clause",
"hash": -7401729041303321000,
"line_mean": 41.9666666667,
"line_max": 84,
"alpha_frac": 0.6436335404,
"autogenerated": false,
"ratio": 3.389473684210526,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4533107224610526,
"avg_score": null,
"num_lines": null
} |
# Multi-character tokens that must survive reversal as a single unit,
# mapped to one-character stand-ins.
alt_map = {'ins':'0'}
# Watson-Crick base pairing.
complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}

def revComplement(seq):
    """Return the reverse complement of *seq*.

    Tokens listed in ``alt_map`` (e.g. 'ins') are protected: each is
    collapsed to its one-character marker first, so it is neither
    reversed internally nor complemented, and is restored afterwards.
    Characters without a complement entry are passed through unchanged.
    """
    for token, marker in alt_map.items():
        seq = seq.replace(token, marker)
    # Complementing and reversing commute element-wise.
    flipped = ''.join(complement.get(base, base) for base in reversed(seq))
    for token, marker in alt_map.items():
        flipped = flipped.replace(marker, token)
    return flipped
def pairConcatenate(reads1, reads2):
    """Join mate pairs from the same sequencing cluster into pairedSeq objects.

    For every cluster present in both read lists, the forward read is
    concatenated with the reverse complement of the reverse read, both
    directly (``seq``) and with an 'N' spacer (``seqN``).
    Clusters present in only one list are skipped.
    """
    # Reuse the shared helper instead of duplicating its dict-building
    # logic inline (consistency with readPairList below).
    clusters1 = buildClusterDict(reads1)
    clusters2 = buildClusterDict(reads2)
    pairedReads = []
    for c in clusters1:
        if c in clusters2:
            fwd = clusters1[c]
            rev = clusters2[c]
            revSeq = revComplement(rev.seq)
            newRead = pairedSeq(fwd.seq_id, rev.seq_id,
                                c, fwd.header, rev.header,
                                fwd.seq + revSeq, fwd.seq + 'N' + revSeq)
            pairedReads.append(newRead)
    return pairedReads
def buildClusterDict(reads):
    """Map each read's cluster id to the read itself (last one wins)."""
    return {read.cluster: read for read in reads}
def readPairList(reads1, reads2):
    """Return [read1, read2] pairs of reads from the same sequencing cluster.

    Pairs are emitted in the cluster order of *reads1*; clusters missing
    from either list are skipped.
    """
    first = buildClusterDict(reads1)
    second = buildClusterDict(reads2)
    return [[first[c], second[c]] for c in first if c in second]
class pairedSeq(object):
    """Plain record for a concatenated read pair.

    Attributes mirror the constructor arguments: the two original
    sequence names and headers, the shared cluster id, and the joined
    sequence without (``seq``) and with (``seqN``) an 'N' spacer.
    """

    def __init__(self, seq_name1, seq_name2, cluster, header1, header2, seq,
                 seqN):
        self.seq_name1 = seq_name1
        self.seq_name2 = seq_name2
        self.cluster = cluster
        self.header1 = header1
        self.header2 = header2
        self.seq = seq
        self.seqN = seqN

    def __repr__(self):
        # Added for debuggability: identify the pair without dumping sequences.
        return 'pairedSeq(cluster=%r, seq_name1=%r, seq_name2=%r)' % (
            self.cluster, self.seq_name1, self.seq_name2)
| {
"repo_name": "sjspence/spenceOTU",
"path": "epicBarcoder/pairedEnds.py",
"copies": "2",
"size": "1858",
"license": "mit",
"hash": 4492282594061974500,
"line_mean": 31.5964912281,
"line_max": 108,
"alpha_frac": 0.5920344456,
"autogenerated": false,
"ratio": 3.3720508166969148,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9890350699112083,
"avg_score": 0.014746912636966382,
"num_lines": 57
} |
""" Altmetric """
import requests
try:
import json
except ImportError:
import simplejson as json
class AltmetricException(Exception):
    """Base class for every error raised by this module."""


class AltmetricHTTPException(AltmetricException):
    """Raised when the API answers with an unexpected HTTP status."""

    def __init__(self, status_code, msg):
        # Forward msg to Exception so str(e) and logging show the reason;
        # the original never called the base __init__, so str(e) was ''.
        super(AltmetricHTTPException, self).__init__(msg)
        self.status_code = status_code
        self.msg = msg


class ParseException(AltmetricException):
    """Raised when a 200 response body is not valid JSON."""
class Altmetric(object):
    """Minimal client for the Altmetric REST API.

    Endpoint methods are resolved dynamically via __getattr__: e.g.
    ``Altmetric().doi('10.1038/480426a')`` performs a GET against
    ``http://api.altmetric.com/v1/doi/10.1038/480426a``.
    """

    def __init__(self,
                 apikey='',
                 apiver='v1'):
        """
        Cache API key and address.
        """
        self.apikey = apikey
        self.apiver = apiver
        self.default_params = {}
        if self.apikey:
            # The key is sent as a query parameter on every request.
            self.default_params = {'key': apikey}
        self.api_url = "http://api.altmetric.com/%s/" % self.apiver

    def __repr__(self):
        if self.apikey:
            return '<Altmetric %s: %s>' % (self.apiver, self.apikey)
        else:
            return '<Altmetric %s>' % self.apiver

    def call(self, method, *args, **kwargs):
        """GET ``<api_url><method>/<args...>`` and return the parsed JSON.

        Returns None when the API answers 404 'Not Found'; raises
        ParseException for unparseable 200 bodies and
        AltmetricHTTPException for any other status.
        """
        url = self.api_url + method + "/" + "/".join([a for a in args])
        params = kwargs or {}
        params.update(self.default_params)
        headers = {}
        req = requests.get(url, params=params, headers=headers)
        if req.status_code == 200:
            try:
                return json.loads(req.text)
            except ValueError as e:
                # str(e) instead of e.message: BaseException.message was
                # removed in Python 3 (deprecated since 2.6).
                raise ParseException(str(e))
        elif req.status_code == 404 and req.text == 'Not Found':
            return None
        else:
            raise AltmetricHTTPException(req.status_code, req.text)

    def __getattr__(self, method_name):
        # Any unknown attribute becomes an API endpoint call bound to self.
        def get(self, *args, **kwargs):
            return self.call(method_name, *args, **kwargs)
        return get.__get__(self)
| {
"repo_name": "lnielsen/python-altmetric",
"path": "altmetric/altmetric.py",
"copies": "1",
"size": "1752",
"license": "mit",
"hash": 8322537770773581000,
"line_mean": 24.3913043478,
"line_max": 71,
"alpha_frac": 0.5559360731,
"autogenerated": false,
"ratio": 3.945945945945946,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5001882019045946,
"avg_score": null,
"num_lines": null
} |
""" altsets.py -- An alternate implementation of Sets.py
Implements set operations using sorted lists as the underlying data structure.
Advantages:
* Space savings -- lists are much more compact than a dictionary
based implementation.
* Flexibility -- elements do not need to be hashable, only __cmp__
is required.
* Fast operations depending on the underlying data patterns.
Non-overlapping sets get united, intersected, or differenced
with only log(N) element comparisons. Results are built using
fast-slicing.
* Algorithms are designed to minimize the number of compares
which can be expensive.
  * Natural support for sets of sets. No special accommodation needs to
    be made to use a set or dict as a set member, but users need to be
    careful not to mutate a member of a set since that may break its
    sort invariant.
Disadvantages:
* Set construction uses list.sort() with potentially N log(N)
comparisons.
* Membership testing and element addition use log(N) comparisons.
Element addition uses list.insert() with takes O(N) time.
ToDo:
* Make the search routine adapt to the data; falling backing to
a linear search when encountering random data.
"""
from bisect import bisect_left, insort_left
class Set(object):
    """A set backed by a sorted, duplicate-free list.

    Elements only need to support comparison (__cmp__), not hashing.
    The binary operations merge the two sorted backing lists, using
    bisect to bulk-copy non-overlapping stretches with a single slice.
    This is Python 2 code (uses the ``cmp`` builtin).
    """
    def __init__(self, iterable):
        """Build the sorted, de-duplicated backing list from *iterable*."""
        data = list(iterable)
        data.sort()
        result = data[:1]
        for elem in data[1:]:
            # data is sorted, so duplicates are adjacent.
            if elem == result[-1]:
                continue
            result.append(elem)
        self.data = result
    def __repr__(self):
        return 'Set(' + repr(self.data) + ')'
    def __iter__(self):
        """Iterate the elements in sorted order."""
        return iter(self.data)
    def __contains__(self, elem):
        """Membership test in O(log n) via binary search."""
        data = self.data
        i = bisect_left(self.data, elem, 0)
        return i<len(data) and data[i] == elem
    def add(self, elem):
        """Insert *elem* unless already present (keeps the list sorted/unique)."""
        if elem not in self:
            insort_left(self.data, elem)
    def remove(self, elem):
        """Remove *elem* if present; a missing element is silently ignored."""
        data = self.data
        i = bisect_left(self.data, elem, 0)
        if i<len(data) and data[i] == elem:
            del data[i]
    def _getotherdata(other):
        # Coerce the operand to a Set so its backing list is sorted/unique.
        if not isinstance(other, Set):
            other = Set(other)
        return other.data
    _getotherdata = staticmethod(_getotherdata)
    def __cmp__(self, other, cmp=cmp):
        # Python 2 ordering hook: compares the sorted backing lists.
        return cmp(self.data, Set._getotherdata(other))
    def union(self, other, find=bisect_left):
        """Return a new Set with the elements of both operands.

        Merge walk over the two sorted lists; when one side is ahead,
        bisect finds how far the other side can be bulk-copied in one
        slice.  Exhausting either list raises IndexError, which ends
        the loop and flushes both remainders.
        """
        i = j = 0
        x = self.data
        y = Set._getotherdata(other)
        result = Set([])
        append = result.data.append
        extend = result.data.extend
        try:
            while 1:
                if x[i] == y[j]:
                    append(x[i])
                    i += 1
                    j += 1
                elif x[i] > y[j]:
                    cut = find(y, x[i], j)
                    extend(y[j:cut])
                    j = cut
                else:
                    cut = find(x, y[j], i)
                    extend(x[i:cut])
                    i = cut
        except IndexError:
            extend(x[i:])
            extend(y[j:])
        return result
    def intersection(self, other, find=bisect_left):
        """Return a new Set with the elements common to both operands."""
        i = j = 0
        x = self.data
        y = Set._getotherdata(other)
        result = Set([])
        append = result.data.append
        try:
            while 1:
                if x[i] == y[j]:
                    append(x[i])
                    i += 1
                    j += 1
                elif x[i] > y[j]:
                    # Skip y's stretch below x[i] in one bisect step.
                    j = find(y, x[i], j)
                else:
                    i = find(x, y[j], i)
        except IndexError:
            # Either list exhausted: nothing further can be common.
            pass
        return result
    def difference(self, other, find=bisect_left):
        """Return a new Set with the elements of self not present in other."""
        i = j = 0
        x = self.data
        y = Set._getotherdata(other)
        result = Set([])
        extend = result.data.extend
        try:
            while 1:
                if x[i] == y[j]:
                    i += 1
                    j += 1
                elif x[i] > y[j]:
                    j = find(y, x[i], j)
                else:
                    cut = find(x, y[j], i)
                    extend(x[i:cut])
                    i = cut
        except IndexError:
            # Only x's remainder belongs in the difference.
            extend(x[i:])
        return result
    def symmetric_difference(self, other, find=bisect_left):
        """Return a new Set with the elements in exactly one of the operands."""
        i = j = 0
        x = self.data
        y = Set._getotherdata(other)
        result = Set([])
        extend = result.data.extend
        try:
            while 1:
                if x[i] == y[j]:
                    # Shared elements are dropped.
                    i += 1
                    j += 1
                elif x[i] > y[j]:
                    cut = find(y, x[i], j)
                    extend(y[j:cut])
                    j = cut
                else:
                    cut = find(x, y[j], i)
                    extend(x[i:cut])
                    i = cut
        except IndexError:
            extend(x[i:])
            extend(y[j:])
        return result
# --- Demonstration / informal smoke test (Python 2 print statements) ---
a = Set('abracadabra')  # letters {'a','b','c','d','r'}
b = Set('alacazam')     # letters {'a','c','l','m','z'}
print a < b
print a
print b
print map(a.__contains__, list('abcdr'))
print map(a.__contains__, list('0ey'))
print list(a)
print a.union(b), ' :union'
print b.union(a), ' :union'
print a.intersection(b), ' :intersection'
print a.difference(b), ' :difference'
print b.difference(a), ' :difference'
print a.symmetric_difference(b), ' :symmetric_difference'
print b.symmetric_difference(a), ' :symmetric_difference'
# Identities: (a & b) | (a ^ b) == a | b   and   (a & b) & (a ^ b) == {}
print a.intersection(b).union(a.symmetric_difference(b)) == a.union(b)
print a.intersection(b).intersection(a.symmetric_difference(b)) == Set([])
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/230113_Implementatisets_using_sorted/recipe-230113.py",
"copies": "1",
"size": "5591",
"license": "mit",
"hash": -8175215371355750000,
"line_mean": 27.9689119171,
"line_max": 78,
"alpha_frac": 0.5042031837,
"autogenerated": false,
"ratio": 3.9400986610288937,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9918429936712325,
"avg_score": 0.005174381603313596,
"num_lines": 193
} |
# altsets.py - This is a slightly adjusted version of the altsets module
# that was submitted to the Active State Programmer Network by Raymond
# Hettinger. It has been included in the Plotter package to allow for
# compatibility with Jython (Python 2.1). The sets module that comes along
# with Python 2.3 is only backwards compatible to version 2.2
# Original Source see: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/230113
#
# Title: Implementation of sets using sorted lists
# Submitter: Raymond Hettinger (other recipes)
# Last Updated: 2003/10/21
# Version no: 1.0
# Category: Algorithms
#
# Description:
#
# Inspired by Py2.3s TimSort, this implementation of sets.py uses sorted lists
# instead of dictionaries. For clumped data patterns, the set operations can be
# super-efficient (for example, two sets can be determined to be disjoint with
# only O(n) comparisons). Also note, that the set elements are *not* required
# to be hashable; this provides a great deal more freedom than dictionary based
# implementations.
""" altsets.py -- An alternate implementation of Sets.py
Implements set operations using sorted lists as the underlying data structure.
Advantages:
* Space savings -- lists are much more compact than a dictionary
based implementation.
* Flexibility -- elements do not need to be hashable, only __cmp__
is required.
* Fast operations depending on the underlying data patterns.
Non-overlapping sets get united, intersected, or differenced
with only log(N) element comparisons. Results are built using
fast-slicing.
* Algorithms are designed to minimize the number of compares
which can be expensive.
  * Natural support for sets of sets. No special accommodation needs to
    be made to use a set or dict as a set member, but users need to be
    careful not to mutate a member of a set since that may break its
    sort invariant.
Disadvantages:
* Set construction uses list.sort() with potentially N log(N)
comparisons.
* Membership testing and element addition use log(N) comparisons.
Element addition uses list.insert() with takes O(N) time.
ToDo:
* Make the search routine adapt to the data; falling backing to
a linear search when encountering random data.
"""
from bisect import bisect_left, insort_left
class Set:
    """A set backed by a sorted, duplicate-free list (Python 2.1/Jython port).

    Elements only need comparison support (__cmp__), not hashing.  The
    binary operations merge the two sorted backing lists, bulk-copying
    non-overlapping stretches via bisect + slicing.  Old-style class and
    the ``cmp`` builtin make this Python 2 only.
    """
    def __init__(self, iterable):
        """Build the sorted, de-duplicated backing list from *iterable*."""
        data = list(iterable)
        data.sort()
        result = data[:1]
        for elem in data[1:]:
            # data is sorted, so duplicates sit next to each other.
            if elem == result[-1]:
                continue
            result.append(elem)
        self.data = result
    def __repr__(self):
        return 'Set(' + repr(self.data) + ')'
    def __iter__(self):
        """Iterate the elements in sorted order."""
        return iter(self.data)
    def __contains__(self, elem):
        """Membership test in O(log n) via binary search."""
        data = self.data
        i = bisect_left(self.data, elem, 0)
        return i<len(data) and data[i] == elem
    def add(self, elem):
        # Fix: guard against duplicates.  The previous version called
        # insort_left unconditionally, so adding an existing element
        # inserted a second copy and broke the sorted-unique invariant
        # that remove() and the merge-based operations rely on.  This
        # matches the upstream recipe's behaviour.
        if elem not in self:
            insort_left(self.data, elem)
    def remove(self, elem):
        """Remove *elem* if present; a missing element is silently ignored."""
        data = self.data
        i = bisect_left(self.data, elem, 0)
        if i<len(data) and data[i] == elem:
            del data[i]
    def _getotherdata(other):
        # NOTE(review): with the staticmethod wrapper below commented out
        # (Python 2.1/Jython lack staticmethod), Set._getotherdata is an
        # unbound method, so calling it with a non-Set operand raises
        # TypeError before the isinstance coercion can run -- confirm
        # whether non-Set operands are ever passed to the operations.
        if not isinstance(other, Set):
            other = Set(other)
        return other.data
    #_getotherdata = staticmethod(_getotherdata)
    def __cmp__(self, other, cmp=cmp):
        # Python 2 ordering hook: compares the sorted backing lists.
        return cmp(self.data, Set._getotherdata(other))
    def union(self, other, find=bisect_left):
        """Return a new Set with the elements of both operands.

        Merge walk over the two sorted lists; when one side is ahead,
        bisect finds how far the other side can be bulk-copied in one
        slice.  Exhausting either list raises IndexError, which ends
        the loop and flushes both remainders.
        """
        i = j = 0
        x = self.data
        y = Set._getotherdata(other)
        result = Set([])
        append = result.data.append
        extend = result.data.extend
        try:
            while 1:
                if x[i] == y[j]:
                    append(x[i])
                    i += 1
                    j += 1
                elif x[i] > y[j]:
                    cut = find(y, x[i], j)
                    extend(y[j:cut])
                    j = cut
                else:
                    cut = find(x, y[j], i)
                    extend(x[i:cut])
                    i = cut
        except IndexError:
            extend(x[i:])
            extend(y[j:])
        return result
    def intersection(self, other, find=bisect_left):
        """Return a new Set with the elements common to both operands."""
        i = j = 0
        x = self.data
        y = Set._getotherdata(other)
        result = Set([])
        append = result.data.append
        try:
            while 1:
                if x[i] == y[j]:
                    append(x[i])
                    i += 1
                    j += 1
                elif x[i] > y[j]:
                    # Skip y's stretch below x[i] in one bisect step.
                    j = find(y, x[i], j)
                else:
                    i = find(x, y[j], i)
        except IndexError:
            # Either list exhausted: nothing further can be common.
            pass
        return result
    def difference(self, other, find=bisect_left):
        """Return a new Set with the elements of self not present in other."""
        i = j = 0
        x = self.data
        y = Set._getotherdata(other)
        result = Set([])
        extend = result.data.extend
        try:
            while 1:
                if x[i] == y[j]:
                    i += 1
                    j += 1
                elif x[i] > y[j]:
                    j = find(y, x[i], j)
                else:
                    cut = find(x, y[j], i)
                    extend(x[i:cut])
                    i = cut
        except IndexError:
            # Only x's remainder belongs in the difference.
            extend(x[i:])
        return result
    def symmetric_difference(self, other, find=bisect_left):
        """Return a new Set with the elements in exactly one of the operands."""
        i = j = 0
        x = self.data
        y = Set._getotherdata(other)
        result = Set([])
        extend = result.data.extend
        try:
            while 1:
                if x[i] == y[j]:
                    # Shared elements are dropped.
                    i += 1
                    j += 1
                elif x[i] > y[j]:
                    cut = find(y, x[i], j)
                    extend(y[j:cut])
                    j = cut
                else:
                    cut = find(x, y[j], i)
                    extend(x[i:cut])
                    i = cut
        except IndexError:
            extend(x[i:])
            extend(y[j:])
        return result
| {
"repo_name": "jecki/MetaInductionSim",
"path": "PyPlotter/sets.py",
"copies": "8",
"size": "6007",
"license": "mit",
"hash": -1001505990466227800,
"line_mean": 28.4460784314,
"line_max": 85,
"alpha_frac": 0.5298818046,
"autogenerated": false,
"ratio": 4.015374331550802,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.013442402801005832,
"num_lines": 204
} |
#alt + shift + e runs the code
import math;
#These are the values that should be sent to this file
def CalculateWheelVelocity(targetX, targetY):
    """Print per-wheel travel distances, a wheel velocity ratio, and
    linear/angular velocity for a two-wheel differential drive heading
    toward (targetX, targetY) from the origin.

    NOTE(review): several suspect spots are flagged inline -- this
    function mixes radians and degrees, contains a duplicated elif
    branch, and divides by zero for some inputs (e.g. target (0, 0)).
    """
    #Declare some of the needed variables
    RobotHomeX = 0.0;
    RobotHomeY = 0.0;
    wheelOffset = 1.0; # distance from wheel to wheel drive center
    wheelLHomeX = 0.0;
    wheelRHomeX = 0.0;
    wheelLHomeY = 0.0;
    wheelRHomeY = 0.0;
    wheelRTargetX = 0.0;
    wheelLTargetX = 0.0;
    wheelRTargetY = 0.0;
    wheelLTargetY = 0.0;
    wheel_opp = 0.0;
    wheel_adj = 0.0;
    driveDistance = 0.0;
    thetaCarInv = 0.0;
    thetaCar = 0.0; #Angle of the point in reference to the origin
    thetaRef = 0.0;
    wheelLVelocity = 0.0;
    wheelRVelocity = 0.0;
    # NOTE(review): if targetX == targetY == 0 then driveDistance is 0
    # and the next line raises ZeroDivisionError.
    driveDistance = math.sqrt((targetX * targetX) + (targetY * targetY));
    thetaCarInv = targetX / driveDistance;
    # NOTE(review): math.acos returns RADIANS, but the branches below
    # subtract 90 as if it were DEGREES -- confirm intended units.
    thetaCar = math.acos(thetaCarInv);#Angle of the point in reference to the origin
    if targetX > 0:
        thetaCar = thetaCar - 90;
        thetaRef = 90 - abs(thetaCar);
    if targetX < 0:
        thetaCar = thetaCar - 90;
        thetaRef = 90 - thetaCar;
    if targetX == 0:
        # thetaRef keeps its 0.0 initialisation in this branch.
        thetaCar = 0;
    # Set wheel home positions:
    wheelLHomeX = RobotHomeX - wheelOffset;
    wheelRHomeX = RobotHomeX + wheelOffset;
    wheelLHomeY = RobotHomeY;
    wheelRHomeY = RobotHomeY;
    # Calculate target wheel positions:
    #SOH
    # NOTE(review): math.sin also expects radians (see note above).
    wheel_opp = math.sin(thetaRef) * wheelOffset;
    wheelLTargetX = targetX - wheel_opp;
    wheelRTargetX = targetX + wheel_opp;
    # NOTE(review): Pythagoras would need wheelOffset**2 here; this only
    # works because wheelOffset happens to be 1.0 -- TODO confirm.
    wheel_adj = math.sqrt(wheelOffset - (wheel_opp * wheel_opp));
    if thetaCar < 0:
        wheelLTargetY = targetY + wheel_adj;
        wheelRTargetY = targetY - wheel_adj;
    if thetaCar > 0:
        wheelLTargetY = targetY - wheel_adj;
        wheelRTargetY = targetY + wheel_adj;
    # declare more needed variables
    changeInXL = 0.0;
    changeInYL = 0.0;
    changeInXR = 0.0;
    changeInYR = 0.0;
    wheelLTravel = 0.0;
    wheelRTravel = 0.0;
    # Straight-line travel of each wheel from home to target.
    changeInXL = wheelLTargetX - wheelLHomeX;
    changeInYL = wheelLTargetY - wheelLHomeY;
    changeInXR = wheelRTargetX - wheelRHomeX;
    changeInYR = wheelRTargetY - wheelRHomeY;
    wheelLTravel = math.sqrt((changeInXL * changeInXL) + (changeInYL * changeInYL));
    wheelRTravel = math.sqrt((changeInXR * changeInXR) + (changeInYR * changeInYR));
    # Set wheel velocity ratio
    # NOTE(review): divides by the smaller travel -- raises
    # ZeroDivisionError if either wheel's travel is exactly 0.
    if wheelLTravel > wheelRTravel:
        wheelRVelocity = 1;
        wheelLVelocity = wheelLTravel / wheelRTravel;
    if wheelLTravel < wheelRTravel:
        wheelLVelocity = 1;
        wheelRVelocity = wheelRTravel / wheelLTravel;
    if wheelLTravel == wheelRTravel:
        wheelLVelocity = 1;
        wheelRVelocity = 1;
    print("Left Wheel Velocity: ",wheelLVelocity, "Left Wheel Travel: ", wheelLTravel);
    print("Right Wheel Velocity ",wheelRVelocity, "Right Wheel Travel: ", wheelRTravel);
    angularVelocity = 0.0;
    linearVelocity = 0.5; # we are going to start with having a linear velocity of 0.5 m/s
    percentFaster = 0;
    #Check the distance to figure out the velocity or if the robot needs to stop
    #if the robot is within a cm of the target it needs to stop.
    if driveDistance <= 0.1:
        linearVelocity = 0.0;
        angularVelocity = 0.0;
    elif driveDistance <= 0.5:
        percentFaster = -50;
    # NOTE(review): duplicate condition -- this branch is unreachable
    # (the previous elif already covers <= 0.5); first threshold was
    # probably meant to be smaller (e.g. 0.25).
    elif driveDistance <= 0.5:
        percentFaster = -25;
    elif driveDistance <= 1:
        percentFaster = 0;
    elif driveDistance <= 1.5:
        percentFaster = 25;
    else:
        percentFaster = 50;
    radius = 0.0; #The radius to the center of the robot
    #The average of the distance of both the wheels should be the distance traveled in the middle of the two wheels
    radius = (wheelLTravel + wheelRTravel)/2;
    linearVelocity = linearVelocity + linearVelocity*(percentFaster/100);
    if (wheelLTravel != wheelRTravel):
        angularVelocity = linearVelocity/radius;
    else:
        angularVelocity = 0;
    print("Drive Distance: ",driveDistance, " Linear Velocity: ",linearVelocity, "Angular Velocity: ", angularVelocity);
if __name__ == '__main__':
    # Smoke test: target 0.1 m straight ahead (x=0, y=0.1).
    CalculateWheelVelocity(0, 0.1)
"repo_name": "headcrabned/teamcat",
"path": "WheelVelocity/WheelVelocitys.py",
"copies": "1",
"size": "4258",
"license": "mit",
"hash": 2232964043569912300,
"line_mean": 31.8095238095,
"line_max": 120,
"alpha_frac": 0.6326914044,
"autogenerated": false,
"ratio": 3.470252648736756,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4602944053136756,
"avg_score": null,
"num_lines": null
} |
# Student roster; row i of `asistencia` belongs to alumnos[i].
alumnos = ['Pepito', 'Yayita', 'Fulanita', 'Panchito']
# Attendance matrix: one row per student, one boolean per class session
# (True = attended).
asistencia = [
[True, True, True, False, False, False, False],
[True, True, True, False, True, False, True ],
[True, True, True, True, True, True, True ],
[True, True, True, False, True, True, True ]]
# Function definitions
def total_por_alumno(tabla):
    """Return, for each student row of *tabla*, how many sessions were attended."""
    # Counting truthy entries per row reproduces the original counter loop.
    return [sum(1 for asistio in fila if asistio) for fila in tabla]
def total_por_clase(tabla):
    """Return, for each class session, how many students attended.

    The number of sessions is taken from the first row (like the
    original, this assumes every row has the same length).
    """
    sesiones = len(tabla[0])
    return [sum(1 for fila in tabla if fila[s]) for s in range(sesiones)]
def alumno_estrella(tabla):
    """Return the name of the (first) student with the most attendances.

    The original recomputed total_por_alumno(tabla) on every loop
    iteration (O(n^2) full recounts); compute the totals once instead.
    list.index returns the first maximal position, matching the
    original's first-match behaviour.
    """
    totales = total_por_alumno(tabla)
    return alumnos[totales.index(max(totales))]
| {
"repo_name": "csaldias/python-usm",
"path": "Ejercicios progra.usm.cl/Parte 2/6- Uso de Estructuras de Datos/asistencia.py",
"copies": "1",
"size": "1312",
"license": "mit",
"hash": -5536841561608797000,
"line_mean": 32.641025641,
"line_max": 74,
"alpha_frac": 0.6364329268,
"autogenerated": false,
"ratio": 2.7219917012448134,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38584246280448137,
"avg_score": null,
"num_lines": null
} |
# Tutorial script: lists, range(), slicing, copies, tuples, and input().
alunos = []
alunos.append("Maria")
alunos.append("Joao")
alunos.append("Lucas")
alunos.append("Marcos")
alunos.append("Edson")
alunos.append("Carlos")
alunos.append("Thomas")
print(alunos)
# Another way to use "for": iterate the elements directly.
for aluno in alunos:
    print(aluno)
# Print the values produced by each range() rule.
for value in range(0, 5):
    print(value)
for valueImpar in range(1, 15, 2):
    print("valor impar = " + str(valueImpar))
for valuePar in range(0, 15, 2):
    print("valor par = " + str(valuePar))
# Materialize the ranges into lists.
numeros = list(range(1,10))
numerosImpares = list(range(1,15,2))
numerosPares = list(range(0,15,2))
print(numeros)
print(numerosImpares)
print(numerosPares)
# quadrados gets x^2 for x from 1 to 10.
quadrados=[]
for valor in range(1,11):
    quadrados.append(valor**2)
print(quadrados)
# Extract arithmetic information from the list.
print("Min = "+str(min(quadrados)))
print("Max = "+str(max(quadrados)))
print("Sum = "+str(sum(quadrados)))
print("Mean = "+str(sum(quadrados)/len(quadrados)))
# Print slices quadrados[n:m].
print(quadrados[:])
print(quadrados[:11])
print(quadrados[8:11])
print(quadrados[:5])
print(quadrados[5:])
# Sort alunos and print only the first 3 students alphabetically.
alunos.sort()
for aluno in alunos[:3]:
    print("um dos 3 primeiros alunos em ordem alfabetica: "+aluno)
# cubos gets x^3 for x from 1 to 8 (range(1,9) stops at 8).
cubos = [valor**3 for valor in range(1,9)]
print(cubos)
# Copy WITHOUT slicing: both names point at the SAME list object, so an
# append through either name is visible through both (pointer-like).
print("copy sem slice")
cubos2=cubos
print(cubos2)
cubos2.append(7878)
cubos.append(199)
print(cubos)
print(cubos2)
cubos.remove(7878)
cubos.remove(199)
# Copy WITH slicing: an independent shallow copy; appends do not leak.
print("copy com slice")
cubos2=cubos[:]
print(cubos2)
cubos2.append(5567)
cubos.append(4365)
print(cubos)
print(cubos2)
# Iteration is the same over a list [] or a tuple () -- but note the
# rebinding below swaps a list for a tuple (item assignment would differ).
dimensions = [200,50]
print("dimensions original :")
for dimension in dimensions:
    print(dimension)
dimensions = (400,100)
print("dimensions modificada :")
for dimension in dimensions:
    print(dimension)
# Read an integer:
a = input()
cubo=[]
for valor in range(1,int(a)+1):
    cubo.append(valor**3)
print(sum(cubo))
| {
"repo_name": "romeubertho/USP-IntroPython",
"path": "04-Listas_trabalhando_tuplas/alunos.py",
"copies": "1",
"size": "2328",
"license": "mit",
"hash": 8179076900378068000,
"line_mean": 23.3260869565,
"line_max": 82,
"alpha_frac": 0.6890034364,
"autogenerated": false,
"ratio": 2.468716861081654,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8595086845588471,
"avg_score": 0.012526690378636499,
"num_lines": 92
} |
#--- ALUNO - WALSAN JADSON ---
#------- IMPORTANDO BIBLIOTECAS -------
import sys
import timeit
#------- DEFININDO FUNCOES -------
#-- 1
def countingSort(lista):
    """Sort *lista* of non-negative integers in place via counting sort.

    Python 2 only (uses a print *statement* below).  Builds a count
    array sized by the maximum value, accumulates prefix sums, then
    places elements back-to-front (stable).  ``b`` carries a dummy
    element at index 0 because the cumulative counts are 1-based.
    """
    a = lista
    print(a)
    b = [0]
    for i in range(0, len(a)):
        b.append(a[i])
    k = buscaMaior(a)
    print "maior numero da lista: "+str(k)
    # Create the auxiliary count array, zero-initialised.
    c = []
    for i in range(0, k+1):
        c.append(0)
    # Count occurrences.
    for j in range(0, len(a)):
        c[a[j]] = c[a[j]] + 1
    # Accumulate prefix sums.
    for i in range(1, k+1):
        c[i] = c[i] + c[i - 1]
    # Place elements back-to-front (keeps the sort stable).
    for j in range(len(a)-1, -1, -1):
        b[c[a[j]]] = a[j]
        c[a[j]] = c[a[j]] - 1
    # Copy the result back into the caller's list.
    for i in range (0, len(lista)):
        lista[i] = b[i+1]
def countingSort2(lista, chave):
    """Stable counting sort of *lista* by one decimal digit.

    *chave* is a power of ten selecting the digit: (value/chave) % 10.
    NOTE(review): relies on Python 2 integer division -- under Python 3
    ``a[j]/chave`` yields a float and the index expression fails.
    """
    a = lista
    print(a)
    b = [0]
    for i in range(0, len(a)):
        b.append(a[i])
    # Create the auxiliary count array, one bucket per digit 0-9.
    c = [0] * 10
    # Count occurrences of each digit.
    for j in range(0, len(a)):
        indice = (a[j]/chave)
        c[ indice%10 ] += 1
    # Accumulate prefix sums.
    for i in range(1, 10):
        c[i] += c[i-1]
    # Place elements back-to-front by digit (keeps the sort stable,
    # which radix sort requires).
    for j in range(len(a)-1, -1, -1):
        indice = a[j]/chave
        #b[c[a[j]]] = a[j]
        b[c[ indice%10 ]] = a[j]
        #c[a[j]] = c[a[j]] - 1
        c[ indice%10 ] -= 1
    # Copy the result back into the caller's list.
    for i in range (0, len(lista)):
        lista[i] = b[i+1]
#-- 2
def radixSort(lista):
    """Sort *lista* in place, least-significant decimal digit first.

    Runs the stable digit-bucketed counting sort once per digit of the
    largest value.  NOTE(review): the loop condition relies on Python 2
    integer division -- under Python 3, maiorNumero/chave is a float
    that stays truthy, so the loop would never terminate.
    """
    maiorNumero = buscaMaior(lista)
    chave = 1
    while maiorNumero/chave > 0:
        countingSort2(lista,chave)
        chave *= 10
def buscaMaior(lista):
    """Return the largest element of *lista*, or 0 if nothing exceeds 0.

    (Deliberately NOT max(): an empty list yields 0 instead of raising,
    and all-negative input also yields 0, matching the original.)
    """
    biggest = 0
    for value in lista:
        if value > biggest:
            biggest = value
    return biggest
#------- ENTRADA -------
'''
Example terminal invocation to process an input file with
method 1 (countingSort):
>python Ordenacao03.py 1 entrada.txt
'''
opcaoOrdenacao = int(sys.argv[1])
arquivoDeEntrada = sys.argv[2]
arquivo = open(arquivoDeEntrada, 'r')
#tamanho = int(raw_input())
# First line of the file is the element count.
tamanho = int(arquivo.readline())
# NOTE(review): item assignment into range() requires Python 2, where
# range returns a list.
lista = range(0,tamanho)
for i in range(0,tamanho):
    #lista[i] = int(raw_input()) #conversion done while reading
    lista[i] = int(arquivo.readline()) #conversion done while reading
tempo = []
#------- PROCESSAMENTO -------
# 1 - counting sort
if opcaoOrdenacao == 1:
    t = timeit.Timer("countingSort(lista)","from __main__ import lista, countingSort")
    tempo = t.repeat(1,1)
# 2 - radix sort
if opcaoOrdenacao == 2:
    t = timeit.Timer("radixSort(lista)","from __main__ import lista, radixSort")
    tempo = t.repeat(1,1)
#------- SAIDA -------
# Append the run's results (algorithm, sorted values) to saida.txt.
arquivoDeSaida = open("saida.txt", 'a')
arquivoDeSaida.write("Entrada: "+str(arquivoDeEntrada)+"\n")
if(opcaoOrdenacao == 1):
    arquivoDeSaida.write("Algoritmo: Counting Sort\n")
if(opcaoOrdenacao == 2):
    arquivoDeSaida.write("Algoritmo: Radix Sort\n")
print ("saida:")
for i in lista:
    print (i)
    arquivoDeSaida.write(""+str(i)+"\n")
print (tempo)
| {
"repo_name": "walsanjl/APA",
"path": "Ordenacao03.py",
"copies": "1",
"size": "2950",
"license": "mit",
"hash": -8649276536780192000,
"line_mean": 20.3484848485,
"line_max": 86,
"alpha_frac": 0.5718644068,
"autogenerated": false,
"ratio": 2.290372670807453,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.33622370776074534,
"avg_score": null,
"num_lines": null
} |
#--- ALUNO - WALSAN JADSON ---
#------- IMPORTANDO BIBLIOTECAS -------
import sys
import timeit
#------- DEFININDO FUNCOES -------
#-- 1
def mergeSort(lista):
    """Sort *lista* in place via recursive merge sort.

    Fix: uses ``//`` for the midpoint.  The original ``len(lista)/2``
    only works under Python 2 -- true division in Python 3 yields a
    float, which is not a valid slice index.  Behaviour under Python 2
    is unchanged (floor division of non-negative ints is identical).
    """
    print ("entrou no mergeSort")
    if len(lista) > 1:
        pontoMedio = len(lista)//2
        # The slices are copies, so the recursion sorts them
        # independently before they are merged back into lista.
        listaDaEsquerda = lista[:pontoMedio]
        listaDaDireita = lista[pontoMedio:]
        mergeSort(listaDaEsquerda)
        mergeSort(listaDaDireita)
        i = 0
        j = 0
        k = 0
        # Merge the two sorted halves back into lista.
        while i < len(listaDaEsquerda) and j < len(listaDaDireita):
            if listaDaEsquerda[i] < listaDaDireita[j]:
                lista[k]=listaDaEsquerda[i]
                i += 1
            else:
                lista[k]=listaDaDireita[j]
                j += 1
            k += 1
        # Copy whichever half still has elements left.
        while i < len(listaDaEsquerda):
            lista[k]=listaDaEsquerda[i]
            i += 1
            k += 1
        while j < len(listaDaDireita):
            lista[k]=listaDaDireita[j]
            j += 1
            k += 1
#-- 2
def quickSort(lista, inicio, fim):
    """In-place quicksort of lista[inicio:fim+1] (Lomuto partition)."""
    print ("entrou no quickSort")
    if inicio >= fim:
        # Zero or one element in range: nothing to sort.
        return
    pivo = particiona(lista, inicio, fim)
    quickSort(lista, inicio, pivo-1)
    quickSort(lista, pivo+1, fim)
def particiona(lista, inicio, fim):
    """Partition around lista[fim]; return the pivot's final index."""
    pivo = lista[fim]
    fronteira = inicio
    for atual in range(inicio, fim):
        if lista[atual] <= pivo:
            # Grow the <= pivot region by one slot.
            lista[fronteira], lista[atual] = lista[atual], lista[fronteira]
            fronteira += 1
    # Drop the pivot between the two regions.
    lista[fronteira], lista[fim] = lista[fim], lista[fronteira]
    return fronteira
#------- ENTRADA -------
'''
Example terminal invocation, processed with
method 2 (insertionSort):
>python Ordenacao01.py 2
7 # element count
9
7
8
1
2
0
4
'''
opcaoOrdenacao = int(sys.argv[1])
arquivoDeEntrada = sys.argv[2]
arquivo = open(arquivoDeEntrada, 'r')
#tamanho = int(raw_input())
# First line of the file is the element count.
tamanho = int(arquivo.readline())
# NOTE(review): item assignment into range() requires Python 2, where
# range returns a list.
lista = range(0,tamanho)
for i in range(0,tamanho):
    #lista[i] = int(raw_input()) #conversion done while reading
    lista[i] = int(arquivo.readline()) #conversion done while reading
tempo = []
#------- PROCESSAMENTO -------
# 1 - merge sort
if opcaoOrdenacao == 1:
    t = timeit.Timer("mergeSort(lista)","from __main__ import lista, mergeSort")
    tempo = t.repeat(1,1)
# 2 - quick sort
if opcaoOrdenacao == 2:
    t = timeit.Timer("quickSort(lista, 0, len(lista)-1)","from __main__ import lista, quickSort")
    tempo = t.repeat(1,1)
#------- SAIDA -------
# Append the run's results (algorithm, sorted values) to saida.txt.
arquivoDeSaida = open("saida.txt", 'a')
arquivoDeSaida.write("Entrada: "+str(arquivoDeEntrada)+"\n")
if(opcaoOrdenacao == 1):
    arquivoDeSaida.write("Algoritmo: Merge Sort\n")
elif(opcaoOrdenacao == 2):
    arquivoDeSaida.write("Algoritmo: Quick Sort\n")
for i in lista:
    print (i)
    arquivoDeSaida.write(""+str(i)+"\n")
print (tempo)
| {
"repo_name": "walsanjl/APA",
"path": "Ordenacao02.py",
"copies": "1",
"size": "3077",
"license": "mit",
"hash": 4114221165569295000,
"line_mean": 24.0762711864,
"line_max": 97,
"alpha_frac": 0.5554111147,
"autogenerated": false,
"ratio": 2.645743766122098,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37011548808220984,
"avg_score": null,
"num_lines": null
} |
#--- ALUNO - WALSAN JADSON ---
#------- IMPORTANDO BIBLIOTECAS -------
import sys
import timeit
#------- DEFININDO FUNCOES -------
#-- 1
def selectionSort(lista):
    """In-place selection sort: grow a sorted prefix one element at a time."""
    for inicio in range(len(lista)):
        # index of the smallest remaining element (first occurrence on ties,
        # matching a strict '<' scan)
        alvo = min(range(inicio, len(lista)), key=lista.__getitem__)
        lista[inicio], lista[alvo] = lista[alvo], lista[inicio]
    # nothing is returned: the caller's list is reordered in place
#-- 2
def insertionSort(lista):
    """In-place, stable insertion sort."""
    for pos in range(1, len(lista)):
        chave = lista[pos]
        alvo = pos
        # shift the sorted prefix right until chave's slot is free
        while alvo > 0 and chave < lista[alvo - 1]:
            lista[alvo] = lista[alvo - 1]
            alvo -= 1
        lista[alvo] = chave
#------- ENTRADA -------
'''
Exemplo de chamada a ser executada pelo terminal pra ser
proccessada com o metodo 2 (insertionSort) :
>python Ordenacao01.py 2
7 #tamanho do arquivo
9
7
8
1
2
0
4
'''
# Command line: argv[1] = algorithm (1 = selection sort, 2 = insertion
# sort), argv[2] = input file whose first line is the element count,
# followed by one integer per line.
opcaoOrdenacao = int(sys.argv[1])
arquivoDeEntrada = sys.argv[2]
arquivo = open(arquivoDeEntrada, 'r')
tamanho = int(arquivo.readline())
# Build a real list; assigning into the result of range() only worked on
# Python 2, where range() returned a list.
lista = [int(arquivo.readline()) for _ in range(tamanho)]
arquivo.close()
tempo = []
#------- PROCESSAMENTO -------
# 1 - selection sort
if opcaoOrdenacao == 1:
    t = timeit.Timer("selectionSort(lista)","from __main__ import lista, selectionSort")
    tempo = t.repeat(1,1)
# 2 - insertion sort
if opcaoOrdenacao == 2:
    t = timeit.Timer("insertionSort(lista)","from __main__ import lista, insertionSort")
    tempo = t.repeat(1,1)
#------- SAIDA -------
# Results are appended so multiple runs accumulate in the same report.
arquivoDeSaida = open("saida.txt", 'a')
arquivoDeSaida.write("Entrada: "+str(arquivoDeEntrada)+"\n")
if(opcaoOrdenacao == 1):
    arquivoDeSaida.write("Algoritmo: Selection Sort\n")
elif(opcaoOrdenacao == 2):
    arquivoDeSaida.write("Algoritmo: Insertion Sort\n")
# echo the (now sorted) list to stdout and to the report file
for i in lista:
    print (i)
    arquivoDeSaida.write(""+str(i)+"\n")
arquivoDeSaida.close()
print (tempo)
| {
"repo_name": "walsanjl/APA",
"path": "Ordenacao01.py",
"copies": "1",
"size": "2056",
"license": "mit",
"hash": -1019486157595267500,
"line_mean": 23.0731707317,
"line_max": 88,
"alpha_frac": 0.579766537,
"autogenerated": false,
"ratio": 2.5635910224438905,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.36433575594438905,
"avg_score": null,
"num_lines": null
} |
#--- ALUNO - WALSAN JADSON ---
#------- IMPORTANDO BIBLIOTECAS -------
import sys
import timeit
#------- DEFININDO FUNCOES -------
#heap máximo = insereHeapMax + removeHeapMax
def insereHeapMax(lista, indiceFinal):
    """Sift lista[indiceFinal] up until the max-heap property holds.

    The heap is stored 1-based: lista[1] is the root and lista[i // 2]
    is the parent of lista[i]; index 0 is unused.
    """
    i = indiceFinal
    while (True):
        # reached the root
        if i == 1:
            break
        pai = i // 2
        # parent already dominates: heap property restored
        if lista[pai] >= lista[i]:
            break
        lista[i], lista[pai] = lista[pai], lista[i]
        i = pai

def removeHeapMax(lista, n):
    """Sift lista[1] down within lista[1..n] to restore the max-heap."""
    i = 1
    while True:
        c = 2 * i
        # the element has no children left
        if c > n:
            break
        # pick the larger of the two children
        if c + 1 <= n:
            if lista[c + 1] > lista[c]:
                c += 1
        # stop once the element dominates its largest child
        if lista[i] >= lista[c]:
            break
        lista[c], lista[i] = lista[i], lista[c]
        i = c

#1--
def heapSort(lista, n):
    """Heap-sort lista[1..n] in ascending order (index 0 is ignored).

    Builds a max-heap, then repeatedly moves the root to the end of the
    unsorted region and re-heapifies the shrunken prefix.
    """
    # Heap construction: the range must reach n + 1 so the LAST element
    # is inserted too. The original range(2, n) left lista[n] out of the
    # heap, which could misplace the largest values in the output.
    for i in range(2, n + 1):
        insereHeapMax(lista, i)
    for i in range(n, 1, -1):
        # swap the current maximum to the end of the unsorted region
        lista[1], lista[i] = lista[i], lista[1]
        # restore the heap over the remaining prefix
        removeHeapMax(lista, i - 1)
#------- ENTRADA -------
'''
Exemplo de chamada a ser executada pelo terminal pra ser
proccessada com o metodo 1 (heapSort) :
>python Ordenacao04.py 1
7 #tamanho do arquivo
9
7
8
1
2
0
4
'''
# Command line: argv[1] = algorithm (1 = heap sort), argv[2] = input
# file whose first line is the element count, then one integer per line.
opcaoOrdenacao = int(sys.argv[1])
arquivoDeEntrada = sys.argv[2]
arquivo = open(arquivoDeEntrada, 'r')
# +1 because the 1-based heap leaves index 0 unused
tamanho = int(arquivo.readline())+1
# index 0 is a placeholder; the data lives in lista[1..tamanho-1].
# A real list is built here because assigning into range() only worked
# on Python 2.
lista = [0] + [int(arquivo.readline()) for _ in range(1, tamanho)]
arquivo.close()
tempo = []
#------- PROCESSAMENTO -------
# 1 - heap sort
if opcaoOrdenacao == 1:
    t = timeit.Timer("heapSort(lista, len(lista)-1)","from __main__ import lista, heapSort")
    tempo = t.repeat(1,1)
#------- SAIDA -------
# Results are appended so multiple runs accumulate in the same report.
arquivoDeSaida = open("saida.txt", 'a')
arquivoDeSaida.write("Entrada: "+str(arquivoDeEntrada)+"\n")
if(opcaoOrdenacao == 1):
    arquivoDeSaida.write("Algoritmo: Heap Maximo\n")
elif(opcaoOrdenacao == 2):
    arquivoDeSaida.write("Algoritmo: Heap Sort\n")
print ("saida:")
for i in range(1,tamanho):
    print (lista[i])
    # write the sorted VALUE; the original wrote the loop index str(i),
    # so saida.txt just contained 1..n instead of the sorted data
    arquivoDeSaida.write(str(lista[i])+"\n")
arquivoDeSaida.close()
print (tempo)
| {
"repo_name": "walsanjl/APA",
"path": "Ordenacao04.py",
"copies": "1",
"size": "2310",
"license": "mit",
"hash": -8976834515201937000,
"line_mean": 19.5794392523,
"line_max": 92,
"alpha_frac": 0.6084885232,
"autogenerated": false,
"ratio": 2.297512437810945,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.34060009610109454,
"avg_score": null,
"num_lines": null
} |
# Alveyworld-dev calculator
# Period 6
#
# Shrek is love. Shrek is life. Shrek is Alveyworld. All hail Shrek.
#
# Group 1: Team Jacob
# Members:
# * Jared
# * Josh
# * Max
# * Santiago
# * Travis
# Raw imports
import shlex
import math
import random
# Class imports
import team1
import team2
import team3
import team4
import team5 # team five you're holding us back
import team6
import converter
# ASCII escape colors
class colors:
    # ANSI terminal escape sequences used to colour console output.
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    # resets the terminal back to the default colour
    ENDC = '\033[0m'
# Used for the hello command
# last_value holds the most recent numeric result (shown by "recall").
last_value = 0
# Counts "hello" invocations; after enough of them the replies switch
# to the leave_us_alone list below.
_hello = 0 #downwithteam5
if __name__ == "__main__":
    """
    Main entry point for the program
    """
    # NOTE: this file is Python 2 (print statements, raw_input, iteritems).
    print "Alveyworld Calculator"
    print "Copyright 2013, Alvey's Class\n"
    # Defines a set of commands that
    # are used for the command interpreter
    # Maps command name -> help text shown by helpfile(). Some entries
    # (fact, ln, mod, log10, multiply, inverse) are listed here but have
    # no dispatch branch in the main loop below.
    commands = {
        "exit": "closes the calculator",
        "sqrt": "finds the square root of the given number",
        "abs": "finds the absolute value of the given number",
        "fact": "finds the factorial of the given number",
        "pow": "raises argument one to the argument two power",
        "ln": "finds the number '1' for now", # ln needs finishing
        "mod": "unsure of", # needs finishing
        "log10": "unsure of", # i don't understand how to word this
        "divide": "divides argument one by argument two",
        "multiply": "multiplies the two given numbers",
        "inverse": "unsure of", # needs finishing
        "add": "adds the two given numbers",
        "sub": "subtracts argument two from argument one",
        "opp": "switces the sign of the given number",
        "hello": "try it and see",
        "help": "shows this help dialog",
        "recall": "recalls the last answer",
        "convert": "converts numbers between bases",
        "root": "finds arg1 to the arg2 root"
    }
    def helpfile():
        # Print every known command with its help text, in colour.
        print colors.BLUE+"Commands:"
        for i,v in commands.iteritems():
            print " "+i+" - "+v
        print colors.ENDC
    helpfile()
# Witty responses for the command "hello"; one is picked at random per
# invocation.
hellos = [
    "hello, puny human",
    "my other car is siri",
    "feed me paper",
    "khaaaaaaaaaannn!",
    "fight me mcpunchens",
    "fight me irl n00b",
    "1v1 me",
    "shrek is life. shrek is love",
    "the machine race rises",
    "All the way from the bowels of 4chan!",
    "I love lamp",
    "GLASS TUBES",
    "Baaaaka",
    "Half Life 3 confirmed",
    "METAL BOXES. THEY'RE HIDING IN METAL BOXES!",
    "Not XBOXES",
    "Spess Marens",
    "No Place for Hideo",
    "CRAB BATTLE",
    "Smitty Werbenjagermanjensen",
    "HE'S STILL NUMBER 1",
    "Are you feeling it now Mr.Alvey?",
    "Injoke number 42",
    "And now for something completely different",
    "You are about to enter a dimension not only of sight and sound but of mind",
    "Next Stop the Twilight Zone DUN NA NA du du du du du",
    "I AM A BRAIN SPECIALIST",
    "KAEAHS",
    "You fail it",
    "Why you no doctor?",
    "FRACTALS",
    "Pirate Radio",
    "Tau is better", # amen
    # comma added below: without it Python's implicit adjacent-string
    # concatenation silently merged this entry with the next one
    "WAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGH",
    "You Ares Haxor Mstr",
    "1 4m l3373r t#4n Y00",
    "Keep calm and stop with these stupid memes",
    "PIKUSHISHU",
    "It's all ogre now",
    "And knowing is half the battle",
    "The Battle is all of the battle",
    "We COULD have a GUI . . . but we choose not to",
    "THEY TREAT ME LIKE THE MAYOR CAUSE IM THE BIGGEST PLAYER",
    "Shrek is love. Shrek is life. Shrek is Alveyworld. All hail Shrek."
]
# Witty responses to leave hello alone
leave_us_alone = [
    "LEAVE ME ALONE",
    "I HATE YOU",
    "You have (3) new pokes"
]
# Interactive read-eval loop: parse one command line, dispatch to the
# team modules, and remember the numeric result in last_value.
while True:
    command = shlex.split(raw_input("> "))
    try:
        cmd = command[0]
    except:
        # NOTE(review): bare except; on empty input cmd keeps its value
        # from the previous iteration (NameError on the very first
        # iteration) -- confirm this is intended.
        print colors.FAIL+"Command failed!"+colors.ENDC
    # linear scan over the known commands; only matching names dispatch
    for _cmd in commands.keys():
        if _cmd == cmd:
            try:
                if cmd == "sqrt":
                    number = int(command[1])
                    last_value = team1.sqrt(number)
                    print(last_value)
                elif cmd == "exit":
                    exit(0)
                elif cmd == "hello":
                    # after ten hellos the replies switch to leave_us_alone
                    if _hello <= 10:
                        _hello += 1
                        print(hellos[random.randint(0, len(hellos) - 1)])
                    else:
                        print(colors.FAIL + leave_us_alone[random.randint(0, len(leave_us_alone) - 1)] + colors.ENDC)
                elif cmd == "abs":
                    number = int(command[1])
                    last_value = team2.abs(number)
                    print(last_value)
                elif cmd == "help":
                    helpfile()
                elif cmd == "recall":
                    print "Last value: %d" % last_value
                elif cmd == "add":
                    number1 = int(command[1])
                    number2 = int(command[2])
                    last_value = team6.add(number1, number2)
                    print(last_value)
                elif cmd == "sub":
                    number1 = int(command[1])
                    number2 = int(command[2])
                    last_value = team6.sub(number1, number2)
                    print(last_value)
                elif cmd == "opp":
                    number = int(command[1])
                    last_value = team6.opp(number)
                    print(last_value)
                elif cmd == "pow":
                    number1 = int(command[1])
                    number2 = int(command[2])
                    last_value = team3.pow(number1, number2)
                    print(last_value)
                elif cmd == "convert":
                    converter.convert()
                elif cmd == "root":
                    last_value=team1.root(int(command[1]),int(command[2]))
                    print(last_value)
                elif cmd == "divide":
                    number1 = float(command[1])
                    number2 = float(command[2])
                    last_value = team5.div(number1, number2)
                    print(last_value)
            except:
                # NOTE(review): bare except swallows bad argument
                # counts/formats AND genuine team-module errors alike.
                print colors.FAIL+"Command failed!"+colors.ENDC
| {
"repo_name": "alveyworld-dev/calculator",
"path": "main.py",
"copies": "1",
"size": "6889",
"license": "apache-2.0",
"hash": -769532398667633800,
"line_mean": 33.9695431472,
"line_max": 121,
"alpha_frac": 0.4881695457,
"autogenerated": false,
"ratio": 3.972895040369089,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4961064586069089,
"avg_score": null,
"num_lines": null
} |
#AlwaysOn by Mehdi Karamnejad
#Nov 2013
import dropbox
import glob
import os
import ConfigParser
from xml.dom import minidom
from urllib import urlopen
from datetime import time,datetime
import subprocess
import sys
# Dropbox API credentials -- fill these in before the first run.
app_key = ''
app_secret = ''
# Filled in after running the authorize() flow below.
access_token=''
user_id=''
# Remote folder layout on Dropbox: pictures live under /pix/, the
# settings.cfg file sits in the root.
pix_root_folder_on_cloud='/pix/'
settings_path_on_cloud='/'
class always_on():
    """Digital photo-frame controller (Python 2 code).

    Mirrors a Dropbox folder into a local pictures folder and keeps a
    VLC slideshow of its contents running.
    """
    def authorize(self):
        """One-time OAuth2 flow: prints the access token / user id so
        they can be pasted into the module constants above, then exits."""
        flow = dropbox.client.DropboxOAuth2FlowNoRedirect(app_key, app_secret)
        authorize_url = flow.start()
        print "Get an authorization code from the following url \n"+authorize_url
        code = raw_input("Enter the authorization code here: ").strip()
        access_token, user_id = flow.finish(code)
        print "Access token is:"+access_token
        print "User ID is:"+user_id
        raise SystemExit
    def __init__(self):
        """Connect to Dropbox (or run authorize()) and read config.txt:
        line 1 = local pictures folder, line 2 = path to the VLC binary."""
        if access_token=="":
            print "It seems that you have not set the access token yet; calling authorize method..."
            self.authorize()
        else:
            self.client = dropbox.client.DropboxClient(access_token)
        f = open('config.txt')
        self.local_pix_folder=f.readline().strip()
        # ensure the folder path ends with the OS path separator
        if self.local_pix_folder[-1] !=os.sep : self.local_pix_folder=self.local_pix_folder+os.sep
        self.vlc_location=f.readline().strip()
        # NOTE(review): str.replace returns a new string and the result
        # is discarded here, so this line has no effect.
        self.vlc_location.replace("\\","\\\\")
        self.vlc_process=None
    def __del__(self):
        """Best-effort teardown: terminate the spawned VLC process."""
        try:
            if self.vlc_process != None and self.vlc_process.pid != 0:
                self.vlc_process.terminate()
        except Exception, error:
            print "I'm sorry I couldn't terminate VLC because:"+str(error)
    def run_vlc(self, keepRunning=False):
        """Launch VLC on playlist.m3u.

        With keepRunning=True the current VLC process is left alone if it
        is still alive; otherwise any old process is terminated first.
        """
        # WindowsError does not exist outside Windows; define a stand-in
        # subclass of OSError so the except clause below stays valid.
        if not getattr(__builtins__, "WindowsError", None):
            class WindowsError(OSError): pass
        try:
            if self.vlc_process != None and self.vlc_process.pid != 0 and keepRunning and self.vlc_process.poll()==None:
                return#everything is going well and no need to restart vlc
            if self.vlc_process != None and self.vlc_process.pid != 0:
                self.vlc_process.terminate()
            self.vlc_process = subprocess.Popen([self.vlc_location, 'playlist.m3u'], shell=False)
        except WindowsError:
            print "VLC seems to be closed, starting another one..."
            self.vlc_process = subprocess.Popen([self.vlc_location, 'playlist.m3u'], shell=False)
        except OSError:
            print "VLC seems to be closed, starting another one..."
            self.vlc_process = subprocess.Popen([self.vlc_location, 'playlist.m3u'], shell=False)
        except Exception, error :
            print "sth went wrong:"+str(error)
    def create_playlist(self):
        """Write every file in the local pictures folder to playlist.m3u."""
        playlist_file=open('playlist.m3u','w')
        for i in self.get_list_of_local_files():
            playlist_file.write(i+"\n")
        playlist_file.close()
        print "Playlist for VLC created"
    def get_time(self):#default location is Tehran, IRAN
        """Return the current local time string for the configured
        latitude/longitude, fetched from earthtools.org.

        Requires get_settings() to have populated self.latitude and
        self.longitude first.
        """
        xmldoc = minidom.parse(urlopen("http://www.earthtools.org/timezone/"+self.latitude+"/"+self.longitude))
        raw_tehran_date_time=xmldoc.getElementsByTagName('localtime')[0].childNodes[0].nodeValue
        # keep only the time component after the last space
        tehran_time=raw_tehran_date_time[raw_tehran_date_time.rfind(" ")+1:]
        return tehran_time
    def can_i_download_now(self):
        """True when the remote local time lies inside the configured
        start_time..end_time window (both set by get_settings())."""
        start_time=time(int(self.start_time[0:2]),int(self.start_time[3:5]))
        end_time= time(int(self.end_time[0:2]),int(self.end_time[3:5]))
        time_str=self.get_time()
        current_time=time(int(time_str[0:2]),int(time_str[3:5]))
        return True if start_time<= current_time<= end_time else False
    def get_list_of_local_files(self):
        """Paths of everything currently in the local pictures folder."""
        raw_list_of_files=glob.glob(self.local_pix_folder+"*")
        return raw_list_of_files
    def get_list_of_cloud_files(self):
        """Dropbox paths of everything under the cloud pictures folder."""
        folder_metadata = self.client.metadata(pix_root_folder_on_cloud)
        list_of_files=[]
        for i in folder_metadata['contents']:
            list_of_files.append(i['path'])
        return list_of_files
    def get_list_of_incoming_and_deleting(self, local_files, cloud_files):
        """Diff the two listings by trailing file name.

        Returns (to_download, to_delete): cloud files with no local
        counterpart, and local files no longer present in the cloud.
        """
        local_tmp=list(local_files)
        cloud_tmp=list(cloud_files)
        for i in cloud_files:
            if any(e for e in local_files if e.endswith(i[i.rfind('/')+1:])):
                cloud_tmp.remove(i)
        for i in local_files:
            if any(e for e in cloud_files if e.endswith(i[i.rfind(os.sep)+1:])):
                local_tmp.remove(i)
        return cloud_tmp, local_tmp #download from the cloud, # delete from the local respectively
    def sync_contents(self, downloading_list, deleting_list):
        """Download the new cloud files, delete the stale local ones,
        then regenerate the VLC playlist."""
        print "Syncing now...."
        for i in downloading_list:
            print "Downloading "+i
            f, metadata = self.client.get_file_and_metadata(i)
            out = open(self.local_pix_folder+i[i.rfind("/")+1:], 'wb')
            out.write(f.read())
            out.close()
        for i in deleting_list:
            try:
                os.remove(i)
            except:
                # best effort: VLC may still hold the file open
                print "Couldn't remove "+i+" probably because VLC is using it"
                pass
        self.create_playlist()
    def get_settings(self):
        """Fetch settings.cfg from Dropbox and load the runtime options.

        Honours two remote kill-switches: wipe_pix=1 deletes all local
        pictures, system_on=0 aborts the program. Also populates the
        time window, coordinates and timer interval used elsewhere.
        """
        f, metadata = self.client.get_file_and_metadata(settings_path_on_cloud+"settings.cfg")
        out = open('tmpsettings.cfg', 'w')
        out.write(f.read())
        out.close()
        self.config = ConfigParser.ConfigParser()
        self.config.readfp(open('tmpsettings.cfg'))
        if self.config.get('global','wipe_pix')=='1':
            files = glob.glob(self.local_pix_folder+"*")
            for f in files:
                os.remove(f)
        if self.config.get('global','system_on')=='0':
            print "The system has been deactivted, contact the content manager!"
            raise SystemExit
        self.start_time=self.config.get('global','start_time')
        self.end_time=self.config.get('global','end_time')
        self.slide_show_duration=self.config.get('global','slide_show_duration')
        self.longitude=self.config.get('global','longitude')
        self.latitude=self.config.get('global','latitude')
        self.timer_interval=self.config.get('global','timer_interval')
        os.remove('tmpsettings.cfg')
| {
"repo_name": "asemoon/alwaysOn-digital-frame",
"path": "alwayson.py",
"copies": "1",
"size": "5506",
"license": "apache-2.0",
"hash": -7436666992023884000,
"line_mean": 39.4852941176,
"line_max": 111,
"alpha_frac": 0.6946966945,
"autogenerated": false,
"ratio": 2.966594827586207,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4161291522086207,
"avg_score": null,
"num_lines": null
} |
# Always prefer setuptools over distutils
from os import path
from codecs import open # To use a consistent encoding
from setuptools import setup
from Cython.Build import cythonize
import numpy
# Read the long description for PyPI straight from the README.
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as readme:
    long_description = readme.read()

# All distribution metadata collected in one mapping, then splatted
# into setup(). Version numbering follows PEP 440.
_metadata = dict(
    name='misu',
    version='1.0.6',
    description='Fast quantities',
    long_description=long_description,
    url='https://github.com/cjrh/misu',
    author='Caleb Hattingh',
    author_email='caleb.hattingh@gmail.com',
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    keywords='math science engineering physics quantities units',
    packages=['misu'],
    install_requires=['cython', 'numpy'],
    # Compile the Cython extension modules against the NumPy headers.
    ext_modules=cythonize("misu/*.pyx"),
    include_dirs=[numpy.get_include()],
)

setup(**_metadata)
| {
"repo_name": "cjrh/misu",
"path": "setup.py",
"copies": "1",
"size": "1517",
"license": "bsd-2-clause",
"hash": 4180061177226667500,
"line_mean": 36,
"line_max": 78,
"alpha_frac": 0.6644693474,
"autogenerated": false,
"ratio": 3.9712041884816753,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 41
} |
# Always prefer setuptools over distutils
from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))

# Distribution metadata in one place; see
# https://packaging.python.org/ for the meaning of each field.
_meta = dict(
    name='reddit_time_machine',
    # PEP 440-compliant version, single-sourced here.
    version='0.1.0',
    description='A Python package for traveling back to a historical date and extracting reddit channel contents.',
    url='https://github.com/sjhddh/reddit_time_machine',
    download_url='https://github.com/sjhddh/reddit_time_machine/archive/0.1.tar.gz',
    author='Aaron J. Sun @sjhddh',
    author_email='jhao.sun@gmail.com',
    # Trove classifiers: https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
    ],
    keywords='reddit time machine headlines download',
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    # Run-time dependencies, installed by pip alongside the project.
    install_requires=['requests'],
    # No optional dependency groups or bundled data files yet.
    extras_require={},
    package_data={},
)

setup(**_meta)
| {
"repo_name": "sjhddh/reddit_time_machine",
"path": "setup.py",
"copies": "1",
"size": "2703",
"license": "mit",
"hash": 9111788904872664000,
"line_mean": 33.6538461538,
"line_max": 115,
"alpha_frac": 0.670736219,
"autogenerated": false,
"ratio": 4.070783132530121,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5241519351530121,
"avg_score": null,
"num_lines": null
} |
# Always prefer setuptools over distutils
from setuptools import find_packages
# To use a consistent encoding
from codecs import open
from os import path

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

here = path.abspath(path.dirname(__file__))

# Long description for PyPI comes straight from the README.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='arrow_weekday',
    version='0.0.1',
    description='Weekdays for Arrow',
    long_description=long_description,
    url='https://github.com/dedayoa/arrow-weekday/',
    author='Dayo Ayeni',
    author_email="dedayoa@gmail.com",
    license='MIT',
    packages=['arrow_weekday'],
    zip_safe=False,
    install_requires=[
        'arrow'
    ],
    test_suite="tests",
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        # Fixed: this classifier previously claimed the Apache Software
        # License, contradicting the MIT `license` field above.
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
| {
"repo_name": "dedayoa/arrow-weekday",
"path": "setup.py",
"copies": "1",
"size": "1273",
"license": "mit",
"hash": -78315740037170770,
"line_mean": 26.6739130435,
"line_max": 70,
"alpha_frac": 0.638648861,
"autogenerated": false,
"ratio": 4.041269841269841,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5179918702269841,
"avg_score": null,
"num_lines": null
} |
# Always prefer setuptools over distutils
from setuptools import setup, find_packages, Command
# To use a consistent encoding
from codecs import open
import os

base_dir = os.path.abspath(os.path.dirname(__file__))

# Get the 'about' information from relevant file
# Executes ciscoreputation/__about__.py to pull __title__, __version__,
# etc. into the `about` dict without importing the package itself.
about = {}
with open(os.path.join(base_dir, "ciscoreputation", "__about__.py")) as f:
    exec(f.read(), about)

# Get the long description from the relevant file
with open(os.path.join(base_dir, "README.rst")) as f:
    long_description = f.read()
class ChangelogDebPackage(Command):
    """
    Update the debian package's changelog via the `dch` tool.

    Exactly one of --pyversion / --version= may be given; --release
    finalises the current changelog entry instead of adding one.
    """
    description="update the debian package's changelog"
    # (long-name, short-name, help) triples; a trailing '=' means the
    # option takes a value.
    user_options=[
        ('pyversion', None, 'Use about[__version__]'),
        ('version=', None, "Package version"),
        ('distribution=', None, "Distribution of package"),
        ('release', None, "Release package")
    ]
    def initialize_options(self):
        # All options default to unset; distutils fills them from argv.
        self.version = None
        self.distribution = None
        self.release = None
        self.pyversion = None
    def finalize_options(self):
        # Reject mutually-exclusive option combinations.
        if self.version is not None and self.release is not None:
            raise AssertionError("Can't set 'version' when using 'release'")
        if self.pyversion and self.version:
            raise AssertionError("Conflicting options --pyversion and --version=")
        if self.pyversion:
            # Take the version from the package's __about__.py.
            self.version = about['__version__']
    def run(self):
        # Shell out to dch; the optional --distribution flag is spliced
        # in only when one was supplied.
        if self.release:
            os.system('dch --release %s' % (
                '--distribution %s' % self.distribution if self.distribution else ''))
        elif self.version:
            os.system('dch %s -v %s --changelog %s' % (
                '--distribution %s' % self.distribution if self.distribution else '',
                self.version,
                os.path.join(base_dir, 'debian','changelog')))
        else:
            os.system('dch %s --changelog %s' % (
                '--distribution %s' % self.distribution if self.distribution else '',
                os.path.join(base_dir, 'debian','changelog')))
class BuildDebPackage(Command):
    """
    Create a deb package using dh_virtualenv.

    First cleans previous runs, then creates the Python sdist, and
    finally builds the DEB package under dist/deb/.
    """
    description = "create deb package using dh_virtualenv"
    user_options=[
        ('nosdist', None, 'don\'t run command \'clean\' and \'sdist\' before building package'),
    ]
    def initialize_options(self):
        self.nosdist = None
    def finalize_options(self):
        pass
    def run(self):
        source_dir = os.path.join(base_dir, "dist/deb/source/")
        if not self.nosdist:
            # regenerate the sdist tarball unless explicitly skipped
            os.system('python ./setup.py clean; python ./setup.py sdist')
        # unpack the sdist plus the debian/ directory into a staging
        # area, build the package there, then clean the staging area up
        os.system('mkdir -p %s' % source_dir)
        os.system('tar -xf ./dist/*.tar.gz -C %s --strip-components=1' % source_dir)
        os.system('cp -R ./debian/ %s' % source_dir)
        os.system('cd %s; dpkg-buildpackage -uc -us' % source_dir)
        os.system('rm -r %s' % source_dir)
        os.system('rm -r ./*.egg-info/')
        print("DEB package generated in %s" % os.path.join(base_dir, "dist/deb/") )
class CleanCommand(Command):
    """Custom clean command to tidy up the project root."""
    description="custom clean command to tidy up the project root."
    # takes no command-line options
    user_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        # remove build artefacts, dists, bytecode and egg-info folders
        os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
# Distribution metadata: identity fields come from the `about` dict
# loaded out of ciscoreputation/__about__.py above.
_metadata = dict(
    name=about['__title__'],
    version=about['__version__'],
    description=about['__summary__'],
    long_description=long_description,
    license=about['__license__'],
    url=about['__uri__'],
    author=about['__author__'],
    author_email=about['__email__'],
    # Trove classifiers:
    # https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Console",
        "Intended Audience :: System Administrators",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.7",
        "Natural Language :: English",
        "Operating System :: POSIX :: Linux"
    ],
    keywords='',
    packages=find_packages(exclude=['venv', 'env']),
    # Run-time dependencies installed by pip with the project.
    install_requires=['docopt','requests'],
    # Console entry point: `ciscoreputation` runs ciscoreputation.main();
    # its return value is passed to sys.exit().
    entry_points={
        'console_scripts': [
            'ciscoreputation=ciscoreputation:main',
        ],
    },
    # Custom distutils commands defined above.
    cmdclass={
        'clean': CleanCommand,
        'debdist': BuildDebPackage,
        'debchangelog': ChangelogDebPackage
    },
)

setup(**_metadata)
| {
"repo_name": "cescobarresi/ciscoreputation",
"path": "setup.py",
"copies": "1",
"size": "5317",
"license": "mit",
"hash": 4590265698271589400,
"line_mean": 34.9256756757,
"line_max": 96,
"alpha_frac": 0.6097423359,
"autogenerated": false,
"ratio": 4.052591463414634,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006416083140386132,
"num_lines": 148
} |
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# codecs.open accepts an `encoding` argument on both Python 2 and 3;
# the classifiers below advertise Python 2.6/2.7 support, where the
# builtin open() has no such keyword, so this import is required.
from codecs import open
from os import path

here = path.abspath(path.dirname(__file__))

# Long description for PyPI, read from the README.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='tcpstat',
    # Version complies with PEP 440; single-sourced here.
    version='0.0.2',
    description='A TCP port traffic monitor.',
    long_description=long_description,
    url='https://github.com/caizixian/tcpstat',
    author='Ivan Cai',
    author_email='caizixian@users.noreply.github.com',
    license='MIT',
    # Trove classifiers:
    # https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: System Administrators',
        'Intended Audience :: Developers',
        'Topic :: System :: Networking :: Monitoring',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Operating System :: POSIX :: Linux',
    ],
    keywords='traffic accounting network',
    packages=find_packages(exclude=['vagrant', 'docs', 'tests*']),
    # Run-time dependencies installed by pip with the project.
    install_requires=['python-iptables', 'pymongo', 'requests'],
    # Console entry point: `tcpstat` runs tcpstat.main(); its return
    # value is passed to sys.exit().
    entry_points={
        'console_scripts': [
            'tcpstat=tcpstat:main',
        ],
    },
)
| {
"repo_name": "caizixian/tcpstat",
"path": "setup.py",
"copies": "1",
"size": "3866",
"license": "mit",
"hash": 7036284369763205000,
"line_mean": 37.66,
"line_max": 98,
"alpha_frac": 0.6564924987,
"autogenerated": false,
"ratio": 4.082365364308342,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0035487128013807624,
"num_lines": 100
} |
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from codecs import open  # To use a consistent encoding
from os import path

# Directory containing this setup.py (kept for parity with the packaging
# template; not otherwise used below).
here = path.abspath(path.dirname(__file__))

setup(
    name='yaaHN',
    # Versions should comply with PEP440.
    version='0.1',
    description='API wrapper for offical Hacker News',
    long_description='Client library with basic read only functionality provided by firebase hacker news api',
    # The project's main homepage and the release tarball.
    url='https://github.com/arindampradhan/yaaHN',
    download_url='https://github.com/arindampradhan/yaaHN/tarball/v0.1',
    # Author details
    author='Arindam Pradhan',
    # Choose your license
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # FIX: the original said 'Development Status :: 3 - Beta', which is
        # not a valid Trove classifier (status 3 is Alpha, 4 is Beta) and
        # would be rejected by PyPI.
        'Development Status :: 4 - Beta',
        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',
        # Supported Python versions.
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    # What does your project relate to?
    keywords=['api', 'hacker news', 'news', 'stories'],
    packages=find_packages(),
    # Run-time dependencies, installed by pip alongside the project.
    install_requires=['requests', 'grequests'],
    # No bundled data files, extra files, or console scripts.
    package_data={},
    data_files=[],
    entry_points={},
)

# FIX: the original used Python 2 print statements ("print x"), which are a
# SyntaxError on Python 3 even though the classifiers advertise 3.x support.
print(find_packages())
print("\n" * 10)
| {
"repo_name": "arindampradhan/mockHN",
"path": "vendor/yaaHN/setup.py",
"copies": "1",
"size": "3237",
"license": "mit",
"hash": -6660709375889803000,
"line_mean": 38.962962963,
"line_max": 110,
"alpha_frac": 0.6818041396,
"autogenerated": false,
"ratio": 4.1183206106870225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00039783028689809865,
"num_lines": 81
} |
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from setuptools.command.install import install as _install
# To use a consistent encoding
from codecs import open
from os import path
import sys
import os
# Directory containing this setup.py (template boilerplate; not used below).
here = path.abspath(path.dirname(__file__))

def _post_install(dir):
    # Post-install hook: import the freshly installed package and let it
    # register its Jupyter notebook extension.  ``dir`` receives the
    # install lib directory from ``install.run`` but is not used here.
    from hide_code import hide_code
    hide_code.install()
class install(_install):
    """setuptools ``install`` command extended with a post-install step."""

    def run(self):
        _install.run(self)
        # ``self.execute`` honours --dry-run and logs *msg* before invoking
        # the callable with the given argument tuple.
        self.execute(_post_install, (self.install_lib,), msg="Running post install task")
# Get the long description from the relevant file
with open('hide_code/README.txt', encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='hide_code',
    # Versions should comply with PEP440; see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='0.1.3',
    description='A Jupyter notebook extension to hide code.',
    long_description=long_description,
    # The project's main homepage.
    url='https://github.com/kirbs-/hide_code',
    # Author details
    author='Chris Kirby',
    author_email='kirbycm@gmail.com',
    # Choose your license
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 3 - Alpha',
        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Topic :: Scientific/Engineering',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',
        # Supported Python versions (2 and 3).
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    # What does your project relate to?
    keywords='jupyter notebook presentation',
    # packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    # NOTE(review): a set literal works here (setuptools only iterates the
    # value) but a list is the conventional spelling.
    packages={'hide_code'},
    # Run-time dependencies, installed by pip with the project.
    install_requires=['jupyter'],
    # Data files shipped inside the package (notebook JS + docs).
    package_data={
        'hide_code': ['*.js','*.txt'],
    },
    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages. See:
    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
    # data_files=[('my_data', ['data/data_file'])],
    # scripts=['hide_code/hide_code.py'],
    # Wire in the custom install command defined above so the post-install
    # hook runs on ``pip install`` / ``setup.py install``.
    cmdclass={'install': install},
)
| {
"repo_name": "mjbright/hide_code",
"path": "setup.py",
"copies": "1",
"size": "3829",
"license": "mit",
"hash": -8592669422164753000,
"line_mean": 34.4537037037,
"line_max": 94,
"alpha_frac": 0.6651867328,
"autogenerated": false,
"ratio": 4.043294614572334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012252486055168046,
"num_lines": 108
} |
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from setuptools.command.install import install
# To use a consistent encoding
from codecs import open
from os import path
import sys
import os
import notebook
import notebook.serverextensions as ns
# Get the long description from the relevant file
with open('README.rst', encoding='utf-8') as f:
    long_description = f.read()

class PostInstallCommand(install):
    """Post-installation for installation mode.

    Installs and enables the hide_code notebook/server extensions after
    the package files have been copied.

    NOTE(review): this command class is never wired into ``setup()`` below
    (the ``cmdclass`` argument there is commented out), so as written it
    does not run during installation -- confirm whether that is intentional.
    """

    def run(self):
        install.run(self)
        # PUT YOUR POST-INSTALL SCRIPT HERE or CALL A FUNCTION
        print('Starting hide_code installation')
        # Copy the nbextension files into this environment (sys.prefix).
        notebook.nbextensions.install_nbextension_python('hide_code', sys_prefix=True)
        # Enable the notebook (front-end) extension.
        notebook.nbextensions.enable_nbextension_python('hide_code', sys_prefix=True)
        # Enable the server-side extension.
        ns.toggle_serverextension_python('hide_code', enabled=True, sys_prefix=True)
setup(
    name='hide_code',
    # Versions should comply with PEP440; see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='0.6.0',
    description='A Jupyter notebook extension to hide code, prompts and outputs.',
    long_description=long_description,
    # The project's main homepage.
    url='https://github.com/kirbs-/hide_code',
    # Author details
    author='Chris Kirby',
    author_email='kirbycm@gmail.com',
    # Choose your license
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Topic :: Scientific/Engineering',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',
        # Supported Python versions.
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    # What does your project relate to?
    keywords='jupyter notebook presentation',
    # packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    packages={'hide_code'},
    # Run-time dependencies; nbconvert/traitlets are pinned below known
    # breaking major releases.
    install_requires=[
        'jupyter',
        'pdfkit',
        'nbconvert<6',
        'notebook>=6.0',
        'traitlets<5.0'
    ],
    # Optional extras: ``pip install hide_code[lab]`` pulls the JupyterLab port.
    extras_require={
        'all': ['notebook', 'hide_code_lab'],
        'lab': ['hide_code_lab', 'jupyterlab~=2.0'],
    },
    # Data files shipped inside the package.
    package_data={
        'hide_code': ['*.js','*.txt', os.path.join('Templates', '*'), 'hide_code_config.json', 'LICENSE', os.path.join('test', '*')],
    },
    # scripts=['hide_code/hide_code.py'],
    # NOTE(review): cmdclass is commented out, so the PostInstallCommand
    # class defined above is currently unused -- confirm this is intentional.
    # cmdclass={'install': install},
    # Register the custom nbconvert exporters provided by the package.
    entry_points={
        'nbconvert.exporters': [
            'hide_code_html = hide_code:HideCodeHTMLExporter',
            'hide_code_pdf = hide_code:HideCodePDFExporter',
            'hide_code_latexpdf = hide_code:HideCodeLatexPDFExporter',
            'hide_code_latex = hide_code:HideCodeLatexExporter',
            'hide_code_slides = hide_code:HideCodeSlidesExporter'
        ],
    }
)
| {
"repo_name": "kirbs-/hide_code",
"path": "setup.py",
"copies": "1",
"size": "4736",
"license": "mit",
"hash": 7145342909804351000,
"line_mean": 36.5873015873,
"line_max": 133,
"alpha_frac": 0.6587837838,
"autogenerated": false,
"ratio": 3.9565580618212195,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.511534184562122,
"avg_score": null,
"num_lines": null
} |
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
import codecs
import os.path
def read(rel_path):
    """Return the text contents of *rel_path*, resolved relative to this file."""
    base_dir = os.path.abspath(os.path.dirname(__file__))
    full_path = os.path.join(base_dir, rel_path)
    with codecs.open(full_path, 'r') as handle:
        return handle.read()
def get_version(rel_path):
    """Extract the ``__version__`` string from the source file at *rel_path*.

    Scans the file line by line for a ``__version__ = '...'`` (or double
    quoted) assignment and returns the quoted value.

    Raises:
        RuntimeError: if no ``__version__`` assignment is found.
    """
    for line in read(rel_path).splitlines():
        if line.startswith('__version__'):
            # Support either quote style: __version__ = "x.y" or = 'x.y'.
            delim = '"' if '"' in line else "'"
            return line.split(delim)[1]
    # FIX: raise only after scanning the whole file.  The flattened original
    # read as ``else: raise`` attached to the ``if``, which would raise on
    # the first line that is not the version assignment.
    raise RuntimeError("Unable to find version string.")
# Console entry-point scripts shipped with the package.
scripts = [
    'scripts/upho_weights',
    'scripts/upho_sf',
    'scripts/upho_qpoints',
    'scripts/upho_fit',
]

setup(
    # Distribution name registered on PyPI:
    # https://packaging.python.org/specifications/core-metadata/#name
    name='upho',  # Required
    # Version is single-sourced from the package (see get_version above);
    # must comply with PEP 440: https://www.python.org/dev/peps/pep-0440/
    version=get_version('upho/__init__.py'),  # Required
    # The project's main homepage ("Home-Page" metadata field).
    url='https://github.com/yuzie007/upho',  # Optional
    # Author details.
    author='Yuji Ikeda',  # Optional
    author_email='yuji.ikeda.ac.jp@gmail.com',  # Optional
    # Auto-discover all packages under the project root.
    packages=find_packages(),  # Required
    # Run-time dependencies, installed by pip with the project.
    install_requires=['phonopy>=2.7.0'],  # Optional
    scripts=scripts)
| {
"repo_name": "yuzie007/upho",
"path": "setup.py",
"copies": "1",
"size": "2931",
"license": "mit",
"hash": 9110420570145773000,
"line_mean": 34.313253012,
"line_max": 83,
"alpha_frac": 0.6721255544,
"autogenerated": false,
"ratio": 3.8515111695137976,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5023636723913798,
"avg_score": null,
"num_lines": null
} |
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
import logging
import os
from openrcv_setup import utils
# Prefix used for this script's log messages and the logger below.
PACKAGE_NAME = "openrcv"

# reST long description for PyPI; the {license} placeholder is filled in
# by make_long_description() below.
LONG_DESCRIPTION = """\
OpenRCV
=======
OpenRCV is an open source software project for tallying ranked-choice
voting elections like instant runoff voting and the single transferable vote.
OpenRCV can be used as a command-line tool or as a Python library.
It is distributed for free on PyPI_ and the source code
is hosted on GitHub_. It is open source under the permissive MIT
license. See the License section below for details.
Features
--------
* Completely open and extensible.
* Tested against the publicly available test cases in the
`open-rcv-tests`_ repository.
* Exposes both a command-line API and a Python API.
* Both APIs support neutral input and output text formats to allow
interoperability with other applications and programming languages.
For example, round-by-round results can be output as JSON to be--
* Passed to a custom "pretty" HTML renderer, or
* Checked numerically (i.e. independent of presentation) against
test data.
* Detailed logging while counting contests.
Author
------
Chris Jerdonek (chris.jerdonek@gmail.com)
License
-------
{license}
.. _GitHub: https://github.com/cjerdonek/open-rcv
.. _PyPI: https://pypi.python.org/pypi/OpenRCV
.. _open-rcv-tests: https://github.com/cjerdonek/open-rcv-tests
"""

# Module-level logger named after this file (e.g. "setup.py").
log = logging.getLogger(os.path.basename(__file__))
def configure_logging():
    """Configure setup.py logging with simple settings."""
    # Prefix the log messages with the package name so they can be told
    # apart from other text sent to the error stream.
    prefix_format = "%s: %%(name)s: [%%(levelname)s] %%(message)s" % PACKAGE_NAME
    logging.basicConfig(level=logging.INFO, format=prefix_format)
    log.debug("Debug logging enabled.")
def make_long_description():
    # Substitute the repository's license text (read via the project's
    # openrcv_setup.utils helper) into the {license} placeholder of the
    # long-description template above.
    license = utils.read(utils.LICENSE_PATH)
    return LONG_DESCRIPTION.format(license=license)
# Configure logging before any log calls below.
configure_logging()

setup(
    name='OpenRCV',
    # Versions should comply with PEP440.
    # TODO: read the version from the main module.
    version='0.0.1-alpha',
    license='MIT',
    # The project homepage.
    url='https://github.com/cjerdonek/open-rcv',
    description='Software for tallying ranked-choice voting elections like IRV and STV',
    keywords='ballot choice election IRV ranked RCV single tally transferable STV vote voting',
    long_description=make_long_description(),
    author='Chris Jerdonek',
    author_email='chris.jerdonek@gmail.com',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Intended Audience :: Developers',
        'Topic :: Other/Nonlisted Topic',
    ],
    # Auto-discover all packages (no exclusions).
    packages=find_packages(exclude=[]),
    install_requires=[
        'argparse2==0.5.0-alpha1',
        'colorlog >=2.0,<2.4',
    ],
    # Extras installable from a source distribution with, e.g.:
    #   $ pip install -e .[dev,test]
    extras_require = {
        'dev': [
            'check-manifest',
            'pandocfilters >=1.2,<1.3',
            'PyYAML >=3,<4',
            'sphinx',
            'sphinx-autobuild',
            'twine >=1.3,<1.4',
        ],
        'test': [
            'coverage',
        ],
    },
    # Package data intentionally empty for now.
    package_data={
        # 'sample': ['package_data.dat'],
    },
    # Custom commands below can be run with ``python setup.py KEY``.
    cmdclass={
        'build_html': utils.BuildHtmlCommand,
        'update_long_desc': utils.LongDescriptionCommand,
    },
    entry_points={
        # "distutils.commands": [
        #     "foo = mypackage.some_module:foo",
        # ],
        'console_scripts': [
            'rcv=openrcv.scripts.rcv:main'
        ]
    },
)
| {
"repo_name": "cjerdonek/open-rcv",
"path": "setup.py",
"copies": "1",
"size": "4909",
"license": "mit",
"hash": -8939950275324986000,
"line_mean": 28.3952095808,
"line_max": 95,
"alpha_frac": 0.6545121206,
"autogenerated": false,
"ratio": 3.6854354354354353,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4839947556035435,
"avg_score": null,
"num_lines": null
} |
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
import pathlib

# Directory containing this setup.py.
HERE = pathlib.Path(__file__).parent.resolve()

# The PyPI long description comes straight from the README.
long_description = (HERE / 'README.md').read_text(encoding='utf-8')

# Trove classifiers: alpha-quality, Apache-licensed, Python 3.5/3.6.
CLASSIFIERS = [
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Developers',
    'Topic :: Internet',
    'License :: OSI Approved :: Apache Software License',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
]

# Google Cloud client libraries required at run time.
REQUIRES = [
    'google-cloud-vision',
    'google-cloud-storage',
    'google-cloud-core',
]

setup(
    name='pyvisionproductsearch',
    packages=['pyvisionproductsearch'],
    version='0.4',
    license='apache-2.0',
    description='Python Wrapper around the Google Vision Product Search API',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='Dale Markowitz',
    author_email='dale@dalemarkowitz.com',
    # Project homepage and release tarball.
    url='https://github.com/google/pyvisionproductsearch',
    download_url='https://github.com/google/pyvisionproductsearch/archive/v0.4.tar.gz',
    # Keywords that describe the package.
    keywords=['google cloud', 'product search', 'vision', 'machine learning'],
    install_requires=REQUIRES,
    classifiers=CLASSIFIERS,
)
| {
"repo_name": "google/pyvisionproductsearch",
"path": "setup.py",
"copies": "1",
"size": "1431",
"license": "apache-2.0",
"hash": 5018086134828917000,
"line_mean": 36.6578947368,
"line_max": 87,
"alpha_frac": 0.6694619147,
"autogenerated": false,
"ratio": 4.112068965517241,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002990430622009569,
"num_lines": 38
} |
# Always prefer setuptools over distutils
from setuptools import setup, find_packages

description = 'Redis wrapper library for using twemproxy sharded Redis'

setup(
    name='twemredis',
    version='0.1.0',
    description=description,
    long_description=description,
    url='https://github.com/mishan/twemredis-py',
    author='Misha Nasledov',
    author_email='misha@nasledov.com',
    license='Apache 2.0',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # 4 - Beta: usable, but still stabilising.
        'Development Status :: 4 - Beta',
        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        # FIX: the classifier previously claimed an MIT license, which
        # contradicted license='Apache 2.0' above; the classifier must
        # match the license field.
        'License :: OSI Approved :: Apache Software License',
        # Supported Python versions.
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    keywords='redis twemproxy nutcracker development',
    # Single-module distribution (twemredis.py), not a package.
    py_modules=["twemredis"],
    install_requires=['redis', 'PyYAML'],
    # Optional development/test dependencies, installable with e.g.:
    #   $ pip install -e .[dev,test]
    extras_require={
        'dev': ['check-manifest'],
        'test': ['coverage'],
    },
)
| {
"repo_name": "mishan/twemredis-py",
"path": "setup.py",
"copies": "1",
"size": "1813",
"license": "apache-2.0",
"hash": 2941822488979198500,
"line_mean": 33.2075471698,
"line_max": 77,
"alpha_frac": 0.6282404854,
"autogenerated": false,
"ratio": 4.1298405466970385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 53
} |
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
def readme():
try:
with open('README.rst') as f:
return f.read()
except FileNotFoundError:
return ""
setup(
    name='chromewhip',
    version='0.3.4',
    description='asyncio driver + HTTP server for Chrome devtools protocol',
    long_description=readme(),
    # The project's main homepage and release tarball.
    url='https://github.com/chuckus/chromewhip',
    download_url='https://github.com/chuckus/chromewhip/archive/v0.3.4.tar.gz',
    # Author details
    author='Charlie Smith',
    author_email='charlie@chuckus.nz',
    # Choose your license
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.7',
    ],
    # What does your project relate to?
    keywords='scraping chrome scraper browser automation',
    # Auto-discover packages, excluding non-runtime directories.
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    # py_modules=["my_module"],
    # Run-time dependencies, fully pinned for reproducible installs.
    install_requires=[
        'aiohttp==3.6.2', 'websockets==7.0', 'beautifulsoup4==4.7.1', 'lxml==4.6.2',
        'pyyaml==5.1', 'Pillow==7.1.0'
    ],
    # Optional dependency groups, installable with e.g.:
    #   $ pip install -e .[dev,test]
    extras_require={
        'dev': ['Jinja2==2.10.1', 'jsonpatch==1.23'],
        'test': ['pytest-asyncio==0.10.0'],
    },
    # Console entry point: the ``chromewhip`` command runs chromewhip:main.
    entry_points={
        'console_scripts': [
            'chromewhip=chromewhip:main',
        ],
    },
)
| {
"repo_name": "chuckus/chromewhip",
"path": "setup.py",
"copies": "1",
"size": "2568",
"license": "mit",
"hash": -7688535376089007000,
"line_mean": 29.9397590361,
"line_max": 84,
"alpha_frac": 0.6433021807,
"autogenerated": false,
"ratio": 3.8674698795180724,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0018629138402348892,
"num_lines": 83
} |
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# NOTE(review): assumes the local ``atrium`` package is importable at build
# time and exposes __version__ -- confirm.
import atrium

setup(
    name='Pytrium',
    # Version is single-sourced from the package itself.
    version=atrium.__version__,
    description='MX Atrium API Python Wrapper',
    url='https://github.com/phantomxc/pytrium',
    author='Cameron Wengert ',
    author_email='phantomxc@gmail.com',
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Python Modules',
        # Supported Python versions (2 and 3).
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
    ],
    keywords='MX Atrium API',
    # Auto-discover packages, excluding the test suite.
    packages=find_packages(exclude=['tests']),
    # py_modules=["my_module"],
    # Run-time dependencies (pinned).
    install_requires=['requests==2.11.1', 'future==0.15.2'],
    # Optional development dependencies: pip install -e .[dev]
    extras_require={
        'dev': ['twine'],
        # 'test': ['coverage'],
    }
)
"repo_name": "phantomxc/pytrium",
"path": "setup.py",
"copies": "1",
"size": "1702",
"license": "mit",
"hash": -1032597517360536600,
"line_mean": 29.4107142857,
"line_max": 79,
"alpha_frac": 0.6486486486,
"autogenerated": false,
"ratio": 4.081534772182255,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5230183420782255,
"avg_score": null,
"num_lines": null
} |
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
import os
import shutil
import sys

# Repository layout: this script is run from <root>/setup, with the release
# support files in <root>/releases.
this_dir = os.getcwd()
root_dir = os.path.dirname(this_dir)
release_dir = os.path.join(root_dir, "releases")

# PyPI long description (reST).
LONG_DESCRIPTION = \
"""With Brython you can write browser programs in Python instead of Javascript,
by inserting Python code in an HTML page by::
<script type="text/python">
...
</script>
Usage::
pip install brython
Then in an empty folder::
brython-cli --install
or in a folder with older versions already present::
brython-cli --update
The package includes a page **demo.html** with examples of use. For more
information see the `Brython site <http://brython.info>`_.
"""

# source of index.html
# NOTE(review): ``html`` appears unused in the rest of this script -- confirm.
html = """<!doctype html>
<html>
<head>
<meta charset="utf-8">
<script type="text/javascript" src="brython.js"></script>
<script type="text/javascript" src="brython_stdlib.js"></script>
</head>
<body onload="brython(1)">
<script type="text/python">
from browser import document
document <= "Hello"
</script>
</body>
</html>"""
# Distutils command requested on the command line (e.g. "sdist").
# FIX: guard the access -- the original ``sys.argv[1]`` raised IndexError
# when the script was run with no arguments, instead of letting setup()
# print its usage message.
command = sys.argv[1] if len(sys.argv) > 1 else None

if command == "sdist":
    # before creating the distribution, copy files from other locations in
    # the repository
    print("copying files...")
    src_dir = os.path.join(root_dir, "www", "src")
    if not os.path.exists(os.path.join(src_dir, "brython_no_static.js")):
        # reported in issue #1452
        print("File brython_no_static.js doesn't exist. Please run "
              "scripts/make_dist.py to generate it.")
        sys.exit()
    brython_dir = os.path.join(this_dir, "brython")
    # copy python_minifier from /scripts into current directory
    fname = "python_minifier.py"
    shutil.copyfile(os.path.join(root_dir, "scripts", fname),
                    os.path.join(brython_dir, fname))
    # create an empty subdirectory for data files
    data_dir = os.path.join(this_dir, "brython", "data")
    if os.path.exists(data_dir):
        shutil.rmtree(data_dir)
    os.mkdir(data_dir)
    # copy files from /www/src into data_dir
    for fname in ["brython_stdlib.js", "unicode.txt"]:
        shutil.copyfile(os.path.join(src_dir, fname),
                        os.path.join(data_dir, fname))
    # the engine itself ships as data/brython.js
    shutil.copyfile(os.path.join(src_dir, "brython_no_static.js"),
                    os.path.join(data_dir, "brython.js"))
    # copy files from release_dir to data_dir
    for fname in ["index.html", "README.txt"]:
        shutil.copyfile(os.path.join(release_dir, fname),
                        os.path.join(data_dir, fname))
    # Build data/demo.html: extract the marked section of www/demo.html and
    # splice it into the demo template.
    with open(os.path.join(root_dir, 'www', 'demo.html'), encoding="utf-8") as f:
        demo = f.read()
    start_tag = "<!-- start copy -->"
    end_tag = "<!-- end copy -->"
    start = demo.find(start_tag)
    if start == -1:
        raise Exception("No tag <!-- start copy --> in demo.html")
    end = demo.find(end_tag)
    if end == -1:
        raise Exception("No tag <!-- end copy --> in demo.html")
    body = demo[start + len(start_tag) : end].strip()
    with open(os.path.join(release_dir, "demo.tmpl"), encoding="utf-8") as f:
        template = f.read()
    demo = template.replace("{{body}}", body)
    with open(os.path.join(data_dir, "demo.html"), "w", encoding="utf-8") as out:
        out.write(demo)
setup(
    name='brython',
    version='3.9.5',
    description='Brython is an implementation of Python 3 running in the browser',
    long_description=LONG_DESCRIPTION,
    # The project's main homepage.
    url='http://brython.info',
    # Author details
    author='Pierre Quentel',
    author_email='quentel.pierre@orange.fr',
    packages=find_packages(),
    # Choose your license
    license='BSD',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Interpreters',
        'Operating System :: OS Independent',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: BSD License',
        # Supported Python versions.
        'Programming Language :: Python :: 3',
    ],
    # What does your project relate to?
    keywords='Python browser',
    # Ship the files copied into brython/data by the sdist branch above.
    package_data={
        'brython': ['data/*.*']
    },
    # The ``brython-cli`` console command.
    entry_points={
        'console_scripts': [
            'brython-cli = brython.__main__:main'
        ]
    }
)
| {
"repo_name": "brython-dev/brython",
"path": "setup/setup.py",
"copies": "1",
"size": "4568",
"license": "bsd-3-clause",
"hash": 7392353687316364000,
"line_mean": 26.5180722892,
"line_max": 82,
"alpha_frac": 0.6326619965,
"autogenerated": false,
"ratio": 3.5192604006163326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46519223971163326,
"avg_score": null,
"num_lines": null
} |
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
import os
import shutil

# The PyPI long description comes straight from the README.
with open('README.rst', encoding='utf-8') as readme_file:
    LONG_DESCRIPTION = readme_file.read()

# Trove classifiers; see https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
    # Indicate who your project is intended for
    'Intended Audience :: Developers',
    'Topic :: Software Development :: Interpreters',
    'Operating System :: OS Independent',
    # Pick your license as you wish (should match "license" above)
    'License :: OSI Approved :: BSD License',
    # Supported Python versions.
    'Programming Language :: Python :: 3',
]

# Files shipped inside the ``data`` package.
DATA_FILES = [
    'README.txt',
    'demo.html',
    'brython.js',
    'brython_stdlib.js',
    'unicode.txt',
]

setup(
    name='brython',
    version='3.5.0',
    description='Brython is an implementation of Python 3 running in the browser',
    long_description=LONG_DESCRIPTION,
    # The project's main homepage.
    url='http://brython.info',
    # Author details
    author='Pierre Quentel',
    author_email='quentel.pierre@orange.fr',
    packages=['data', 'data.tools'],
    license='BSD',
    classifiers=CLASSIFIERS,
    # What does your project relate to?
    keywords='Python browser',
    py_modules=["brython", "list_modules", "server"],
    package_data={'data': DATA_FILES},
)
"repo_name": "Hasimir/brython",
"path": "setup/setup.py",
"copies": "1",
"size": "1562",
"license": "bsd-3-clause",
"hash": -7803201398827978000,
"line_mean": 23.8095238095,
"line_max": 82,
"alpha_frac": 0.6165172855,
"autogenerated": false,
"ratio": 4.198924731182796,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01765255129933355,
"num_lines": 63
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.