text stringlengths 0 1.05M | meta dict |
|---|---|
## A script for finding every cox coefficient and pvalue for every mRNA in LGG Tier 3 data downloaded Jan. 5th, 2016
## NOTE(review): leading indentation was stripped from this file by the dataset
## export; it is restored here. In addition: dict.has_key() (removed in Python 3)
## is replaced by "in", zip() results are materialized as lists (zip is lazy in
## Python 3 but the result is indexed later), bare "except:" clauses are narrowed,
## and input file handles are closed after reading.
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
import re

ro.r('library(survival)')

##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

f = open(os.path.join(BASE_DIR, 'tcga_data', 'LGG', 'clinical',
                      'nationwidechildrens.org_clinical_follow_up_v1.0_lgg.txt'))
##get the column indexes needed
columns = f.readline().split('\t')
patient_column = columns.index('bcr_patient_barcode')
alive_column = columns.index('last_contact_days_to')
death_column = columns.index('death_days_to')
## skip the two remaining header lines
f.readline()
f.readline()
data = [i.split('\t') for i in f]
f.close()

## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1 = [['', '', '']]
for i in data:
    if clinical1[-1][0] == i[patient_column]:
        ## Same patient as the previous row: overwrite with the newer entry.
        ## Death takes precedence over last-contact when both are numeric.
        if re.search('^[0-9]+$', i[death_column]):
            clinical1[-1] = [i[patient_column], int(i[death_column]), 'Dead']
        elif re.search('^[0-9]+$', i[alive_column]):
            clinical1[-1] = [i[patient_column], int(i[alive_column]), 'Alive']
    else:
        if re.search('^[0-9]+$', i[death_column]):
            clinical1.append([i[patient_column], int(i[death_column]), 'Dead'])
        elif re.search('^[0-9]+$', i[alive_column]):
            clinical1.append([i[patient_column], int(i[alive_column]), 'Alive'])
## Removing the empty value.
clinical = clinical1[1:]

## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical = {}
grade_dict = {'G2': 2, 'G3': 3}
sex_dict = {'MALE': 0, 'FEMALE': 1}

## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f = open(os.path.join(BASE_DIR, 'tcga_data', 'LGG', 'clinical',
                      'nationwidechildrens.org_clinical_patient_lgg.txt'))
##get the column indexes needed
columns = f.readline().split('\t')
grade_column = columns.index('tumor_grade')
sex_column = columns.index('gender')
age_column = columns.index('age_at_initial_pathologic_diagnosis')
patient_column = columns.index('bcr_patient_barcode')
alive_column = columns.index('last_contact_days_to')
death_column = columns.index('death_days_to')
f.readline()
f.readline()
clinical4 = []
data = [i.split('\t') for i in f]
f.close()
for i in data:
    ## Patients with an unknown grade/sex (KeyError), a non-numeric age
    ## (ValueError), or a short row (IndexError) are deliberately skipped.
    try:
        more_clinical[i[patient_column]] = [grade_dict[i[grade_column]],
                                            sex_dict[i[sex_column]],
                                            int(i[age_column])]
        if re.search('^[0-9]+$', i[death_column]):
            clinical4.append([i[patient_column], int(i[death_column]), 'Dead'])
        elif re.search('^[0-9]+$', i[alive_column]):
            clinical4.append([i[patient_column], int(i[alive_column]), 'Alive'])
    except (KeyError, ValueError, IndexError):
        pass

new_clinical = []
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
clinical_ids = [j[0] for j in clinical]  ## hoisted: invariant inside the loop
for i in clinical4:
    if i[0] not in clinical_ids:
        new_clinical.append(i)
    elif i[1] <= clinical[clinical_ids.index(i[0])][1]:
        ## follow_up entry is at least as recent; keep it
        new_clinical.append(clinical[clinical_ids.index(i[0])])
    else:
        new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
new_ids = [j[0] for j in new_clinical]
for i in clinical:
    if i[0] not in new_ids:
        new_clinical.append(i)
        new_ids.append(i[0])

## only patients who had a follow up time greater than 0 days are included in the analysis
clinical = [i for i in new_clinical if i[1] > 0]

## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
final_clinical = []
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i + more_clinical[i[0]])

## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f = open(os.path.join(BASE_DIR, 'tcga_data', 'LGG', 'FILE_SAMPLE_MAP_mrna.txt'))
f.readline()
data = [i.strip().split() for i in f if i != '\n']
f.close()
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna = {}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        if i[1].split('-')[3][:-1] == '01':
            ## Rebuild the patient barcode (TCGA-XX-YYYY) from the sample barcode.
            x = '-'.join(i[1].split('-')[:3])
            TCGA_to_mrna[x] = TCGA_to_mrna.get(x, []) + [i[0]]

clinical_and_files = []
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## dict.has_key() was removed in Python 3; "in" is the equivalent test.
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i + [TCGA_to_mrna[i[0]]])

## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes = []
for i in clinical_and_files:
    temp = []
    for j in i[-1]:
        f = open(os.path.join(BASE_DIR, 'tcga_data', 'LGG', 'mrna', j))
        f.readline()
        ## Each row is "id|symbol<tab>...value"; keep [gene symbol, RSEM value].
        temp.append([[line.split('|')[1].split()[0], float(line.strip().split()[-1])]
                     for line in f])
        f.close()
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp) == 1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## the values are averaged element-wise for any number of files.
    else:
        values = []
        for k in temp:
            values.append([kk[1] for kk in k])
        averaged = sum([np.array(kkk) for kkk in values]) / float(len(temp))
        ## list(...) materializes the pairs; zip() is a lazy iterator in Python 3
        ## and the result is indexed below.
        genes.append(list(zip([z[0] for z in temp[0]], list(averaged))))

## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## NOTE: [[]]*n aliases one empty list n times, but the loop below only REBINDS
## each slot (final_genes[index] = ... + [kk]) and never mutates in place, so
## the aliasing is harmless here.
final_genes = [[]] * len(genes)
for i in range(len(genes[0])):
    temp = []
    for j in genes:
        temp.append(j[i])
    count = 0
    for k in temp:
        if k[1] == 0:
            count += 1
    median = np.median([ii[1] for ii in temp])
    if count < len(genes) / 4.0 and median > 1:
        for index, kk in enumerate(temp):
            final_genes[index] = final_genes[index] + [kk]

## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
f = open(os.path.join(BASE_DIR, 'mrna', 'cox', 'LGG', 'final_genes.txt'), 'w')
for i in final_genes:
    f.write(str(i))
    f.write('\n')
f.close()

##Performing Cox regression on all of the genes in final_genes
death_dic = {'Alive': 0, 'Dead': 1}
coeffs = []
pvalues = []
genes = []  ##This list tracks the gene names
for i in range(len(final_genes[0])):
    kaplan = []
    genes.append(final_genes[0][i][0])
    ## These lists contain the clinical information and mRNA data in the same order.
    for k, j in zip(clinical_and_files, final_genes):
        kaplan.append([k[1], k[2], k[3], k[4], k[5], j[i][1]])
    data = [ii[-1] for ii in kaplan]  ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression'] = ro.FloatVector(data)
    ## Perform inverse normal transformation
    res = ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)')
    inverse_norm = list(res)  ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene'] = ro.FloatVector(inverse_norm)
    ro.globalenv['times'] = ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died'] = ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## Dummy-code the tumor grade covariates (LGG is only G2/G3).
    grade2 = [1 if ii[2] == 2 else 0 for ii in kaplan]
    grade3 = [1 if ii[2] == 3 else 0 for ii in kaplan]
    ro.globalenv['grade2'] = ro.IntVector(grade2)
    ro.globalenv['grade3'] = ro.IntVector(grade3)
    ro.globalenv['sex'] = ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age'] = ro.IntVector([ii[4] for ii in kaplan])
    res = ro.r('coxph(Surv(times,died) ~ gene + grade2 + grade3 + sex + age)')  ## Perform Cox regression
    ## Parse the string of the result with python for the gene coefficient and pvalue
    ## NOTE(review): if the "gene" row were ever absent from coxph's printout,
    ## the stale coeff/pvalue from the previous iteration would be appended —
    ## behavior unchanged from the original.
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0] == 'gene':
                coeff = entry.split()[1]
                pvalue = entry.split()[-1]
                break
        except IndexError:  ## blank lines have no fields
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)

## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
f = open(os.path.join(BASE_DIR, 'mrna', 'cox', 'LGG', 'coeffs_pvalues.txt'), 'w')
for i, j, k in zip(genes, coeffs, pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mrna/cox/LGG/cox_regression.py",
"copies": "1",
"size": "9834",
"license": "mit",
"hash": -7015210057540040000,
"line_mean": 34.3741007194,
"line_max": 142,
"alpha_frac": 0.6454138702,
"autogenerated": false,
"ratio": 3.1022082018927444,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42476220720927443,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every mRNA in LIHC Tier 3 data downloaded Feb. 2015
## NOTE(review): leading indentation was stripped from this file by the dataset
## export; it is restored here. In addition: dict.has_key() (removed in Python 3)
## is replaced by "in", zip() results are materialized as lists (zip is lazy in
## Python 3 but the result is indexed later), bare "except:" clauses are narrowed,
## and input file handles are closed after reading.
##load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os

ro.r('library(survival)')

##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

f = open(os.path.join(BASE_DIR, 'tcga_data', 'LIHC', 'clinical',
                      'nationwidechildrens.org_clinical_follow_up_v4.0_lihc.txt'))
## skip the three header lines (this file's columns are addressed by position)
f.readline()
f.readline()
f.readline()
data = [i.split('\t') for i in f]
f.close()

## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
## Column positions: 0 = patient barcode, 9 = vital status,
## 10 = last contact days, 11 = death days.
clinical = [['', '', '']]
for i in data:
    ## Rows with a non-numeric day count (e.g. '[Not Available]') raise
    ## ValueError on int() and are deliberately skipped.
    try:
        if clinical[-1][0] == i[0]:
            if i[9] == 'Alive':
                clinical[-1] = [i[0], int(i[10]), 'Alive']
            elif i[9] == 'Dead':
                clinical[-1] = [i[0], int(i[11]), 'Dead']
        else:
            if i[9] == 'Alive':
                clinical.append([i[0], int(i[10]), 'Alive'])
            elif i[9] == 'Dead':
                clinical.append([i[0], int(i[11]), 'Dead'])
    except (ValueError, IndexError):
        pass
## Removing the empty value.
clinical = clinical[1:]

## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical = {}
grade_dict = {'G1': 1, 'G2': 2, 'G3': 3, 'G4': 4}
sex_dict = {'MALE': 0, 'FEMALE': 1}

## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f = open(os.path.join(BASE_DIR, 'tcga_data', 'LIHC', 'clinical',
                      'nationwidechildrens.org_clinical_patient_lihc.txt'))
f.readline()
f.readline()
f.readline()
clinical4 = []
data = [i.split('\t') for i in f]
f.close()
for i in data:
    ## Column positions: 25 = tumor grade, 5 = gender, 56 = age, 13 = vital
    ## status, 71 = last contact days, 64 = death days.
    ## Patients with an unknown grade/sex (KeyError), non-numeric age or days
    ## (ValueError), or a short row (IndexError) are deliberately skipped.
    try:
        more_clinical[i[0]] = [grade_dict[i[25]], sex_dict[i[5]], int(i[56])]
        if i[13] == 'Alive':
            clinical4.append([i[0], int(i[71]), 'Alive'])
        elif i[13] == 'Dead':
            clinical4.append([i[0], int(i[64]), 'Dead'])
    except (KeyError, ValueError, IndexError):
        pass

new_clinical = []
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
clinical_ids = [j[0] for j in clinical]  ## hoisted: invariant inside the loop
for i in clinical4:
    if i[0] not in clinical_ids:
        new_clinical.append(i)
    elif i[1] <= clinical[clinical_ids.index(i[0])][1]:
        ## follow_up entry is at least as recent; keep it
        new_clinical.append(clinical[clinical_ids.index(i[0])])
    else:
        new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
new_ids = [j[0] for j in new_clinical]
for i in clinical:
    if i[0] not in new_ids:
        new_clinical.append(i)
        new_ids.append(i[0])

## only patients who had a follow up time greater than 0 days are included in the analysis
clinical = [i for i in new_clinical if i[1] > 0]

final_clinical = []
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i + more_clinical[i[0]])

## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f = open(os.path.join(BASE_DIR, 'tcga_data', 'LIHC', 'FILE_SAMPLE_MAP.txt'))
f.readline()
data = [i.strip().split() for i in f if i != '\n']
f.close()
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna = {}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        if i[1].split('-')[3][:-1] == '01':
            ## Rebuild the patient barcode (TCGA-XX-YYYY) from the sample barcode.
            x = '-'.join(i[1].split('-')[:3])
            TCGA_to_mrna[x] = TCGA_to_mrna.get(x, []) + [i[0]]

clinical_and_files = []
## We only care about patients that contained complete clinical information
for i in final_clinical:
    ## dict.has_key() was removed in Python 3; "in" is the equivalent test.
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i + [TCGA_to_mrna[i[0]]])

## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes = []
for i in clinical_and_files:
    temp = []
    for j in i[-1]:
        f = open(os.path.join(BASE_DIR, 'tcga_data', 'LIHC', 'mrna', j))
        f.readline()
        ## Each row is "id|symbol<tab>...value"; keep [gene symbol, RSEM value].
        temp.append([[line.split('|')[1].split()[0], float(line.strip().split()[-1])]
                     for line in f])
        f.close()
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp) == 1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## the values are averaged element-wise for any number of files.
    else:
        values = []
        for k in temp:
            values.append([kk[1] for kk in k])
        averaged = sum([np.array(kkk) for kkk in values]) / float(len(temp))
        ## list(...) materializes the pairs; zip() is a lazy iterator in Python 3
        ## and the result is indexed below.
        genes.append(list(zip([z[0] for z in temp[0]], list(averaged))))

## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## NOTE: [[]]*n aliases one empty list n times, but the loop below only REBINDS
## each slot and never mutates in place, so the aliasing is harmless here.
final_genes = [[]] * len(genes)
for i in range(len(genes[0])):
    temp = []
    for j in genes:
        temp.append(j[i])
    count = 0
    for k in temp:
        if k[1] == 0:
            count += 1
    median = np.median([ii[1] for ii in temp])
    ## NOTE(review): under Python 2 "len(genes)/4" is floor division; the
    ## companion scripts use /4.0. Kept as-is to preserve behavior — confirm
    ## which cutoff is intended.
    if count < len(genes) / 4 and median > 1:
        for index, kk in enumerate(temp):
            final_genes[index] = final_genes[index] + [kk]

## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
##f=open(os.path.join(BASE_DIR,'cox_regression','LIHC','final_genes.txt'),'w')
##for i in final_genes:
##    f.write(str(i))
##    f.write('\n')
##f.close()

##Performing Cox regression on all of the genes in final_genes
death_dic = {'Alive': 0, 'Dead': 1}
coeffs = []
pvalues = []
genes = []  ##This list tracks the gene names
for i in range(len(final_genes[0])):
    kaplan = []
    genes.append(final_genes[0][i][0])
    ## These lists contain the clinical information and mRNA data in the same order.
    for k, j in zip(clinical_and_files, final_genes):
        kaplan.append([k[1], k[2], k[3], k[4], k[5], j[i][1]])
    data = [ii[-1] for ii in kaplan]  ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression'] = ro.FloatVector(data)
    ## Perform inverse normal transformation
    res = ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)')
    inverse_norm = list(res)  ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene'] = ro.FloatVector(inverse_norm)
    ro.globalenv['times'] = ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died'] = ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## Dummy-code the tumor grade covariates (G1-G4).
    grade1 = [1 if ii[2] == 1 else 0 for ii in kaplan]
    grade2 = [1 if ii[2] == 2 else 0 for ii in kaplan]
    grade3 = [1 if ii[2] == 3 else 0 for ii in kaplan]
    grade4 = [1 if ii[2] == 4 else 0 for ii in kaplan]
    ro.globalenv['grade1'] = ro.IntVector(grade1)
    ro.globalenv['grade2'] = ro.IntVector(grade2)
    ro.globalenv['grade3'] = ro.IntVector(grade3)
    ro.globalenv['grade4'] = ro.IntVector(grade4)
    ro.globalenv['sex'] = ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age'] = ro.IntVector([ii[4] for ii in kaplan])
    res = ro.r('coxph(Surv(times,died) ~ gene + grade1 + grade2 + grade3 + grade4 + sex + age)')  ## Perform Cox regression
    # Parse the string of the result with python for the gene coefficient and pvalue
    ## NOTE(review): if the "gene" row were ever absent from coxph's printout,
    ## the stale coeff/pvalue from the previous iteration would be appended —
    ## behavior unchanged from the original.
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0] == 'gene':
                coeff = entry.split()[1]
                pvalue = entry.split()[-1]
                break
        except IndexError:  ## blank lines have no fields
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)

## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
f = open(os.path.join(BASE_DIR, 'cox_regression', 'LIHC', 'coeffs_pvalues.txt'), 'w')
for i, j, k in zip(genes, coeffs, pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/pan_cancer",
"path": "paper/cox_regression/LIHC/cox_regression.py",
"copies": "1",
"size": "9486",
"license": "mit",
"hash": -3237184185042027000,
"line_mean": 31.0472972973,
"line_max": 142,
"alpha_frac": 0.6205987771,
"autogenerated": false,
"ratio": 3.120394736842105,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4240993513942105,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every mRNA in LIHC Tier 3 data downloaded Jan. 5th, 2016
## NOTE(review): leading indentation was stripped from this file by the dataset
## export; it is restored here. In addition: dict.has_key() (removed in Python 3)
## is replaced by "in", zip() results are materialized as lists (zip is lazy in
## Python 3 but the result is indexed later), bare "except:" clauses are narrowed,
## and input file handles are closed after reading.
##load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
import re

ro.r('library(survival)')

##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

f = open(os.path.join(BASE_DIR, 'tcga_data', 'LIHC', 'clinical',
                      'nationwidechildrens.org_clinical_follow_up_v4.0_lihc.txt'))
##get the column indexes needed
columns = f.readline().strip().split('\t')
patient_column = columns.index('bcr_patient_barcode')
alive_column = columns.index('last_contact_days_to')
death_column = columns.index('death_days_to')
## skip the two remaining header lines
f.readline()
f.readline()
data = [i.split('\t') for i in f]
f.close()

## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1 = [['', '', '']]
for i in data:
    if clinical1[-1][0] == i[patient_column]:
        ## Same patient as the previous row: overwrite with the newer entry.
        ## Last-contact takes precedence over death when both are numeric
        ## (note: the LGG companion script checks death first).
        if re.search('^[0-9]+$', i[alive_column]):
            clinical1[-1] = [i[patient_column], int(i[alive_column]), 'Alive']
        elif re.search('^[0-9]+$', i[death_column]):
            clinical1[-1] = [i[patient_column], int(i[death_column]), 'Dead']
    else:
        if re.search('^[0-9]+$', i[alive_column]):
            clinical1.append([i[patient_column], int(i[alive_column]), 'Alive'])
        elif re.search('^[0-9]+$', i[death_column]):
            clinical1.append([i[patient_column], int(i[death_column]), 'Dead'])
## Removing the empty value.
clinical = clinical1[1:]

## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical = {}
grade_dict = {'G1': 1, 'G2': 2, 'G3': 3, 'G4': 4}
sex_dict = {'MALE': 0, 'FEMALE': 1}

## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f = open(os.path.join(BASE_DIR, 'tcga_data', 'LIHC', 'clinical',
                      'nationwidechildrens.org_clinical_patient_lihc.txt'))
##get the column indexes needed
columns = f.readline().split('\t')
grade_column = columns.index('tumor_grade')
sex_column = columns.index('gender')
age_column = columns.index('age_at_diagnosis')
patient_column = columns.index('bcr_patient_barcode')
alive_column = columns.index('last_contact_days_to')
death_column = columns.index('death_days_to')
f.readline()
f.readline()
clinical4 = []
data = [i.split('\t') for i in f]
f.close()
for i in data:
    ## Patients with an unknown grade/sex (KeyError), a non-numeric age
    ## (ValueError), or a short row (IndexError) are deliberately skipped.
    try:
        more_clinical[i[patient_column]] = [grade_dict[i[grade_column]],
                                            sex_dict[i[sex_column]],
                                            int(i[age_column])]
        if re.search('^[0-9]+$', i[alive_column]):
            clinical4.append([i[patient_column], int(i[alive_column]), 'Alive'])
        elif re.search('^[0-9]+$', i[death_column]):
            clinical4.append([i[patient_column], int(i[death_column]), 'Dead'])
    except (KeyError, ValueError, IndexError):
        pass

new_clinical = []
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
clinical_ids = [j[0] for j in clinical]  ## hoisted: invariant inside the loop
for i in clinical4:
    if i[0] not in clinical_ids:
        new_clinical.append(i)
    elif i[1] <= clinical[clinical_ids.index(i[0])][1]:
        ## follow_up entry is at least as recent; keep it
        new_clinical.append(clinical[clinical_ids.index(i[0])])
    else:
        new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
new_ids = [j[0] for j in new_clinical]
for i in clinical:
    if i[0] not in new_ids:
        new_clinical.append(i)
        new_ids.append(i[0])

## only patients who had a follow up time greater than 0 days are included in the analysis
clinical = [i for i in new_clinical if i[1] > 0]

final_clinical = []
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i + more_clinical[i[0]])

## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f = open(os.path.join(BASE_DIR, 'tcga_data', 'LIHC', 'FILE_SAMPLE_MAP_mrna.txt'))
f.readline()
data = [i.strip().split() for i in f if i != '\n']
f.close()
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna = {}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        if i[1].split('-')[3][:-1] == '01':
            ## Rebuild the patient barcode (TCGA-XX-YYYY) from the sample barcode.
            x = '-'.join(i[1].split('-')[:3])
            TCGA_to_mrna[x] = TCGA_to_mrna.get(x, []) + [i[0]]

clinical_and_files = []
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## dict.has_key() was removed in Python 3; "in" is the equivalent test.
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i + [TCGA_to_mrna[i[0]]])

## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes = []
for i in clinical_and_files:
    temp = []
    for j in i[-1]:
        f = open(os.path.join(BASE_DIR, 'tcga_data', 'LIHC', 'mrna', j))
        f.readline()
        ## Each row is "id|symbol<tab>...value"; keep [gene symbol, RSEM value].
        temp.append([[line.split('|')[1].split()[0], float(line.strip().split()[-1])]
                     for line in f])
        f.close()
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp) == 1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## the values are averaged element-wise for any number of files.
    else:
        values = []
        for k in temp:
            values.append([kk[1] for kk in k])
        averaged = sum([np.array(kkk) for kkk in values]) / float(len(temp))
        ## list(...) materializes the pairs; zip() is a lazy iterator in Python 3
        ## and the result is indexed below.
        genes.append(list(zip([z[0] for z in temp[0]], list(averaged))))

## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## NOTE: [[]]*n aliases one empty list n times, but the loop below only REBINDS
## each slot and never mutates in place, so the aliasing is harmless here.
final_genes = [[]] * len(genes)
for i in range(len(genes[0])):
    temp = []
    for j in genes:
        temp.append(j[i])
    count = 0
    for k in temp:
        if k[1] == 0:
            count += 1
    median = np.median([ii[1] for ii in temp])
    if count < len(genes) / 4.0 and median > 1:
        for index, kk in enumerate(temp):
            final_genes[index] = final_genes[index] + [kk]

## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
f = open(os.path.join(BASE_DIR, 'mrna', 'cox', 'LIHC', 'final_genes.txt'), 'w')
for i in final_genes:
    f.write(str(i))
    f.write('\n')
f.close()

##Performing Cox regression on all of the genes in final_genes
death_dic = {'Alive': 0, 'Dead': 1}
coeffs = []
pvalues = []
genes = []  ##This list tracks the gene names
for i in range(len(final_genes[0])):
    kaplan = []
    genes.append(final_genes[0][i][0])
    ## These lists contain the clinical information and mRNA data in the same order.
    for k, j in zip(clinical_and_files, final_genes):
        kaplan.append([k[1], k[2], k[3], k[4], k[5], j[i][1]])
    data = [ii[-1] for ii in kaplan]  ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression'] = ro.FloatVector(data)
    ## Perform inverse normal transformation
    res = ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)')
    inverse_norm = list(res)  ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene'] = ro.FloatVector(inverse_norm)
    ro.globalenv['times'] = ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died'] = ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## Dummy-code the tumor grade covariates (G1-G4).
    grade1 = [1 if ii[2] == 1 else 0 for ii in kaplan]
    grade2 = [1 if ii[2] == 2 else 0 for ii in kaplan]
    grade3 = [1 if ii[2] == 3 else 0 for ii in kaplan]
    grade4 = [1 if ii[2] == 4 else 0 for ii in kaplan]
    ro.globalenv['grade1'] = ro.IntVector(grade1)
    ro.globalenv['grade2'] = ro.IntVector(grade2)
    ro.globalenv['grade3'] = ro.IntVector(grade3)
    ro.globalenv['grade4'] = ro.IntVector(grade4)
    ro.globalenv['sex'] = ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age'] = ro.IntVector([ii[4] for ii in kaplan])
    res = ro.r('coxph(Surv(times,died) ~ gene + grade1 + grade2 + grade3 + grade4 + sex + age)')  ## Perform Cox regression
    # Parse the string of the result with python for the gene coefficient and pvalue
    ## NOTE(review): if the "gene" row were ever absent from coxph's printout,
    ## the stale coeff/pvalue from the previous iteration would be appended —
    ## behavior unchanged from the original.
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0] == 'gene':
                coeff = entry.split()[1]
                pvalue = entry.split()[-1]
                break
        except IndexError:  ## blank lines have no fields
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)

## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
f = open(os.path.join(BASE_DIR, 'mrna', 'cox', 'LIHC', 'coeffs_pvalues.txt'), 'w')
for i, j, k in zip(genes, coeffs, pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mrna/cox/LIHC/cox_regression.py",
"copies": "1",
"size": "10285",
"license": "mit",
"hash": -910934748359796100,
"line_mean": 32.9438943894,
"line_max": 142,
"alpha_frac": 0.6387943607,
"autogenerated": false,
"ratio": 3.105374396135266,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4244168756835266,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every mRNA in LUAD Tier 3 data downloaded Feb. 2015
## Load necessary modules
## NOTE(review): written for Python 2 (dict.has_key below, zip() used as a list,
## integer division in the expression cutoff); do not run under Python 3 as-is.
## The script is a linear pipeline over module-level state, so section order matters.
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
f=open(os.path.join(BASE_DIR,'tcga_data','LUAD','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_luad.txt'))
## Skip the three header lines of the follow-up file.
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
## Hard-coded columns: i[0]=patient barcode, i[8]=vital status, i[9]=days to last
## contact, i[10]=days to death -- TODO confirm against this file version's header.
## NOTE(review): the bare except silently drops rows whose day field is blank or
## non-numeric (int() raises ValueError); catching ValueError explicitly would be safer.
clinical1=[['','','']]
for i in data:
try:
if clinical1[-1][0]==i[0]:
if i[8]=='Alive':
clinical1[-1]=[i[0],int(i[9]),'Alive']
elif i[8]=='Dead':
clinical1[-1]=[i[0],int(i[10]),'Dead']
else:
pass
else:
if i[8]=='Alive':
clinical1.append([i[0],int(i[9]),'Alive'])
elif i[8]=='Dead':
clinical1.append([i[0],int(i[10]),'Dead'])
else:
pass
except:
pass
## Removing the empty value.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','LUAD','clinical','nationwidechildrens.org_clinical_patient_luad.txt'))
f.readline()
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
## more_clinical value is [0, sex, age]; the leading 0 is a placeholder so the
## layout matches cancer types where a tumor grade occupies that slot.
## i[-16] is assumed to be the age-at-diagnosis column in this layout -- TODO confirm.
## The except also skips patients with an unlisted sex (KeyError) or missing age.
for i in data:
try:
more_clinical[i[0]]=[0,sex_dict[i[6]],int(i[-16])]
if i[42]=='Alive':
clinical4.append([i[0],int(i[52]),'Alive'])
elif i[42]=='Dead':
clinical4.append([i[0],int(i[53]),'Dead'])
else:
pass
except:
pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
if i[0] not in [j[0] for j in clinical]:
new_clinical.append(i)
else:
if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
else:
new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
if i[0] not in [j[0] for j in new_clinical]:
new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
if i[0] in more_clinical:
final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','LUAD','FILE_SAMPLE_MAP.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
## The normalized data files are used
if 'genes.normalized_results' in i[0]:
if i[1].split('-')[3][:-1]=='01':
## Rebuild the patient barcode ('TCGA-XX-XXXX') from the first three fields of the sample barcode.
x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## We only care about patients that contained complete clinical information
for i in final_clinical:
## NOTE(review): dict.has_key() is Python 2 only (use `i[0] in TCGA_to_mrna` on Python 3).
if TCGA_to_mrna.has_key(i[0]):
## The mRNA files are added to the clinical list
## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[mRNA files]],...]
clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
else:
pass
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
## NOTE(review): the comprehension below reuses the outer loop name `i` (leaks in
## Python 2); harmless here because `i` is not read again before the next iteration.
genes=[]
for i in clinical_and_files:
temp=[]
for j in i[-1]:
f=open(os.path.join(BASE_DIR,'tcga_data','LUAD','mrna',j))
f.readline()
temp.append([[i.split('|')[1].split()[0],float(i.strip().split()[-1])] for i in f])
## In the case that the patient only contained 1 primary tumor mRNA file.
if len(temp)==1:
genes.append(temp[0])
## If the patient contained more than 1 primary tumor mRNA file
## this list comprehension will average the files for any number of files.
else:
values=[]
for k in temp:
values.append([kk[1] for kk in k])
genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
temp=[]
for j in genes:
temp.append(j[i])
count=0
for k in temp:
if k[1]==0:
count+=1
median=np.median([ii[1] for ii in temp])
## NOTE(review): len(genes)/4 is integer (floor) division under Python 2; the
## Jan. 2016 revision of this script uses /4.0 -- confirm which cutoff is intended.
if count<len(genes)/4 and median>1:
for index, kk in enumerate(temp):
final_genes[index]=final_genes[index]+[kk]
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
##f=open(os.path.join(BASE_DIR,'cox_regression','LUAD','final_genes.txt'),'w')
##for i in final_genes:
## f.write(str(i))
## f.write('\n')
##f.close()
##Performing Cox regression on all of the genes in final_genes
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])):
kaplan=[]
genes.append(final_genes[0][i][0])
for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
ro.globalenv['expression']=ro.FloatVector(data)
res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
inverse_norm=list(res) ## Convert robject to python list
## Prepare the variables for rpy2
ro.globalenv['gene']=ro.FloatVector(inverse_norm)
ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
res=ro.r('coxph(Surv(times,died) ~ gene + sex + age)') ## Perform Cox regression
## Parse the string of the result with python for the gene coefficient and pvalue
## NOTE(review): the bare except skips printed lines too short to split/index;
## if no 'gene' row is found, coeff/pvalue silently keep their values from the
## previous gene iteration.
for entry in str(res).split('\n'):
try:
if entry.split()[0]=='gene':
coeff=entry.split()[1]
pvalue=entry.split()[-1]
break
except:
pass
coeffs.append(coeff)
pvalues.append(pvalue)
## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
## NOTE(review): handles in this script are opened without `with` and the input
## handles are never closed; harmless for a one-shot script but worth tightening.
f=open(os.path.join(BASE_DIR,'cox_regression','LUAD','coeffs_pvalues.txt'),'w')
for i,j,k in zip(genes,coeffs,pvalues):
f.write(i)
f.write('\t')
f.write(j)
f.write('\t')
f.write(k)
f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/pan_cancer",
"path": "paper/cox_regression/LUAD/cox_regression.py",
"copies": "1",
"size": "8520",
"license": "mit",
"hash": -1304648225903000600,
"line_mean": 33.9180327869,
"line_max": 143,
"alpha_frac": 0.6325117371,
"autogenerated": false,
"ratio": 3.1163130943672277,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9125705481061963,
"avg_score": 0.02462387008105303,
"num_lines": 244
} |
## A script for finding every cox coefficient and pvalue for every mRNA in LUAD Tier 3 data downloaded Jan. 5th, 2016
## Load necessary modules
## NOTE(review): written for Python 2 (dict.has_key below, zip() used as a list);
## do not run under Python 3 as-is. The script is a linear pipeline over
## module-level state, so section order matters.
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','LUAD','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_luad.txt'))
##get the column indexes needed
columns=f.readline().strip().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two remaining header lines.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
## death_days_to takes precedence over last_contact_days_to; the regex accepts
## only whole-number day counts, so placeholders like '[Not Available]' fall through.
clinical1=[['','','']]
for i in data:
if clinical1[-1][0]==i[patient_column]:
if re.search('^[0-9]+$',i[death_column]):
clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
elif re.search('^[0-9]+$',i[alive_column]):
clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
else:
pass
else:
if re.search('^[0-9]+$',i[death_column]):
clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
elif re.search('^[0-9]+$',i[alive_column]):
clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
else:
pass
## Removing the empty value.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','LUAD','clinical','nationwidechildrens.org_clinical_patient_luad.txt'))
##get the column indexes needed
## NOTE(review): unlike the follow-up header above, this header line is not
## strip()ed; fine as long as none of the looked-up columns is the last one.
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
## more_clinical value is [0, sex, age]; the leading 0 is a placeholder so the
## layout matches cancer types where a tumor grade occupies that slot.
## The bare except skips patients with an unlisted sex (KeyError) or a
## non-numeric age (ValueError).
for i in data:
try:
more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
if re.search('^[0-9]+$',i[death_column]):
clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
elif re.search('^[0-9]+$',i[alive_column]):
clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
else:
pass
except:
pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
if i[0] not in [j[0] for j in clinical]:
new_clinical.append(i)
else:
if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
else:
new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
if i[0] not in [j[0] for j in new_clinical]:
new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
if i[0] in more_clinical:
final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','LUAD','FILE_SAMPLE_MAP_mrna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
## The normalized data files are used
if 'genes.normalized_results' in i[0]:
if i[1].split('-')[3][:-1]=='01':
## Rebuild the patient barcode ('TCGA-XX-XXXX') from the first three fields of the sample barcode.
x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
## NOTE(review): dict.has_key() is Python 2 only (use `i[0] in TCGA_to_mrna` on Python 3).
if TCGA_to_mrna.has_key(i[0]):
## The mRNA files are added to the clinical list
## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[mRNA files]],...]
clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
else:
pass
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
## NOTE(review): the comprehension below reuses the outer loop name `i` (leaks in
## Python 2); harmless here because `i` is not read again before the next iteration.
genes=[]
for i in clinical_and_files:
temp=[]
for j in i[-1]:
f=open(os.path.join(BASE_DIR,'tcga_data','LUAD','mrna',j))
f.readline()
temp.append([[i.split('|')[1].split()[0],float(i.strip().split()[-1])] for i in f])
## In the case that the patient only contained 1 primary tumor mRNA file.
if len(temp)==1:
genes.append(temp[0])
## If the patient contained more than 1 primary tumor mRNA file
## this list comprehension will average the files for any number of files.
else:
values=[]
for k in temp:
values.append([kk[1] for kk in k])
genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
temp=[]
for j in genes:
temp.append(j[i])
count=0
for k in temp:
if k[1]==0:
count+=1
median=np.median([ii[1] for ii in temp])
if count<len(genes)/4.0 and median>1:
for index, kk in enumerate(temp):
final_genes[index]=final_genes[index]+[kk]
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mrna','cox','LUAD','final_genes.txt'),'w')
for i in final_genes:
f.write(str(i))
f.write('\n')
f.close()
##Performing Cox regression on all of the genes in final_genes
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])):
kaplan=[]
genes.append(final_genes[0][i][0])
for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
ro.globalenv['expression']=ro.FloatVector(data)
res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
inverse_norm=list(res) ## Convert robject to python list
## Prepare the variables for rpy2
ro.globalenv['gene']=ro.FloatVector(inverse_norm)
ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
res=ro.r('coxph(Surv(times,died) ~ gene + sex + age)') ## Perform Cox regression
## Parse the string of the result with python for the gene coefficient and pvalue
## NOTE(review): the bare except skips printed lines too short to split/index;
## if no 'gene' row is found, coeff/pvalue silently keep their values from the
## previous gene iteration.
for entry in str(res).split('\n'):
try:
if entry.split()[0]=='gene':
coeff=entry.split()[1]
pvalue=entry.split()[-1]
break
except:
pass
coeffs.append(coeff)
pvalues.append(pvalue)
## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
## NOTE(review): handles in this script are opened without `with` and the input
## handles are never closed; harmless for a one-shot script but worth tightening.
f=open(os.path.join(BASE_DIR,'mrna','cox','LUAD','coeffs_pvalues.txt'),'w')
for i,j,k in zip(genes,coeffs,pvalues):
f.write(i)
f.write('\t')
f.write(j)
f.write('\t')
f.write(k)
f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mrna/cox/LUAD/cox_regression.py",
"copies": "1",
"size": "9282",
"license": "mit",
"hash": -6365172607026777000,
"line_mean": 35.8333333333,
"line_max": 143,
"alpha_frac": 0.650937298,
"autogenerated": false,
"ratio": 3.106425702811245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9137949901454245,
"avg_score": 0.023882619871399904,
"num_lines": 252
} |
## A script for finding every cox coefficient and pvalue for every mRNA in LUSC Tier 3 data downloaded Feb. 2015
## Load necessary modules
## NOTE(review): written for Python 2 (dict.has_key below, zip() used as a list,
## integer division in the expression cutoff); do not run under Python 3 as-is.
## The script is a linear pipeline over module-level state, so section order matters.
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_lusc.txt'))
## Skip the three header lines of the follow-up file.
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
## Hard-coded columns: i[0]=patient barcode, i[8]=vital status, i[9]=days to last
## contact, i[10]=days to death -- TODO confirm against this file version's header.
## NOTE(review): the bare except silently drops rows whose day field is blank or
## non-numeric (int() raises ValueError); catching ValueError explicitly would be safer.
clinical=[['','','']]
for i in data:
try:
if clinical[-1][0]==i[0]:
if i[8]=='Alive':
clinical[-1]=[i[0],int(i[9]),'Alive']
elif i[8]=='Dead':
clinical[-1]=[i[0],int(i[10]),'Dead']
else:
pass
else:
if i[8]=='Alive':
clinical.append([i[0],int(i[9]),'Alive'])
elif i[8]=='Dead':
clinical.append([i[0],int(i[10]),'Dead'])
else:
pass
except:
pass
## Removing the empty value.
clinical=clinical[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','clinical','nationwidechildrens.org_clinical_patient_lusc.txt'))
f.readline()
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
## more_clinical value is [0, sex, age]; the leading 0 is a placeholder so the
## layout matches cancer types where a tumor grade occupies that slot.
## i[-14] is assumed to be the age-at-diagnosis column in this layout -- TODO confirm.
## The except also skips patients with an unlisted sex (KeyError) or missing age.
for i in data:
try:
more_clinical[i[0]]=[0,sex_dict[i[6]],int(i[-14])]
if i[42]=='Alive':
clinical4.append([i[0],int(i[52]),'Alive'])
elif i[42]=='Dead':
clinical4.append([i[0],int(i[53]),'Dead'])
else:
pass
except:
pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
if i[0] not in [j[0] for j in clinical]:
new_clinical.append(i)
else:
if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
else:
new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
if i[0] not in [j[0] for j in new_clinical]:
new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
if i[0] in more_clinical:
final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','FILE_SAMPLE_MAP.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
## The normalized data files are used
if 'genes.normalized_results' in i[0]:
if i[1].split('-')[3][:-1]=='01':
## Rebuild the patient barcode ('TCGA-XX-XXXX') from the first three fields of the sample barcode.
x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## We only care about patients that contained complete clinical information
for i in final_clinical:
## NOTE(review): dict.has_key() is Python 2 only (use `i[0] in TCGA_to_mrna` on Python 3).
if TCGA_to_mrna.has_key(i[0]):
## The mRNA files are added to the clinical list
## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[mRNA files]],...]
clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
else:
pass
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
## NOTE(review): the comprehension below reuses the outer loop name `i` (leaks in
## Python 2); harmless here because `i` is not read again before the next iteration.
genes=[]
for i in clinical_and_files:
temp=[]
for j in i[-1]:
f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','mrna',j))
f.readline()
temp.append([[i.split('|')[1].split()[0],float(i.strip().split()[-1])] for i in f])
## In the case that the patient only contained 1 primary tumor mRNA file.
if len(temp)==1:
genes.append(temp[0])
## If the patient contained more than 1 primary tumor mRNA file
## this list comprehension will average the files for any number of files.
else:
values=[]
for k in temp:
values.append([kk[1] for kk in k])
genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
temp=[]
for j in genes:
temp.append(j[i])
count=0
for k in temp:
if k[1]==0:
count+=1
median=np.median([ii[1] for ii in temp])
## NOTE(review): len(genes)/4 is integer (floor) division under Python 2; the
## Jan. 2016 revision of this script uses /4.0 -- confirm which cutoff is intended.
if count<len(genes)/4 and median>1:
for index, kk in enumerate(temp):
final_genes[index]=final_genes[index]+[kk]
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
##f=open(os.path.join(BASE_DIR,'cox_regression','LUSC','final_genes.txt'),'w')
##for i in final_genes:
## f.write(str(i))
## f.write('\n')
##f.close()
##Performing Cox regression on all of the genes in final_genes
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])):
kaplan=[]
genes.append(final_genes[0][i][0])
for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
ro.globalenv['expression']=ro.FloatVector(data)
res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
inverse_norm=list(res) ## Convert robject to python list
## Prepare the variables for rpy2
ro.globalenv['gene']=ro.FloatVector(inverse_norm)
ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
res=ro.r('coxph(Surv(times,died) ~ gene + sex + age)') ## Perform Cox regression
## Parse the string of the result with python for the gene coefficient and pvalue
## NOTE(review): the bare except skips printed lines too short to split/index;
## if no 'gene' row is found, coeff/pvalue silently keep their values from the
## previous gene iteration.
for entry in str(res).split('\n'):
try:
if entry.split()[0]=='gene':
coeff=entry.split()[1]
pvalue=entry.split()[-1]
break
except:
pass
coeffs.append(coeff)
pvalues.append(pvalue)
## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
## NOTE(review): handles in this script are opened without `with` and the input
## handles are never closed; harmless for a one-shot script but worth tightening.
f=open(os.path.join(BASE_DIR,'cox_regression','LUSC','coeffs_pvalues.txt'),'w')
for i,j,k in zip(genes,coeffs,pvalues):
f.write(i)
f.write('\t')
f.write(j)
f.write('\t')
f.write(k)
f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/pan_cancer",
"path": "paper/cox_regression/LUSC/cox_regression.py",
"copies": "1",
"size": "8503",
"license": "mit",
"hash": 7241484964736158000,
"line_mean": 33.8483606557,
"line_max": 142,
"alpha_frac": 0.6329530754,
"autogenerated": false,
"ratio": 3.1135115342365434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4246464609636543,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every mRNA in LUSC Tier 3 data downloaded Jan. 5th, 2016
## Load necessary modules
## NOTE(review): written for Python 2 (dict.has_key below, zip() used as a list);
## do not run under Python 3 as-is. The script is a linear pipeline over
## module-level state, so section order matters.
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_lusc.txt'))
##get the column indexes needed
columns=f.readline().strip().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two remaining header lines.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
## death_days_to takes precedence over last_contact_days_to; the regex accepts
## only whole-number day counts, so placeholders like '[Not Available]' fall through.
clinical1=[['','','']]
for i in data:
if clinical1[-1][0]==i[patient_column]:
if re.search('^[0-9]+$',i[death_column]):
clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
elif re.search('^[0-9]+$',i[alive_column]):
clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
else:
pass
else:
if re.search('^[0-9]+$',i[death_column]):
clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
elif re.search('^[0-9]+$',i[alive_column]):
clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
else:
pass
## Removing the empty value.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','clinical','nationwidechildrens.org_clinical_patient_lusc.txt'))
##get the column indexes needed
## NOTE(review): unlike the follow-up header above, this header line is not
## strip()ed; fine as long as none of the looked-up columns is the last one.
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
## more_clinical value is [0, sex, age]; the leading 0 is a placeholder so the
## layout matches cancer types where a tumor grade occupies that slot.
## The bare except skips patients with an unlisted sex (KeyError) or a
## non-numeric age (ValueError).
for i in data:
try:
more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
if re.search('^[0-9]+$',i[death_column]):
clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
elif re.search('^[0-9]+$',i[alive_column]):
clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
else:
pass
except:
pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
for i in clinical4:
if i[0] not in [j[0] for j in clinical]:
new_clinical.append(i)
else:
if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
else:
new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
if i[0] not in [j[0] for j in new_clinical]:
new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
if i[0] in more_clinical:
final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','FILE_SAMPLE_MAP_mrna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
## The normalized data files are used
if 'genes.normalized_results' in i[0]:
if i[1].split('-')[3][:-1]=='01':
## Rebuild the patient barcode ('TCGA-XX-XXXX') from the first three fields of the sample barcode.
x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
## NOTE(review): dict.has_key() is Python 2 only (use `i[0] in TCGA_to_mrna` on Python 3).
if TCGA_to_mrna.has_key(i[0]):
## The mRNA files are added to the clinical list
## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[mRNA files]],...]
clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
else:
pass
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
## NOTE(review): the comprehension below reuses the outer loop name `i` (leaks in
## Python 2); harmless here because `i` is not read again before the next iteration.
genes=[]
for i in clinical_and_files:
temp=[]
for j in i[-1]:
f=open(os.path.join(BASE_DIR,'tcga_data','LUSC','mrna',j))
f.readline()
temp.append([[i.split('|')[1].split()[0],float(i.strip().split()[-1])] for i in f])
## In the case that the patient only contained 1 primary tumor mRNA file.
if len(temp)==1:
genes.append(temp[0])
## If the patient contained more than 1 primary tumor mRNA file
## this list comprehension will average the files for any number of files.
else:
values=[]
for k in temp:
values.append([kk[1] for kk in k])
genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
temp=[]
for j in genes:
temp.append(j[i])
count=0
for k in temp:
if k[1]==0:
count+=1
median=np.median([ii[1] for ii in temp])
if count<len(genes)/4.0 and median>1:
for index, kk in enumerate(temp):
final_genes[index]=final_genes[index]+[kk]
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mrna','cox','LUSC','final_genes.txt'),'w')
for i in final_genes:
f.write(str(i))
f.write('\n')
f.close()
##Performing Cox regression on all of the genes in final_genes
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])):
kaplan=[]
genes.append(final_genes[0][i][0])
for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
ro.globalenv['expression']=ro.FloatVector(data)
res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
inverse_norm=list(res) ## Convert robject to python list
## Prepare the variables for rpy2
ro.globalenv['gene']=ro.FloatVector(inverse_norm)
ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
res=ro.r('coxph(Surv(times,died) ~ gene + sex + age)') ## Perform Cox regression
## Parse the string of the result with python for the gene coefficient and pvalue
## NOTE(review): the bare except skips printed lines too short to split/index;
## if no 'gene' row is found, coeff/pvalue silently keep their values from the
## previous gene iteration.
for entry in str(res).split('\n'):
try:
if entry.split()[0]=='gene':
coeff=entry.split()[1]
pvalue=entry.split()[-1]
break
except:
pass
coeffs.append(coeff)
pvalues.append(pvalue)
## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
## NOTE(review): handles in this script are opened without `with` and the input
## handles are never closed; harmless for a one-shot script but worth tightening.
f=open(os.path.join(BASE_DIR,'mrna','cox','LUSC','coeffs_pvalues.txt'),'w')
for i,j,k in zip(genes,coeffs,pvalues):
f.write(i)
f.write('\t')
f.write(j)
f.write('\t')
f.write(k)
f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mrna/cox/LUSC/cox_regression.py",
"copies": "1",
"size": "9273",
"license": "mit",
"hash": -9220558667988094000,
"line_mean": 35.652173913,
"line_max": 142,
"alpha_frac": 0.6515690715,
"autogenerated": false,
"ratio": 3.099264705882353,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9133760509241919,
"avg_score": 0.023414653628086635,
"num_lines": 253
} |
## A script for finding every cox coefficient and pvalue for every mRNA in OV Tier 3 data downloaded Feb. 2015
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## Path to the project root: this file lives three directories below it.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
## Read the follow up data
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
f=open(os.path.join(BASE_DIR,'tcga_data','OV','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_ov.txt'))
## Skip the three header rows of the TCGA follow-up file.
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## Hard-coded column positions: 0=patient barcode, 9=vital status,
## 10=days to last contact, 11=days to death -- assumed fixed for this
## file version (v1.0); verify if the file is ever regenerated.
clinical=[['','','']]
for i in data:
    try:
        ## Same patient as the previous row: overwrite with the newer record.
        if clinical[-1][0]==i[0]:
            if i[9]=='Alive':
                clinical[-1]=[i[0],int(i[10]),'Alive']
            elif i[9]=='Dead':
                clinical[-1]=[i[0],int(i[11]),'Dead']
            else:
                pass
        ## New patient: append a fresh record.
        else:
            if i[9]=='Alive':
                clinical.append([i[0],int(i[10]),'Alive'])
            elif i[9]=='Dead':
                clinical.append([i[0],int(i[11]),'Dead'])
            else:
                pass
    ## Rows with a non-numeric day count (e.g. '[Not Available]') are skipped.
    except:
        pass
## Removing the empty value.
clinical=clinical[1:]
## Grade, sex and age information were taken from the "clinical_patient" file. A dictionary was created for grade and sex.
more_clinical={}
## Map the TCGA grade strings to ordinal integers.
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
## Encode sex as 0/1 for the regression.
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','OV','clinical','nationwidechildrens.org_clinical_patient_ov.txt'))
## Skip the three header rows.
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical4=[]
## Hard-coded column positions: 0=barcode, 19=grade, 5=sex, 31=age,
## 15=vital status, 16=days to last contact, 17=days to death -- assumed
## fixed for this file version; verify if the file is ever regenerated.
for i in data:
    try:
        ## Patients missing grade, sex or age raise here and are excluded entirely.
        more_clinical[i[0]]=[grade_dict[i[19]],sex_dict[i[5]],int(i[31])]
        if i[15]=='Alive':
            clinical4.append([i[0],int(i[16]),'Alive'])
        elif i[15]=='Dead':
            clinical4.append([i[0],int(i[17]),'Dead'])
        else:
            pass
    except:
        pass
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## For each patient in clinical4, keep whichever record (patient file vs follow-up
## file) carries the later follow-up time; ties go to the follow-up file.
## PERF: the original rebuilt ``[j[0] for j in clinical]`` up to three times per
## patient (O(n^2)); it is computed once here -- ``clinical`` is not modified below.
new_clinical=[]
clinical_ids=[j[0] for j in clinical]
for i in clinical4:
    if i[0] not in clinical_ids:
        new_clinical.append(i)
    else:
        ## .index returns the first match, same as the original repeated scans.
        match=clinical[clinical_ids.index(i[0])]
        if i[1]<=match[1]:
            new_clinical.append(match)
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
## Track merged IDs in a set that is updated as we append -- equivalent to the
## original behavior of re-scanning new_clinical on every iteration.
merged_ids=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in merged_ids:
        new_clinical.append(i)
        merged_ids.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    ## more_clinical only holds patients with complete grade/sex/age data.
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Map each patient barcode to its primary-tumor mRNA file(s) using the file/sample map.
f=open(os.path.join(BASE_DIR,'tcga_data','OV','FILE_SAMPLE_MAP.txt'))
f.readline()  ## skip the header row
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ##the gene.quantification files were used (hg19-aligned duplicates are excluded)
    if 'gene.quantification' in i[0] and 'hg19' not in i[0]:
        ## Sample-type code is the 4th barcode field minus the trailing vial letter.
        if i[1].split('-')[3][:-1]=='01':
            ## Patient barcode = the first three dash-separated fields
            ## (equivalent to the original zip/join construction, but direct).
            x='-'.join(i[1].split('-')[:3])
            TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## We only care about patients that contained complete clinical information
for i in final_clinical:
    ## ``in`` replaces dict.has_key(), which was removed in Python 3.
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
    else:
        pass
##The OV mrna files were RNAseq, while most other cancers had RNAseqv2 available.
##To ensure consistency between analyses, only genes present in RNAseqv2 were considered.
##This code opens a RNAseqv2 file and grabs the gene ids, in this case a GBM file
## gene_set is a dict used as a set; values are dummy empty strings.
gene_set={}
f=open(os.path.join(BASE_DIR,'tcga_data','GBM','mrna','unc.edu.0cbec58e-f95e-4c60-a85d-210dc56bdf3c.1545137.rsem.genes.normalized_results'))
f.readline()
for i in f:
    ## Row format is 'symbol|gene_id<tab>value'; the id after '|' is the key.
    gene_set[i.split('|')[1].split()[0]]=''
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','OV','mrna',j))
        f.readline()
        ## NOTE(review): the comprehension variable ``i`` shadows (and under
        ## Python 2 leaks over) the outer loop variable; the code still works
        ## because the outer ``for`` rebinds ``i`` each iteration, but renaming
        ## the inner variable would be safer.
        temp.append([[i.split('|')[1].split()[0].split('_')[0],float(i.strip().split()[-1])] for i in f\
                     if len(i.split('|'))==2 and i.split('|')[1].split('_')[0]!="?" and\
                     i.split('|')[1].split('_')[0] in gene_set])
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp)==1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of .1 RPKM and no more than a fourth of the patients containing no expression was chosen
## Note: [[]]*n makes n references to ONE empty list, but the assignment below
## (final_genes[index]=final_genes[index]+[kk]) rebinds rather than mutates,
## so the aliasing is harmless here.
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
    temp=[]
    for j in genes:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    ## Keep the gene only if fewer than a fourth of patients have zero expression
    ## and the median expression exceeds the cutoff.
    if count<len(genes)/4 and median>.1:
        for index, kk in enumerate(temp):
            final_genes[index]=final_genes[index]+[kk]
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
##f=open(os.path.join(BASE_DIR,'cox_regression','OV','final_genes.txt'),'w')
##for i in final_genes:
##    f.write(str(i))
##    f.write('\n')
##f.close()
##Performing Cox regression on all of the genes in final_genes
## Map vital status strings to the event indicator used by Surv().
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
## Fit a Cox model for every gene in final_genes and record the gene
## coefficient and p-value parsed from the printed coxph summary.
for i in range(len(final_genes[0])):
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
        ## Row layout: [time, vital status, grade, sex, age, expression value]
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## Dummy-code the tumor grade (field 2 of each kaplan row) as indicator variables.
    ro.globalenv['grade1']=ro.IntVector([1 if ii[2]==1 else 0 for ii in kaplan])
    ro.globalenv['grade2']=ro.IntVector([1 if ii[2]==2 else 0 for ii in kaplan])
    ro.globalenv['grade3']=ro.IntVector([1 if ii[2]==3 else 0 for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan]) ## set for completeness; not used in the OV model below
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + grade1 + grade2 + grade3 + age)') ## Perform Cox regression
    ## Parse the printed coxph summary for the gene coefficient and pvalue.
    ## BUG FIX: coeff/pvalue are reset every iteration; previously a failed parse
    ## silently reused the values from the previous gene (and raised NameError if
    ## the very first gene failed to parse).
    coeff='NA'
    pvalue='NA'
    for entry in str(res).split('\n'):
        fields=entry.split()
        ## Skip blank/short lines explicitly instead of a bare except.
        if len(fields)>1 and fields[0]=='gene':
            coeff=fields[1]
            pvalue=fields[-1]
            break
    coeffs.append(coeff)
    pvalues.append(pvalue)
## Write the results as a tab-delimited file: gene name, cox coefficient, pvalue.
f=open(os.path.join(BASE_DIR,'cox_regression','OV','coeffs_pvalues.txt'),'w')
for gene_name,coefficient,p in zip(genes,coeffs,pvalues):
    ## One row per gene; all three values are already strings.
    f.write(gene_name+'\t'+coefficient+'\t'+p+'\n')
f.close()
| {
"repo_name": "OmnesRes/pan_cancer",
"path": "paper/cox_regression/OV/cox_regression.py",
"copies": "1",
"size": "9894",
"license": "mit",
"hash": 6896578659823251000,
"line_mean": 31.4393442623,
"line_max": 143,
"alpha_frac": 0.621386699,
"autogenerated": false,
"ratio": 3.098653304102725,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42200400031027246,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every mRNA in OV Tier 3 data downloaded Jan. 5th, 2016
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## Path to the project root: this file lives four directories below it.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## Read the follow up data
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
f=open(os.path.join(BASE_DIR,'tcga_data','OV','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_ov.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two remaining header rows.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical1=[['','','']]
for i in data:
    ## Same patient as the previous row: overwrite with the newer record.
    if clinical1[-1][0]==i[patient_column]:
        ## A numeric death date takes precedence over a last-contact date.
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        ## New patient: rows with no numeric day count in either field are skipped.
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Grade, sex and age information were taken from the "clinical_patient" file. A dictionary was created for grade and sex.
more_clinical={}
## Map the TCGA grade strings to ordinal integers.
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
## Encode sex as 0/1 for the regression.
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','OV','clinical','nationwidechildrens.org_clinical_patient_ov.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two remaining header rows.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical4=[]
for i in data:
    try:
        ## Patients missing grade, sex or age raise here and are excluded entirely.
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        ## A numeric death date takes precedence over a last-contact date.
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    except:
        pass
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## For each patient in clinical4, keep whichever record (patient file vs follow-up
## file) carries the later follow-up time; ties go to the follow-up file.
## PERF: the original rebuilt ``[j[0] for j in clinical]`` up to three times per
## patient (O(n^2)); it is computed once here -- ``clinical`` is not modified below.
new_clinical=[]
clinical_ids=[j[0] for j in clinical]
for i in clinical4:
    if i[0] not in clinical_ids:
        new_clinical.append(i)
    else:
        ## .index returns the first match, same as the original repeated scans.
        match=clinical[clinical_ids.index(i[0])]
        if i[1]<=match[1]:
            new_clinical.append(match)
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
## Track merged IDs in a set that is updated as we append -- equivalent to the
## original behavior of re-scanning new_clinical on every iteration.
merged_ids=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in merged_ids:
        new_clinical.append(i)
        merged_ids.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    ## more_clinical only holds patients with complete grade/sex/age data.
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Map each patient barcode to its primary-tumor mRNA file(s) using the file/sample map.
f=open(os.path.join(BASE_DIR,'tcga_data','OV','FILE_SAMPLE_MAP_mrna.txt'))
f.readline()  ## skip the header row
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        ## Sample-type code is the 4th barcode field minus the trailing vial letter.
        if i[1].split('-')[3][:-1]=='01':
            ## Patient barcode = the first three dash-separated fields
            ## (equivalent to the original zip/join construction, but direct).
            x='-'.join(i[1].split('-')[:3])
            TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## ``in`` replaces dict.has_key(), which was removed in Python 3.
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
    else:
        pass
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','OV','mrna',j))
        f.readline()
        ## Row format is 'symbol|gene_id<tab>value'.
        ## NOTE(review): the comprehension variable ``i`` shadows (and under
        ## Python 2 leaks over) the outer loop variable; the code still works
        ## because the outer ``for`` rebinds ``i`` each iteration, but renaming
        ## the inner variable would be safer.
        temp.append([[i.split('|')[1].split()[0],float(i.strip().split()[-1])] for i in f])
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp)==1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## Note: [[]]*n makes n references to ONE empty list, but the assignment below
## (final_genes[index]=final_genes[index]+[kk]) rebinds rather than mutates,
## so the aliasing is harmless here.
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
    temp=[]
    for j in genes:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    ## Keep the gene only if fewer than a fourth of patients have zero expression
    ## and the median expression exceeds the cutoff.
    if count<len(genes)/4.0 and median>1:
        for index, kk in enumerate(temp):
            final_genes[index]=final_genes[index]+[kk]
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mrna','cox','OV','final_genes.txt'),'w')
for i in final_genes:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the genes in final_genes
## Map vital status strings to the event indicator used by Surv().
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
## Fit a Cox model for every gene in final_genes and record the gene
## coefficient and p-value parsed from the printed coxph summary.
for i in range(len(final_genes[0])):
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
        ## Row layout: [time, vital status, grade, sex, age, expression value]
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## Dummy-code the tumor grade (field 2 of each kaplan row) as indicator variables.
    ro.globalenv['grade1']=ro.IntVector([1 if ii[2]==1 else 0 for ii in kaplan])
    ro.globalenv['grade2']=ro.IntVector([1 if ii[2]==2 else 0 for ii in kaplan])
    ro.globalenv['grade3']=ro.IntVector([1 if ii[2]==3 else 0 for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + grade1 + grade2 + grade3 + age)') ## Perform Cox regression
    ## Parse the printed coxph summary for the gene coefficient and pvalue.
    ## BUG FIX: coeff/pvalue are reset every iteration; previously a failed parse
    ## silently reused the values from the previous gene (and raised NameError if
    ## the very first gene failed to parse).
    coeff='NA'
    pvalue='NA'
    for entry in str(res).split('\n'):
        fields=entry.split()
        ## Skip blank/short lines explicitly instead of a bare except.
        if len(fields)>1 and fields[0]=='gene':
            coeff=fields[1]
            pvalue=fields[-1]
            break
    coeffs.append(coeff)
    pvalues.append(pvalue)
## Write the results as a tab-delimited file: gene name, cox coefficient, pvalue.
f=open(os.path.join(BASE_DIR,'mrna','cox','OV','coeffs_pvalues.txt'),'w')
for gene_name,coefficient,p in zip(genes,coeffs,pvalues):
    ## One row per gene; all three values are already strings.
    f.write(gene_name+'\t'+coefficient+'\t'+p+'\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mrna/cox/OV/cox_regression.py",
"copies": "1",
"size": "9944",
"license": "mit",
"hash": 7430374563656607000,
"line_mean": 32.7084745763,
"line_max": 142,
"alpha_frac": 0.6411906677,
"autogenerated": false,
"ratio": 3.109443402126329,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4250634069826329,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every mRNA in PAAD Tier 3 data downloaded Jan. 5th, 2016
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## Path to the project root: this file lives four directories below it.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','PAAD','clinical','nationwidechildrens.org_clinical_follow_up_v4.4_paad.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two remaining header rows.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    ## Same patient as the previous row: overwrite with the newer record.
    if clinical1[-1][0]==i[patient_column]:
        ## NOTE(review): the last-contact date is checked BEFORE the death date
        ## here, so a row carrying both dates is recorded as 'Alive'; sibling
        ## cohort scripts in this repo check the death date first -- confirm
        ## this ordering is intended.
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        ## New patient: rows with no numeric day count in either field are skipped.
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
##Note: three tier and four tier systems are mixed.
more_clinical={}
## Map the TCGA grade strings to ordinal integers (PAAD includes G4).
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
grade_dict['G4']=4
## Encode sex as 0/1 for the regression.
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','PAAD','clinical','nationwidechildrens.org_clinical_patient_paad.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two remaining header rows.
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    try:
        ## Patients missing grade, sex or age raise here and are excluded entirely.
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        ## NOTE(review): last-contact is checked before the death date; a row
        ## with both dates is recorded as 'Alive' -- confirm intended.
        if re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
    except:
        pass
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## For each patient in clinical4, keep whichever record (patient file vs follow-up
## file) carries the later follow-up time; ties go to the follow-up file.
## PERF: the original rebuilt ``[j[0] for j in clinical]`` up to three times per
## patient (O(n^2)); it is computed once here -- ``clinical`` is not modified below.
new_clinical=[]
clinical_ids=[j[0] for j in clinical]
for i in clinical4:
    if i[0] not in clinical_ids:
        new_clinical.append(i)
    else:
        ## .index returns the first match, same as the original repeated scans.
        match=clinical[clinical_ids.index(i[0])]
        if i[1]<=match[1]:
            new_clinical.append(match)
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
## Track merged IDs in a set that is updated as we append -- equivalent to the
## original behavior of re-scanning new_clinical on every iteration.
merged_ids=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in merged_ids:
        new_clinical.append(i)
        merged_ids.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    ## more_clinical only holds patients with complete grade/sex/age data.
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP_mrna.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','PAAD','FILE_SAMPLE_MAP_mrna.txt'))
f.readline()  ## skip the header row
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        ## Sample-type code is the 4th barcode field minus the trailing vial letter.
        if i[1].split('-')[3][:-1]=='01':
            ## Patient barcode = the first three dash-separated fields
            ## (equivalent to the original zip/join construction, but direct).
            x='-'.join(i[1].split('-')[:3])
            TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## ``in`` replaces dict.has_key(), which was removed in Python 3.
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
    else:
        pass
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','PAAD','mrna',j))
        f.readline()
        ## Row format is 'symbol|gene_id<tab>value'.
        ## NOTE(review): the comprehension variable ``i`` shadows (and under
        ## Python 2 leaks over) the outer loop variable; the code still works
        ## because the outer ``for`` rebinds ``i`` each iteration, but renaming
        ## the inner variable would be safer.
        temp.append([[i.split('|')[1].split()[0],float(i.strip().split()[-1])] for i in f])
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp)==1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## Note: [[]]*n makes n references to ONE empty list, but the assignment below
## (final_genes[index]=final_genes[index]+[kk]) rebinds rather than mutates,
## so the aliasing is harmless here.
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
    temp=[]
    for j in genes:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    ## Keep the gene only if fewer than a fourth of patients have zero expression
    ## and the median expression exceeds the cutoff.
    if count<len(genes)/4.0 and median>1:
        for index, kk in enumerate(temp):
            final_genes[index]=final_genes[index]+[kk]
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mrna','cox','PAAD','final_genes.txt'),'w')
for i in final_genes:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the genes in final_genes
## Map vital status strings to the event indicator used by Surv().
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
## Fit a Cox model for every gene in final_genes and record the gene
## coefficient and p-value parsed from the printed coxph summary.
for i in range(len(final_genes[0])):
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
        ## Row layout: [time, vital status, grade, sex, age, expression value]
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## Dummy-code the tumor grade (field 2 of each kaplan row) as four indicator variables.
    ro.globalenv['grade1']=ro.IntVector([1 if ii[2]==1 else 0 for ii in kaplan])
    ro.globalenv['grade2']=ro.IntVector([1 if ii[2]==2 else 0 for ii in kaplan])
    ro.globalenv['grade3']=ro.IntVector([1 if ii[2]==3 else 0 for ii in kaplan])
    ro.globalenv['grade4']=ro.IntVector([1 if ii[2]==4 else 0 for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + grade1 + grade2 + grade3 + grade4 + sex + age)') ## Perform Cox regression
    ## Parse the printed coxph summary for the gene coefficient and pvalue.
    ## BUG FIX: coeff/pvalue are reset every iteration; previously a failed parse
    ## silently reused the values from the previous gene (and raised NameError if
    ## the very first gene failed to parse).
    coeff='NA'
    pvalue='NA'
    for entry in str(res).split('\n'):
        fields=entry.split()
        ## Skip blank/short lines explicitly instead of a bare except.
        if len(fields)>1 and fields[0]=='gene':
            coeff=fields[1]
            pvalue=fields[-1]
            break
    coeffs.append(coeff)
    pvalues.append(pvalue)
## Write the results as a tab-delimited file: gene name, cox coefficient, pvalue.
f=open(os.path.join(BASE_DIR,'mrna','cox','PAAD','coeffs_pvalues.txt'),'w')
for gene_name,coefficient,p in zip(genes,coeffs,pvalues):
    ## One row per gene; all three values are already strings.
    f.write(gene_name+'\t'+coefficient+'\t'+p+'\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mrna/cox/PAAD/cox_regression.py",
"copies": "1",
"size": "10308",
"license": "mit",
"hash": -3533043723049315000,
"line_mean": 33.9423728814,
"line_max": 142,
"alpha_frac": 0.6403764067,
"autogenerated": false,
"ratio": 3.1038843721770553,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9113637202823903,
"avg_score": 0.026124715210630343,
"num_lines": 295
} |
## A script for finding every cox coefficient and pvalue for every mRNA in READ Tier 3 data downloaded Jan. 5th, 2016
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## Parse the READ follow-up file (tab-delimited; one header row plus two
## schema rows that are skipped below).
f=open(os.path.join(BASE_DIR,'tcga_data','READ','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_read.txt'))
##get the column indexes needed
columns=f.readline().strip().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two CDE/description rows that follow the header.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    ## Only the most recently appended entry is compared, so rows for the
    ## same patient are assumed to be adjacent -- TODO confirm the file is
    ## grouped by patient barcode.
    if clinical1[-1][0]==i[patient_column]:
        ## A numeric death_days_to takes priority over last_contact_days_to.
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            ## Neither field is numeric (e.g. '[Not Available]'); keep the
            ## previously stored entry for this patient.
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','READ','clinical','nationwidechildrens.org_clinical_patient_read.txt'))
##get the column indexes needed
## strip() matches the follow-up parsing above; without it the last header
## field keeps its trailing newline and columns.index() can fail.
columns=f.readline().strip().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    ## Rows missing sex or a numeric age are skipped; only the exceptions
    ## those lookups can raise are caught so real bugs still surface.
    try:
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
    except (KeyError,ValueError,IndexError):
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## Build the barcode list once; `clinical` is not modified in this loop, so
## rebuilding the comprehension on every membership test was O(n^2).
clinical_ids=[j[0] for j in clinical]
for i in clinical4:
    if i[0] not in clinical_ids:
        new_clinical.append(i)
    else:
        ## Keep whichever record has the longer follow-up time.
        existing=clinical[clinical_ids.index(i[0])]
        if i[1]<=existing[1]:
            new_clinical.append(existing)
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
## new_clinical grows inside this loop, so track seen barcodes incrementally.
seen=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in seen:
        new_clinical.append(i)
        seen.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','READ','FILE_SAMPLE_MAP_mrna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## Patient barcode is the first three dash-separated fields.
            x='-'.join(i[1].split('-')[:3])
            TCGA_to_mrna.setdefault(x,[]).append(i[0])
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## dict.has_key() is Python 2 only; `in` is equivalent and portable.
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        ## Context manager closes each expression file promptly instead of
        ## relying on the garbage collector; `line` avoids shadowing the
        ## outer loop variable `i` (Python 2 list comprehensions leak
        ## their loop variable into the enclosing scope).
        with open(os.path.join(BASE_DIR,'tcga_data','READ','mrna',j)) as mrna_file:
            mrna_file.readline()
            temp.append([[line.split('|')[1].split()[0],float(line.strip().split()[-1])] for line in mrna_file])
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp)==1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## [[]]*n would alias one shared list into every slot; build distinct lists.
final_genes=[[] for _ in genes]
for i in range(len(genes[0])):
    temp=[j[i] for j in genes]
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(genes)/4.0 and median>1:
        for index, kk in enumerate(temp):
            ## append mutates in place: O(1) instead of rebuilding the list.
            final_genes[index].append(kk)
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
with open(os.path.join(BASE_DIR,'mrna','cox','READ','final_genes.txt'),'w') as out_file:
    for i in final_genes:
        out_file.write(str(i))
        out_file.write('\n')
##Performing Cox regression on all of the genes in final_genes
## Map vital-status strings to the event indicator expected by Surv().
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])):
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the gene coefficient and pvalue
    ## NOTE(review): if the 'gene' row were ever absent from the printed
    ## model, coeff/pvalue would keep the previous gene's values (or be
    ## undefined on the first iteration) -- verify coxph output always has it.
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            ## Blank lines make entry.split()[0] raise IndexError; skipped.
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## Emit one tab-delimited row per gene: name, Cox coefficient, p-value.
out_path=os.path.join(BASE_DIR,'mrna','cox','READ','coeffs_pvalues.txt')
out_file=open(out_path,'w')
for gene_name,cox_coeff,cox_pvalue in zip(genes,coeffs,pvalues):
    out_file.write('%s\t%s\t%s\n'%(gene_name,cox_coeff,cox_pvalue))
out_file.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mrna/cox/READ/cox_regression.py",
"copies": "1",
"size": "9273",
"license": "mit",
"hash": -8494495816482631000,
"line_mean": 35.652173913,
"line_max": 142,
"alpha_frac": 0.6515690715,
"autogenerated": false,
"ratio": 3.1159274193548385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42674964908548385,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every mRNA in SARC Tier 3 data downloaded Jan. 5th, 2016
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## Parse the SARC follow-up file (tab-delimited; one header row plus two
## schema rows that are skipped below).
f=open(os.path.join(BASE_DIR,'tcga_data','SARC','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_sarc.txt'))
##get the column indexes needed
columns=f.readline().strip().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two CDE/description rows that follow the header.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        ## NOTE(review): last_contact_days_to is tested BEFORE death_days_to
        ## here -- the opposite order from the other cohort scripts -- so a
        ## row with both populated is recorded as Alive. Confirm intended.
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            ## Neither field is numeric; keep the previously stored entry.
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','SARC','clinical','nationwidechildrens.org_clinical_patient_sarc.txt'))
##get the column indexes needed
## strip() matches the follow-up parsing above; without it the last header
## field keeps its trailing newline and columns.index() can fail.
columns=f.readline().strip().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    ## Rows missing sex or a numeric age are skipped; only the exceptions
    ## those lookups can raise are caught so real bugs still surface.
    ## Alive is checked first to match the follow-up parsing above.
    try:
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
    except (KeyError,ValueError,IndexError):
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## Build the barcode list once; `clinical` is not modified in this loop, so
## rebuilding the comprehension on every membership test was O(n^2).
clinical_ids=[j[0] for j in clinical]
for i in clinical4:
    if i[0] not in clinical_ids:
        new_clinical.append(i)
    else:
        ## Keep whichever record has the longer follow-up time.
        existing=clinical[clinical_ids.index(i[0])]
        if i[1]<=existing[1]:
            new_clinical.append(existing)
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
## new_clinical grows inside this loop, so track seen barcodes incrementally.
seen=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in seen:
        new_clinical.append(i)
        seen.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','SARC','FILE_SAMPLE_MAP_mrna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        if i[1].split('-')[3][:-1]=='01':
            ## Patient barcode is the first three dash-separated fields.
            x='-'.join(i[1].split('-')[:3])
            TCGA_to_mrna.setdefault(x,[]).append(i[0])
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## dict.has_key() is Python 2 only; `in` is equivalent and portable.
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        ## Context manager closes each expression file promptly instead of
        ## relying on the garbage collector; `line` avoids shadowing the
        ## outer loop variable `i` (Python 2 list comprehensions leak
        ## their loop variable into the enclosing scope).
        with open(os.path.join(BASE_DIR,'tcga_data','SARC','mrna',j)) as mrna_file:
            mrna_file.readline()
            temp.append([[line.split('|')[1].split()[0],float(line.strip().split()[-1])] for line in mrna_file])
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp)==1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## [[]]*n would alias one shared list into every slot; build distinct lists.
final_genes=[[] for _ in genes]
for i in range(len(genes[0])):
    temp=[j[i] for j in genes]
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(genes)/4.0 and median>1:
        for index, kk in enumerate(temp):
            ## append mutates in place: O(1) instead of rebuilding the list.
            final_genes[index].append(kk)
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
with open(os.path.join(BASE_DIR,'mrna','cox','SARC','final_genes.txt'),'w') as out_file:
    for i in final_genes:
        out_file.write(str(i))
        out_file.write('\n')
##Performing Cox regression on all of the genes in final_genes
## Map vital-status strings to the event indicator expected by Surv().
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])):
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the gene coefficient and pvalue
    ## NOTE(review): if the 'gene' row were ever absent from the printed
    ## model, coeff/pvalue would keep the previous gene's values (or be
    ## undefined on the first iteration) -- verify coxph output always has it.
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            ## Blank lines make entry.split()[0] raise IndexError; skipped.
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## Emit one tab-delimited row per gene: name, Cox coefficient, p-value.
out_path=os.path.join(BASE_DIR,'mrna','cox','SARC','coeffs_pvalues.txt')
out_file=open(out_path,'w')
for gene_name,cox_coeff,cox_pvalue in zip(genes,coeffs,pvalues):
    out_file.write('%s\t%s\t%s\n'%(gene_name,cox_coeff,cox_pvalue))
out_file.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mrna/cox/SARC/cox_regression.py",
"copies": "1",
"size": "9253",
"license": "mit",
"hash": -2974556469999755300,
"line_mean": 35.7182539683,
"line_max": 142,
"alpha_frac": 0.6511401708,
"autogenerated": false,
"ratio": 3.1060758643840214,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42572160351840216,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every mRNA in SKCM Tier 3 data downloaded Feb. 2015
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
## Parse the SKCM follow-up file. Columns are accessed by fixed position:
## i[0] is the patient barcode; i[8] the vital status; i[9] and i[10] are
## presumably last-contact and death days -- TODO confirm against the header.
f=open(os.path.join(BASE_DIR,'tcga_data','SKCM','clinical','nationwidechildrens.org_clinical_follow_up_v2.0_skcm.txt'))
## Skip the header row and the two schema rows.
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical=[['','','']]
for i in data:
    if clinical[-1][0]==i[0]:
        if i[8]=='Alive':
            ## '[Completed]' is a non-numeric placeholder in this field.
            ## NOTE(review): other placeholders such as '[Not Available]'
            ## would make int(i[9]) raise ValueError -- confirm file content.
            if i[9]!='[Completed]':
                clinical[-1]=[i[0],int(i[9]),'Alive']
        elif i[8]=='Dead':
            ## NOTE(review): int(i[10]) is unguarded here, unlike the Alive
            ## branch -- a non-numeric death time would crash the script.
            clinical[-1]=[i[0],int(i[10]),'Dead']
        else:
            pass
    else:
        if i[8]=='Alive':
            if i[9]!='[Completed]':
                clinical.append([i[0],int(i[9]),'Alive'])
        elif i[8]=='Dead':
            clinical.append([i[0],int(i[10]),'Dead'])
        else:
            pass
## Removing the empty value.
clinical=clinical[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','SKCM','clinical','nationwidechildrens.org_clinical_patient_skcm.txt'))
## Skip the header row and the two schema rows; columns are accessed by
## fixed position below (i[6] sex, i[25] age, i[14] vital status,
## i[15]/i[16] presumably last-contact/death days -- TODO confirm).
f.readline()
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    ## NOTE(review): unlike the newer cohort scripts there is no try/except
    ## here -- a row with an unexpected sex value or non-numeric age would
    ## raise and stop the script. Confirm every row is well formed.
    more_clinical[i[0]]=[0,sex_dict[i[6]],int(i[25])]
    if i[14]=='Alive':
        if i[15]!='[Completed]':
            clinical4.append([i[0],int(i[15]),'Alive'])
    elif i[14]=='Dead':
        ## Guard against the two placeholder strings seen in this field.
        if i[16]!='[Not Available]' and i[16]!='[Completed]':
            clinical4.append([i[0],int(i[16]),'Dead'])
    else:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## Build the barcode list once; `clinical` is not modified in this loop, so
## rebuilding the comprehension on every membership test was O(n^2).
clinical_ids=[j[0] for j in clinical]
for i in clinical4:
    if i[0] not in clinical_ids:
        new_clinical.append(i)
    else:
        ## Keep whichever record has the longer follow-up time.
        existing=clinical[clinical_ids.index(i[0])]
        if i[1]<=existing[1]:
            new_clinical.append(existing)
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
## new_clinical grows inside this loop, so track seen barcodes incrementally.
seen=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in seen:
        new_clinical.append(i)
        seen.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','SKCM','FILE_SAMPLE_MAP.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor,06 a metastatic, both were allowed for SKCM
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        ## Sample-type code is the 4th barcode field minus its vial letter.
        sample_type=i[1].split('-')[3][:-1]
        if sample_type=='01' or sample_type=='06':
            ## Patient barcode is the first three dash-separated fields.
            x='-'.join(i[1].split('-')[:3])
            TCGA_to_mrna.setdefault(x,[]).append(i[0])
clinical_and_files=[]
## We only care about patients that contained complete clinical information
for i in final_clinical:
    ## dict.has_key() is Python 2 only; `in` is equivalent and portable.
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        ## Context manager closes each expression file promptly instead of
        ## relying on the garbage collector; `line` avoids shadowing the
        ## outer loop variable `i` (Python 2 list comprehensions leak
        ## their loop variable into the enclosing scope).
        with open(os.path.join(BASE_DIR,'tcga_data','SKCM','mrna',j)) as mrna_file:
            mrna_file.readline()
            temp.append([[line.split('|')[1].split()[0],float(line.strip().split()[-1])] for line in mrna_file])
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp)==1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file, or metastatic, or both,
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## [[]]*n would alias one shared list into every slot; build distinct lists.
final_genes=[[] for _ in genes]
for i in range(len(genes[0])):
    temp=[j[i] for j in genes]
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    ## NOTE(review): integer division (len(genes)/4) differs from the /4.0
    ## used in the newer cohort scripts; kept as-is to preserve the cutoff.
    if count<len(genes)/4 and median>1:
        for index, kk in enumerate(temp):
            ## append mutates in place: O(1) instead of rebuilding the list.
            final_genes[index].append(kk)
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
##f=open(os.path.join(BASE_DIR,'cox_regression','SKCM','final_genes.txt'),'w')
##for i in final_genes:
## f.write(str(i))
## f.write('\n')
##f.close()
##Performing Cox regression on all of the genes in final_genes
## Map vital-status strings to the event indicator expected by Surv().
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])):
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the gene coefficient and pvalue
    ## NOTE(review): if the 'gene' row were ever absent from the printed
    ## model, coeff/pvalue would keep the previous gene's values (or be
    ## undefined on the first iteration) -- verify coxph output always has it.
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            ## Blank lines make entry.split()[0] raise IndexError; skipped.
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## Emit one tab-delimited row per gene: name, Cox coefficient, p-value.
out_path=os.path.join(BASE_DIR,'cox_regression','SKCM','coeffs_pvalues.txt')
out_file=open(out_path,'w')
for gene_name,cox_coeff,cox_pvalue in zip(genes,coeffs,pvalues):
    out_file.write('%s\t%s\t%s\n'%(gene_name,cox_coeff,cox_pvalue))
out_file.close()
| {
"repo_name": "OmnesRes/pan_cancer",
"path": "paper/cox_regression/SKCM/cox_regression.py",
"copies": "1",
"size": "8591",
"license": "mit",
"hash": -4463594101630349000,
"line_mean": 34.6473029046,
"line_max": 143,
"alpha_frac": 0.6342684204,
"autogenerated": false,
"ratio": 3.0825260136347326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9090998071318883,
"avg_score": 0.025159272543169908,
"num_lines": 241
} |
## A script for finding every cox coefficient and pvalue for every mRNA in SKCM Tier 3 data downloaded Jan. 5th, 2016
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## Parse the SKCM follow-up file (tab-delimited; one header row plus two
## schema rows that are skipped below).
f=open(os.path.join(BASE_DIR,'tcga_data','SKCM','clinical','nationwidechildrens.org_clinical_follow_up_v2.0_skcm.txt'))
##get the column indexes needed
columns=f.readline().strip().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two CDE/description rows that follow the header.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    ## Only the most recently appended entry is compared, so rows for the
    ## same patient are assumed to be adjacent -- TODO confirm the file is
    ## grouped by patient barcode.
    if clinical1[-1][0]==i[patient_column]:
        ## A numeric death_days_to takes priority over last_contact_days_to.
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            ## Neither field is numeric; keep the previously stored entry.
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','SKCM','clinical','nationwidechildrens.org_clinical_patient_skcm.txt'))
##get the column indexes needed
## strip() matches the follow-up parsing above; without it the last header
## field keeps its trailing newline and columns.index() can fail.
columns=f.readline().strip().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    ## Rows missing sex or a numeric age are skipped; only the exceptions
    ## those lookups can raise are caught so real bugs still surface.
    try:
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
    except (KeyError,ValueError,IndexError):
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## Build the barcode list once; `clinical` is not modified in this loop, so
## rebuilding the comprehension on every membership test was O(n^2).
clinical_ids=[j[0] for j in clinical]
for i in clinical4:
    if i[0] not in clinical_ids:
        new_clinical.append(i)
    else:
        ## Keep whichever record has the longer follow-up time.
        existing=clinical[clinical_ids.index(i[0])]
        if i[1]<=existing[1]:
            new_clinical.append(existing)
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
## new_clinical grows inside this loop, so track seen barcodes incrementally.
seen=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in seen:
        new_clinical.append(i)
        seen.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','SKCM','FILE_SAMPLE_MAP_mrna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor,06 a metastatic, both were allowed for SKCM
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        ## Sample-type code is the 4th barcode field minus its vial letter.
        sample_type=i[1].split('-')[3][:-1]
        if sample_type=='01' or sample_type=='06':
            ## Patient barcode is the first three dash-separated fields.
            x='-'.join(i[1].split('-')[:3])
            TCGA_to_mrna.setdefault(x,[]).append(i[0])
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## dict.has_key() is Python 2 only; `in` is equivalent and portable.
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, 0, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        ## Context manager closes each expression file promptly instead of
        ## relying on the garbage collector; `line` avoids shadowing the
        ## outer loop variable `i` (Python 2 list comprehensions leak
        ## their loop variable into the enclosing scope).
        with open(os.path.join(BASE_DIR,'tcga_data','SKCM','mrna',j)) as mrna_file:
            mrna_file.readline()
            temp.append([[line.split('|')[1].split()[0],float(line.strip().split()[-1])] for line in mrna_file])
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp)==1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file, or metastatic, or both,
    ## this list comprehension will average the files for any number of files.
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## [[]]*n would alias one shared list into every slot; build distinct lists.
final_genes=[[] for _ in genes]
for i in range(len(genes[0])):
    temp=[j[i] for j in genes]
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(genes)/4.0 and median>1:
        for index, kk in enumerate(temp):
            ## append mutates in place: O(1) instead of rebuilding the list.
            final_genes[index].append(kk)
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
with open(os.path.join(BASE_DIR,'mrna','cox','SKCM','final_genes.txt'),'w') as out_file:
    for i in final_genes:
        out_file.write(str(i))
        out_file.write('\n')
##Performing Cox regression on all of the genes in final_genes
## Map vital-status strings to the event indicator expected by Surv().
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])):
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + sex + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the gene coefficient and pvalue
    ## NOTE(review): if the 'gene' row were ever absent from the printed
    ## model, coeff/pvalue would keep the previous gene's values (or be
    ## undefined on the first iteration) -- verify coxph output always has it.
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            ## Blank lines make entry.split()[0] raise IndexError; skipped.
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## Emit one tab-delimited row per gene: name, Cox coefficient, p-value.
out_path=os.path.join(BASE_DIR,'mrna','cox','SKCM','coeffs_pvalues.txt')
out_file=open(out_path,'w')
for gene_name,cox_coeff,cox_pvalue in zip(genes,coeffs,pvalues):
    out_file.write('%s\t%s\t%s\n'%(gene_name,cox_coeff,cox_pvalue))
out_file.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mrna/cox/SKCM/cox_regression.py",
"copies": "1",
"size": "9310",
"license": "mit",
"hash": 5081393592398883000,
"line_mean": 36.24,
"line_max": 143,
"alpha_frac": 0.6494092374,
"autogenerated": false,
"ratio": 3.0992010652463384,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42486103026463384,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every mRNA in STAD Tier 3 data downloaded Feb. 2015
## NOTE(review): this script targets Python 2 (dict.has_key and list-returning zip are used later in the file).
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
## NOTE(review): assumes the script sits three directories below the project root -- verify if relocated.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
## Read the follow up data
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
f=open(os.path.join(BASE_DIR,'tcga_data','STAD','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_stad.txt'))
## Skip the three header lines (column names plus two CDE rows).
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical=[['','','']]
## Column positions are hard-coded for this file version:
## i[0]=patient barcode, i[8]=vital status, i[9]=days to last contact, i[10]=days to death.
for i in data:
    try:
        if clinical[-1][0]==i[0]:
            if i[8]=='Alive':
                clinical[-1]=[i[0],int(i[9]),'Alive']
            elif i[8]=='Dead':
                clinical[-1]=[i[0],int(i[10]),'Dead']
            else:
                pass
        else:
            if i[8]=='Alive':
                clinical.append([i[0],int(i[9]),'Alive'])
            elif i[8]=='Dead':
                clinical.append([i[0],int(i[10]),'Dead'])
            else:
                pass
    ## Bare except is deliberate best-effort parsing: rows with missing or
    ## non-numeric day counts are silently skipped.
    except:
        pass
## Removing the empty value.
clinical=clinical[1:]
## Grade, sex and age information were taken from the "clinical_patient" file. A dictionary was created for grade and sex.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','STAD','clinical','nationwidechildrens.org_clinical_patient_stad.txt'))
f.readline()
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
## Hard-coded columns for this file version: i[4]=grade, i[7]=sex, i[41]=age,
## i[26]=vital status, i[27]=days to last contact, i[28]=days to death.
for i in data:
    try:
        ## Patients missing grade, sex, or age raise here and are excluded from more_clinical.
        more_clinical[i[0]]=[grade_dict[i[4]],sex_dict[i[7]],int(i[41])]
        if i[26]=='Alive':
            clinical4.append([i[0],int(i[27]),'Alive'])
        elif i[26]=='Dead':
            clinical4.append([i[0],int(i[28]),'Dead'])
        else:
            pass
    except:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## For patients present in both lists, the record with the LONGER follow-up time wins.
## NOTE(review): the repeated [j[0] for j in ...] scans make this O(n^2); fine for cohort-sized lists.
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Map each patient barcode to its mRNA quantification file(s) using FILE_SAMPLE_MAP.txt.
f=open(os.path.join(BASE_DIR,'tcga_data','STAD','FILE_SAMPLE_MAP.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ##the v2.gene.quantification files were used
    if 'v2.gene.quantification' in i[0]:
        ## Sample-type code is the 4th barcode field; the trailing vial letter is dropped before comparing.
        if i[1].split('-')[3][:-1]=='01':
            ## Truncate the full aliquot barcode to the 12-character patient barcode (first three fields).
            ## (Equivalent to the original zip/join construction, just written directly.)
            x='-'.join(i[1].split('-')[:3])
            TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## We only care about patients that contained complete clinical information
for i in final_clinical:
    ## PORTABILITY FIX: dict.has_key() is Python 2 only (removed in Python 3);
    ## the 'in' operator is semantically identical and works on both.
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
##The STAD mrna files were RNAseq, while most other cancers had RNAseqv2 available.
##To ensure consistency between analyses, only genes present in RNAseqv2 were considered.
##This code opens a RNAseqv2 file and grabs the gene ids, in this case a GBM file
gene_set={}
f=open(os.path.join(BASE_DIR,'tcga_data','GBM','mrna','unc.edu.0cbec58e-f95e-4c60-a85d-210dc56bdf3c.1545137.rsem.genes.normalized_results'))
f.readline()
for i in f:
    ## Used as a membership set; values are irrelevant placeholders.
    gene_set[i.split('|')[1].split()[0]]=''
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','STAD','mrna',j))
        f.readline()
        ## NOTE(review): the comprehension reuses the name `i` (the outer loop variable).
        ## Under Python 2 scoping the leak is harmless here because `i` is not read again
        ## before the outer loop rebinds it, but do not reorder statements casually.
        temp.append([[i.split('|')[1].split()[0].split('_')[0],float(i.strip().split()[-1])] for i in f\
                     if len(i.split('|'))==2 and i.split('|')[1].split('_')[0]!="?" and\
                     i.split('|')[1].split()[0].split('_')[0] in gene_set])
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp)==1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## this list comprehension will average the files for any number of files.
    ## (Requires Python 2: zip() must return a list because the result is indexed later.)
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of .1 RPKM and no more than a fourth of the patients containing no expression was chosen
## [[]]*n is safe here because entries are only rebound with `+` below, never mutated in place.
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
    temp=[]
    for j in genes:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    ## NOTE(review): len(genes)/4 is integer division under Python 2; sibling scripts use /4.0.
    ## Confirm whether the truncation here is intentional.
    if count<len(genes)/4 and median>.1:
        for index, kk in enumerate(temp):
            final_genes[index]=final_genes[index]+[kk]
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
##f=open(os.path.join(BASE_DIR,'cox_regression','STAD','final_genes.txt'),'w')
##for i in final_genes:
##    f.write(str(i))
##    f.write('\n')
##f.close()
##Performing Cox regression on all of the genes in final_genes
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])):
    ## kaplan rows: [time, vital status, grade, sex, age, expression] for one gene across all patients.
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    ## 'digit=' relies on R's partial argument matching of 'digits='.
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## Grade is expanded into one 0/1 indicator per level for the model.
    ##grade1
    grade1=[]
    for ii in kaplan:
        if ii[2]==1:
            grade1.append(1)
        else:
            grade1.append(0)
    ##grade2
    grade2=[]
    for ii in kaplan:
        if ii[2]==2:
            grade2.append(1)
        else:
            grade2.append(0)
    ##grade3
    grade3=[]
    for ii in kaplan:
        if ii[2]==3:
            grade3.append(1)
        else:
            grade3.append(0)
    ro.globalenv['grade1']=ro.IntVector(grade1)
    ro.globalenv['grade2']=ro.IntVector(grade2)
    ro.globalenv['grade3']=ro.IntVector(grade3)
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ gene + sex + grade1 + grade2 + grade3 + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the gene coefficient and pvalue
    ## NOTE(review): if 'gene' is never found in the printed model (e.g. coxph failed),
    ## coeff/pvalue silently carry over from the previous iteration -- TODO confirm acceptable.
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        ## Blank lines make entry.split()[0] raise IndexError; skipping them is intended.
        except:
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'cox_regression','STAD','coeffs_pvalues.txt'),'w')
for i,j,k in zip(genes,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/pan_cancer",
"path": "paper/cox_regression/STAD/cox_regression.py",
"copies": "1",
"size": "9886",
"license": "mit",
"hash": -4445600273606843000,
"line_mean": 32.8561643836,
"line_max": 143,
"alpha_frac": 0.6235079911,
"autogenerated": false,
"ratio": 3.0874453466583387,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9084844573801103,
"avg_score": 0.025221752791447005,
"num_lines": 292
} |
## A script for finding every cox coefficient and pvalue for every mRNA in UCEC Tier 3 data downloaded Jan. 5th 2016
## Load necessary modules
## NOTE(review): this script targets Python 2 (dict.has_key is used later in the file).
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## There were three clinical files with nonredundant data. V4.0 is in general the most uptodate, but it is possible
## for data in the other files to be more uptodate. As a result, clinical data will be merged.
f=open(os.path.join(BASE_DIR,'tcga_data','UCEC','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_ucec.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two CDE header rows that follow the column names.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
## The regex accepts only whole non-negative integers, so '[Not Available]' etc. fall through.
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
## Removing the empty value.
clinical1=clinical1[1:]
## Same parse for the v2.0 follow-up file into clinical2.
f=open(os.path.join(BASE_DIR,'tcga_data','UCEC','clinical','nationwidechildrens.org_clinical_follow_up_v2.0_ucec.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical2=[['','','']]
for i in data:
    if clinical2[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical2[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical2[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical2.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical2.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
##removing the empty value
clinical2=clinical2[1:]
##merging the data
## For patients present in both versions, keep the record with the LONGER follow-up time.
new_clinical=[]
for i in clinical2:
    if i[0] not in [j[0] for j in clinical1]:
        new_clinical.append(i)
    else:
        if i[1]<=clinical1[[j[0] for j in clinical1].index(i[0])][1]:
            new_clinical.append(clinical1[[j[0] for j in clinical1].index(i[0])])
        else:
            new_clinical.append(i)
## Patients only present in the v4.0 file are carried over unchanged.
for i in clinical1:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)
## Same parse for the v1.7 follow-up file into clinical3.
f=open(os.path.join(BASE_DIR,'tcga_data','UCEC','clinical','nationwidechildrens.org_clinical_follow_up_v1.7_ucec.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical3=[['','','']]
for i in data:
    if clinical3[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical3[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical3[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical3.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical3.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
##removing the empty value
clinical3=clinical3[1:]
##merging the data
## Second merge pass: fold clinical3 into the v4.0/v2.0 merge, longest follow-up wins again.
newer_clinical=[]
for i in clinical3:
    if i[0] not in [j[0] for j in new_clinical]:
        newer_clinical.append(i)
    else:
        if i[1]<=new_clinical[[j[0] for j in new_clinical].index(i[0])][1]:
            newer_clinical.append(new_clinical[[j[0] for j in new_clinical].index(i[0])])
        else:
            newer_clinical.append(i)
for i in new_clinical:
    if i[0] not in [j[0] for j in newer_clinical]:
        newer_clinical.append(i)
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
## 'High Grade' is folded into the numeric top grade.
grade_dict['High Grade']=3
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','UCEC','clinical','nationwidechildrens.org_clinical_patient_ucec.txt'))
columns=f.readline().split('\t')
grade_column=columns.index('neoplasm_histologic_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical4=[]
for i in data:
    try:
        ## Patients missing grade, sex, or age raise (KeyError/ValueError) and are excluded.
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
    ## Bare except is deliberate best-effort parsing of incomplete rows.
    except:
        pass
newest_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## As in the earlier merges, the record with the longer follow-up time wins.
for i in clinical4:
    if i[0] not in [j[0] for j in newer_clinical]:
        newest_clinical.append(i)
    else:
        if i[1]<=newer_clinical[[j[0] for j in newer_clinical].index(i[0])][1]:
            newest_clinical.append(newer_clinical[[j[0] for j in newer_clinical].index(i[0])])
        else:
            newest_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in newer_clinical:
    if i[0] not in [j[0] for j in newest_clinical]:
        newest_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in newest_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','UCEC','FILE_SAMPLE_MAP_mrna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        ## Sample-type code is the 4th barcode field; the trailing vial letter is dropped before comparing.
        if i[1].split('-')[3][:-1]=='01':
            ## Truncate the full aliquot barcode to the 12-character patient barcode (first three fields).
            ## (Equivalent to the original zip/join construction, just written directly.)
            x='-'.join(i[1].split('-')[:3])
            TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that contained complete clinical information
for i in final_clinical:
    ## PORTABILITY FIX: dict.has_key() is Python 2 only (removed in Python 3);
    ## the 'in' operator is semantically identical and works on both.
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
## A list of lists of genes is constructed, the order of gene lists is same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
    temp=[]
    for j in i[-1]:
        f=open(os.path.join(BASE_DIR,'tcga_data','UCEC','mrna',j))
        f.readline()
        ## NOTE(review): the comprehension reuses the name `i` (the outer loop variable);
        ## harmless under Python 2 scoping here because `i` is rebound before its next read.
        temp.append([[i.split('|')[1].split()[0],float(i.strip().split()[-1])] for i in f])
    ## In the case that the patient only contained 1 primary tumor mRNA file.
    if len(temp)==1:
        genes.append(temp[0])
    ## If the patient contained more than 1 primary tumor mRNA file
    ## this list comprehension will average the files for any number of files.
    ## (Requires Python 2: zip() must return a list because the result is indexed later.)
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff of 1 RSEM and no more than a fourth of the patients containing no expression was chosen
## [[]]*n is safe here because entries are only rebound with `+` below, never mutated in place.
final_genes=[[]]*len(genes)
for i in range(len(genes[0])):
    temp=[]
    for j in genes:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(genes)/4.0 and median>1:
        for index, kk in enumerate(temp):
            final_genes[index]=final_genes[index]+[kk]
## This will write the final genes to a large (100-300 MB file) which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'mrna','cox','UCEC','final_genes.txt'),'w')
for i in final_genes:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the genes in final_genes
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])):
    ## kaplan rows: [time, vital status, grade, sex, age, expression] for one gene across all patients.
    kaplan=[]
    genes.append(final_genes[0][i][0])
    for k,j in zip(clinical_and_files,final_genes): ## These lists contain the clinical information and mRNA data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    ## 'digit=' relies on R's partial argument matching of 'digits='.
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['gene']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## Grade is expanded into one 0/1 indicator per level for the model.
    ## NOTE(review): grade1+grade2+grade3 equals 1 for every patient, so the three
    ## indicators are collinear and coxph will drop one (NA coefficient); the gene
    ## term is unaffected, but confirm this is intended.
    ##grade1
    grade1=[]
    for ii in kaplan:
        if ii[2]==1:
            grade1.append(1)
        else:
            grade1.append(0)
    ##grade2
    grade2=[]
    for ii in kaplan:
        if ii[2]==2:
            grade2.append(1)
        else:
            grade2.append(0)
    ##grade3
    grade3=[]
    for ii in kaplan:
        if ii[2]==3:
            grade3.append(1)
        else:
            grade3.append(0)
    ro.globalenv['grade1']=ro.IntVector(grade1)
    ro.globalenv['grade2']=ro.IntVector(grade2)
    ro.globalenv['grade3']=ro.IntVector(grade3)
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    ## Sex (ii[3]) is collected but not used as a covariate in this model.
    res=ro.r('coxph(Surv(times,died) ~ gene + grade1 + grade2 + grade3 + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the gene coefficient and pvalue
    ## NOTE(review): if 'gene' is never found in the printout, coeff/pvalue silently
    ## carry over from the previous iteration -- TODO confirm acceptable.
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='gene':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        ## Blank lines make entry.split()[0] raise IndexError; skipping them is intended.
        except:
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with gene name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'mrna','cox','UCEC','coeffs_pvalues.txt'),'w')
for i,j,k in zip(genes,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mrna/cox/UCEC/cox_regression.py",
"copies": "1",
"size": "13424",
"license": "mit",
"hash": 8730643039326008000,
"line_mean": 33.0710659898,
"line_max": 142,
"alpha_frac": 0.6353545888,
"autogenerated": false,
"ratio": 3.0268320180383315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.904369002223363,
"avg_score": 0.023699316920940232,
"num_lines": 394
} |
## A script for finding every cox coefficient and pvalue for every OV lncRNA in the beta MiTranscriptome data set (normalized counts)
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## Read the follow up data
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
f=open(os.path.join(BASE_DIR,'tcga_data','OV','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_ov.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## Skip the two CDE header rows that follow the column names.
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical1=[['','','']]
## The regex accepts only whole non-negative integers; a numeric death date takes
## precedence over a numeric last-contact date.
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Grade, sex and age information were taken from the "clinical_patient" file. A dictionary was created for grade and sex.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','OV','clinical','nationwidechildrens.org_clinical_patient_ov.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical4=[]
for i in data:
    try:
        ## Patients missing grade, sex, or age raise here and are excluded from more_clinical.
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    ## Bare except is deliberate best-effort parsing of incomplete rows.
    except:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## For patients present in both lists, the record with the LONGER follow-up time wins.
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
##In a separate script I parsed the mitranscriptome.expr.counts.tsv file and extracted the OV patient and expression values.
##From this file I will load the expression data.
##There are duplicated transcripts and the possibility of a patient having multiple sequencing files.
##create a dictionary to check for duplicated data
lncrna_dict={}
##I have the list of transcripts saved in a file
f=open(os.path.join(BASE_DIR,'lncrna','transcripts.txt'))
## SECURITY NOTE: eval() is run on locally generated files only; never reuse this
## pattern on untrusted input.
transcripts=eval(f.read())
f=open(os.path.join(BASE_DIR,'tcga_data','OV','lncrna','OV.txt'))
##patient list is at the top of the file
patients=f.readline().strip().split()
## [[]]*n is safe here because entries are only rebound with `+` below, never mutated in place.
lncrnas=[[]]*len(patients)
## Each remaining file line holds one transcript's expression values across all patients;
## only the first occurrence of a duplicated transcript id is kept.
for i,j in zip(transcripts,f):
    if i not in lncrna_dict:
        data=eval(j.strip())
        for index, k in enumerate(data):
            lncrnas[index]=lncrnas[index]+[[i,float(k)]]
        lncrna_dict[i]=''
##create a dictionary mapping patient to all of their lncrna expression data
## Barcodes are truncated to the 12-character patient id; a patient may map to several files.
patient_dict={}
for index, i in enumerate(patients):
    patient_dict[i[:12]]=patient_dict.get(i[:12],[])+[lncrnas[index]]
##find which patients have complete clinical data, order the data, and average data if necessary
##it's possible there are expression data for patients without clinical data, and clinical data without expression data
##create a new clinical list called clinical_and_files for consistency with previous scripts
clinical_and_files=[]
for i in final_clinical:
    if i[0] in patient_dict:
        clinical_and_files.append(i)
ordered_lncrnas=[]
for i in clinical_and_files:
    temp=[]
    for j in patient_dict[i[0]]:
        temp.append(j)
    if len(temp)==1:
        ordered_lncrnas.append(temp[0])
    ## Multiple expression profiles for one patient are averaged element-wise.
    ## (Requires Python 2: zip() must return a list because the result is indexed later.)
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ordered_lncrnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want lncras that meet an expression cutoff
## It is not known what expression level of lncrnas is needed for function, so a soft value for median was chosen.
## I don't want to perform an analysis with all 0 expression however, so zeros are still counted.
## A cutoff of .1 and no more than a fourth of the patients containing no expression was chosen
final_lncrnas=[[]]*len(ordered_lncrnas)
for i in range(len(ordered_lncrnas[0])):
    temp=[]
    for j in ordered_lncrnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(ordered_lncrnas)/4.0 and median>.1:
        for index, kk in enumerate(temp):
            final_lncrnas[index]=final_lncrnas[index]+[kk]
## This will write the final lncrnas to a medium sized file ~10-50MB which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'lncrna','cox','OV','final_lncrnas.txt'),'w')
for i in final_lncrnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the lncrnas in final_lncrnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
lncrnas=[] ##This list tracks the lncrna names
for i in range(len(final_lncrnas[0])):
    ## kaplan rows: [time, vital status, grade, sex, age, expression] for one lncRNA across all patients.
    kaplan=[]
    lncrnas.append(final_lncrnas[0][i][0])
    for k,j in zip(clinical_and_files,final_lncrnas): ## These lists contain the clinical information and lncrna data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the lncrna values for the current lncrna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    ## 'digit=' relies on R's partial argument matching of 'digits='.
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['lncrna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## Grade is expanded into one 0/1 indicator per level for the model.
    ## NOTE(review): grade1+grade2+grade3 equals 1 for every patient, so the three
    ## indicators are collinear and coxph will drop one (NA coefficient); the lncrna
    ## term is unaffected, but confirm this is intended.
    ##grade1
    grade1=[]
    for ii in kaplan:
        if ii[2]==1:
            grade1.append(1)
        else:
            grade1.append(0)
    ##grade2
    grade2=[]
    for ii in kaplan:
        if ii[2]==2:
            grade2.append(1)
        else:
            grade2.append(0)
    ##grade3
    grade3=[]
    for ii in kaplan:
        if ii[2]==3:
            grade3.append(1)
        else:
            grade3.append(0)
    ro.globalenv['grade1']=ro.IntVector(grade1)
    ro.globalenv['grade2']=ro.IntVector(grade2)
    ro.globalenv['grade3']=ro.IntVector(grade3)
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    ## Sex (ii[3]) is collected but not used as a covariate in this model.
    res=ro.r('coxph(Surv(times,died) ~ lncrna + grade1 + grade2 + grade3 + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the lncrna coefficient and pvalue
    ## NOTE(review): if 'lncrna' is never found in the printout, coeff/pvalue silently
    ## carry over from the previous iteration -- TODO confirm acceptable.
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='lncrna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        ## Blank lines make entry.split()[0] raise IndexError; skipping them is intended.
        except:
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with lncrna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'lncrna','cox','OV','coeffs_pvalues.txt'),'w')
for i,j,k in zip(lncrnas,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "lncrna/cox/OV/cox_regression.py",
"copies": "1",
"size": "10212",
"license": "mit",
"hash": 8403198060269602000,
"line_mean": 33.9726027397,
"line_max": 142,
"alpha_frac": 0.6619663141,
"autogenerated": false,
"ratio": 3.1286764705882355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4290642784688235,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every READ lncRNA in the beta MiTranscriptome data set (normalized counts)
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','READ','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_read.txt'))
##get the column indexes needed
columns=f.readline().strip().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## skip the two description rows that follow the header line
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        ## Check the death date first: a recorded death supersedes a last-contact
        ## date, otherwise a deceased patient with both fields numeric would be
        ## censored as 'Alive'. This matches the other cohort scripts in this project.
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','READ','clinical','nationwidechildrens.org_clinical_patient_read.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    ## Patients with an unknown sex, a non-numeric age, or a short row are
    ## skipped; catch only those specific failures instead of a bare except.
    try:
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        ## a recorded death date takes precedence over a last-contact date,
        ## consistent with the follow-up parsing in the other cohort scripts
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    except (KeyError,ValueError,IndexError):
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## Index the follow-up records once (first occurrence per patient) instead of
## rebuilding the patient-id list on every iteration (was quadratic).
clinical_pos={}
for pos,j in enumerate(clinical):
    if j[0] not in clinical_pos:
        clinical_pos[j[0]]=pos
for i in clinical4:
    if i[0] not in clinical_pos:
        new_clinical.append(i)
    else:
        match=clinical[clinical_pos[i[0]]]
        ## keep whichever record has the longer follow-up time
        if i[1]<=match[1]:
            new_clinical.append(match)
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
seen=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in seen:
        new_clinical.append(i)
        seen.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
##In a separate script I parsed the mitranscriptome.expr.counts.tsv file and extracted the READ patient and expression values.
##From this file I will load the expression data.
##There are duplicated transcripts and the possibility of a patient having multiple sequencing files.
##create a dictionary to check for duplicated data
lncrna_dict={}
##I have the list of transcripts saved in a file
f=open(os.path.join(BASE_DIR,'lncrna','transcripts.txt'))
transcripts=eval(f.read())
f=open(os.path.join(BASE_DIR,'tcga_data','READ','lncrna','READ.txt'))
##patient list is at the top of the file
patients=f.readline().strip().split()
## One expression list per patient column; the entries are rebound (not mutated
## in place) below, so the shared-reference pitfall of [[]]*n does not apply here.
lncrnas=[[]]*len(patients)
for i,j in zip(transcripts,f):
    if i not in lncrna_dict:
        data=eval(j.strip())
        for index, k in enumerate(data):
            lncrnas[index]=lncrnas[index]+[[i,float(k)]]
        lncrna_dict[i]=''
##create a dictionary mapping patient to all of their lncrna expression data
patient_dict={}
for index, i in enumerate(patients):
    ## the first 12 characters of a TCGA barcode identify the patient
    patient_dict[i[:12]]=patient_dict.get(i[:12],[])+[lncrnas[index]]
##find which patients have complete clinical data, order the data, and average data if necessary
##it's possible there are expression data for patients without clinical data, and clinical data without expression data
##create a new clinical list called clinical_and_files for consistency with previous scripts
clinical_and_files=[]
for i in final_clinical:
    if i[0] in patient_dict:
        clinical_and_files.append(i)
ordered_lncrnas=[]
for i in clinical_and_files:
    temp=[]
    for j in patient_dict[i[0]]:
        temp.append(j)
    if len(temp)==1:
        ordered_lncrnas.append(temp[0])
    else:
        ## a patient with multiple sequencing files gets the per-transcript mean
        ## NOTE: relies on Python 2 zip() returning a list (rows are indexed by position later)
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ordered_lncrnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want lncrnas that meet an expression cutoff
## It is not known what expression level of lncrnas is needed for function, so a soft value for median was chosen.
## I don't want to perform an analysis with all 0 expression however, so zeros are still counted.
## A cutoff of .1 and no more than a fourth of the patients containing no expression was chosen
final_lncrnas=[[]]*len(ordered_lncrnas)
for i in range(len(ordered_lncrnas[0])):
    temp=[]
    for j in ordered_lncrnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    ## keep the transcript if fewer than 1/4 of patients have zero counts and the median exceeds 0.1
    if count<len(ordered_lncrnas)/4.0 and median>.1:
        for index, kk in enumerate(temp):
            final_lncrnas[index]=final_lncrnas[index]+[kk]
## This will write the final lncrnas to a medium sized file ~10-50MB which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'lncrna','cox','READ','final_lncrnas.txt'),'w')
for i in final_lncrnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the lncrnas in final_lncrnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
lncrnas=[] ##This list tracks the lncrna names
for i in range(len(final_lncrnas[0])):
    kaplan=[]
    lncrnas.append(final_lncrnas[0][i][0])
    for k,j in zip(clinical_and_files,final_lncrnas): ## These lists contain the clinical information and lncrna data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the lncrna values for the current lncrna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['lncrna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ lncrna + sex + age)') ## Perform Cox regression
    ## Parse the printed model summary for the lncrna coefficient and pvalue.
    ## Reset to 'NA' each iteration so a transcript whose 'lncrna' row cannot be
    ## parsed does not silently reuse the previous transcript's values.
    coeff='NA'
    pvalue='NA'
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='lncrna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except IndexError: ## blank lines have no fields
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with lncrna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'lncrna','cox','READ','coeffs_pvalues.txt'),'w')
for i,j,k in zip(lncrnas,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "lncrna/cox/READ/cox_regression.py",
"copies": "1",
"size": "9500",
"license": "mit",
"hash": -4865724338649435000,
"line_mean": 36.2549019608,
"line_max": 142,
"alpha_frac": 0.6736842105,
"autogenerated": false,
"ratio": 3.1322123310253875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43058965415253875,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every SKCM lncRNA in the beta MiTranscriptome data set (normalized counts)
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','SKCM','clinical','nationwidechildrens.org_clinical_follow_up_v2.0_skcm.txt'))
##get the column indexes needed
columns=f.readline().strip().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## skip the two description rows that follow the header line
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        ## a recorded death date takes precedence over a last-contact date
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Sex and age information were taken from the "clinical_patient" file. A dictionary was created for sex.
more_clinical={}
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','SKCM','clinical','nationwidechildrens.org_clinical_patient_skcm.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    ## Patients with an unknown sex, a non-numeric age, or a short row are
    ## skipped; catch only those specific failures instead of a bare except.
    try:
        more_clinical[i[patient_column]]=[0,sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    except (KeyError,ValueError,IndexError):
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## Index the follow-up records once (first occurrence per patient) instead of
## rebuilding the patient-id list on every iteration (was quadratic).
clinical_pos={}
for pos,j in enumerate(clinical):
    if j[0] not in clinical_pos:
        clinical_pos[j[0]]=pos
for i in clinical4:
    if i[0] not in clinical_pos:
        new_clinical.append(i)
    else:
        match=clinical[clinical_pos[i[0]]]
        ## keep whichever record has the longer follow-up time
        if i[1]<=match[1]:
            new_clinical.append(match)
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
seen=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in seen:
        new_clinical.append(i)
        seen.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and sex and age is constructed.
## Only patients with sex and age information are included.
## Data is [[Patient ID, time (days), vital status, 0, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
##In a separate script I parsed the mitranscriptome.expr.counts.tsv file and extracted the SKCM patient and expression values.
##From this file I will load the expression data.
##There are duplicated transcripts and the possibility of a patient having multiple sequencing files.
##create a dictionary to check for duplicated data
lncrna_dict={}
##I have the list of transcripts saved in a file
f=open(os.path.join(BASE_DIR,'lncrna','transcripts.txt'))
transcripts=eval(f.read())
f=open(os.path.join(BASE_DIR,'tcga_data','SKCM','lncrna','SKCM.txt'))
##patient list is at the top of the file
patients=f.readline().strip().split()
## One expression list per patient column; the entries are rebound (not mutated
## in place) below, so the shared-reference pitfall of [[]]*n does not apply here.
lncrnas=[[]]*len(patients)
for i,j in zip(transcripts,f):
    if i not in lncrna_dict:
        data=eval(j.strip())
        for index, k in enumerate(data):
            lncrnas[index]=lncrnas[index]+[[i,float(k)]]
        lncrna_dict[i]=''
##create a dictionary mapping patient to all of their lncrna expression data
patient_dict={}
for index, i in enumerate(patients):
    ## the first 12 characters of a TCGA barcode identify the patient
    patient_dict[i[:12]]=patient_dict.get(i[:12],[])+[lncrnas[index]]
##find which patients have complete clinical data, order the data, and average data if necessary
##it's possible there are expression data for patients without clinical data, and clinical data without expression data
##create a new clinical list called clinical_and_files for consistency with previous scripts
clinical_and_files=[]
for i in final_clinical:
    if i[0] in patient_dict:
        clinical_and_files.append(i)
ordered_lncrnas=[]
for i in clinical_and_files:
    temp=[]
    for j in patient_dict[i[0]]:
        temp.append(j)
    if len(temp)==1:
        ordered_lncrnas.append(temp[0])
    else:
        ## a patient with multiple sequencing files gets the per-transcript mean
        ## NOTE: relies on Python 2 zip() returning a list (rows are indexed by position later)
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ordered_lncrnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want lncrnas that meet an expression cutoff
## It is not known what expression level of lncrnas is needed for function, so a soft value for median was chosen.
## I don't want to perform an analysis with all 0 expression however, so zeros are still counted.
## A cutoff of .1 and no more than a fourth of the patients containing no expression was chosen
final_lncrnas=[[]]*len(ordered_lncrnas)
for i in range(len(ordered_lncrnas[0])):
    temp=[]
    for j in ordered_lncrnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    ## keep the transcript if fewer than 1/4 of patients have zero counts and the median exceeds 0.1
    if count<len(ordered_lncrnas)/4.0 and median>.1:
        for index, kk in enumerate(temp):
            final_lncrnas[index]=final_lncrnas[index]+[kk]
## This will write the final lncrnas to a medium sized file ~10-50MB which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'lncrna','cox','SKCM','final_lncrnas.txt'),'w')
for i in final_lncrnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the lncrnas in final_lncrnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
lncrnas=[] ##This list tracks the lncrna names
for i in range(len(final_lncrnas[0])):
    kaplan=[]
    lncrnas.append(final_lncrnas[0][i][0])
    for k,j in zip(clinical_and_files,final_lncrnas): ## These lists contain the clinical information and lncrna data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the lncrna values for the current lncrna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['lncrna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ lncrna + sex + age)') ## Perform Cox regression
    ## Parse the printed model summary for the lncrna coefficient and pvalue.
    ## Reset to 'NA' each iteration so a transcript whose 'lncrna' row cannot be
    ## parsed does not silently reuse the previous transcript's values.
    coeff='NA'
    pvalue='NA'
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='lncrna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except IndexError: ## blank lines have no fields
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with lncrna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'lncrna','cox','SKCM','coeffs_pvalues.txt'),'w')
for i,j,k in zip(lncrnas,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "lncrna/cox/SKCM/cox_regression.py",
"copies": "1",
"size": "9487",
"license": "mit",
"hash": 4684703284188051000,
"line_mean": 36.796812749,
"line_max": 142,
"alpha_frac": 0.6728154316,
"autogenerated": false,
"ratio": 3.1248353096179184,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4297650741217918,
"avg_score": null,
"num_lines": null
} |
## A script for finding every cox coefficient and pvalue for every STAD lncRNA in the beta MiTranscriptome data set (normalized counts)
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## Read the follow up data
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
f=open(os.path.join(BASE_DIR,'tcga_data','STAD','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_stad.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## skip the two description rows that follow the header line
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        ## a recorded death date takes precedence over a last-contact date
        if re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
## Removing the empty value.
clinical=clinical1[1:]
## Grade, sex and age information were taken from the "clinical_patient" file. A dictionary was created for grade and sex.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','STAD','clinical','nationwidechildrens.org_clinical_patient_stad.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical4=[]
for i in data:
    ## Patients with an unlisted grade (e.g. 'GX'), unknown sex, non-numeric age,
    ## or a short row are skipped; catch only those specific failures instead of
    ## a bare except.
    try:
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        elif re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        else:
            pass
    except (KeyError,ValueError,IndexError):
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## Index the follow-up records once (first occurrence per patient) instead of
## rebuilding the patient-id list on every iteration (was quadratic).
clinical_pos={}
for pos,j in enumerate(clinical):
    if j[0] not in clinical_pos:
        clinical_pos[j[0]]=pos
for i in clinical4:
    if i[0] not in clinical_pos:
        new_clinical.append(i)
    else:
        match=clinical[clinical_pos[i[0]]]
        ## keep whichever record has the longer follow-up time
        if i[1]<=match[1]:
            new_clinical.append(match)
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
seen=set(j[0] for j in new_clinical)
for i in clinical:
    if i[0] not in seen:
        new_clinical.append(i)
        seen.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
    if i[0] in more_clinical:
        final_clinical.append(i+more_clinical[i[0]])
##In a separate script I parsed the mitranscriptome.expr.counts.tsv file and extracted the STAD patient and expression values.
##From this file I will load the expression data.
##There are duplicated transcripts and the possibility of a patient having multiple sequencing files.
##create a dictionary to check for duplicated data
lncrna_dict={}
##I have the list of transcripts saved in a file
f=open(os.path.join(BASE_DIR,'lncrna','transcripts.txt'))
transcripts=eval(f.read())
f=open(os.path.join(BASE_DIR,'tcga_data','STAD','lncrna','STAD.txt'))
##patient list is at the top of the file
patients=f.readline().strip().split()
## One expression list per patient column; the entries are rebound (not mutated
## in place) below, so the shared-reference pitfall of [[]]*n does not apply here.
lncrnas=[[]]*len(patients)
for i,j in zip(transcripts,f):
    if i not in lncrna_dict:
        data=eval(j.strip())
        for index, k in enumerate(data):
            lncrnas[index]=lncrnas[index]+[[i,float(k)]]
        lncrna_dict[i]=''
##create a dictionary mapping patient to all of their lncrna expression data
patient_dict={}
for index, i in enumerate(patients):
    ## the first 12 characters of a TCGA barcode identify the patient
    patient_dict[i[:12]]=patient_dict.get(i[:12],[])+[lncrnas[index]]
##find which patients have complete clinical data, order the data, and average data if necessary
##it's possible there are expression data for patients without clinical data, and clinical data without expression data
##create a new clinical list called clinical_and_files for consistency with previous scripts
clinical_and_files=[]
for i in final_clinical:
    if i[0] in patient_dict:
        clinical_and_files.append(i)
ordered_lncrnas=[]
for i in clinical_and_files:
    temp=[]
    for j in patient_dict[i[0]]:
        temp.append(j)
    if len(temp)==1:
        ordered_lncrnas.append(temp[0])
    else:
        ## a patient with multiple sequencing files gets the per-transcript mean
        ## NOTE: relies on Python 2 zip() returning a list (rows are indexed by position later)
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ordered_lncrnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want lncrnas that meet an expression cutoff
## It is not known what expression level of lncrnas is needed for function, so a soft value for median was chosen.
## I don't want to perform an analysis with all 0 expression however, so zeros are still counted.
## A cutoff of .1 and no more than a fourth of the patients containing no expression was chosen
final_lncrnas=[[]]*len(ordered_lncrnas)
for i in range(len(ordered_lncrnas[0])):
    temp=[]
    for j in ordered_lncrnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    ## keep the transcript if fewer than 1/4 of patients have zero counts and the median exceeds 0.1
    if count<len(ordered_lncrnas)/4.0 and median>.1:
        for index, kk in enumerate(temp):
            final_lncrnas[index]=final_lncrnas[index]+[kk]
## This will write the final lncrnas to a medium sized file ~10-50MB which could be useful for further analyses, this step can be skipped.
f=open(os.path.join(BASE_DIR,'lncrna','cox','STAD','final_lncrnas.txt'),'w')
for i in final_lncrnas:
    f.write(str(i))
    f.write('\n')
f.close()
##Performing Cox regression on all of the lncrnas in final_lncrnas
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
lncrnas=[] ##This list tracks the lncrna names
for i in range(len(final_lncrnas[0])):
    kaplan=[]
    lncrnas.append(final_lncrnas[0][i][0])
    for k,j in zip(clinical_and_files,final_lncrnas): ## These lists contain the clinical information and lncrna data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the lncrna values for the current lncrna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['lncrna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ## Indicator (dummy) variables for tumor grade (kaplan[..][2] holds the grade)
    ro.globalenv['grade1']=ro.IntVector([1 if ii[2]==1 else 0 for ii in kaplan])
    ro.globalenv['grade2']=ro.IntVector([1 if ii[2]==2 else 0 for ii in kaplan])
    ro.globalenv['grade3']=ro.IntVector([1 if ii[2]==3 else 0 for ii in kaplan])
    ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ lncrna + sex + grade1 + grade2 + grade3 + age)') ## Perform Cox regression
    ## Parse the printed model summary for the lncrna coefficient and pvalue.
    ## Reset to 'NA' each iteration so a transcript whose 'lncrna' row cannot be
    ## parsed does not silently reuse the previous transcript's values.
    coeff='NA'
    pvalue='NA'
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='lncrna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except IndexError: ## blank lines have no fields
            pass
    coeffs.append(coeff)
    pvalues.append(pvalue)
## This will write the results to a tab delimited file with lncrna name, cox coefficient, and pvalue.
f=open(os.path.join(BASE_DIR,'lncrna','cox','STAD','coeffs_pvalues.txt'),'w')
for i,j,k in zip(lncrnas,coeffs,pvalues):
    f.write(i)
    f.write('\t')
    f.write(j)
    f.write('\t')
    f.write(k)
    f.write('\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "lncrna/cox/STAD/cox_regression.py",
"copies": "1",
"size": "10299",
"license": "mit",
"hash": 1151842840812833300,
"line_mean": 34.1501706485,
"line_max": 142,
"alpha_frac": 0.6623944072,
"autogenerated": false,
"ratio": 3.120909090909091,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9160544937877086,
"avg_score": 0.024551712046400786,
"num_lines": 293
} |
## A script for finding every cox coefficient and pvalue for every UCEC lncRNA in the beta MiTranscriptome data set (normalized counts)
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## There were three clinical files with nonredundant data. V4.0 is in general the most uptodate, but it is possible
## for data in the other files to be more uptodate. As a result, clinical data will be merged.
f=open(os.path.join(BASE_DIR,'tcga_data','UCEC','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_ucec.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## the next two rows are secondary header rows, not patient data
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
## NOTE(review): the dedup only compares against the LAST entry, so it assumes
## a patient's rows are consecutive in the file -- confirm.
clinical1=[['','','']]
for i in data:
    if clinical1[-1][0]==i[patient_column]:
        ## the regex accepts only all-digit fields; non-numeric placeholders are skipped
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
## Removing the empty value.
clinical1=clinical1[1:]
## Same parsing as for the v4.0 follow-up file, applied to v2.0; builds
## clinical2 = [[Patient ID, time (days), 'Alive'/'Dead'], ...].
f=open(os.path.join(BASE_DIR,'tcga_data','UCEC','clinical','nationwidechildrens.org_clinical_follow_up_v2.0_ucec.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## skip the two secondary header rows
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical2=[['','','']]
for i in data:
    if clinical2[-1][0]==i[patient_column]:
        ## only all-digit day fields are accepted
        if re.search('^[0-9]+$',i[alive_column]):
            clinical2[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical2[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical2.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical2.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
##removing the empty value
clinical2=clinical2[1:]
##merging the data
## For each patient keep the record with the longer follow up; ties keep the
## v4.0 (clinical1) entry, matching the original <= comparison. Patient IDs
## are indexed once up front instead of rebuilding [j[0] for j in clinical1]
## on every iteration, which made the original merge quadratic.
new_clinical=[]
clinical1_index={}
for entry in clinical1:
    ## first occurrence wins, matching list.index() in the original
    if entry[0] not in clinical1_index:
        clinical1_index[entry[0]]=entry
for i in clinical2:
    if i[0] not in clinical1_index:
        new_clinical.append(i)
    else:
        if i[1]<=clinical1_index[i[0]][1]:
            new_clinical.append(clinical1_index[i[0]])
        else:
            new_clinical.append(i)
## also take clinical1 patients that never appeared in clinical2
merged_ids=set(entry[0] for entry in new_clinical)
for i in clinical1:
    if i[0] not in merged_ids:
        new_clinical.append(i)
        merged_ids.add(i[0])
## Same parsing pattern for the v1.7 follow-up file; builds
## clinical3 = [[Patient ID, time (days), 'Alive'/'Dead'], ...].
f=open(os.path.join(BASE_DIR,'tcga_data','UCEC','clinical','nationwidechildrens.org_clinical_follow_up_v1.7_ucec.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## skip the two secondary header rows
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical3=[['','','']]
for i in data:
    if clinical3[-1][0]==i[patient_column]:
        ## only all-digit day fields are accepted
        if re.search('^[0-9]+$',i[alive_column]):
            clinical3[-1]=[i[patient_column],int(i[alive_column]),'Alive']
        elif re.search('^[0-9]+$',i[death_column]):
            clinical3[-1]=[i[patient_column],int(i[death_column]),'Dead']
        else:
            pass
    else:
        if re.search('^[0-9]+$',i[alive_column]):
            clinical3.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical3.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
##removing the empty value
clinical3=clinical3[1:]
##merging the data
## Fold the v1.7 records into the running merge: keep whichever record has
## the longer follow up (ties keep the already-merged entry, matching the
## original <= comparison). IDs are indexed once instead of rescanning
## new_clinical inside the loop, which made the original merge quadratic.
newer_clinical=[]
merged_index={}
for entry in new_clinical:
    ## first occurrence wins, matching list.index() in the original
    if entry[0] not in merged_index:
        merged_index[entry[0]]=entry
for i in clinical3:
    if i[0] not in merged_index:
        newer_clinical.append(i)
    else:
        if i[1]<=merged_index[i[0]][1]:
            newer_clinical.append(merged_index[i[0]])
        else:
            newer_clinical.append(i)
## also take previously merged patients absent from clinical3
seen_ids=set(entry[0] for entry in newer_clinical)
for i in new_clinical:
    if i[0] not in seen_ids:
        newer_clinical.append(i)
        seen_ids.add(i[0])
## Grade, sex, and age come from the "clinical_patient" file; these lookup
## tables translate the text labels into the numeric codes used downstream.
more_clinical = {}
grade_dict = {
    'G1': 1,
    'G2': 2,
    'G3': 3,
    'High Grade': 3,
}
sex_dict = {'MALE': 0, 'FEMALE': 1}
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','UCEC','clinical','nationwidechildrens.org_clinical_patient_ucec.txt'))
columns=f.readline().split('\t')
grade_column=columns.index('neoplasm_histologic_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
## skip the two secondary header rows
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical4=[]
for i in data:
    try:
        ## Patients with an unrecognized grade/sex label or a non-numeric age
        ## raise here and are deliberately skipped (best-effort loading).
        more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
        if re.search('^[0-9]+$',i[alive_column]):
            clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
        elif re.search('^[0-9]+$',i[death_column]):
            clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
        else:
            pass
    except:
        pass
newest_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## Keep whichever record has the longer follow up (ties keep the follow-up
## file entry, matching the original <= comparison). IDs are indexed once
## instead of rescanning newer_clinical inside the loop, which made the
## original merge quadratic.
followup_index={}
for entry in newer_clinical:
    ## first occurrence wins, matching list.index() in the original
    if entry[0] not in followup_index:
        followup_index[entry[0]]=entry
for i in clinical4:
    if i[0] not in followup_index:
        newest_clinical.append(i)
    else:
        if i[1]<=followup_index[i[0]][1]:
            newest_clinical.append(followup_index[i[0]])
        else:
            newest_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
seen_ids=set(entry[0] for entry in newest_clinical)
for i in newer_clinical:
    if i[0] not in seen_ids:
        newest_clinical.append(i)
        seen_ids.add(i[0])
## Restrict the analysis to patients whose follow up time exceeds 0 days.
clinical = [entry for entry in newest_clinical if entry[1] > 0]
## Attach grade, sex, and age to each surviving record; patients lacking
## that information are dropped.
## Rows are [Patient ID, time (days), vital status, grade, sex, age at diagnosis]
final_clinical = [entry + more_clinical[entry[0]]
                  for entry in clinical
                  if entry[0] in more_clinical]
##In a separate script I parsed the mitranscriptome.expr.counts.tsv file and extracted the GBM patient and expression values.
##From this file I will load the expression data.
##There are duplicated transcripts and the possibility of a patient having multiple sequencing files.
##create a dictionary to check for duplicated data
lncrna_dict={}
##I have the list of transcripts saved in a file
## NOTE(review): eval() on file contents -- safe only for trusted local files.
f=open(os.path.join(BASE_DIR,'lncrna','transcripts.txt'))
transcripts=eval(f.read())
f=open(os.path.join(BASE_DIR,'tcga_data','UCEC','lncrna','UCEC.txt'))
##patient list is at the top of the file
patients=f.readline().strip().split()
## One expression list per patient column. The shared-list aliasing from
## [[]]*n is harmless here because each slot is rebound (not mutated) below.
lncrnas=[[]]*len(patients)
for i,j in zip(transcripts,f):
    if i not in lncrna_dict:
        ## only the first occurrence of each transcript is kept
        data=eval(j.strip())
        for index, k in enumerate(data):
            lncrnas[index]=lncrnas[index]+[[i,float(k)]]
        lncrna_dict[i]=''
##create a dictionary mapping patient to all of their lncrna expression data
## samples are grouped by the first 12 barcode characters (presumably the
## patient portion of a TCGA barcode -- confirm)
patient_dict={}
for index, i in enumerate(patients):
    patient_dict[i[:12]]=patient_dict.get(i[:12],[])+[lncrnas[index]]
##find which patients have complete clinical data, order the data, and average data if necessary
##it's possible there are expression data for patients without clinical data, and clinical data without expression data
##create a new clinical list called clinical_and_files for consistency with previous scripts
clinical_and_files=[]
for i in final_clinical:
    if i[0] in patient_dict:
        clinical_and_files.append(i)
## Expression rows are aligned to clinical_and_files order; a patient with
## several sequencing runs gets the per-transcript mean across runs.
ordered_lncrnas=[]
for i in clinical_and_files:
    temp=[]
    for j in patient_dict[i[0]]:
        temp.append(j)
    if len(temp)==1:
        ordered_lncrnas.append(temp[0])
    else:
        values=[]
        for k in temp:
            values.append([kk[1] for kk in k])
        ## NOTE(review): this relies on Python 2 zip() returning a list of
        ## (name, mean) pairs; on Python 3 zip() is a lazy iterator and the
        ## indexing below would break -- confirm the target interpreter.
        ordered_lncrnas.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want lncrnas that meet an expression cutoff
## It is not known what expression level of lncrnas is needed for function, so a soft value for median was chosen.
## I don't want to perform an analysis with all 0 expression however, so zeros are still counted.
## A cutoff of .1 and no more than a fourth of the patients containing no expression was chosen
## The shared-list aliasing from [[]]*n is harmless: slots are rebound below.
final_lncrnas=[[]]*len(ordered_lncrnas)
for i in range(len(ordered_lncrnas[0])):
    temp=[]
    for j in ordered_lncrnas:
        temp.append(j[i])
    count=0
    for k in temp:
        if k[1]==0:
            count+=1
    median=np.median([ii[1] for ii in temp])
    if count<len(ordered_lncrnas)/4.0 and median>.1:
        for index, kk in enumerate(temp):
            final_lncrnas[index]=final_lncrnas[index]+[kk]
## Optionally dump the filtered lncrnas (a ~10-50MB file) for reuse in later
## analyses; this step can be skipped.
f = open(os.path.join(BASE_DIR, 'lncrna', 'cox', 'UCEC', 'final_lncrnas.txt'), 'w')
for row in final_lncrnas:
    f.write(str(row) + '\n')
f.close()
## Run a Cox regression for every lncrna that survived the expression filter.
death_dic = {'Alive': 0, 'Dead': 1}   # vital status -> event indicator
coeffs = []    # cox coefficient for each lncrna
pvalues = []   # matching p-value
lncrnas = []   # lncrna names, in the same order as coeffs/pvalues
## For each lncrna: collect [time, status, grade, sex, age, expression] per
## patient, inverse-normal-transform the expression in R, fit a Cox model
## adjusted for grade and age, and scrape the lncrna row out of the printout.
for i in range(len(final_lncrnas[0])):
    kaplan=[]
    lncrnas.append(final_lncrnas[0][i][0])
    for k,j in zip(clinical_and_files,final_lncrnas): ## These lists contain the clinical information and lncrna data in the same order.
        kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
    data=[ii[-1] for ii in kaplan] ## Grabbing all the lncrna values for the current lncrna being analyzed
    ro.globalenv['expression']=ro.FloatVector(data)
    res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
    inverse_norm=list(res) ## Convert robject to python list
    ## Prepare the variables for rpy2
    ro.globalenv['lncrna']=ro.FloatVector(inverse_norm)
    ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
    ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
    ##grade1 -- one-hot indicator for grade 1 tumors
    grade1=[]
    for ii in kaplan:
        if ii[2]==1:
            grade1.append(1)
        else:
            grade1.append(0)
    ##grade2 -- one-hot indicator for grade 2 tumors
    grade2=[]
    for ii in kaplan:
        if ii[2]==2:
            grade2.append(1)
        else:
            grade2.append(0)
    ##grade3 -- one-hot indicator for grade 3 tumors
    grade3=[]
    for ii in kaplan:
        if ii[2]==3:
            grade3.append(1)
        else:
            grade3.append(0)
    ro.globalenv['grade1']=ro.IntVector(grade1)
    ro.globalenv['grade2']=ro.IntVector(grade2)
    ro.globalenv['grade3']=ro.IntVector(grade3)
    ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
    res=ro.r('coxph(Surv(times,died) ~ lncrna + grade1 + grade2 + grade3 + age)') ## Perform Cox regression
    ## Parse the string of the result with python for the lncrna coefficient and pvalue
    for entry in str(res).split('\n'):
        try:
            if entry.split()[0]=='lncrna':
                coeff=entry.split()[1]
                pvalue=entry.split()[-1]
                break
        except:
            pass
    ## NOTE(review): if the 'lncrna' row were ever absent from the printout,
    ## coeff/pvalue would silently carry over from the previous iteration
    ## (and raise NameError on the first) -- confirm coxph output always
    ## contains that row.
    coeffs.append(coeff)
    pvalues.append(pvalue)
## Dump the Cox results: one "lncrna<TAB>coefficient<TAB>pvalue" line each.
f = open(os.path.join(BASE_DIR, 'lncrna', 'cox', 'UCEC', 'coeffs_pvalues.txt'), 'w')
for name, coeff, pvalue in zip(lncrnas, coeffs, pvalues):
    ## coeff/pvalue are already strings parsed out of the coxph printout
    f.write(name + '\t' + coeff + '\t' + pvalue + '\n')
f.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "lncrna/cox/UCEC/cox_regression.py",
"copies": "1",
"size": "13659",
"license": "mit",
"hash": -8010158717242352000,
"line_mean": 33.4924242424,
"line_max": 142,
"alpha_frac": 0.6503404349,
"autogenerated": false,
"ratio": 3.040739091718611,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9073882077724991,
"avg_score": 0.02343948977872413,
"num_lines": 396
} |
# A script for finding the missing sections given a multi-beam wafer directory
import os
import sys
import glob
import stat
import csv
import argparse
import sqlite3
import re
import time
# Example wafer directory; not referenced below -- presumably kept for
# manual/interactive debugging.
debug_input_dir = '/n/lichtmanfs2/SCS_2015-9-14_C1_W05_mSEM'
# Error labels stored in the database and the CSV report for each section.
FOCUS_FAIL_STR = 'FOCUS_FAIL'
MISSING_SECTION_STR = 'MISSING_SECTION'
MFOVS_MISSING_STR = 'MISSING_MFOVS'
def create_db(db_fname):
    """Open (or create) the sqlite database at db_fname and ensure the schema.

    Returns a (connection, cursor) pair. Clearing isolation_level puts the
    connection in autocommit mode, so the explicit BEGIN/commit calls used
    elsewhere control transaction boundaries.
    """
    print("Using database file: {}".format(db_fname))
    connection = sqlite3.connect(db_fname)
    connection.isolation_level = None
    cur = connection.cursor()
    # One row per section directory that has ever been parsed.
    cur.execute('''CREATE TABLE IF NOT EXISTS parsed_folders(id INTEGER PRIMARY KEY AUTOINCREMENT,
                                                             wafer_dir TEXT NOT NULL,
                                                             batch_dir TEXT NOT NULL,
                                                             dir TEXT NOT NULL,
                                                             section_num INTEGER NOT NULL,
                                                             errors TEXT)''')
    # The parsed folder chosen as authoritative for each (wafer, section).
    cur.execute('''CREATE TABLE IF NOT EXISTS final_folders(wafer_dir TEXT NOT NULL,
                                                            section_num INTEGER NOT NULL,
                                                            parsed_folder_id INTEGER NOT NULL,
                                                            FOREIGN KEY(parsed_folder_id) REFERENCES parsed_folders(id))''')
    cur.execute('''CREATE UNIQUE INDEX IF NOT EXISTS unique_folders ON final_folders(wafer_dir, section_num)''')
    connection.commit()
    return connection, cur
def get_wafers_dirs(cursor):
    """Return every distinct wafer directory recorded in parsed_folders."""
    rows = cursor.execute("SELECT DISTINCT wafer_dir FROM parsed_folders").fetchall()
    return [wafer for (wafer,) in rows]
def get_batch_dirs(cursor, wafer_dir):
    """Return the distinct batch directories recorded for one wafer."""
    cursor.execute("SELECT DISTINCT batch_dir FROM parsed_folders WHERE wafer_dir=?",
                   (wafer_dir,))
    return [batch for (batch,) in cursor.fetchall()]
def get_parsed_dirs(cursor, wafer_dir):
    """Return the distinct section directories already parsed for one wafer."""
    query = "SELECT DISTINCT dir FROM parsed_folders WHERE wafer_dir=?"
    parsed = []
    for row in cursor.execute(query, (wafer_dir,)):
        parsed.append(row[0])
    return parsed
def add_parsed_folder(cursor, db, wafer_dir, batch_dir, dir, section_num, errors):
    """Insert one parsed section folder row and return its generated id."""
    values = (wafer_dir, batch_dir, dir, section_num, errors)
    cursor.execute("INSERT INTO parsed_folders (wafer_dir, batch_dir, dir, section_num, errors) VALUES (?, ?, ?, ?, ?)",
                   values)
    db.commit()
    return cursor.lastrowid
def update_final_folder(cursor, db, wafer_dir, section_num, parsed_folder_id):
    """Record parsed_folder_id as the authoritative folder for this
    (wafer_dir, section_num), replacing any previous choice."""
    row = (wafer_dir, section_num, parsed_folder_id)
    cursor.execute("INSERT OR REPLACE INTO final_folders (wafer_dir, section_num, parsed_folder_id) VALUES (?, ?, ?)",
                   row)
    db.commit()
def get_final_folder_id(cursor, wafer_dir, section_num):
    """Return the chosen parsed_folders id for (wafer_dir, section_num).

    Raises IndexError when no matching row exists.
    """
    cursor.execute("SELECT parsed_folder_id FROM final_folders WHERE wafer_dir=? AND section_num=?",
                   (wafer_dir, section_num))
    ids = [row[0] for row in cursor.fetchall()]
    return ids[0]
def get_wafer_final_folder_ids(cursor, wafer_dir):
    """Return the parsed_folders ids of every final folder for one wafer."""
    query = "SELECT parsed_folder_id FROM final_folders WHERE wafer_dir=?"
    return [folder_id for (folder_id,) in cursor.execute(query, (wafer_dir,)).fetchall()]
def get_wafer_final_folders(cursor, wafer_dir):
    """Return the final folders of a wafer joined with their parse details.

    Each element is a dict with parsed_folder_id, wafer_dir, batch_dir,
    section_dir, section_num and errors from the joined row.
    """
    cursor.execute("SELECT * FROM final_folders JOIN parsed_folders ON final_folders.parsed_folder_id = parsed_folders.id WHERE final_folders.wafer_dir=?",
                   (wafer_dir,))
    folders = []
    # Columns 0-2 come from final_folders, 3-8 from parsed_folders.
    for row in cursor.fetchall():
        folders.append({
            'parsed_folder_id': row[3],
            'wafer_dir': row[4],
            'batch_dir': row[5],
            'section_dir': row[6],
            'section_num': row[7],
            'errors': row[8],
        })
    return folders
def normalize_path(dir):
    """Return an absolute, forward-slash version of *dir*.

    Handles Windows separators and relative paths; anything before the
    first '/' (e.g. a drive letter) is dropped.
    """
    absolute = os.path.abspath(dir).replace('\\', '/')
    return absolute[absolute.find('/'):]
def read_image_files(folder):
    """Yield the usable .bmp image paths in *folder*.

    Thumbnails, non-regular files, and empty files are skipped.
    """
    for fname in glob.glob(os.path.join(folder, '*.bmp')):
        info = os.stat(fname)
        is_thumbnail = os.path.basename(fname).startswith('thumbnail')
        if not is_thumbnail and stat.S_ISREG(info.st_mode) and info.st_size > 0:
            yield fname
def verify_mfov_folder(folder):
    """Return True when the mfov folder holds exactly 61 usable image files."""
    image_count = sum(1 for _ in read_image_files(folder))
    return image_count == 61
def read_region_metadata_csv_file(fname):
    """Yield the data rows of a region_metadata.csv file.

    The first row (the "sep=;" marker) and the header row are skipped.
    Fields are split on ';' with no quote handling.
    """
    with open(fname, 'r') as f:
        reader = csv.reader(f, delimiter=';', quoting=csv.QUOTE_NONE)
        # next(reader) instead of reader.next(): the .next() method is
        # Python 2 only (removed in Python 3); next() works on both.
        next(reader)  # Skip the sep=';' row
        next(reader)  # Skip the headers row
        for row in reader:
            yield row
def verify_mfovs(folder):
    """Check the mfov sub-folders of a section against its region_metadata.csv.

    Returns (mfovs, max_mfov_num) where mfovs is the list of mfov numbers
    whose folders exist and contain the expected images. Returns the
    sentinel (-1, -1) when region_metadata.csv is absent (callers compare
    against -1 to skip the folder).
    """
    # if the region_metadata file doesn't exist, need to skip that folder
    if not os.path.exists(os.path.join(folder, "region_metadata.csv")):
        return -1, -1
    csv_reader = read_region_metadata_csv_file(os.path.join(folder, "region_metadata.csv"))
    max_mfov_num = 0
    mfovs = []
    for line in csv_reader:
        # first CSV column is the mfov number
        mfov_num = int(line[0])
        if mfov_num <= 0:
            print("Skipping mfov {} in folder {}".format(mfov_num, folder))
            continue
        # Read the mfov number
        max_mfov_num = max(max_mfov_num, mfov_num)
        # mfov folders are zero-padded to 6 digits, e.g. 000001
        mfov_folder = os.path.join(folder, str(mfov_num).zfill(6))
        if not os.path.exists(mfov_folder):
            print("Error: mfov folder {} not found".format(mfov_folder))
        elif not verify_mfov_folder(mfov_folder):
            print("Error: # of images in mfov directory {} is not 61".format(mfov_folder))
        else:
            mfovs.append(mfov_num)
    return mfovs, max_mfov_num
def parse_batch_dir(batch_dir, wafer_dir, wafer_dir_normalized, prev_section_dirs, db, cursor):
    """Parse every new section folder inside one batch directory.

    Folders already listed in prev_section_dirs are skipped. Each remaining
    section is classified (no errors / missing mfovs / focus failure),
    recorded in the parsed_folders table, and returned as a dict mapping the
    section-number string to {'folder', 'errors', 'parsed_folder_id'}.
    """
    batch_section_data = {}
    last_section_folder = None  # NOTE(review): assigned but never used in this function
    print("Parsing batch dir: {}".format(batch_dir))
    # Get all section folders in the sub-folder
    all_sections_folders = sorted(glob.glob(os.path.join(batch_dir, '*_*')))
    for section_folder in all_sections_folders:
        # If already parsed that section dir
        if normalize_path(section_folder) in prev_section_dirs:
            print("Previously parsed section dir: {}, skipping...".format(section_folder))
            continue
        print("Parsing section dir: {}".format(section_folder))
        if os.path.isdir(section_folder):
            # Found a section directory, now need to find out if it has a focus issue or not
            # (if it has any sub-dir that is all numbers, it hasn't got an issue)
            # folder names look like "090_S90R1": the prefix is the section number
            section_num = os.path.basename(section_folder).split('_')[0]
            relevant_mfovs, max_mfov_num = verify_mfovs(section_folder)
            if relevant_mfovs == -1:
                # The folder doesn't have the "region_metadata.csv" file, need to skip it
                continue
            batch_section_data[section_num] = {'folder': section_folder}
            if len(relevant_mfovs) > 0:
                # a good section
                if min(relevant_mfovs) == 1 and max_mfov_num == len(relevant_mfovs):
                    # The directories in the wafer directory are sorted by the timestamp, and so here we'll get the most recent scan of the section
                    batch_section_data[section_num]['errors'] = None
                else:
                    # some mfov numbers in 1..max are absent -- record which ones
                    missing_mfovs = []
                    for i in range(0, max(relevant_mfovs)):
                        if i+1 not in relevant_mfovs:
                            missing_mfovs.append(str(i+1))
                    batch_section_data[section_num]['errors'] = MFOVS_MISSING_STR + ':"{}"'.format(','.join(missing_mfovs))
            else:
                # no usable mfovs at all -> treated as a focus failure
                batch_section_data[section_num]['errors'] = FOCUS_FAIL_STR
    # No need to verify that the last section is not being imaged at the moment
    # because we only consider sections that have the "region_metadata.csv", and if it is not there,
    # the folder is skipped
    # Insert all parsed section folders to the database
    for section_num in batch_section_data:
        row_id = add_parsed_folder(cursor, db, wafer_dir_normalized, normalize_path(batch_dir), normalize_path(batch_section_data[section_num]['folder']),
                                   section_num, batch_section_data[section_num]['errors'])
        batch_section_data[section_num]['parsed_folder_id'] = row_id
    return batch_section_data
def find_missing_sections(wafer_dir, wafer_dir_normalized, db, cursor):
    """Scan a wafer directory and classify every section number.

    Returns (all_sections, missing_sections, focus_failed_sections,
    missing_mfovs_sections): all_sections maps zero-padded section-number
    strings to their folder/error info, and the three lists hold the integer
    section numbers in each problem category.
    """
    all_sections = {}
    # update all_sections with data from the previously parsed folders
    prev_final_data = get_wafer_final_folders(cursor, wafer_dir_normalized)
    for entry in prev_final_data:
        section_num_str = str(entry['section_num']).zfill(3)
        all_sections[section_num_str] = {
            'folder': entry['section_dir'],
            'errors': entry['errors'],
            'parsed_folder_id': entry['parsed_folder_id']
        }
    # Fetch the previously parsed dirs
    prev_section_dirs = get_parsed_dirs(cursor, wafer_dir_normalized)
    print("prev_dirs:", prev_section_dirs)
    # The batch directories are sorted by the timestamp in the directory name. Need to store it in a hashtable for sorting
    all_batch_files = glob.glob(os.path.join(wafer_dir, '*'))
    all_batch_dirs = []
    dir_to_time = {}
    for folder in all_batch_files:
        # Assuming folder names similar to: scs_20151217_19-45-07 (scs can be changed to any other name)
        if os.path.isdir(folder):
            m = re.match('.*_([0-9]{8})_([0-9]{2})-([0-9]{2})-([0-9]{2})$', folder)
            if m is not None:
                dir_to_time[folder] = "{}_{}-{}-{}".format(m.group(1), m.group(2), m.group(3), m.group(4))
                all_batch_dirs.append(folder)
    # Parse the batch directories oldest-first so later (newer) scans overwrite older ones
    for sub_folder in sorted(all_batch_dirs, key=lambda folder: dir_to_time[folder]):
        if os.path.isdir(sub_folder):
            batch_section_data = parse_batch_dir(sub_folder, wafer_dir, wafer_dir_normalized, prev_section_dirs, db, cursor)
            # Update the sections that were parsed during this execution
            all_sections.update(batch_section_data)
    # Insert a missing section for each non-seen section number starting from 001 (and ending with the highest number)
    all_sections_keys = sorted(all_sections.keys())
    max_section = int(all_sections_keys[-1])
    missing_sections = []
    focus_failed_sections = []
    missing_mfovs_sections = []
    cursor.execute("begin")
    for i in range(1, max_section + 1):
        section_num_str = str(i).zfill(3)
        if section_num_str in all_sections:
            # Found a section
            if all_sections[section_num_str]['errors'] == FOCUS_FAIL_STR:
                focus_failed_sections.append(i)
            elif all_sections[section_num_str]['errors'] is not None:
                # BUG FIX: this previously appended to the undefined name
                # "missing_mfovs_section" (no trailing s), raising a NameError
                # whenever a section had missing mfovs.
                missing_mfovs_sections.append(i)
            # Add the section to the final folders in the db
            update_final_folder(cursor, db, wafer_dir_normalized, i, all_sections[section_num_str]['parsed_folder_id'])
        else:
            # Found a missing section
            missing_sections.append(i)
            all_sections[section_num_str] = {
                'folder': MISSING_SECTION_STR,
                'errors': MISSING_SECTION_STR }
    db.commit()
    return all_sections, missing_sections, focus_failed_sections, missing_mfovs_sections
def find_and_save_missing_sections(wafer_dir, db_fname, output_fname):
    """Scan wafer_dir, print a summary, and save a per-section CSV report.

    The CSV has one row per section number: [number, folder, errors].
    """
    db, cursor = create_db(db_fname)
    all_sections, missing_sections, focus_failed_sections, missing_mfovs_sections = find_missing_sections(wafer_dir, normalize_path(wafer_dir), db, cursor)
    print("Found {} sections, {} missing sections, {} focus failed sections, {} sections with missing mfovs".format(len(all_sections), len(missing_sections), len(focus_failed_sections), len(missing_mfovs_sections)))
    print("Missing sections: {}".format(missing_sections))
    print("Focus failed sections: {}".format(focus_failed_sections))
    print("Missing mfovs sections: {}".format(missing_mfovs_sections))
    # Output everything to csv
    print("Saving CSV file to: {}".format(output_fname))
    # NOTE(review): opening in 'wb' for csv.writer is the Python 2 idiom; on
    # Python 3 this would need open(output_fname, 'w', newline='') instead --
    # confirm the target interpreter before changing.
    with open(output_fname, 'wb') as f:
        section_keys = sorted(all_sections.keys())
        w = csv.writer(f, delimiter=',')
        for section_num in section_keys:
            w.writerow([section_num, all_sections[section_num]['folder'], all_sections[section_num]['errors']])
    print("Done")
if __name__ == '__main__':
    # Command line parser
    parser = argparse.ArgumentParser(description='A script for finding the missing sections given a multi-beam wafer directory')
    parser.add_argument('wafer_dir', metavar='wafer_dir', type=str,
                        help='a directory where the wafer sections are located (e.g., /n/lichtmanfs2/SCS_2015-9-14_C1_W05_mSEM)')
    parser.add_argument('-o', '--output_fname', type=str,
                        help='an output CSV file (default: ./wafer_data.csv)',
                        default='./wafer_data.csv')
    parser.add_argument('-d', '--db_fname', type=str,
                        help='a db file that stores all the parsed directories (default: ./parsed_folders.db)',
                        default='./parsed_folders.db')
    args = parser.parse_args()
    # Run the scan with the parsed command-line arguments
    find_and_save_missing_sections(args.wafer_dir, args.db_fname, args.output_fname)
| {
"repo_name": "Rhoana/rh_aligner",
"path": "scripts/check_missing_sections.py",
"copies": "1",
"size": "14180",
"license": "mit",
"hash": -2872469478524627500,
"line_mean": 45.6447368421,
"line_max": 215,
"alpha_frac": 0.6035260931,
"autogenerated": false,
"ratio": 3.7256962690488704,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48292223621488706,
"avg_score": null,
"num_lines": null
} |
##a script for getting updated readcounts for human mirnas
##the id is used when available
##when unannotated the coordinates are used
## human_mirnas.txt is read as a dict keyed by accession id; coordinates_dict.txt
## maps mirna names to lists of [chromosome, start, end, strand] entries.
## NOTE(review): eval() on file contents -- safe only for trusted local files.
f=open('human_mirnas.txt')
mirna_dict=eval(f.read())
f.close()
f=open('coordinates_dict.txt')
coordinates_dict=eval(f.read())
f.close()
##files.txt is a list of the files you want to recount
f=open('files.txt')
data=[i.split('.txt')[0] for i in f]
for k in data:
    f=open(k+'.txt')
    f.readline()   # skip the header row
    count_dict={}  # mirna name -> summed read count for this file
    for i in f:
        if 'MIMA' in i:
            ## row carries a MIMAT accession in its last field
            x=i.strip().split('\t')
            try:
                ## direct id lookup; x[2] is the read count being summed
                count_dict[mirna_dict[x[-1].split(',')[1]]]=count_dict.get(mirna_dict[x[-1].split(',')[1]],0)+int(x[2])
            except:
                ## unknown id: fall back to matching by genomic coordinates,
                ## allowing 3bp slack at each end and requiring the same strand
                chromosome='chr'+x[1].split(':')[1]
                start=int(x[1].split(':')[2].split('-')[0])
                end=int(x[1].split(':')[2].split('-')[1])
                sign=x[1].split(':')[-1]
                overlaps=[]
                for ii in coordinates_dict:
                    for j in coordinates_dict[ii]:
                        if chromosome==j[0]:
                            if abs(start-j[1])<=3 and abs(end-j[2])<=3 and sign==j[3]:
                                overlaps.append(ii)
                                break
                if len(overlaps)==1:
                    count_dict[overlaps[0]]=count_dict.get(overlaps[0],0)+int(x[2])
                if len(overlaps)>1:
                    ## ambiguous coordinate match: dump it and abort
                    ## (re-raises the exception that entered this branch)
                    print overlaps, x
                    raise
        elif 'unannotated' in i:
            ## no accession at all: same coordinate matching as above
            x=i.strip().split('\t')
            chromosome='chr'+x[1].split(':')[1]
            start=int(x[1].split(':')[2].split('-')[0])
            end=int(x[1].split(':')[2].split('-')[1])
            sign=x[1].split(':')[-1]
            overlaps=[]
            for ii in coordinates_dict:
                for j in coordinates_dict[ii]:
                    if chromosome==j[0]:
                        if abs(start-j[1])<=3 and abs(end-j[2])<=3 and sign==j[3]:
                            overlaps.append(ii)
                            break
            if len(overlaps)==1:
                count_dict[overlaps[0]]=count_dict.get(overlaps[0],0)+int(x[2])
            if len(overlaps)>1:
                ## NOTE(review): a bare raise with no active exception errors
                ## out in itself -- it still aborts on ambiguity, but an
                ## explicit exception would make the intent clearer
                print overlaps, x
                raise
        else:
            pass
    ## write normalized counts: count / (total reads / 1e6), i.e. reads per million
    w=open(k+'new.txt','w')
    total=sum(count_dict.values())/1000000.0
    for i in count_dict:
        w.write(i)
        w.write('\t')
        w.write(str(count_dict[i]/total))
        w.write('\n')
    w.close()
| {
"repo_name": "OmnesRes/onco_lnc",
"path": "mirna/counting.py",
"copies": "1",
"size": "2507",
"license": "mit",
"hash": 497065417313135500,
"line_mean": 32.4266666667,
"line_max": 119,
"alpha_frac": 0.4555245313,
"autogenerated": false,
"ratio": 3.535966149506347,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4491490680806347,
"avg_score": null,
"num_lines": null
} |
## A script for obtaining the normalized expression values of genes of interest and preparing them for R
## Load necessary modules
import numpy as np
import os
from rpy2 import robjects as ro
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
f=open(os.path.join(BASE_DIR,'tcga_data','LGG','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_lgg.txt'))
## skip the three header rows; column positions are hard-coded below
## (0=barcode, 8=vital status, 9/10=follow-up day counts -- positions
## assumed from usage, confirm against the file)
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical=[['','','']]
for i in data:
    if clinical[-1][0]==i[0]:
        if i[8]=='Alive':
            clinical[-1]=[i[0],int(i[9]),'Alive']
        elif i[8]=='Dead':
            clinical[-1]=[i[0],int(i[10]),'Dead']
        else:
            pass
    else:
        if i[8]=='Alive':
            clinical.append([i[0],int(i[9]),'Alive'])
        elif i[8]=='Dead':
            clinical.append([i[0],int(i[10]),'Dead'])
        else:
            pass
## Removing the empty value.
clinical=clinical[1:]
## Grade, sex, and age come from the "clinical_patient" file; these lookup
## tables translate the text labels into the numeric codes used downstream.
more_clinical = {}
grade_dict = {'G2': 2, 'G3': 3}
sex_dict = {'MALE': 0, 'FEMALE': 1}
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','LGG','clinical','nationwidechildrens.org_clinical_patient_lgg.txt'))
## skip the three header rows; column positions are hard-coded below
f.readline()
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
    ## i[4] is the grade label (keyed into grade_dict), i[10] the gender,
    ## i[-12] the age; i[39]/[40]/[41] hold vital status and day counts --
    ## positions assumed from usage, confirm against the file
    more_clinical[i[0]]=[grade_dict[i[4]],sex_dict[i[10]],int(i[-12])]
    if i[39]=='Alive':
        clinical4.append([i[0],int(i[40]),'Alive'])
    elif i[39]=='Dead':
        clinical4.append([i[0],int(i[41]),'Dead'])
    else:
        pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
## (keep whichever record has the longer follow up; ties keep the follow-up file entry)
for i in clinical4:
    if i[0] not in [j[0] for j in clinical]:
        new_clinical.append(i)
    else:
        if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
            new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
        else:
            new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
    if i[0] not in [j[0] for j in new_clinical]:
        new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
## Attach grade, sex, and age to each clinical record; patients lacking that
## information are dropped.
## Rows are [Patient ID, time (days), vital status, grade, sex, age at diagnosis]
final_clinical = [entry + more_clinical[entry[0]]
                  for entry in clinical
                  if entry[0] in more_clinical]
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','LGG','FILE_SAMPLE_MAP.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
    ## The normalized data files are used
    if 'genes.normalized_results' in i[0]:
        ## sample-type code is the 4th barcode field minus its last character
        if i[1].split('-')[3][:-1]=='01':
            ## rebuild the patient barcode from the first three fields,
            ## re-inserting the dashes (e.g. TCGA-XX-YYYY)
            x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
            TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
## We only care about patients that contained complete clinical information
## AND have mRNA files.
## Data structure: [[Patient ID, time (days), vital status, grade, sex,
## age at diagnosis, [mRNA files]], ...]
clinical_and_files=[]
for i in final_clinical:
    ## "in" replaces dict.has_key(), which was removed in Python 3;
    ## behavior is identical on Python 2.
    if i[0] in TCGA_to_mrna:
        ## The mRNA files are added to the clinical list
        clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
##this assumes you ran cox_regression.py and saved final_genes
## NOTE(review): eval() per line -- safe only for trusted local files.
f=open(os.path.join(BASE_DIR,'cox_regression','LGG','final_genes.txt'))
final_genes=[eval(i.strip()) for i in f]
##final_genes is a list of gene expression values for each patient included in the study
##convert to a dictionary to be able to access values of interest
##the values are in the same order as the patients in clinical_and_files
final_dict={}
for i in final_genes:
    for j in i:
        final_dict[j[0]]=final_dict.get(j[0],[])+[j[1]]
## load the Cox results; rows become [pvalue, coefficient, gene] so the
## sort below orders genes by ascending pvalue
f=open(os.path.join(BASE_DIR,'cox_regression','LGG','coeffs_pvalues.txt'))
data2=[i.strip().split() for i in f]
pvalues=[]
for i in data2:
    pvalues.append([float(i[-1]),float(i[1]),i[0]])
pvalues.sort()
##get the normalized expression values for the good genes
## "good" prognosis genes have a negative cox coefficient (i[1]<0); the first
## 100 such genes are taken in ascending pvalue order
for_matrix=[]
x=0
for i in pvalues:
    if i[1]<0:
        ro.globalenv['expression']=ro.FloatVector(final_dict[i[-1]])
        ## rank-based inverse normal transformation, rounded to 5 digits
        res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)')
        inverse_norm=list(res)
        for_matrix.append([i[-1]]+inverse_norm)
        x+=1
    if x==100:
        break
## Write the clustering matrix header (patient IDs) followed by one row per
## good gene. Fields are tab separated with no trailing tab, which R would
## otherwise read as an NA.
f = open('for_clustering_100_good_100_bad.txt', 'w')
## leading tab leaves the corner cell above the gene-name column empty
f.write('\t' + '\t'.join(patient[0] for patient in clinical_and_files) + '\n')
for row in for_matrix:
    f.write('\t'.join(str(value) for value in row) + '\n')
f.close()
## Start the color-bar file with the good gene names (every name followed by
## a tab, matching the original output exactly).
f = open('genes_with_prognosis.txt', 'w')
for row in for_matrix:
    f.write(row[0] + '\t')
f.close()
##get the normalized expression values for the bad genes
## "bad" prognosis genes have a positive cox coefficient (i[1]>0); the first
## 100 such genes are taken in ascending pvalue order
for_matrix=[]
x=0
for i in pvalues:
    if i[1]>0:
        ro.globalenv['expression']=ro.FloatVector(final_dict[i[-1]])
        ## rank-based inverse normal transformation, rounded to 5 digits
        res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)')
        inverse_norm=list(res)
        for_matrix.append([i[-1]]+inverse_norm)
        x+=1
    if x==100:
        break
## Append the bad-gene rows to the clustering matrix (tab separated, no
## trailing tab -- R would read one as an NA).
f = open('for_clustering_100_good_100_bad.txt', 'a')
for row in for_matrix:
    f.write('\t'.join(str(value) for value in row) + '\n')
f.close()
## Finish the color-bar file: the bad gene names, then one Good/Bad label per
## gene (100 good then 100 bad, tab separated with no trailing tab).
f = open('genes_with_prognosis.txt', 'a')
f.write('\t'.join(row[0] for row in for_matrix) + '\n')
f.write('Good\t' * 100)
f.write('Bad\t' * 99)
f.write('Bad')
f.close()
| {
"repo_name": "OmnesRes/pan_cancer",
"path": "paper/figures/figure_1/clustergrams/LGG/for_clustering.py",
"copies": "1",
"size": "7607",
"license": "mit",
"hash": 5575701238520577000,
"line_mean": 28.831372549,
"line_max": 132,
"alpha_frac": 0.6382279479,
"autogenerated": false,
"ratio": 2.9347993827160495,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40730273306160497,
"avg_score": null,
"num_lines": null
} |
## A script for obtaining the normalized expression values of genes of interest and preparing them for R
## Load necessary modules
import os
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
# BASE_DIR climbs five directory levels from this file; it appears to resolve
# to the directory holding 'tcga_data' and 'figures' -- verify on disk.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
f=open(os.path.join(BASE_DIR,'tcga_data','LGG','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_lgg.txt'))
# Skip the three header lines of the TCGA biotab file.
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file), contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
# NOTE(review): columns are addressed by fixed position -- presumably
# i[0]=patient barcode, i[8]=vital status, i[9]=days to last contact,
# i[10]=days to death. Confirm against the biotab header row.
clinical=[['','','']]
for i in data:
    if clinical[-1][0]==i[0]:
        # Same patient as the previous row: overwrite with the newer record.
        if i[8]=='Alive':
            clinical[-1]=[i[0],int(i[9]),'Alive']
        elif i[8]=='Dead':
            clinical[-1]=[i[0],int(i[10]),'Dead']
        else:
            # Unusable vital status: keep the earlier record for this patient.
            pass
    else:
        if i[8]=='Alive':
            clinical.append([i[0],int(i[9]),'Alive'])
        elif i[8]=='Dead':
            clinical.append([i[0],int(i[10]),'Dead'])
        else:
            pass
## Removing the empty value.
clinical=clinical[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['G2']=2
grade_dict['G3']=3
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','LGG','clinical','nationwidechildrens.org_clinical_patient_lgg.txt'))
f.readline()
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
more_clinical[i[0]]=[grade_dict[i[4]],sex_dict[i[10]],int(i[-12])]
if i[39]=='Alive':
clinical4.append([i[0],int(i[40]),'Alive'])
elif i[39]=='Dead':
clinical4.append([i[0],int(i[41]),'Dead'])
else:
pass
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged checking which data is the most up to date
# Index the follow-up entries by patient barcode once. The original rebuilt
# `[j[0] for j in clinical]` (and called .index) for EVERY patient, which was
# quadratic. list.index found the first occurrence, so keep the first here too.
followup_by_id = {}
for entry in clinical:
    if entry[0] not in followup_by_id:
        followup_by_id[entry[0]] = entry
new_clinical = []
for i in clinical4:
    followup = followup_by_id.get(i[0])
    if followup is None:
        # Patient appears only in the clinical_patient file.
        new_clinical.append(i)
    elif i[1] <= followup[1]:
        # The follow-up record is at least as recent; prefer it.
        new_clinical.append(followup)
    else:
        new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
merged_ids = set(entry[0] for entry in new_clinical)
for i in clinical:
    if i[0] not in merged_ids:
        new_clinical.append(i)
        merged_ids.add(i[0])
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical = [i for i in new_clinical if i[1] > 0]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
final_clinical = [entry + more_clinical[entry[0]]
                  for entry in clinical if entry[0] in more_clinical]
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f = open(os.path.join(BASE_DIR, 'tcga_data', 'LGG', 'FILE_SAMPLE_MAP.txt'))
f.readline()  # header
data = [line.strip().split() for line in f if line != '\n']
TCGA_to_mrna = {}
for row in data:
    ## The normalized data files are used
    if 'genes.normalized_results' not in row[0]:
        continue
    barcode_parts = row[1].split('-')
    ## 01 indicates a primary tumor, and only primary tumors are included
    ## in this analysis (drop the trailing vial letter before comparing)
    if barcode_parts[3][:-1] == '01':
        patient_id = '-'.join(barcode_parts[:3])
        TCGA_to_mrna.setdefault(patient_id, []).append(row[0])
clinical_and_files = []
## We only care about patients that contained complete clinical information
for i in final_clinical:
    ## The mRNA files are added to the clinical list
    ## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
    # dict.has_key() is Python-2-only (removed in Python 3); the `in`
    # operator is the equivalent membership test on both versions.
    if i[0] in TCGA_to_mrna:
        clinical_and_files.append(i + [TCGA_to_mrna[i[0]]])
# Map vital status to the 0/1 event indicator expected by survival analysis.
death_dic = {}
death_dic['Alive'] = 0
death_dic['Dead'] = 1
##need to get the column order of the clustergram
f = open(os.path.join(BASE_DIR, 'figures', 'figure_1', 'clustergrams', 'LGG', 'column_order.txt'))
##double loop list comprehension
new_order = [int(j) for i in f for j in i.strip().split()]
##get the original order of the patients
f = open(os.path.join(BASE_DIR, 'figures', 'figure_1', 'clustergrams', 'LGG', 'for_clustering_100_good_100_bad.txt'))
original_order = f.readline().split()
##using the column order, rearrange the patients so that they are the order in the clustergram
order_dict = {}
for index, i in enumerate(original_order):
    order_dict[index + 1] = i  # column_order.txt is 1-indexed
final_order = [order_dict[i] for i in new_order]
##split the groups at TCGA-DU-7019
# Compute the split point once and use set membership. The original called
# final_order.index() and rebuilt both slices for EVERY patient, which was
# quadratic in the number of patients; the result is identical.
split_idx = final_order.index('TCGA-DU-7019')
group_one = set(final_order[:split_idx])
group_two = set(final_order[split_idx:])
finaldata = []
for i in clinical_and_files:
    if i[0] in group_one:
        finaldata.append([i[1], i[2], '1'])
    elif i[0] in group_two:
        finaldata.append([i[1], i[2], '2'])
# Write the Kaplan-Meier input: a tab-separated header then one row per
# patient (time, 0/1 died indicator, cluster group).
f = open('for_kaplan.txt', 'w')
f.write('\t'.join(['time', 'Died', 'group']) + '\n')
for record in finaldata:
    fields = [str(record[0]), str(death_dic[record[1]]), record[2]]
    f.write('\t'.join(fields) + '\n')
f.close()
| {
"repo_name": "OmnesRes/pan_cancer",
"path": "paper/figures/figure_1/kaplans/LGG/for_kaplan.py",
"copies": "1",
"size": "5991",
"license": "mit",
"hash": -5411653531795091000,
"line_mean": 30.5315789474,
"line_max": 132,
"alpha_frac": 0.6506426306,
"autogenerated": false,
"ratio": 2.9555994079921066,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41062420385921067,
"avg_score": null,
"num_lines": null
} |
"""A script for parsing the Alov bounding box `.ann` files."""
import sys
import os
import itertools
import shutil
import pandas as pd
import numpy as np
def parse_file(bbox_dir, filename):
    """Parse an individual `.ann` file and output the relevant elements.

    Args:
    ----
        bbox_dir: str
        filename: str

    Output:
    ------
        parsed_df: pandas DataFrame
    """
    df = pd.read_table(bbox_dir + filename, sep=' ', header=None)
    df['filename'] = filename
    # Everything before the first underscore names the video's directory.
    df['directory'] = df['filename'].str.split('_').str[0]
    return df
def add_filepaths(parsed_df, input_dir, output_dir):
    """Add the necessary filepaths for saving and later accessing.

    Args:
    ----
        parsed_df: pandas DataFrame
        input_dir: str
        output_dir: str

    Returns:
    -------
        parsed_df: pandas DataFrame (modified in place and returned)
    """
    # The video numbers in the filenames are zero padded on the left.
    parsed_df['vid_num'] = (parsed_df['frame'].astype(str)
                            .apply(lambda frame: frame.zfill(8)))
    parsed_df['filename'] = (parsed_df['filename']
                             .apply(lambda filename:
                                    filename.replace('.ann', '')))
    # Need to replace the filename ext. in the DataFrame with `.jpg` and provide
    # the full relative input and output paths.
    parsed_df['input_filepath'] = (input_dir + parsed_df['directory_path'] +
            '/' + parsed_df['filename'] + '/' + parsed_df['vid_num'] + '.jpg')
    parsed_df['output_filepath'] = (output_dir + parsed_df['filename'] +
            '_' + parsed_df['vid_num'] + '.jpg')
    # To be used as the full path in the image generator. The original
    # hard-coded `.split('/')[3]`, which silently picked the wrong component
    # for any output_dir not exactly three directories deep; basename always
    # yields the final path component.
    parsed_df['jpg_filename'] = (parsed_df['output_filepath']
                                 .apply(os.path.basename))
    return parsed_df
def cp_files(parsed_df, input_dir, output_dir):
    """Copy over the files given by the attributes in the dataframe.

    Args:
    -----
        parsed_df: pandas DataFrame
        input_dir: str
        output_dir: str

    Returns:
    -------
        filepaths_df: pandas DataFrame
    """
    filepaths_df = add_filepaths(parsed_df, input_dir, output_dir)
    path_pairs = zip(filepaths_df['input_filepath'],
                     filepaths_df['output_filepath'])
    for src_path, dst_path in path_pairs:
        shutil.copy(src_path, dst_path)
    return filepaths_df
def fix_box_coords(filepaths_df):
    """Associate the correct bounding box coords with the right variables.

    Args:
    ----
        filepaths_df: pandas DataFrame

    Returns:
    -------
        filepaths_df: pandas DataFrame (modified in place and returned)
    """
    # Rename to match the project terminology and what's used with imagenet.
    x_cols = ['x1', 'x2', 'x3', 'x4']
    y_cols = ['y1', 'y2', 'y3', 'y4']
    filepaths_df['x_max'] = filepaths_df[x_cols].max(axis=1)
    filepaths_df['x_min'] = filepaths_df[x_cols].min(axis=1)
    filepaths_df['y_max'] = filepaths_df[y_cols].max(axis=1)
    filepaths_df['y_min'] = filepaths_df[y_cols].min(axis=1)
    filepaths_df.drop(x_cols + y_cols, axis=1, inplace=True)
    filepaths_df.rename(columns={'x_min': 'x0', 'x_max': 'x1',
                                 'y_min': 'y0', 'y_max': 'y1'}, inplace=True)
    return filepaths_df
def calc_frame_pairs(frames_df):
    """Add columns denoting the relevant info for the current and previous frame.

    Each video has loads of frames, and we need to get information for subsequent
    frames in the same row in the DataFrame. This will make it easy for our image
    generator to easily cycle through pairs. We'll accomplish this by taking the
    `frames_df`, lopping off the last row, placing in a filler row, and merging it
    back onto the original `frames_df`. The rest will be cleanup.

    NOTE(review): this renames `frames_df`'s columns in place (the `_end`
    suffix below), mutating the caller's DataFrame -- confirm callers do not
    reuse it afterwards.

    Args:
    ----
        frames_df: pandas DataFrame

    Returns:
    -------
        save_df: pandas DataFrame
    """
    # All-zero filler row shifts the frame sequence down one position when
    # concatenated in front of the truncated original.
    filler_row = pd.DataFrame(np.zeros((1, frames_df.shape[1])),
                              columns=frames_df.columns)
    less_one_df = frames_df[:-1]
    lagged_df = pd.concat([filler_row, less_one_df], axis=0)
    lagged_cols = [col + '_start' for col in frames_df.columns]
    lagged_df.columns = lagged_cols
    lagged_df.reset_index(inplace=True, drop=True)
    end_cols = [col + '_end' for col in frames_df.columns]
    frames_df.columns = end_cols
    # Row k of the merge now pairs frame k-1 (*_start) with frame k (*_end).
    merged_df = pd.concat([lagged_df, frames_df], axis=1)
    # Drop rows whose *_start frame is its video's final frame (their *_end
    # partner would come from a different video); this also removes the
    # zero filler row, whose max frame per its filename (0) equals itself.
    max_frames_df = merged_df.groupby('filename_start')['frame_start'].max()
    max_frames_df.name = 'max_frame'
    temp_df = merged_df.join(max_frames_df, on='filename_start')
    save_df = temp_df.query('max_frame != frame_start')
    return save_df
if __name__ == '__main__':
    input_dir = sys.argv[1]  # Holds location to find alov `.ann` files.
    output_filepath = sys.argv[2]  # File to give to the resulting .csv.
    output_dir = sys.argv[3]  # Output directory to save resulting .csv in.
    bbox_dir = input_dir + 'bb/'
    # os.walk yields (dirpath, dirnames, filenames); keep only filenames.
    # NOTE(review): the subdirectory component is discarded, yet parse_file
    # joins bbox_dir + filename directly -- this assumes every .ann file sits
    # immediately under bbox_dir; confirm the directory layout.
    ann_files_by_dir = (i[2] for i in os.walk(bbox_dir))
    bbox_ann_filenames = itertools.chain(*ann_files_by_dir)
    cols = ['frame', 'x1', 'y1', 'x2', 'y2', 'x3', 'y3', 'x4',
            'y4', 'filename', 'directory_path']
    parsed_df = pd.concat(parse_file(bbox_dir, filename) for
                          filename in bbox_ann_filenames)
    parsed_df.columns = cols
    parsed_df.reset_index(inplace=True, drop=True)
    frames_dir = input_dir + 'frames/'
    filepaths_df = cp_files(parsed_df, frames_dir, output_dir)
    fixed_coords_df = fix_box_coords(filepaths_df)
    save_df = calc_frame_pairs(fixed_coords_df)
    # Only keep what we'll need for the generator.
    keep_cols = ['x0_start', 'y0_start', 'x1_start', 'y1_start',
                 'jpg_filename_start', 'x0_end', 'x1_end', 'y0_end', 'y1_end',
                 'jpg_filename_end']
    save_df = save_df[keep_cols]
    save_df.rename(columns={'jpg_filename_start': 'filename_start',
                            'jpg_filename_end': 'filename_end'}, inplace=True)
    save_df.to_csv(output_filepath, index=False)
| {
"repo_name": "dansbecker/motion-tracking",
"path": "motion_tracker/data_setup/parse_alov_bb.py",
"copies": "1",
"size": "6199",
"license": "mit",
"hash": 8212785732987451000,
"line_mean": 32.6902173913,
"line_max": 82,
"alpha_frac": 0.589772544,
"autogenerated": false,
"ratio": 3.447719688542825,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9529701677138309,
"avg_score": 0.0015581110809030547,
"num_lines": 184
} |
"""A script for parsing the Imagenet bounding box XML files."""
import sys
import os
import itertools
import pandas as pd
import xml.etree.ElementTree as ET
def parse_file(bbox_dir, filename):
    """Parse an individual XML file and grab the relevant elements.

    Args:
    ----
        bbox_dir: str
        filename: str

    Output:
    ------
        output_lst: list of lists (one entry per <object> node)
    """
    tree = ET.parse(bbox_dir + filename)
    root = tree.getroot()
    img_filename = root.find('filename').text
    width, height = parse_size(root)
    output_lst = []
    # findall('object') yields the direct <object> children, in order.
    for obj in root.findall('object'):
        name = obj.find('name').text
        sub_node = obj.find('subcategory')
        subcategory = sub_node.text if sub_node is not None else None
        xmin, xmax, ymin, ymax = parse_bb(obj.find('bndbox'))
        output_lst.append([img_filename, width, height, name, subcategory,
                           xmin, xmax, ymin, ymax])
    return output_lst
def parse_size(root):
    """Parse the width and height of the image out of the root node.

    Args:
    ----
        root: xml.etree.ElementTree.Element

    Output:
    ------
        (width, height): pair of str
    """
    size_node = root.find('size')
    return size_node.find('width').text, size_node.find('height').text
def parse_bb(bounding_box):
    """Parse the coordinates of the bounding box from the bounding box node.

    Args:
    ----
        bounding_box: xml.etree.ElementTree.Element

    Output:
    ------
        (xmin, xmax, ymin, ymax): tuple of str
    """
    coords = tuple(bounding_box.find(tag).text
                   for tag in ('xmin', 'xmax', 'ymin', 'ymax'))
    return coords
if __name__ == '__main__':
    bbox_dir = sys.argv[1]  # Root directory holding the bounding-box XMLs.
    output_filepath = sys.argv[2]  # Destination .csv path.
    # os.walk yields (dirpath, dirnames, filenames); keep only filenames.
    # NOTE(review): the subdirectory component is discarded but parse_file
    # joins bbox_dir + filename directly -- assumes the XMLs sit immediately
    # under bbox_dir; confirm the directory layout.
    xml_files_by_dir = (i[2] for i in os.walk(bbox_dir))
    bbox_xml_filenames = itertools.chain(*xml_files_by_dir)
    all_bboxes = (parse_file(bbox_dir, filename) for filename in bbox_xml_filenames)
    # Flatten the per-file lists-of-objects into one row list.
    end_lst = list(itertools.chain(*all_bboxes))
    cols = ['filename', 'width', 'height', 'name',
            'subcategory', 'x0', 'x1', 'y0', 'y1']
    output_df = pd.DataFrame(data=end_lst, columns=cols)
    output_df.to_csv(output_filepath, index=False)
| {
"repo_name": "dansbecker/motion-tracking",
"path": "motion_tracker/data_setup/parse_imagenet_bb.py",
"copies": "1",
"size": "2506",
"license": "mit",
"hash": -2795283325361774600,
"line_mean": 24.06,
"line_max": 84,
"alpha_frac": 0.594972067,
"autogenerated": false,
"ratio": 3.7071005917159763,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4802072658715976,
"avg_score": null,
"num_lines": null
} |
""" A script for recalculating visit statistics (by deleting and re-getting).
It recalculates only those where util.isNaN(stats.body_mass_index) is True.
This is a manage.py command. Run with --help for documentation.
Example usage:
To run on localhost:
> manage.py recalc_visit_stats --org maventy
To run on production:
> manage.py recalc_visit_stats --org maventy --remote
"""
import logging
from optparse import make_option
from healthdb import models
from healthdb import util
from healthdb.management.commands.commandutil import ManageCommand
# Number of rows to fetch per datastore query (see the batched loop below).
ROWS_PER_BATCH=200

def recalc_visit_statistics(org):
    """Recalculate statistics for every Visit of `org` whose BMI is NaN.

    Visits are walked in key order in batches of ROWS_PER_BATCH; for each
    visit whose statistics contain a NaN body-mass index, the cached
    statistics are deleted and re-fetched (which recomputes them).
    """
    visits = models.Visit.all().filter('organization =', org).order('__key__').fetch(ROWS_PER_BATCH)
    num = 0
    # Still don't understand why we have to iterate over visits in batches, but
    # we do, or it gets stuck at ~1000.
    while visits:
        for visit in visits:
            num += 1
            if (num % 100) == 0: logging.info("traversed %d visits" % num)
            stats = visit.get_visit_statistics()
            if util.isNaN(stats.body_mass_index):
                # recalculate by deleting and re-getting
                visit.delete_statistics()
                stats = visit.get_visit_statistics()
                pat = visit.get_patient()
                # NOTE(review): visit.short_string is formatted with %d while
                # pat.short_string uses %s -- if short_string is not numeric
                # this logging call will raise; confirm its type.
                logging.info("Visit %s/%d has NaN BMI, recalculating stats.."
                             % (pat.short_string, visit.short_string))
            # pat = visit.get_patient()
            # logging.info("Visit %s/%d %s.." % (pat.short_string, visit.short_string, visit.key()))
        # Resume the scan just past the last key of the previous batch.
        visits = models.Visit.all().filter('organization =', org).order('__key__').filter(
            '__key__ >', visits[-1].key()).fetch(ROWS_PER_BATCH)
    logging.info("traversed %d visits" % num)
class Command(ManageCommand):
    """manage.py command wrapper around recalc_visit_statistics."""
    # optparse matches unambiguous option prefixes, which is why the module
    # docstring's examples can spell this as --org.
    option_list = ManageCommand.option_list + (
        make_option('--organization', dest='organization',
                    help='Organization'),
    )
    help = 'recalculate visit statistics'

    def handle(self, *app_labels, **options):
        """Entry point: connect (locally or remotely) and run the recalc."""
        self.connect(*app_labels, **options)
        recalc_visit_statistics(options.get('organization'))
| {
"repo_name": "avastjohn/maventy_new",
"path": "healthdb/management/commands/recalc_visit_stats.py",
"copies": "1",
"size": "2018",
"license": "bsd-3-clause",
"hash": -4058518073069169700,
"line_mean": 30.53125,
"line_max": 98,
"alpha_frac": 0.6754212091,
"autogenerated": false,
"ratio": 3.4853195164075994,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9562129467245142,
"avg_score": 0.01972225165249151,
"num_lines": 64
} |
""" A script for recounting # of patients and visits
This is a manage.py command. Run with --help for documentation.
Example usage:
To run on localhost:
> manage.py recount
To run on production:
> manage.py recount --remote
"""
import getpass
import logging
import settings
from django.core.management.base import BaseCommand, CommandError
from google.appengine.ext.remote_api import remote_api_stub
from optparse import make_option
from healthdb import models
def auth_func():
    """Interactively collect a username and password (for localhost access)."""
    username = raw_input('Username:')
    password = getpass.getpass('Password:')
    return username, password
# Number of rows to read/write at once
ROWS_PER_BATCH = 200

class LoadError(Exception):
    """Error raised while (re)counting entities.

    Subclasses Exception (the original was a bare old-style class), which
    makes LoadError properly raisable/catchable on both Python 2 and 3.
    """
    def __init__(self, msg):
        super(LoadError, self).__init__(msg)
        self.msg = msg  # kept for callers that read .msg directly

    def __str__(self):
        return self.msg
def set_count(model_class):
    '''Set count of class instances.

    Walks every entity key of `model_class` in batches of ROWS_PER_BATCH,
    ordered by key and resuming after the last key seen, then stores the
    total via model_class.set_count().

    NOTE: This is not accurate if data is added while this count is
    happening, but it's not a problem for now.
    '''
    count = 0
    # keys_only avoids fetching full entities; we only need to count.
    keys = model_class.all(keys_only=True).order('__key__').fetch(ROWS_PER_BATCH)
    while keys:
        count += len(keys)
        logging.info('Downloaded %d keys' % count)
        # Resume the scan just past the last key of the previous batch.
        keys = model_class.all(keys_only=True).order('__key__').filter(
            '__key__ >', keys[-1]).fetch(ROWS_PER_BATCH)
    model_class.set_count(count)
    logging.info('Set counter to %d' % (count))
def set_patient_count():
    '''Set count of Patient and Visit class instances.'''
    for label, model_class in (('Patient', models.Patient),
                               ('Visit', models.Visit)):
        logging.info('%s ..' % label)
        set_count(model_class)
class Command(BaseCommand):
    """manage.py command that recounts Patient and Visit entities."""
    option_list = BaseCommand.option_list + (
        make_option('--host', dest='host', default='localhost:8080',
                    help='Specifies the URL of the local application. Use -- remote '
                         'to modify the production site.'),
    )
    help = 'Sets counts'
    args = ''

    def handle(self, *app_labels, **options):
        """Entry point: wire up the datastore connection, then recount."""
        # Turn off copious DEBUG logging
        logging.getLogger().setLevel(logging.INFO)
        # Note: this app is only supported for decisionapp
        if len(app_labels) != 0:
            raise CommandError("This command doesn't take a list of parameters"
                               "...it only runs against the 'childdb' app.")
        # Configure local server to run against, if we're not --remote
        # TODO(max): I couldn't get this to run against the correct local
        # instance of the datastore, so we'll connect this way. It remains
        # a TODO to just run this script directly, without this block.
        remote = options.get('remote')  # None==local, True==remote (production)
        if not remote:
            remote_api_url = settings.DATABASE_OPTIONS['remote_url']
            host = options.get('host')
            remote_api_stub.ConfigureRemoteDatastore(
                "childdb", remote_api_url, auth_func, host)
        set_patient_count()
| {
"repo_name": "avastjohn/maventy_new",
"path": "healthdb/management/commands/recount.py",
"copies": "1",
"size": "2877",
"license": "bsd-3-clause",
"hash": -5601889987035116000,
"line_mean": 28.2842105263,
"line_max": 79,
"alpha_frac": 0.6534584637,
"autogenerated": false,
"ratio": 3.76078431372549,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49142427774254904,
"avg_score": null,
"num_lines": null
} |
""" A script for running a console.
This is a manage.py command. Run with --help for documentation.
Example usage:
To run on localhost:
> manage.py console --app_id <appid>
To run on production:
> manage.py console --remote --app_id <appid>
"""
import getpass
import logging
import settings
import code
from django.core.management.base import BaseCommand, CommandError
from google.appengine.ext.remote_api import remote_api_stub
from optparse import make_option
def auth_func():
    """Prompt the operator for username and password (localhost access)."""
    credentials = (raw_input('Username:'), getpass.getpass('Password:'))
    return credentials
def console(app_id):
    """Start an interactive Python console labelled with `app_id`.

    NOTE(review): locals() here contains only `app_id`, so the console's
    namespace exposes just that name -- confirm that is the intent.
    """
    code.interact('App Engine interactive console for %s' % (app_id,),
                  None, locals())
class Command(BaseCommand):
    """manage.py command that opens an interactive datastore console."""
    option_list = BaseCommand.option_list + (
        make_option('--app-id', dest='app_id', help='The app id'),
        make_option('--host', dest='host', default='localhost:8080',
                    help='Specifies the URL of the local application. Use -- remote '
                         'to modify the production site.'),
    )
    help = 'Runs a console'
    args = ''

    def handle(self, *app_labels, **options):
        """Entry point: validate args, wire up the datastore, open console."""
        # Turn off copious DEBUG logging
        logging.getLogger().setLevel(logging.INFO)
        app_id = options.get('app_id')
        if not app_id:
            raise CommandError('Must give --app-id')
        # Note: this app is only supported for healthdb
        if len(app_labels) != 0:
            raise CommandError("This command doesn't take a list of parameters"
                               "...it only runs against the 'childdb' app.")
        # TODO(dan): Factor this out
        # Configure local server to run against, if we're not --remote
        # TODO(max): I couldn't get this to run against the correct local
        # instance of the datastore, so we'll connect this way. It remains
        # a TODO to just run this script directly, without this block.
        remote = options.get('remote')  # None==local, True==remote (production)
        if not remote:
            remote_api_url = settings.DATABASE_OPTIONS['remote_url']
            host = options.get('host')
            remote_api_stub.ConfigureRemoteDatastore(
                "childdb", remote_api_url, auth_func, host)
        console(app_id)
| {
"repo_name": "avastjohn/maventy_new",
"path": "healthdb/management/commands/console.py",
"copies": "1",
"size": "2254",
"license": "bsd-3-clause",
"hash": 8070028965097099000,
"line_mean": 29.7464788732,
"line_max": 75,
"alpha_frac": 0.6486246673,
"autogenerated": false,
"ratio": 3.8795180722891565,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5028142739589156,
"avg_score": null,
"num_lines": null
} |
""" A script for setting each Visit org from its parent Patient.
This is a manage.py command. Run with --help for documentation.
Example usage:
To run on localhost:
> manage.py setvisitorg
To run on production:
> manage.py setvisitorg --remote
NOTE: This should be no longer needed once the first initialization is done.
"""
import getpass
import logging
import settings
from django.core.management.base import BaseCommand, CommandError
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.ext import db
from optparse import make_option
from healthdb import models
def auth_func():
    """Ask for a username and password on stdin (for access to localhost)."""
    user = raw_input('Username:')
    return user, getpass.getpass('Password:')
# Number of rows to read/write at once
ROWS_PER_BATCH = 50

def run():
    """Backfill Visit.organization from each visit's parent Patient.

    Walks all visits in key order in batches of ROWS_PER_BATCH, setting the
    organization on any visit missing one, and logs a running total of the
    visits actually updated.
    """
    count = 0
    # TODO(dan): Shouldn't be necessary to fetch in batches, but if I don't it hangs
    visits = models.Visit.all().order('__key__').fetch(ROWS_PER_BATCH)
    visits_to_put = []
    while visits:
        for visit in visits:
            if not visit.organization:
                visit.organization = visit.get_patient().organization
                visits_to_put.append(visit)
        db.put(visits_to_put)
        # BUG FIX: count was previously incremented AFTER visits_to_put was
        # cleared, so every log line (including the final one) reported 0.
        count += len(visits_to_put)
        visits_to_put = []
        logging.info('Updated %d visits' % count)
        # Resume the scan just past the last key of the previous batch.
        visits = models.Visit.all().order('__key__').filter(
            '__key__ >', visits[-1].key()).fetch(ROWS_PER_BATCH)
    db.put(visits_to_put)
    count += len(visits_to_put)
    logging.info('Updated %d visits. Done' % count)
# TODO(dan): Factor out app-id, host, etc.
class Command(BaseCommand):
    """manage.py command that backfills Visit.organization (see run())."""
    option_list = BaseCommand.option_list + (
        make_option('--app-id', dest='app_id', help='The app id'),
        make_option('--host', dest='host', default='localhost:8080',
                    help='Specifies the URL of the local application. Use -- remote '
                         'to modify the production site.'),
    )
    help = 'Sets visit orgs'
    args = ''

    def handle(self, *app_labels, **options):
        """Entry point: validate args, wire up the datastore, then run()."""
        # Turn off copious DEBUG logging
        logging.getLogger().setLevel(logging.INFO)
        # Note: this app is only supported for decisionapp
        if len(app_labels) != 0:
            raise CommandError("This command doesn't take a list of parameters"
                               "...it only runs against the 'childdb' app.")
        app_id = options.get('app_id')
        # app_id is optional for the local app
        # if not app_id:
        #     raise CommandError('Must give --app-id')
        # Configure local server to run against, if we're not --remote
        # TODO(max): I couldn't get this to run against the correct local
        # instance of the datastore, so we'll connect this way. It remains
        # a TODO to just run this script directly, without this block.
        remote = options.get('remote')  # None==local, True==remote (production)
        if not remote:
            remote_api_url = settings.DATABASE_OPTIONS['remote_url']
            host = options.get('host')
            remote_api_stub.ConfigureRemoteDatastore(
                app_id, remote_api_url, auth_func, host)
        run()
| {
"repo_name": "avastjohn/maventy_new",
"path": "healthdb/management/commands/setvisitorg.py",
"copies": "1",
"size": "3015",
"license": "bsd-3-clause",
"hash": -395908788331739840,
"line_mean": 31.0744680851,
"line_max": 82,
"alpha_frac": 0.6736318408,
"autogenerated": false,
"ratio": 3.6064593301435406,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47800911709435406,
"avg_score": null,
"num_lines": null
} |
""" A script for testing DraftRegistrationApprovals. Automatically adds comments to and rejects
pending DraftRegistrationApprovals
"""
import sys
import logging
import datetime as dt
from django.utils import timezone
from website.app import init_app
from website.project.model import DraftRegistration, Sanction
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.WARN)
logging.disable(level=logging.INFO)


def add_comments(draft):
    """Stamp a canned test comment onto every answer of the draft."""
    canned_comment = [{
        'user': {
            'id': 'itsMe',
            'name': 'Mario!'
        },
        'value': 'Ahoy! This is a comment!',
        'lastModified': timezone.now().isoformat()
    }]
    for _question_id, answer in draft.registration_metadata.iteritems():
        answer['comments'] = canned_comment
    draft.save()
def main(dry_run=True):
    """Reject every unapproved DraftRegistrationApproval (after commenting).

    With dry_run, nothing is modified; drafts that would be rejected are
    still logged.
    """
    if dry_run:
        logger.warn('DRY RUN mode')
    drafts = DraftRegistration.find()
    unapproved = [
        d for d in drafts
        if d.requires_approval and d.approval and d.approval.state == Sanction.UNAPPROVED
    ]
    for draft in unapproved:
        add_comments(draft)
        sanction = draft.approval
        try:
            if not dry_run:
                sanction.forcibly_reject()
                #manually do the on_reject functionality to prevent send_mail problems
                sanction.meta = {}
                sanction.save()
                draft.approval = None
                draft.save()
            logger.warn('Rejected {0}'.format(draft._id))
        except Exception as exc:
            logger.error(exc)
if __name__ == '__main__':
    # Pass "dry" on the command line to preview without modifying data.
    dry_run = 'dry' in sys.argv
    init_app(routes=False)
    main(dry_run=dry_run)
| {
"repo_name": "alexschiller/osf.io",
"path": "scripts/prereg/reject_draft_registrations.py",
"copies": "9",
"size": "1747",
"license": "apache-2.0",
"hash": -8491878775636070000,
"line_mean": 30.1964285714,
"line_max": 122,
"alpha_frac": 0.6239267315,
"autogenerated": false,
"ratio": 4.025345622119816,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9149272353619815,
"avg_score": null,
"num_lines": null
} |
""" A script for testing DraftRegistrationApprovals. Automatically adds comments to and rejects
pending DraftRegistrationApprovals
"""
import sys
import logging
import datetime as dt
from website.app import init_app
from website.models import DraftRegistration, Sanction, User
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.WARN)
# Suppress INFO-and-below records from all loggers for this run.
logging.disable(level=logging.INFO)


def add_comments(draft):
    """Attach a canned test comment to every answer in the draft's metadata."""
    comment = [{
        'user': {
            'id': 'itsMe',
            'name': 'Mario!'
        },
        'value': 'Ahoy! This is a comment!',
        'lastModified': dt.datetime.utcnow().isoformat()
    }]
    # registration_metadata maps question ids to answer dicts (Python 2 API).
    for question_id, value in draft.registration_metadata.iteritems():
        value['comments'] = comment
    draft.save()
def main(dry_run=True):
    """Reject every unapproved DraftRegistrationApproval (after commenting).

    With dry_run, nothing is modified; drafts that would be rejected are
    still logged.
    """
    if dry_run:
        logger.warn('DRY RUN mode')
    pending_approval_drafts = DraftRegistration.find()
    need_approval_drafts = [draft for draft in pending_approval_drafts
                            if draft.requires_approval and draft.approval and draft.approval.state == Sanction.UNAPPROVED]
    for draft in need_approval_drafts:
        add_comments(draft)
        sanction = draft.approval
        try:
            if not dry_run:
                sanction.forcibly_reject()
                #manually do the on_reject functionality to prevent send_mail problems
                sanction.meta = {}
                sanction.save()
                draft.approval = None
                draft.save()
            logger.warn('Rejected {0}'.format(draft._id))
        except Exception as e:
            # Log and continue; one failing draft shouldn't stop the sweep.
            logger.error(e)
if __name__ == '__main__':
    # Pass "dry" on the command line to preview without modifying data.
    dry_run = 'dry' in sys.argv
    init_app(routes=False)
    main(dry_run=dry_run)
| {
"repo_name": "brandonPurvis/osf.io",
"path": "scripts/prereg/reject_draft_registrations.py",
"copies": "9",
"size": "1717",
"license": "apache-2.0",
"hash": 4584485334581321700,
"line_mean": 30.7962962963,
"line_max": 122,
"alpha_frac": 0.619685498,
"autogenerated": false,
"ratio": 4.011682242990654,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00222664735959833,
"num_lines": 54
} |
""" A script for testing DraftRegistrationApprovals. Automatically approves all pending
DraftRegistrationApprovals.
"""
import sys
import logging
from framework.celery_tasks.handlers import celery_teardown_request
from website.app import init_app
from website.project.model import DraftRegistration, Sanction
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.WARN)
logging.disable(level=logging.INFO)
def main(dry_run=True):
if dry_run:
logger.warn('DRY RUN mode')
pending_approval_drafts = DraftRegistration.find()
need_approval_drafts = [draft for draft in pending_approval_drafts
if draft.approval and draft.requires_approval and draft.approval.state == Sanction.UNAPPROVED]
for draft in need_approval_drafts:
sanction = draft.approval
try:
if not dry_run:
sanction.state = Sanction.APPROVED
sanction._on_complete(None)
sanction.save()
logger.warn('Approved {0}'.format(draft._id))
except Exception as e:
logger.error(e)
if __name__ == '__main__':
    # Pass "dry" on the command line to preview without modifying data.
    dry_run = 'dry' in sys.argv
    app = init_app(routes=False)
    main(dry_run=dry_run)
    # presumably flushes celery tasks queued during the run -- confirm
    celery_teardown_request()
| {
"repo_name": "mluo613/osf.io",
"path": "scripts/prereg/approve_draft_registrations.py",
"copies": "28",
"size": "1260",
"license": "apache-2.0",
"hash": 1072098318091961100,
"line_mean": 31.3076923077,
"line_max": 122,
"alpha_frac": 0.6706349206,
"autogenerated": false,
"ratio": 3.9009287925696596,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
""" A script for testing DraftRegistrationApprovals. Automatically approves all pending
DraftRegistrationApprovals.
"""
import sys
import logging
from framework.tasks.handlers import celery_teardown_request
from website.app import init_app
from website.project.model import DraftRegistration, Sanction
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.WARN)
# Suppress INFO-and-below records from all loggers for this run.
logging.disable(level=logging.INFO)


def main(dry_run=True):
    """Approve every DraftRegistrationApproval still in the UNAPPROVED state.

    With dry_run, nothing is modified; drafts are still logged.
    """
    if dry_run:
        logger.warn('DRY RUN mode')
    pending_approval_drafts = DraftRegistration.find()
    need_approval_drafts = [draft for draft in pending_approval_drafts
                            if draft.approval and draft.requires_approval and draft.approval.state == Sanction.UNAPPROVED]
    for draft in need_approval_drafts:
        sanction = draft.approval
        try:
            if not dry_run:
                # Force the approved state and run completion side effects.
                sanction.state = Sanction.APPROVED
                sanction._on_complete(None)
                sanction.save()
            logger.warn('Approved {0}'.format(draft._id))
        except Exception as e:
            # Log and continue; one failing draft shouldn't stop the sweep.
            logger.error(e)
if __name__ == '__main__':
    # Pass "dry" on the command line to preview without modifying data.
    dry_run = 'dry' in sys.argv
    app = init_app(routes=False)
    main(dry_run=dry_run)
    # presumably flushes celery tasks queued during the run -- confirm
    celery_teardown_request()
| {
"repo_name": "KAsante95/osf.io",
"path": "scripts/prereg/approve_draft_registrations.py",
"copies": "4",
"size": "1253",
"license": "apache-2.0",
"hash": 4036909659410998300,
"line_mean": 31.1282051282,
"line_max": 122,
"alpha_frac": 0.6695929769,
"autogenerated": false,
"ratio": 3.9034267912772584,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6573019768177258,
"avg_score": null,
"num_lines": null
} |
""" A script for testing DraftRegistrationApprovals. Automatically approves all pending
DraftRegistrationApprovals.
"""
import sys
import logging
from website.app import init_app
from website.models import DraftRegistration, Sanction, User
# Module-level logger; INFO and below are disabled so only the WARN-level
# progress messages emitted by main() appear on the console.
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.WARN)
logging.disable(level=logging.INFO)
def main(dry_run=True):
    """Approve every DraftRegistration whose approval is still UNAPPROVED.

    :param bool dry_run: when True (the default), only log what would be
        approved without mutating any sanction state.
    """
    if dry_run:
        # BUG FIX: Logger.warn() is deprecated (removed in Python 3.13);
        # use Logger.warning() instead.
        logger.warning('DRY RUN mode')
    pending_approval_drafts = DraftRegistration.find()
    need_approval_drafts = [draft for draft in pending_approval_drafts
                            if draft.requires_approval and draft.approval and draft.approval.state == Sanction.UNAPPROVED]
    for draft in need_approval_drafts:
        sanction = draft.approval
        try:
            if not dry_run:
                sanction.state = Sanction.APPROVED
                sanction._on_complete(None)
                sanction.save()
            # Logged in dry-run mode too, so the run shows what *would* happen.
            logger.warning('Approved {0}'.format(draft._id))
        except Exception as e:
            # Keep going so one bad draft does not block the rest.
            logger.error(e)
if __name__ == '__main__':
    # Pass "dry" anywhere on the command line to preview without approving.
    dry_run = 'dry' in sys.argv
    init_app(routes=False)
    main(dry_run=dry_run)
| {
"repo_name": "ticklemepierce/osf.io",
"path": "scripts/prereg/approve_draft_registrations.py",
"copies": "3",
"size": "1154",
"license": "apache-2.0",
"hash": -8036463927482016000,
"line_mean": 31.0555555556,
"line_max": 122,
"alpha_frac": 0.6585788562,
"autogenerated": false,
"ratio": 3.87248322147651,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.603106207767651,
"avg_score": null,
"num_lines": null
} |
""" A script for updating the patient organization of a user.
This is a manage.py command. Run with --help for documentation.
Example usage:
To run on localhost:
> manage.py print_all_visits --org maventy
To run on production:
> manage.py print_all_visits --org maventy --remote
"""
import logging
from optparse import make_option
import sys
from google.appengine.ext import db
from healthdb import models
from healthdb.management.commands.commandutil import ManageCommand
def print_all_visits(org):
    """Print a CSV header plus one CSV line per visit in *org*.

    NOTE: Python 2 code (print statement on the header line).
    """
    patient_cache, visits = models.Visit.get_all_visits(org)
    print "%s" % models.Visit.export_csv_header()
    num = 0
    for visit in visits:
        num += 1
        # Periodic progress logging for long exports.
        if (num % 100) == 0: logging.info("printed %d visits" % num)
        # Each visit line includes data from its parent patient entity.
        print(visit.export_csv_line(patient_cache.get_patient(
            visit.parent_key())))
    logging.info("printed %d visits" % num)
class Command(ManageCommand):
    """manage.py command wrapper around print_all_visits."""
    option_list = ManageCommand.option_list + (
        make_option('--organization', dest='organization',
                    help='Organization'),
    )
    help = 'print all visits'
    def handle(self, *app_labels, **options):
        """Connect to the datastore (local or --remote), then run the export."""
        self.connect(*app_labels, **options)
        print_all_visits(options.get('organization'))
| {
"repo_name": "avastjohn/maventy_new",
"path": "healthdb/management/commands/print_all_visits.py",
"copies": "1",
"size": "1191",
"license": "bsd-3-clause",
"hash": -1208119756659356400,
"line_mean": 24.8913043478,
"line_max": 66,
"alpha_frac": 0.7061293031,
"autogenerated": false,
"ratio": 3.4322766570605188,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4638405960160519,
"avg_score": null,
"num_lines": null
} |
""" A script for updating the patient organization of a user.
This is a manage.py command. Run with --help for documentation.
Example usage:
To run on localhost:
> manage.py setpassword --user dfrankow --password foo
To run on production:
> manage.py setpassword --user dfrankow --password foo --remote
"""
import logging
import sys
from optparse import make_option
from google.appengine.ext import db
from django.contrib.auth.models import User
from healthdb.management.commands.commandutil import ManageCommand
def set_password(username, password):
    """Set *password* for the user named *username*, logging the outcome."""
    # HACK(dan): fetch by key name rather than a Django query; user entities
    # appear to be keyed as 'key_<username>' — probably not the right way.
    account = User.get_by_key_name('key_' + username)
    if not account:
        logging.warning("No such user %s" % username)
        return
    account.set_password(password)
    account.save()
    logging.info("Set password for %s" % username)
class Command(ManageCommand):
    """manage.py command wrapper around set_password."""
    option_list = ManageCommand.option_list + (
        make_option('--username', dest='username', help='User name'),
        make_option('--password', dest='password', help='Password'),
    )
    # args = ''
    help = 'set user password'
    def handle(self, *app_labels, **options):
        """Validate --username/--password, connect, then set the password."""
        self.connect(*app_labels, **options)
        username = options.get('username')
        if not username:
            logging.error("Must give --username")
            sys.exit(1)
        password = options.get('password')
        if not password:
            logging.error("Must give --password")
            sys.exit(1)
        set_password(username, password)
| {
"repo_name": "avastjohn/maventy_new",
"path": "healthdb/management/commands/setpassword.py",
"copies": "1",
"size": "1550",
"license": "bsd-3-clause",
"hash": 5674828841575621000,
"line_mean": 26.1929824561,
"line_max": 78,
"alpha_frac": 0.7,
"autogenerated": false,
"ratio": 3.6729857819905214,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9830732314471838,
"avg_score": 0.008450693503736603,
"num_lines": 57
} |
""" A script for updating the patient organization of a user.
This is a manage.py command. Run with --help for documentation.
Example usage:
To run on localhost:
> manage.py setuserorg --user dfrankow --org maventy
To run on production:
> manage.py setuserorg --user dfrankow --org maventy --remote
"""
import logging
from optparse import make_option
from google.appengine.ext import db
from django.contrib.auth.models import User
from healthdb.management.commands.commandutil import ManageCommand
def set_user_org(username, org):
    """Assign organization *org* to the user named *username*."""
    # HACK(dan): fetch by key name rather than a Django query; user entities
    # appear to be keyed as 'key_<username>' — probably not the right way.
    account = User.get_by_key_name('key_' + username)
    if not account:
        logging.warning("No such user %s" % username)
        return
    account.organization = org
    account.save()
    logging.info("Set %s organization to %s" % (username, org))
def set_all_user_orgs_to_maventy():
    '''Iterate over all the users in the DB and set org to "maventy".
    This was useful once, and may be useful if we modify User.all() to some
    other query'''
    org = 'maventy'
    # Full-table scan over every user entity; fine for small user counts.
    for userobj in User.all():
        userobj.organization = org
        userobj.save()
        logging.info("Set %s organization to %s" % (userobj.username, org))
class Command(ManageCommand):
    """manage.py command wrapper around set_user_org."""
    option_list = ManageCommand.option_list + (
        make_option('--username', dest='username', help='User name'),
        make_option('--organization', dest='organization',
                    help='Organization'),
    )
    # args = ''
    help = 'set user organization'
    def handle(self, *app_labels, **options):
        """Connect to the datastore, then set the user's organization."""
        self.connect(*app_labels, **options)
        set_user_org(options.get('username'), options.get('organization'))
        # set_all_user_orgs_to_maventy()
| {
"repo_name": "avastjohn/maventy_new",
"path": "healthdb/management/commands/setuserorg.py",
"copies": "1",
"size": "1755",
"license": "bsd-3-clause",
"hash": 2394271305266928600,
"line_mean": 27.7704918033,
"line_max": 78,
"alpha_frac": 0.6997150997,
"autogenerated": false,
"ratio": 3.4683794466403164,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4668094546340316,
"avg_score": null,
"num_lines": null
} |
""" A script for updating the search index.
This is a manage.py command. Run with --help for documentation.
Example usage:
To run on localhost:
> manage.py updatesearchindex
To run on production:
> manage.py updatesearchindex --remote
"""
import getpass
import logging
import settings
from django.core.management.base import BaseCommand, CommandError
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.ext import db
from optparse import make_option
from healthdb import models
def auth_func():
    """Get username and password (for access to localhost)"""
    # NOTE: raw_input is Python 2; this script predates Python 3.
    return raw_input('Username:'), getpass.getpass('Password:')
# Number of rows to read/write at once
ROWS_PER_BATCH = 50
def update_search_index():
    '''Set count of class instances.
    NOTE: This is not accurate if data is added while this count is
    happening, but it's not a problem for now.
    '''
    count = 0
    # Walk every Patient in __key__ order, ROWS_PER_BATCH at a time.
    pats = models.Patient.all().order('__key__').fetch(ROWS_PER_BATCH)
    while pats:
        count += len(pats)
        # Re-put each batch unchanged — presumably the put triggers
        # re-indexing on write (see TODO); confirm against the search layer.
        # This is terribly expensive.
        # TODO(dan): Might be able to call search.core.post, but it's complicated
        db.put(pats)
        logging.info('Updated %d' % count)
        # Fetch the next batch, keyed strictly after the last entity seen.
        pats = models.Patient.all().order('__key__').filter(
            '__key__ >', pats[-1].key()).fetch(ROWS_PER_BATCH)
    logging.info('Updated %d' % (count))
# TODO(dan): Factor out app-id, host, etc.
class Command(BaseCommand):
    """manage.py command wrapper around update_search_index."""
    option_list = BaseCommand.option_list + (
        make_option('--app-id', dest='app_id', help='The app id'),
        make_option('--host', dest='host', default='localhost:8080',
                    help='Specifies the URL of the local application. Use -- remote '
                         'to modify the production site.'),
    )
    help = 'Sets counts'
    args = ''
    def handle(self, *app_labels, **options):
        """Connect to the chosen datastore and run update_search_index."""
        # Turn off copious DEBUG logging
        logging.getLogger().setLevel(logging.INFO)
        # Note: this app is only supported for decisionapp
        if len(app_labels) != 0:
            raise CommandError("This command doesn't take a list of parameters"
                               "...it only runs against the 'childdb' app.")
        app_id = options.get('app_id')
        if not app_id:
            raise CommandError('Must give --app-id')
        # Configure local server to run against, if we're not --remote
        # TODO(max): I couldn't get this to run against the correct local
        # instance of the datastore, so we'll connect this way. It remains
        # a TODO to just run this script directly, without this block.
        remote = options.get('remote') # None==local, True==remote (production)
        if not remote:
            remote_api_url = settings.DATABASE_OPTIONS['remote_url']
            host = options.get('host')
            remote_api_stub.ConfigureRemoteDatastore(
                app_id, remote_api_url, auth_func, host)
        update_search_index()
| {
"repo_name": "avastjohn/maventy_new",
"path": "healthdb/management/commands/updatesearchindex.py",
"copies": "1",
"size": "2889",
"license": "bsd-3-clause",
"hash": 6245131244341277000,
"line_mean": 30.4606741573,
"line_max": 77,
"alpha_frac": 0.6542056075,
"autogenerated": false,
"ratio": 3.7863695937090434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9898553243775297,
"avg_score": 0.008404391486749575,
"num_lines": 89
} |
"""A script for watching all traffic on the IOPub channel (stdout/stderr/pyerr) of engines.
This connects to the default cluster, or you can pass the path to your ipcontroller-client.json
Try running this script, and then running a few jobs that print (and call sys.stdout.flush),
and you will see the print statements as they arrive, notably not waiting for the results
to finish.
You can use the zeromq SUBSCRIBE mechanism to only receive information from specific engines,
and easily filter by message type.
Authors
-------
* MinRK
"""
import sys
import json
import zmq
from IPython.kernel.zmq.session import Session
from IPython.utils.py3compat import str_to_bytes
from IPython.utils.path import get_security_file
def main(connection_file):
    """watch iopub channel, and print messages

    Args:
        connection_file: path to an ipcontroller-client.json holding the
            cluster's interface URL, iopub port and signing key.
    """
    ctx = zmq.Context.instance()
    with open(connection_file) as f:
        cfg = json.loads(f.read())
    reg_url = cfg['interface']
    iopub_port = cfg['iopub']
    iopub_url = "%s:%s"%(reg_url, iopub_port)
    # The Session verifies/unpacks messages using the cluster's signing key.
    session = Session(key=str_to_bytes(cfg['key']))
    sub = ctx.socket(zmq.SUB)
    # This will subscribe to all messages:
    sub.setsockopt(zmq.SUBSCRIBE, b'')
    # replace with b'' with b'engine.1.stdout' to subscribe only to engine 1's stdout
    # 0MQ subscriptions are simple 'foo*' matches, so 'engine.1.' subscribes
    # to everything from engine 1, but there is no way to subscribe to
    # just stdout from everyone.
    # multiple calls to subscribe will add subscriptions, e.g. to subscribe to
    # engine 1's stderr and engine 2's stdout:
    # sub.setsockopt(zmq.SUBSCRIBE, b'engine.1.stderr')
    # sub.setsockopt(zmq.SUBSCRIBE, b'engine.2.stdout')
    sub.connect(iopub_url)
    while True:
        try:
            idents,msg = session.recv(sub, mode=0)
        except KeyboardInterrupt:
            # Ctrl-C exits the watch loop cleanly.
            return
        # ident always length 1 here
        topic = idents[0]
        if msg['msg_type'] == 'stream':
            # stdout/stderr
            # stream names are in msg['content']['name'], if you want to handle
            # them differently
            print("%s: %s" % (topic, msg['content']['data']))
        elif msg['msg_type'] == 'pyerr':
            # Python traceback
            c = msg['content']
            print(topic + ':')
            for line in c['traceback']:
                # indent lines
                print(' ' + line)
if __name__ == '__main__':
    # Optional argv[1]: explicit path to ipcontroller-client.json.
    if len(sys.argv) > 1:
        cf = sys.argv[1]
    else:
        # This gets the security file for the default profile:
        cf = get_security_file('ipcontroller-client.json')
    main(cf)
| {
"repo_name": "pioneers/topgear",
"path": "ipython-in-depth/examples/Parallel Computing/iopubwatcher.py",
"copies": "4",
"size": "2618",
"license": "apache-2.0",
"hash": -5540270860744334000,
"line_mean": 33,
"line_max": 95,
"alpha_frac": 0.6375095493,
"autogenerated": false,
"ratio": 3.7134751773049643,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6350984726604966,
"avg_score": null,
"num_lines": null
} |
"""A script generating the visibility graph for problem 1"""
import json
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
import numpy as np
import os
from pprint import pprint
class Problem:
    """ Class containing a path-finding problem instance"""

    def __init__(self, path):
        """Load start/goal state, obstacle polygons and boundary from JSON."""
        base = os.path.dirname(__file__)
        with open(os.path.join(base, path)) as problem_json:
            problem = json.load(problem_json)
        self.start_pos = np.array(problem['start_pos'])
        self.start_vel = np.array(problem['start_vel'])
        self.goal_pos = np.array(problem['goal_pos'])
        self.goal_vel = np.array(problem['goal_vel'])
        self.v_max = np.array(problem['v_max'])
        # Obstacles live under consecutive keys "polygon0", "polygon1", ...
        i = 0
        self.obstacles = {}
        while True:
            key = "polygon" + str(i)
            if key in problem:
                self.obstacles[i] = np.array(problem[key])
            else:
                break
            i += 1
        self.boundary = np.array(problem['boundary_polygon'])

    def get_vertecies(self):
        """Total vertex count: start + goal + obstacle + boundary vertices."""
        V = 2
        for polygon in self.obstacles.values():
            V += len(polygon)
        V += len(self.boundary)
        return V

    def get_num_edges(self):
        """Number of polygon edges (an n-vertex polygon has n edges).

        Equals get_vertecies() - 2: every vertex except the start and goal
        points contributes exactly one outgoing edge of its polygon.
        """
        # BUG FIX: this logic used to be a first `get_edges` definition that
        # returned `self.V - 2` (no such attribute exists) and was silently
        # shadowed by the second `get_edges`, which then called itself
        # recursively (infinite recursion).
        return self.get_vertecies() - 2

    def get_edges(self):
        """Return an (E, 2, 2) array of [point_a, point_b] edge segments."""
        E = self.get_num_edges()
        # BUG FIX: np.zeros takes a shape tuple; np.zeros(E, 2, 2) raised
        # a TypeError because 2 is not a valid dtype.
        Edges = np.zeros((E, 2, 2))
        e = 0
        for polygon in self.obstacles.values():
            for i in range(len(polygon) - 1):
                Edges[e] = [polygon[i], polygon[i+1]]
                e += 1
            # Closing edge back to the polygon's first vertex.
            Edges[e] = [polygon[len(polygon)-1], polygon[0]]
            e += 1
        for i in range(len(self.boundary) - 1):
            Edges[e] = [self.boundary[i], self.boundary[i+1]]
            e += 1
        Edges[e] = [self.boundary[len(self.boundary)-1], self.boundary[0]]
        e += 1
        assert(e == E)
        return Edges

    def get_enclosing_square(self):
        """Returns a touple of two points, one point with the maximum values of x and y
        and one point with the minimum values of x and y"""
        max_x = self.boundary[0][0]
        min_x = self.boundary[0][0]
        max_y = self.boundary[0][1]
        min_y = self.boundary[0][1]
        for x_cord, y_cord in self.boundary:
            if x_cord > max_x:
                max_x = x_cord
            elif x_cord < min_x:
                min_x = x_cord
            if y_cord > max_y:
                max_y = y_cord
            elif y_cord < min_y:
                min_y = y_cord
        return [max_x, max_y], [min_x, min_y]

    def show(self, VG):
        """plots a visual representation of the problem instance"""
        fig = plt.figure()
        ax = fig.add_subplot(111)
        [x_max, y_max], [x_min, y_min] = self.get_enclosing_square()
        ax.set_xlim(x_min - 1, x_max + 1)
        ax.set_ylim(y_min - 1, y_max + 1)
        plt.hold(True)
        def plot_ploygon(polygon):
            """Plotts a polygon on ax"""
            codes = [Path.MOVETO] + [Path.LINETO] * (len(polygon))
            verts = [(x, y) for [x, y] in polygon]
            verts.append(verts[0])
            path = Path(verts, codes)
            patch = patches.PathPatch(path, facecolor='none', lw=5)
            ax.add_patch(patch)
        plot_ploygon(self.boundary)
        for polygon in self.obstacles.values():
            plot_ploygon(polygon)
        # Draw every visibility edge in VG: {vertex_tuple: [visible vertex, ...]}.
        for v1 in VG.keys():
            x1, y1 = v1
            for v2 in VG[v1]:
                x2, y2 = v2
                codes = [Path.MOVETO, Path.LINETO]
                verts = [(x1, y1), (x2, y2)]
                path = Path(verts, codes)
                patch = patches.PathPatch(path, edgecolor="red")
                ax.add_patch(patch)
        plt.show()
def unit_vector(vector):
    """Return *vector* scaled to unit (Euclidean) length."""
    length = np.linalg.norm(vector)
    return vector / length
def angle_between(v1, v2):
    """Return the angle in radians between vectors *v1* and *v2*."""
    # Clip guards arccos against tiny floating-point overshoot beyond [-1, 1].
    cos_theta = np.dot(unit_vector(v1), unit_vector(v2))
    return np.arccos(np.clip(cos_theta, -1.0, 1.0))
RIGHT = "RIGHT"
LEFT = "LEFT"
#stolen
def inside_convex_polygon(point, vertices):
    """Return True iff *point* lies strictly inside the convex polygon.

    A point exactly on an edge (zero cross product) counts as outside.
    """
    verts = list(vertices)
    count = len(verts)
    previous_side = None
    for idx in range(count):
        a = verts[idx]
        b = verts[(idx + 1) % count]
        current_side = get_side(v_sub(b, a), v_sub(point, a))
        if current_side is None:
            # On an edge (or degenerate segment): not strictly inside.
            return False
        if previous_side is None:
            # First segment fixes the reference side.
            previous_side = current_side
        elif previous_side != current_side:
            return False
    return True
def get_side(a, b):
    """Classify *b* relative to *a* by the sign of the 2D cross product.

    Returns LEFT for negative, RIGHT for positive, None when collinear.
    """
    cross = x_product(a, b)
    if cross == 0:
        return None
    return LEFT if cross < 0 else RIGHT
def v_sub(a, b):
    """Componentwise 2D subtraction a - b, returned as a tuple."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return (dx, dy)
def x_product(a, b):
    """z-component of the 2D cross product of vectors *a* and *b*."""
    return a[0] * b[1] - b[0] * a[1]
def point_inside(point, polygons):
    """Returns true if point is inside any polygon in polygons"""
    # any() short-circuits on the first containing polygon, like the
    # original early return.
    return any(inside_convex_polygon(point, polygon)
               for polygon in polygons.values())
def create_visibility_graph(Polygons, Points, Border):
    """Map each vertex tuple to the list of vertices visible from it."""
    edges = {}

    def record(vertex):
        # Shared bookkeeping for polygon vertices and free points alike.
        seen = visible_vertecies(vertex, Polygons, Points, Border)
        key = (vertex[0], vertex[1])
        if key in edges:
            edges[key].extend(seen)
        else:
            edges[key] = seen

    for polygon in Polygons.values():
        for vertex in polygon:
            record(vertex)
    for vertex in Points:
        record(vertex)
    return edges
def visible_vertecies(p, Polygons, Points, Border):
    """List every vertex (obstacle polygons first, then Points) visible from p."""
    result = []
    for polygon in Polygons.values():
        result.extend(v for v in polygon if visible(p, v, Polygons, Border))
    result.extend(v for v in Points if visible(p, v, Polygons, Border))
    return result
def visible(p, vertex, Polygons, Border):
    """Return True if the segment p->vertex crosses no polygon or border edge.

    NOTE(review): `is_touching_edge` is computed but never used afterwards —
    looks like an unfinished special case for segments grazing a corner.
    """
    A1 = p
    A2 = vertex
    # Check for intersections with polygons
    is_touching_edge = False
    for polygon in Polygons.values():
        # j runs from -1 so the closing edge (last vertex -> first) is covered
        # via Python's negative indexing.
        for j in range(-1, polygon.shape[0] - 1):
            B0 = polygon[j - 1]
            B1 = polygon[j]
            B2 = polygon[j + 1]
            if line_segments_intersect(A1, A2, B1, B2):
                return False
            if np.all(A1 == B1) and np.all(A2 != B0) and np.all(A2 != B2):
                is_touching_edge = True
    # Check if the midpoint of the line between p and vertex is inside a polygon
    #fig = plt.figure()
    middel = np.divide(p + vertex, 2)
    #plt.plot([p[0],middel[0],vertex[0]],[p[1],middel[1],vertex[1]],'*r')
    if point_inside(middel, Polygons):
        return False
    # Check for intersections with Border
    for j in range(Border.shape[0] - 1):
        B1 = Border[j]
        B2 = Border[j + 1]
        if line_segments_intersect(A1, A2, B1, B2):
            return False
    # Closing border edge (last vertex back to the first).
    B1 = Border[Border.shape[0] - 1]
    B2 = Border[0]
    if line_segments_intersect(A1, A2, B1, B2):
        return False
    return True
def d(P, P1, P2):
    """Signed side test: cross of (P2-P1) with (P-P1); 0 means P is on the line."""
    dx = P2[0] - P1[0]
    dy = P2[1] - P1[1]
    return (P[0] - P1[0]) * dy - (P[1] - P1[1]) * dx
def line_segments_intersect(A1, A2, B1, B2):
    """
    Return True when segments A1-A2 and B1-B2 cross at an interior point of
    both; touching at an endpoint or being collinear counts as False.
    """
    # Which side of line B does each endpoint of A fall on (0 = on the line)?
    side_A1 = np.sign(d(A1, B1, B2))
    side_A2 = np.sign(d(A2, B1, B2))
    # An endpoint on the line, or both endpoints on one side: no inner crossing.
    if side_A1 == 0 or side_A2 == 0 or side_A1 == side_A2:
        return False
    # Likewise for B's endpoints against line A.
    side_B1 = np.sign(d(B1, A1, A2))
    side_B2 = np.sign(d(B2, A1, A2))
    if side_B1 == 0 or side_B2 == 0 or side_B1 == side_B2:
        return False
    # Each segment's endpoints straddle the other's line: proper crossing.
    return True
if __name__ == "__main__":
P = Problem("../Data/problem_B.json")
VG = create_visibility_graph(P.obstacles,[P.start_pos,P.goal_pos],P.boundary)
P.show(VG)
# TODO: Detect if a line is entirely on the inside of a polygon.
# Need only check the lines that have d=0 for both points, they are suspects. | {
"repo_name": "chm90/Multi_agent_A1",
"path": "src/visibility_graph.py",
"copies": "1",
"size": "9225",
"license": "mit",
"hash": 9193182454756168000,
"line_mean": 30.8892733564,
"line_max": 87,
"alpha_frac": 0.5496473142,
"autogenerated": false,
"ratio": 3.2735346358792183,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9237858753636221,
"avg_score": 0.017064639288599512,
"num_lines": 289
} |
""" A script illustrating how to evolve a simple Capture-Game Player
which uses a MDRNN as network, with a simple ES algorithm."""
__author__ = 'Tom Schaul, tom@idsia.ch'
from pybrain.rl.tasks.capturegame import CaptureGameTask
from pybrain.structure.evolvables.cheaplycopiable import CheaplyCopiable
from pybrain.rl.learners import ES
from pybrain.utilities import storeCallResults
from pybrain.rl.agents.capturegameplayers.killing import KillingPlayer
# task settings: opponent, averaging to reduce noise, board size, etc.
size = 5
simplenet = False
task = CaptureGameTask(size, averageOverGames = 40, opponent = KillingPlayer)
# keep track of evaluations for plotting
res = storeCallResults(task)
if simplenet:
    # simple network
    from pybrain.tools.shortcuts import buildNetwork
    from pybrain import SigmoidLayer
    net = buildNetwork(task.outdim, task.indim, outclass = SigmoidLayer)
else:
    # specialized mdrnn variation
    from pybrain.structure.networks.custom.capturegame import CaptureGameNetwork
    net = CaptureGameNetwork(size = size, hsize = 2, simpleborders = True)
net = CheaplyCopiable(net)
print net.name, 'has', net.paramdim, 'trainable parameters.'
# NOTE(review): `lambada` is presumably this ES API's spelling for the ES
# lambda parameter (lambda is a Python keyword) — confirm against pybrain.
learner = ES(task, net, mu = 5, lambada = 5, verbose = True, noisy = True)
newnet, f = learner.learn(50)
# now, let's take the result, and compare it's performance on a larger game-baord (to the original one)
newsize = 7
bignew = newnet.getBase().resizedTo(newsize)
bigold = net.getBase().resizedTo(newsize)
print 'The rescaled network,', bignew.name, ', has', bignew.paramdim, 'trainable parameters.'
newtask = CaptureGameTask(newsize, averageOverGames = 50, opponent = KillingPlayer)
print 'Old net on big board score:', newtask(bigold)
print 'New net on big board score:', newtask(bignew)
# plot the progression
from pylab import plot, show #@UnresolvedImport
plot(res)
show()
"repo_name": "daanwierstra/pybrain",
"path": "examples/capturegame/evolvingplayer.py",
"copies": "1",
"size": "1877",
"license": "bsd-3-clause",
"hash": 2825888687245123000,
"line_mean": 35.8235294118,
"line_max": 103,
"alpha_frac": 0.7650506127,
"autogenerated": false,
"ratio": 3.33392539964476,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9521854853921894,
"avg_score": 0.015424231684573282,
"num_lines": 51
} |
"""A script is a series of operations."""
import json
import os
from .ops import create
class Script(object):
    """A script is a series of operations."""

    def __init__(self, s=None):
        """Parse a script from a JSON string; with None, build an empty shell."""
        if s is None:
            return
        self.parsed_script = json.loads(s)
        self.operations = [create(params) for params in self.parsed_script]

    def __len__(self):
        """Return the number of operations."""
        return len(self.operations)

    def execute(self, data):
        """Run every operation over *data* in order.

        Args:
            data (:class:`pandas.DataFrame`): The data to transform. Not
                guaranteed immutable.

        Returns:
            :class:`pandas.DataFrame`: The transformed data.
        """
        for operation in self.operations:
            data = operation(data)
        return data
def load_script(f):
    """Load and parse the script given.

    Args:
        f (:class:`file` or :class:`str`): Open file object or filename.

    Returns:
        :class:`Script`: The parsed script object.
    """
    source = open(f) if isinstance(f, (str, os.PathLike)) else f
    with source:
        return parse(source.read())
# Module-level alias: parse(s) is the canonical way to parse a JSON script.
parse = Script
| {
"repo_name": "jezcope/pyrefine",
"path": "pyrefine/script.py",
"copies": "1",
"size": "1267",
"license": "mit",
"hash": 7575640011268687000,
"line_mean": 21.2280701754,
"line_max": 72,
"alpha_frac": 0.5580110497,
"autogenerated": false,
"ratio": 4.309523809523809,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5367534859223809,
"avg_score": null,
"num_lines": null
} |
"""A script testing the extraction pipeline of RHEA
Steps
1) Initialise Format, Extractor and RadialVelocity
2) Define file paths for science, flat and dark frames
3) Extract/import spectra
4) Create/import reference spectra
5) Calculate radial velocities
6) Plot radial velocities
"""
import numpy as np
try:
    import pyfits
except ImportError:
    # BUG FIX: this was a bare `except:`, which also swallows
    # KeyboardInterrupt/SystemExit; only a failed import should trigger the
    # astropy fallback (astropy bundles the retired pyfits as io.fits).
    import astropy.io.fits as pyfits
import pymfe
import glob
from astropy.coordinates import SkyCoord
from astropy import units as u
#===============================================================================
# Parameters/Constants/Variables/Initialisation
#===============================================================================
# Constants/Variables
do_bcor = False   # passed to extract_spectra — presumably toggles barycentric correction; confirm against pymfe.rv
med_cut = 0.6     # passed to create_ref_spect as its med_cut parameter
# Specified header parameters
xbin = 2
ybin = 1
exptime = 60
# Bad-pixel mask: 1 marks a bad pixel; np.where gives their coordinates.
badpixel_mask = pyfits.getdata('/priv/mulga1/jbento/rhea2_data/badpix.fits')
badpix = np.where(badpixel_mask == 1)
# Initialise objects
rhea2_format = pymfe.rhea.Format()
rhea2_extract = pymfe.Extractor(rhea2_format, transpose_data=False,
                                badpixmask=badpix)
xx, wave, blaze = rhea2_format.spectral_format()
rv = pymfe.rv.RadialVelocity()
# Science Frames
star = "thar"
base_path = "/priv/mulga1/jbento/rhea2_data/tauCeti/"
# Find all Tau Ceti ThAr files and sort by observation date in MJD
all_files = np.array(glob.glob(base_path + "2015*/*" + star + "_*.fit*"))
# BUG FIX: this index array was previously named `sorted`, shadowing the
# builtin sorted() for the rest of the script.
sort_order = np.argsort([pyfits.getheader(e)['JD'] for e in all_files])
all_files = all_files[sort_order]
files = []
# Only consider files that have the same exposure time and correct binning
for f in all_files:
    fits = pyfits.open(f)
    header = fits[0].header
    x_head = header["XBINNING"]
    y_head = header["YBINNING"]
    exp_head = header["EXPTIME"]
    if x_head == xbin and y_head == ybin and exp_head == exptime:
        files.append(f)
    fits.close()
# Flats and Darks
star_dark = pyfits.getdata(base_path + "Dark frames/MasterDarkThar.fits")
flat_dark = pyfits.getdata(base_path + "Dark frames/MasterDarkFlat.fits")
# Note: this particular flat was chosen as it has an exposure time of 2.5
# seconds, the same length as the flat dark that will be used to correct it
flat_path = base_path + "20151116/20151116_Masterflat.fit"
flat_files = [flat_path] * len(files)
# Extracted spectra output
out_path = "/priv/mulga1/arains/TauCeti_Extracted/"
extracted_files = np.array(glob.glob(out_path + "*" + star + "*extracted.fits"))
# Sort to account for files not being labelled with MJD
#sorted = np.argsort([pyfits.getheader(e)['JD'] for e in extracted_files])
#extracted_files = extracted_files[sorted]
# RV csv output
base_rv_path = out_path + star
#===============================================================================
# Extract and save spectra/load previously extracted spectra
#===============================================================================
# Extract spectra ("wave" removed)
# OPTION 1: Extract and save spectra
# NOTE(review): `vars` shadows the builtin vars(); renaming would touch every
# later use, so it is only flagged here.
fluxes, vars, bcors, mjds = rv.extract_spectra(files, rhea2_extract,
                                               star_dark=star_dark,
                                               flat_files=flat_files,
                                               flat_dark=flat_dark,
                                               do_bcor=do_bcor)
# Save spectra (Make sure to save "wave" generated from rhea2_format)
rv.save_fluxes(files, fluxes, vars, bcors, wave, mjds, out_path)
# OPTION 2: Load previously extracted spectra
#fluxes, vars, wave, bcors, mjds = rv.load_fluxes(extracted_files)
#===============================================================================
# Create and save/import reference spectrum
#===============================================================================
# Number of frames to use for reference spectrum
n = 10
# OPTION 1: Create and save a new reference spectrum
wave_ref, ref_spect = rv.create_ref_spect(wave, fluxes[:n,:,:], vars[:n,:,:],
                                          bcors[:n], med_cut=med_cut)
rv.save_ref_spect(files[:n], ref_spect, vars[:n,:,:], wave_ref, bcors[:n],
                  mjds[:n], out_path, star)
# OPTION 2: Import a pre-existing reference spectrum
#ref_spect, vars_ref, wave_ref, bcors_ref, mjds_ref = rv.load_ref_spect(ref_path)
#===============================================================================
# Calculate, save and plot radial velocities
#===============================================================================
# Calculate RVs
rvs, rv_sigs = rv.calculate_rv_shift(wave_ref, ref_spect, fluxes, vars, bcors,
                                     wave)
nf = fluxes.shape[0]
nm = fluxes.shape[1]
# Broadcast each frame's barycentric correction across its nm orders.
bcor_rvs = rvs + bcors.repeat(nm).reshape( (nf, nm) )
# Save RVs
rv.save_rvs(rvs, rv_sigs, bcors, mjds, bcor_rvs, base_rv_path)
| {
"repo_name": "mikeireland/pymfe",
"path": "tauceti_thar_extraction.py",
"copies": "1",
"size": "5471",
"license": "mit",
"hash": 3205346437561254000,
"line_mean": 39.2279411765,
"line_max": 133,
"alpha_frac": 0.5161762018,
"autogenerated": false,
"ratio": 3.893950177935943,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9818487549578416,
"avg_score": 0.01832776603150538,
"num_lines": 136
} |
"""A script testing the extraction pipeline of RHEA
Steps
1) Initialise Format, Extractor and RadialVelocity
2) Define file paths for science, flat and dark frames
3) Extract/import spectra
4) Create/import reference spectra
5) Calculate radial velocities
6) Plot radial velocities
"""
import numpy as np
import astropy.io.fits as pyfits
import pymfe
import glob
from astropy.time import Time
import astropy.coordinates as coordinates
from astropy.coordinates import SkyCoord
from astropy import units as u
import PyAstronomy.pyasl as pyasl
#===============================================================================
# Parameters/Constants/Variables/Initialisation
#===============================================================================
# Constants/Variables
do_bcor = False
med_cut = 0.6
# Target J2000 coordinates for the barycentric correction — presumably
# tau Ceti given the RA/Dec; confirm (the data directory says "sun").
coord = SkyCoord('01 44 04.08338 -15 56 14.9262', unit=(u.hourangle, u.deg))
badpixel_mask = pyfits.getdata('/priv/mulga1/jbento/rhea2_data/badpix.fits')
badpix = np.where(badpixel_mask == 1)
# Initialise objects
rhea2_format = pymfe.rhea.Format()
rhea2_extract = pymfe.Extractor(rhea2_format, transpose_data=False,
                                badpixmask=badpix)
xx, wave, blaze = rhea2_format.spectral_format()
rv = pymfe.rv.RadialVelocity()
#===============================================================================
# File paths (Observations, Flats and Darks, save/load directories)
#===============================================================================
# Science Frames
star = "thar-"
base_path = "/priv/mulga1/jbento/rhea2_data/20160221_sun/"
files = glob.glob(base_path + "*" + star + "*.fit*")
files.sort()
# Flats and Darks
star_dark = pyfits.getdata(base_path + "20151130_Masterdark_thar.fit")
flat_files = [base_path + "20151130_Masterflat_calibrated.fit"] * len(files)
# Set to len(0) arrays when extracting ThAr
#star_dark = np.empty(0)
flat_dark = np.empty(0)
#flat_files = np.empty(0)
# Extracted spectra output
out_path = "/priv/mulga1/arains/Solar_Extracted/"
#extracted_files = glob.glob(out_path + "*" + star + "*extracted.fits")
#extracted_files.sort()
# Saved reference spectrum
ref_path = out_path + "reference_spectrum_74gammaCrucis.fits"
# RV csv output
base_rv_path = out_path + star
#===============================================================================
# Extract and save spectra/load previously extracted spectra
#===============================================================================
# OPTION 1: Extract and save spectra
# NOTE(review): `vars` shadows the builtin vars(); renaming would touch every
# later use, so it is only flagged here.
fluxes, vars, bcors, mjds = rv.extract_spectra(files, rhea2_extract,
                                               star_dark=star_dark,
                                               flat_files=flat_files,
                                               flat_dark=flat_dark,
                                               coord=coord, do_bcor=do_bcor)
# Save spectra (Make sure to save "wave" generated from rhea2_format)
rv.save_fluxes(files, fluxes, vars, bcors, wave, mjds, out_path)
# OPTION 2: Load previously extracted spectra
#fluxes, vars, wave, bcors, mjds = rv.load_fluxes(extracted_files)
#===============================================================================
# Create and save/import reference spectrum
#===============================================================================
# OPTION 1: Create and save a new reference spectrum
wave_ref, ref_spect = rv.create_ref_spect(wave, fluxes, vars, bcors,
                                          med_cut=med_cut)
rv.save_ref_spect(files, ref_spect, vars, wave_ref, bcors, mjds,
                  out_path, star)
# OPTION 2: Import a pre-existing reference spectrum
#ref_spect, vars_ref, wave_ref, bcors_ref, mjds_ref = rv.load_ref_spect(ref_path)
#===============================================================================
# Calculate and save radial velocities
#===============================================================================
# Calculate RVs
rvs, rv_sigs = rv.calculate_rv_shift(wave_ref, ref_spect, fluxes, vars, bcors,
                                     wave)
# Save RVs
rv.save_rvs(rvs, rv_sigs, bcors, mjds, base_rv_path)
#bcor_rvs = all_rvs + all_bcors.repeat(nm).reshape( (num_files,nm) )
| {
"repo_name": "mikeireland/pymfe",
"path": "thar_extraction_test.py",
"copies": "1",
"size": "4732",
"license": "mit",
"hash": 8159742888251268000,
"line_mean": 42.2242990654,
"line_max": 133,
"alpha_frac": 0.4860524091,
"autogenerated": false,
"ratio": 3.872340425531915,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4858392834631915,
"avg_score": null,
"num_lines": null
} |
"""A script that builds all the information from the OpenAL headers."""
import build_metadata.extract_from_headers
import os.path
import yaml
import collections

# Headers to parse, given relative to this script's directory.
headers = set([
    os.path.join('OpenAL-Soft', 'include', 'al', 'al.h'),
    os.path.join('OpenAL-Soft', 'include', 'al', 'alc.h'),
    os.path.join('OpenAL-Soft', 'include', 'al', 'efx.h')
])
my_directory = os.path.dirname(os.path.abspath(__file__))
# BUG FIX: the original removed from and added to `headers` while iterating it
# (`for i in headers: headers.remove(i); headers.add(...)`), which raises
# "Set changed size during iteration".  Build a fresh set instead.
headers = set(os.path.join(my_directory, i) for i in headers)
yml_files = build_metadata.extract_from_headers.extract_from_headers(headers, target_directory = os.path.join(my_directory, 'extracted_data'), macros = ["AL_ALEXT_PROTOTYPES"])
macros = yml_files['macros']
functions = yml_files['functions']
#For each macro, we now assign a class (if possible).
#pass 1: replace all macros with dicts with default values.
new_macros = dict()
# .items() behaves the same on Python 2 and 3 (the original used py2-only iteritems()).
for i, v in macros.items():
    new_macros[i] = {'value': v, 'object' : '', 'range' : '', 'setter' : ''}
macros = new_macros
#pass 2: assignment of associated objects, where possible.
effects = set(['reverb', 'eaxreverb'])
for name, value in macros.items():
    for classname in effects:
        prefix = 'AL_' + classname.upper() + '_'
        if name.startswith(prefix):
            value['object'] = classname
#pass 3: assignment of ranges where possible.
for k, v in macros.items():
    # Derive AL_FOO_MIN_BAR / AL_FOO_MAX_BAR names from AL_FOO_BAR.
    min_macro = k.split('_')
    min_macro = min_macro[:2] + ['MIN'] + min_macro[2:]
    max_macro = k.split('_')
    max_macro = max_macro[:2] + ['MAX'] + max_macro[2:]
    min_macro = '_'.join(min_macro)
    max_macro = '_'.join(max_macro)
    if min_macro in macros and max_macro in macros:
        v['range'] = [min_macro, max_macro]
        #min and max macros have no associated object. Kill this info.
        macros[min_macro]['object'] = None
        macros[max_macro]['object'] = None
# BUG FIX: the py2-only builtin file() was replaced with open(), which works on
# both Python 2 and 3.
with open(os.path.join(my_directory, 'extracted_data', 'functions_raw.yml'), 'w') as outfile:
    yaml.dump(data = functions, stream = outfile)
with open(os.path.join(my_directory, 'extracted_data', 'macros_raw.yml'), 'w') as outfile:
    yaml.dump(data = macros, stream = outfile)
| {
"repo_name": "camlorn/camlorn_audio_rewrite",
"path": "build_data.py",
"copies": "1",
"size": "2094",
"license": "bsd-2-clause",
"hash": -932045381195006300,
"line_mean": 36.3928571429,
"line_max": 176,
"alpha_frac": 0.6843361987,
"autogenerated": false,
"ratio": 3.0172910662824206,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42016272649824205,
"avg_score": null,
"num_lines": null
} |
"A script that calculates allele frequencies from plink data"
import argparse

import pydigree as pyd

# Command-line interface: PED/MAP inputs plus an optional SNP whitelist.
parser = argparse.ArgumentParser()
parser.add_argument('--ped', required=True, help='Plink formatted PED file')
parser.add_argument('--map', required=True, help='Plink formatted MAP file')
# BUG FIX: `required` expects a bool; the original passed required=None, which
# only worked because None is falsy.  Make the intent explicit.
parser.add_argument('--snps', required=False, nargs='*', metavar='SNP',
                    default=None,
                    help='Only calculate frequencies for the specified SNPS')
args = parser.parse_args()

peds = pyd.io.plink.read_plink(pedfile=args.ped, mapfile=args.map)

# Pre-build the whitelist once; membership tests in the main loop are O(1).
if args.snps is not None:
    onlysnps = set(args.snps)
def formatted(*cells):
    """Join the given values into a single tab-delimited string."""
    return '\t'.join(str(cell) for cell in cells)
# Walk every marker on every chromosome and print one line per minor allele:
# chromosome label, physical position, marker name, major allele, minor allele, MAF.
for chromidx, chromobj in enumerate(peds.chromosomes):
    for locidx, markername in enumerate(chromobj.labels):
        # Skip markers not in the optional --snps whitelist.
        if args.snps is not None and markername not in onlysnps:
            continue
        locus = chromidx, locidx
        # Sort alleles by frequency, most common first; the first entry is the
        # major allele, everything after it is reported as a minor allele.
        freqs = list(peds.allele_frequencies(locus).items())
        freqs = sorted(freqs, key=lambda x: x[1], reverse=True)
        maj_allele = freqs[0][0]
        for min_allele, maf in freqs[1:]:
            maf_str = '{:5.4g}'.format(maf)
            print(formatted(chromobj.label, chromobj.physical_map[locidx],
                            chromobj.labels[locidx], maj_allele, min_allele,
                            maf_str))
| {
"repo_name": "jameshicks/pydigree",
"path": "scripts/frequencies.py",
"copies": "1",
"size": "1422",
"license": "apache-2.0",
"hash": 7582885871019944000,
"line_mean": 34.55,
"line_max": 77,
"alpha_frac": 0.6378340366,
"autogenerated": false,
"ratio": 3.4100719424460433,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45479059790460435,
"avg_score": null,
"num_lines": null
} |
"""A script that changes a scan parameter (usually PlsrDAC, innermost loop) in a certain range for selected pixels and measures the length of the HitOR signal with ToT and TDC method.
The TDC method gives higher precision charge information than the TOT method. The TDC method is limited to single pixel cluster. During the calibration only one pixel is enabled at a time.
"""
import logging
import os.path
import numpy as np
import tables as tb
import progressbar
from pybar.fei4.register_utils import make_pixel_mask_from_col_row, make_box_pixel_mask_from_col_row
from pybar.fei4_run_base import Fei4RunBase
from pybar.run_manager import RunManager
from pybar.analysis.analysis_utils import get_scan_parameter, get_unique_scan_parameter_combinations, get_scan_parameters_table_from_meta_data, get_ranges_from_array, AnalysisError
from pybar.analysis.analyze_raw_data import AnalyzeRawData
from pybar.analysis.plotting.plotting import plot_scurves, plot_tot_tdc_calibration
def create_hitor_calibration(output_filename, plot_pixel_calibrations=False):
    '''Generating HitOr calibration file (_calibration.h5) from raw data file and plotting of calibration data.

    Parameters
    ----------
    output_filename : string
        Input raw data file name.
    plot_pixel_calibrations : bool, iterable
        If True, generating additional pixel calibration plots. If list of column and row tuples (from 1 to 80 / 336), print selected pixels.

    Returns
    -------
    nothing
    '''
    logging.info('Analyze HitOR calibration data and plot results of %s', output_filename)

    with AnalyzeRawData(raw_data_file=output_filename, create_pdf=True) as analyze_raw_data:  # Interpret the raw data file
        analyze_raw_data.create_occupancy_hist = False  # too many scan parameters to do in ram histogramming
        analyze_raw_data.create_hit_table = True
        analyze_raw_data.create_tdc_hist = True
        analyze_raw_data.align_at_tdc = True  # align events at TDC words, first word of event has to be a tdc word
        analyze_raw_data.interpret_word_table()
        analyze_raw_data.interpreter.print_summary()
        analyze_raw_data.plot_histograms()
        n_injections = analyze_raw_data.n_injections  # use later
        meta_data = analyze_raw_data.out_file_h5.root.meta_data[:]
        scan_parameters_dict = get_scan_parameter(meta_data)
        inner_loop_parameter_values = scan_parameters_dict[next(reversed(scan_parameters_dict))]  # inner loop parameter name is unknown
        scan_parameter_names = scan_parameters_dict.keys()
        # col_row_combinations = get_unique_scan_parameter_combinations(analyze_raw_data.out_file_h5.root.meta_data[:], scan_parameters=('column', 'row'), scan_parameter_columns_only=True)

        # Map each unique scan-parameter combination to its event-number range
        # so hits can be sliced per (column, row, PlsrDAC) step below.
        meta_data_table_at_scan_parameter = get_unique_scan_parameter_combinations(meta_data, scan_parameters=scan_parameter_names)
        scan_parameter_values = get_scan_parameters_table_from_meta_data(meta_data_table_at_scan_parameter, scan_parameter_names)
        event_number_ranges = get_ranges_from_array(meta_data_table_at_scan_parameter['event_number'])
        event_ranges_per_parameter = np.column_stack((scan_parameter_values, event_number_ranges))
        if analyze_raw_data.out_file_h5.root.Hits.nrows == 0:
            raise AnalysisError("Found no hits.")
        hits = analyze_raw_data.out_file_h5.root.Hits[:]
        event_numbers = hits['event_number'].copy()  # create contiguous array, otherwise np.searchsorted too slow, http://stackoverflow.com/questions/15139299/performance-of-numpy-searchsorted-is-poor-on-structured-arrays

        output_filename = os.path.splitext(output_filename)[0]
        with tb.open_file(output_filename + "_calibration.h5", mode="w") as calibration_data_file:
            logging.info('Create calibration')
            # Result histogram indexed by [col, row, PlsrDAC step, plane] where the
            # planes are: 0 = mean ToT, 1 = mean TDC, 2 = ToT RMS, 3 = TDC RMS
            # (the original comment listed a different order than the assignments below).
            calibration_data = np.full(shape=(80, 336, len(inner_loop_parameter_values), 4), fill_value=np.nan, dtype='f4')

            progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(event_ranges_per_parameter), term_width=80)
            progress_bar.start()

            for index, (actual_scan_parameter_values, event_start, event_stop) in enumerate(event_ranges_per_parameter):
                if event_stop is None:  # happens for the last chunk
                    event_stop = hits[-1]['event_number'] + 1
                array_index = np.searchsorted(event_numbers, np.array([event_start, event_stop]))
                actual_hits = hits[array_index[0]:array_index[1]]
                # Unpack the scan parameter tuple into column / row / PlsrDAC.
                for item_index, item in enumerate(scan_parameter_names):
                    if item == "column":
                        actual_col = actual_scan_parameter_values[item_index]
                    elif item == "row":
                        actual_row = actual_scan_parameter_values[item_index]
                    elif item == "PlsrDAC":
                        plser_dac = actual_scan_parameter_values[item_index]
                    else:
                        raise ValueError("Unknown scan parameter %s" % item)

                # Only pixel of actual column/row should be in the actual data chunk but since FIFO is not cleared for each scan step due to speed reasons and there might be noisy pixels this is not always the case
                n_wrong_pixel = np.count_nonzero(np.logical_or(actual_hits['column'] != actual_col, actual_hits['row'] != actual_row))
                if n_wrong_pixel != 0:
                    logging.warning('%d hit(s) from other pixels for scan parameters %s', n_wrong_pixel, ', '.join(['%s=%s' % (name, value) for (name, value) in zip(scan_parameter_names, actual_scan_parameter_values)]))

                actual_hits = actual_hits[np.logical_and(actual_hits['column'] == actual_col, actual_hits['row'] == actual_row)]  # Only take data from selected pixel
                actual_tdc_hits = actual_hits[(actual_hits['event_status'] & 0b0000111110011100) == 0b0000000100000000]  # only take hits from good events (one TDC word only, no error)
                actual_tot_hits = actual_hits[(actual_hits['event_status'] & 0b0000100010011100) == 0b0000000000000000]  # only take hits from good events for tot
                tot, tdc = actual_tot_hits['tot'], actual_tdc_hits['TDC']
                if tdc.shape[0] < n_injections:
                    logging.info('%d of %d expected TDC hits for scan parameters %s', tdc.shape[0], n_injections, ', '.join(['%s=%s' % (name, value) for (name, value) in zip(scan_parameter_names, actual_scan_parameter_values)]))
                if tot.shape[0] < n_injections:
                    logging.info('%d of %d expected hits for scan parameters %s', tot.shape[0], n_injections, ', '.join(['%s=%s' % (name, value) for (name, value) in zip(scan_parameter_names, actual_scan_parameter_values)]))

                inner_loop_scan_parameter_index = np.where(plser_dac == inner_loop_parameter_values)[0][0]  # translate the scan parameter value to an index for the result histogram
                # numpy mean and std return nan if array is empty
                calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 0] = np.mean(tot)
                calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 1] = np.mean(tdc)
                calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 2] = np.std(tot)
                calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 3] = np.std(tdc)

                progress_bar.update(index)
            progress_bar.finish()

            # Persist the calibration histogram (compressed) plus its metadata.
            calibration_data_out = calibration_data_file.create_carray(calibration_data_file.root, name='HitOrCalibration', title='Hit OR calibration data', atom=tb.Atom.from_dtype(calibration_data.dtype), shape=calibration_data.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            calibration_data_out[:] = calibration_data
            calibration_data_out.attrs.dimensions = scan_parameter_names
            calibration_data_out.attrs.scan_parameter_values = inner_loop_parameter_values
            calibration_data_out.flush()

            # with PdfPages(output_filename + "_calibration.pdf") as output_pdf:
            plot_scurves(calibration_data[:, :, :, 0], inner_loop_parameter_values, "ToT calibration", "ToT", 15, "Charge [PlsrDAC]", filename=analyze_raw_data.output_pdf)
            plot_scurves(calibration_data[:, :, :, 1], inner_loop_parameter_values, "TDC calibration", "TDC [ns]", None, "Charge [PlsrDAC]", filename=analyze_raw_data.output_pdf)

            # Mean/std over all pixels (nan-aware: pixels without data are ignored).
            tot_mean_all_pix = np.nanmean(calibration_data[:, :, :, 0], axis=(0, 1))
            tot_error_all_pix = np.nanstd(calibration_data[:, :, :, 0], axis=(0, 1))
            tdc_mean_all_pix = np.nanmean(calibration_data[:, :, :, 1], axis=(0, 1))
            tdc_error_all_pix = np.nanstd(calibration_data[:, :, :, 1], axis=(0, 1))
            plot_tot_tdc_calibration(scan_parameters=inner_loop_parameter_values, tot_mean=tot_mean_all_pix, tot_error=tot_error_all_pix, tdc_mean=tdc_mean_all_pix, tdc_error=tdc_error_all_pix, filename=analyze_raw_data.output_pdf, title="Mean charge calibration of %d pixel(s)" % np.count_nonzero(~np.all(np.isnan(calibration_data[:, :, :, 0]), axis=2)))

            # plotting individual pixels
            if plot_pixel_calibrations is True:
                # selecting pixels with non-nan entries
                col_row_non_nan = np.nonzero(~np.all(np.isnan(calibration_data[:, :, :, 0]), axis=2))
                plot_pixel_calibrations = np.dstack(col_row_non_nan)[0]
            elif plot_pixel_calibrations is False:
                plot_pixel_calibrations = np.array([], dtype=np.int)
            else:  # assuming list of column / row tuples
                plot_pixel_calibrations = np.array(plot_pixel_calibrations) - 1

            # generate index array
            pixel_indices = np.arange(plot_pixel_calibrations.shape[0])
            plot_n_pixels = 10  # number of pixels at the beginning, center and end of the array
            np.random.seed(0)  # deterministic pixel selection across reruns
            # select random pixels
            if pixel_indices.size - 2 * plot_n_pixels >= 0:
                random_pixel_indices = np.sort(np.random.choice(pixel_indices[plot_n_pixels:-plot_n_pixels], min(plot_n_pixels, pixel_indices.size - 2 * plot_n_pixels), replace=False))
            else:
                random_pixel_indices = np.array([], dtype=np.int)
            selected_pixel_indices = np.unique(np.hstack([pixel_indices[:plot_n_pixels], random_pixel_indices, pixel_indices[-plot_n_pixels:]]))

            # plotting individual pixels
            for (column, row) in plot_pixel_calibrations[selected_pixel_indices]:
                logging.info("Plotting charge calibration for pixel column " + str(column + 1) + " / row " + str(row + 1))
                tot_mean_single_pix = calibration_data[column, row, :, 0]
                tot_std_single_pix = calibration_data[column, row, :, 2]
                tdc_mean_single_pix = calibration_data[column, row, :, 1]
                tdc_std_single_pix = calibration_data[column, row, :, 3]
                plot_tot_tdc_calibration(scan_parameters=inner_loop_parameter_values, tot_mean=tot_mean_single_pix, tot_error=tot_std_single_pix, tdc_mean=tdc_mean_single_pix, tdc_error=tdc_std_single_pix, filename=analyze_raw_data.output_pdf, title="Charge calibration for pixel column " + str(column + 1) + " / row " + str(row + 1))
class HitOrCalibration(Fei4RunBase):
    ''' HitOR calibration scan

    Enables one pixel at a time, injects at each PlsrDAC step of the inner
    loop and records the HitOR signal length with both the ToT and the TDC
    method (see create_hitor_calibration for the analysis).
    '''
    _default_run_conf = {
        "broadcast_commands": True,
        "threaded_scan": True,
        "n_injections": 200,  # number of injections
        "injection_delay": 5000,  # for really low feedbacks (ToT >> 300 ns) one needs to increase the injection delay
        "scan_parameters": [('column', None),
                            ('row', None),
                            ('PlsrDAC', [40, 50, 60, 80, 130, 180, 230, 280, 340, 440, 540, 640, 740])],  # 0 400 sufficient for most tunings
        "reset_rx_on_error": True,  # reset RX on errors that may occur during scan
        "pixels": (np.dstack(np.where(make_box_pixel_mask_from_col_row([40, 41], [150, 151]) == 1)) + 1).tolist()[0],  # list of (col, row) tupels. From 1 to 80/336.
        "enable_shift_masks": ["Enable", "C_Low", "C_High"],  # enable masks shifted during scan
        "disable_shift_masks": ["Imon"]  # disable masks shifted during scan
    }

    def configure(self):
        '''Write the scan-specific trigger settings to the FE-I4 global registers.'''
        commands = []
        commands.extend(self.register.get_commands("ConfMode"))
        self.register.set_global_register_value("Trig_Count", 6)  # decrease trigger count to reduce data
        self.register.set_global_register_value("Trig_Lat", 215)  # adjust delay for smaller bcid window
        self.register.set_global_register_value("ErrorMask", 1536)  # deactivate hit bus service record
        commands.extend(self.register.get_commands("WrRegister", name=["Trig_Lat", "Trig_Count", "ErrorMask"]))
        self.register_utils.send_commands(commands)

    def scan(self):
        '''Outer loop over the selected pixels, inner loop over the PlsrDAC values.'''
        def write_double_column(column):
            # FE-I4 double-column index (0-based) containing the given column (1-based)
            return (column - 1) / 2

        def inject_double_column(column):
            # Colpr_Addr value used to route the injection to the given column
            if column == 80:
                return 39
            else:
                return (column) / 2

        # One CAL + delay + LVL1 trigger sequence, padded with the configured injection delay
        cal_lvl1_command = self.register.get_commands("CAL")[0] + self.register.get_commands("zeros", length=40)[0] + self.register.get_commands("LV1")[0] + self.register.get_commands("zeros", length=self.injection_delay)[0]
        scan_parameter_name = self.scan_parameters._fields[-1]  # scan parameter is in inner loop
        scan_parameter_values = self.scan_parameters[-1][:]  # create deep copy of scan_parameters, they are overwritten in self.readout
        pixels_sorted = sorted(self.pixels)  # , key=lambda tup: tup[0])
        logging.info("Scanning %d pixels" % len(self.pixels))
        # use sorted pixels to prevent overwriting of raw data file when writing a file per column
        for pixel_index, pixel in enumerate(pixels_sorted):
            if self.stop_run.is_set():
                break
            column = pixel[0]
            row = pixel[1]
            logging.info('Scanning pixel: %d / %d (column / row)', column, row)
            # Rewrite only the double columns that changed: the current pixel's DC
            # and (from the second pixel on) the previously enabled pixel's DC.
            if pixel_index:
                dcs = [write_double_column(column)]
                dcs.append(write_double_column(self.pixels[pixel_index - 1][0]))
            else:
                dcs = []
            commands = []
            commands.extend(self.register.get_commands("ConfMode"))
            single_pixel_enable_mask = make_pixel_mask_from_col_row([column], [row])
            # NOTE(review): relies on Python 2 map() being eager; under Python 3 this
            # lazy map would never execute -- confirm before porting.
            map(lambda mask_name: self.register.set_pixel_register_value(mask_name, single_pixel_enable_mask), self.enable_shift_masks)
            commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, dcs=dcs, name=self.enable_shift_masks, joint_write=True))
            single_pixel_disable_mask = make_pixel_mask_from_col_row([column], [row], default=1, value=0)
            map(lambda mask_name: self.register.set_pixel_register_value(mask_name, single_pixel_disable_mask), self.disable_shift_masks)
            commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, dcs=dcs, name=self.disable_shift_masks, joint_write=True))
            self.register.set_global_register_value("Colpr_Addr", inject_double_column(column))
            commands.append(self.register.get_commands("WrRegister", name=["Colpr_Addr"])[0])
            self.register_utils.send_commands(commands)
            self.dut['TDC']['ENABLE'] = True
            self.dut['TDC']['EN_NO_WRITE_TRIG_ERR'] = False  # Do not trigger TDC words
            for scan_parameter_value in scan_parameter_values:
                if self.stop_run.is_set():
                    break
                commands = []
                commands.extend(self.register.get_commands("ConfMode"))
                self.register.set_global_register_value(scan_parameter_name, scan_parameter_value)
                commands.extend(self.register.get_commands("WrRegister", name=[scan_parameter_name]))
                commands.extend(self.register.get_commands("RunMode"))
                self.register_utils.send_commands(commands)
                self.dut['TDC']['EN_ARMING'] = True
                with self.readout(reset_fifo=True, column=column, row=row, **{scan_parameter_name: scan_parameter_value}):
                    self.register_utils.send_command(command=cal_lvl1_command, repeat=self.n_injections)
                self.dut['TDC']['EN_ARMING'] = False
            self.dut['TDC']['ENABLE'] = False

    def handle_data(self, data, new_file=['column'], flush=True):  # Create new file for each scan parameter change
        '''Forward readout data; a new output file is started whenever 'column' changes.'''
        # NOTE: the mutable default for new_file is shared across calls; harmless
        # here since it is only passed through, never mutated.
        super(HitOrCalibration, self).handle_data(data=data, new_file=new_file, flush=flush)

    def analyze(self):
        '''Build the _calibration.h5 file and plots from the recorded raw data.'''
        create_hitor_calibration(self.output_filename, plot_pixel_calibrations=True)
if __name__ == "__main__":
    # Run the calibration with the settings from configuration.yaml.
    with RunManager('configuration.yaml') as runmngr:
        runmngr.run_run(HitOrCalibration)
| {
"repo_name": "SiLab-Bonn/pyBAR",
"path": "pybar/scans/calibrate_hit_or.py",
"copies": "1",
"size": "17648",
"license": "bsd-3-clause",
"hash": 9025521105500629000,
"line_mean": 69.4493927126,
"line_max": 355,
"alpha_frac": 0.6367860381,
"autogenerated": false,
"ratio": 3.68203630294179,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9805152840557556,
"avg_score": 0.0027339000968467472,
"num_lines": 247
} |
"""A script that changes the PlsrDAC in a certain range and measures the voltage step from the transient injection signal.
Since the minimum and maximum of the signal is measured, this script gives a more precise PlsrDAC calibration than
the normal PlsrDAC calibration. Do not forget to add the oscilloscope device in dut_mio.yaml.
The oscilloscope can be any device supported by basil, but the string interpretation here is only implemented for Tektronix oscilloscopes!
"""
import logging
import time
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.patches as mpatches
import numpy as np
import tables as tb
import progressbar
from pybar.run_manager import RunManager
from pybar.scans.scan_analog import AnalogScan
from pybar.scans.calibrate_plsr_dac import plot_pulser_dac
# Add oscilloscope interpretation functions below
def interpret_data_from_tektronix(preamble, data):
    ''' Interprets raw data from Tektronix

    Decodes a raw curve readout using the scaling constants from the
    oscilloscope preamble string (element 5 of the comma-separated preamble,
    itself semicolon-separated):
        Xn = XZEro + XINcr * (n - PT_Off)
        Yn = YZEro + YMUlt * (yn - YOFf)

    Parameters
    ----------
    preamble : string
        Preamble as returned by the scope; scaling fields are taken from
        preamble.split(',')[5].split(';').
    data : iterable of numbers
        Raw curve samples (ADC counts).

    returns: lists of x, y values in seconds/volt plus their unit strings'''
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float yields the identical float64 dtype.
    voltage = np.array(data, dtype=float)
    meta_data = preamble.split(',')[5].split(';')
    time_unit = meta_data[3][1:-1]  # strip the surrounding quotes
    XZEro = float(meta_data[5])
    XINcr = float(meta_data[4])
    PT_Off = float(meta_data[6])
    voltage_unit = meta_data[7][1:-1]  # strip the surrounding quotes
    YZEro = float(meta_data[10])
    YMUlt = float(meta_data[8])
    YOFf = float(meta_data[9])
    time = XZEro + XINcr * (np.arange(0, voltage.size) - PT_Off)
    voltage = YZEro + YMUlt * (voltage - YOFf)
    return time, voltage, time_unit, voltage_unit
# Select actual interpretation function
# (single indirection point: swap in another vendor's parser here)
interpret_oscilloscope_data = interpret_data_from_tektronix
class PlsrDacTransientCalibration(AnalogScan):
    ''' Transient PlsrDAC calibration scan

    Steps the PlsrDAC and measures the resulting voltage step on an external
    oscilloscope (see the run configuration below for scope settings).
    '''
    # Start from the AnalogScan defaults and override scope/scan specifics.
    _default_run_conf = AnalogScan._default_run_conf.copy()
    _default_run_conf.update({
        "broadcast_commands": False,
        "threaded_scan": False,
        "scan_parameters": [('PlsrDAC', range(0, 1024, 25))],  # plsr dac settings, be aware: too low plsDAC settings are difficult to trigger
        "enable_double_columns": [20],  # double columns which will be enabled during scan, default: use column 20
        "enable_mask_steps": [0],  # Scan only one mask step to save time
        "n_injections": 512,  # number of injections, has to be > 260 to allow for averaging 256 injection signals
        "channel": 1,  # oscilloscope channel
        "trigger_level_offset": 25,  # offset of the PlsrDAC baseline in mV, usually the offset voltage at PlsrDAC=0
        "data_points": 10000,  # horizontal record length (number of waveform samples)
        "max_data_index": None,  # maximum data index to be read out; e.g. 2000 reads data from 1 to 2000, if None, use max record length
        "horizontal_scale": 0.0000004,  # 0.0000020 for longer range
        "horizontal_delay_time": 0.0000016,  # 0.0000080 for longer range
        "vertical_scale": 0.2,  # volts per division
        "vertical_offset": 0.0,
        "vertical_position": -4,
        "coupling": "DC",
        "bandwidth": "20E6",  # reject noise, set to lower bandwidth (20MHz)
        "trigger_pulse_width": "500.0E-9",  # 500ns or more, roughly the length of the injection pulse at lowest potential
        "trigger_level": 0.0,  # trigger level in V of for the first measurement
        "fit_ranges": [(-1000, -100), (25, 50)],  # the fit range (in ns) relative to the trigger (t=0ns), first tuple: baseline, second tuple: peak
    })
def write_global_register(self, parameter, value):
    '''Write one FE-I4 global register value to the chip.

    The write is bracketed by a ConfMode/RunMode command pair so the front
    end is in configuration mode while the register is transferred.
    '''
    cmd_sequence = list(self.register.get_commands("ConfMode"))
    self.register.set_global_register_value(parameter, value)
    cmd_sequence += self.register.get_commands("WrRegister", name=[parameter])
    cmd_sequence += self.register.get_commands("RunMode")
    self.register_utils.send_commands(cmd_sequence)
def configure(self):
    '''Configure the FE-I4 (via the base class) and the oscilloscope,
    then route the PlsrDAC Vcal signal to the external pin.'''
    super(PlsrDacTransientCalibration, self).configure()
    # data acquisition
    self.dut['Oscilloscope'].data_init()  # Reset to factory settings
    self.dut['Oscilloscope'].set_data_width(2)  # 2 byte per value
    self.dut['Oscilloscope'].set_data_encoding("RIBINARY")  # signed integer
    self.dut['Oscilloscope'].set_horizontal_record_length(self.data_points)
    self.dut['Oscilloscope'].set_data_start(1)  # Set readout fraction of waveform
    # Readout window: either the full record length or the user-set maximum.
    if self.max_data_index is None:
        self.data_index = int(self.dut['Oscilloscope'].get_horizontal_record_length())
    else:
        self.data_index = self.max_data_index
    self.dut['Oscilloscope'].set_data_stop(self.data_index)  # Set readout fraction of waveform
    # waveform parameters
    self.dut['Oscilloscope'].set_average_waveforms(self.n_injections)  # For Tektronix it has to be power of 2
    # horizontal axis
    self.dut['Oscilloscope'].set_horizontal_scale(self.horizontal_scale)
    self.dut['Oscilloscope'].set_horizontal_delay_time(self.horizontal_delay_time)
    # vertical axis
    self.dut['Oscilloscope'].set_vertical_scale(self.vertical_scale, channel=self.channel)
    self.dut['Oscilloscope'].set_vertical_offset(self.vertical_offset, channel=self.channel)
    self.dut['Oscilloscope'].set_vertical_position(self.vertical_position, channel=self.channel)
    # input
    self.dut['Oscilloscope'].set_impedance("MEG", channel=self.channel)
    self.dut['Oscilloscope'].set_coupling(self.coupling, channel=self.channel)
    self.dut['Oscilloscope'].set_bandwidth(self.bandwidth, channel=self.channel)
    # pulse width trigger
    self.dut['Oscilloscope'].set_trigger_mode("NORMal")
    self.dut['Oscilloscope'].set_trigger_type("PULSe")
    self.dut['Oscilloscope'].set_trigger_pulse_class("WIDth")
    self.dut['Oscilloscope'].set_trigger_pulse_width_source("CH%d" % self.channel)
    self.dut['Oscilloscope'].set_trigger_pulse_width_polarity("POSitive")
    self.dut['Oscilloscope'].set_trigger_pulse_width_when("MOREthan")
    self.dut['Oscilloscope'].set_trigger_pulse_width_width(self.trigger_pulse_width)
    self.dut['Oscilloscope'].set_trigger_level(self.trigger_level)
    logging.info('Initialized oscilloscope %s' % self.dut['Oscilloscope'].get_name())
    # Route Vcal to pin
    commands = []
    self.register.set_global_register_value('Colpr_Mode', 0)  # one DC only
    self.register.set_global_register_value('Colpr_Addr', self.enable_double_columns[0])
    self.register.set_global_register_value('ExtDigCalSW', 0)
    self.register.set_global_register_value('ExtAnaCalSW', 1)  # Route Vcal to external pin
    commands.extend(self.register.get_commands("WrRegister", name=['Colpr_Addr', 'Colpr_Mode', 'ExtDigCalSW', 'ExtAnaCalSW']))
    commands.extend(self.register.get_commands("RunMode"))
    self.register_utils.send_commands(commands)
def scan(self):
    '''For each PlsrDAC value: take a quick sample acquisition to auto-adjust
    trigger level and vertical scale, then run the averaged analog-scan
    acquisition and store the resulting waveform.'''
    # Output data structures
    scan_parameter_values = self.scan_parameters.PlsrDAC
    shape = (len(scan_parameter_values), self.data_index)
    atom = tb.FloatAtom()
    data_out = self.raw_data_file.h5_file.create_carray(self.raw_data_file.h5_file.root, name='PlsrDACwaveforms', title='Waveforms from transient PlsrDAC calibration scan', atom=atom, shape=shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
    shape = (self.data_index,)
    atom = tb.FloatAtom()
    time_out = self.raw_data_file.h5_file.create_carray(self.raw_data_file.h5_file.root, name='Times', title='Time values', atom=atom, shape=shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
    # Attach scan settings as attributes so the analysis step can recover them.
    data_out.attrs.scan_parameter_values = scan_parameter_values
    data_out.attrs.enable_double_columns = self.enable_double_columns
    data_out.attrs.fit_ranges = self.fit_ranges
    data_out.attrs.trigger_level_offset = self.trigger_level_offset
    trigger_levels = []
    progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(scan_parameter_values), term_width=80)
    progress_bar.start()
    for index, scan_parameter_value in enumerate(scan_parameter_values):
        if self.stop_run.is_set():
            break
        # Update PlsrDAC parameter
        self.set_scan_parameters(PlsrDAC=scan_parameter_value)  # set scan parameter
        self.write_global_register('PlsrDAC', scan_parameter_value)  # write to FE
        # First pass: free-running sample acquisition to estimate the signal
        # level, from which trigger level and vertical scale are derived.
        self.dut['Oscilloscope'].set_trigger_mode("AUTO")
        self.dut['Oscilloscope'].set_trigger_type("EDGe")
        self.dut['Oscilloscope'].set_acquire_mode('SAMple')
        self.dut['Oscilloscope'].set_acquire_stop_after("RUNSTop")
        self.dut['Oscilloscope'].set_acquire_state("RUN")
        time.sleep(1.5)  # let the scope settle/acquire before forcing a trigger
        self.dut['Oscilloscope'].force_trigger()
        self.dut['Oscilloscope'].set_acquire_state("STOP")
        data = self.dut['Oscilloscope']._intf._resource.query_binary_values("DATA:SOURCE CH%d;:CURVe?" % self.channel, datatype='h', is_big_endian=True)
        self.preamble = self.dut['Oscilloscope'].get_parameters(channel=self.channel)
        times, voltages, time_unit, voltage_unit = interpret_oscilloscope_data(self.preamble, data)
        # Place the trigger halfway between baseline offset and signal level;
        # if nothing was read, fall back to the previous step's trigger level.
        if len(data):
            trigger_level = (np.mean(voltages) - self.trigger_level_offset * 1e-3) / 2.0 + self.trigger_level_offset * 1e-3
        else:
            trigger_level = trigger_levels[-1]
        self.dut['Oscilloscope'].set_trigger_level(trigger_level)
        self.dut['Oscilloscope'].set_vertical_scale(min(self.vertical_scale, (np.mean(voltages) + 0.2 * np.mean(voltages)) / 10), channel=self.channel)
        # self.dut['Oscilloscope'].set_vertical_scale(0.05, channel=self.channel)
        # Setup data aquisition and start scan loop
        self.dut['Oscilloscope'].set_trigger_mode("NORMal")
        self.dut['Oscilloscope'].set_trigger_type("PULSe")
        self.dut['Oscilloscope'].set_acquire_mode('AVErage')  # average to get rid of noise and keeping high band width
        self.dut['Oscilloscope'].set_acquire_stop_after("SEQuence")
        self.dut['Oscilloscope'].set_acquire_state("RUN")
        time.sleep(1.5)
        super(PlsrDacTransientCalibration, self).scan()  # analog scan loop
        self.dut['Oscilloscope'].set_acquire_state("STOP")
        if self.dut['Oscilloscope'].get_number_waveforms() == 0:
            logging.warning("No acquisition taking place.")
        data = self.dut['Oscilloscope']._intf._resource.query_binary_values("DATA:SOURCE CH%d;:CURVe?" % self.channel, datatype='h', is_big_endian=True)
        self.preamble = self.dut['Oscilloscope'].get_parameters(channel=self.channel)
        times, voltages, time_unit, voltage_unit = interpret_oscilloscope_data(self.preamble, data)
        data_out[index, :] = voltages[:]
        trigger_level = float(self.dut['Oscilloscope'].get_trigger_level())
        trigger_levels.append(trigger_level)
        progress_bar.update(index)
    # Restore the configured vertical scale and persist times/trigger levels.
    self.dut['Oscilloscope'].set_vertical_scale(self.vertical_scale, channel=self.channel)
    time_out[:] = times
    data_out.attrs.trigger_levels = trigger_levels
    progress_bar.finish()
def analyze(self):
logging.info('Analyzing the PlsrDAC waveforms')
with tb.open_file(self.output_filename + '.h5', 'r') as in_file_h5:
data = in_file_h5.root.PlsrDACwaveforms[:]
try:
times = in_file_h5.root.Times[:]
except tb.NoSuchNodeError: # for backward compatibility
times = np.array(in_file_h5.root.PlsrDACwaveforms._v_attrs.times)
scan_parameter_values = in_file_h5.root.PlsrDACwaveforms._v_attrs.scan_parameter_values
enable_double_columns = in_file_h5.root.PlsrDACwaveforms._v_attrs.enable_double_columns
trigger_levels = in_file_h5.root.PlsrDACwaveforms._v_attrs.trigger_levels
trigger_level_offset = in_file_h5.root.PlsrDACwaveforms._v_attrs.trigger_level_offset
fit_ranges = in_file_h5.root.PlsrDACwaveforms._v_attrs.fit_ranges
progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=data.shape[0], term_width=80)
with tb.open_file(self.output_filename + '_interpreted.h5', 'w') as out_file_h5:
description = [('PlsrDAC', np.uint32), ('voltage_step', np.float)] # output data table description
data_array = np.zeros((data.shape[0],), dtype=description)
data_table = out_file_h5.create_table(out_file_h5.root, name='plsr_dac_data', description=np.zeros((1,), dtype=description).dtype, title='Voltage steps from transient PlsrDAC calibration scan')
with PdfPages(self.output_filename + '_interpreted.pdf') as output_pdf:
progress_bar.start()
for index in range(data.shape[0]):
voltages = data[index]
trigger_level = trigger_levels[index]
plsr_dac = scan_parameter_values[index]
# index of first value below trigger level
step_index = np.argmin(voltages > trigger_level)
step_time = times[step_index]
start_index_baseline = np.argmin(np.abs(times * 1e9 - fit_ranges[0][0] - step_time * 1e9))
stop_index_baseline = np.argmin(np.abs(times * 1e9 - fit_ranges[0][1] - step_time * 1e9))
start_index_peak = np.argmin(np.abs(times * 1e9 - fit_ranges[1][0] - step_time * 1e9))
stop_index_peak = np.argmin(np.abs(times * 1e9 - fit_ranges[1][1] - step_time * 1e9))
if not (step_index > start_index_baseline and step_index > stop_index_baseline):
logging.warning("Baseline fit range might be too large")
if not (step_index < start_index_peak and step_index < stop_index_peak):
logging.warning("Peak fit range might be too small")
times_baseline = times[start_index_baseline:stop_index_baseline]
times_peak = times[start_index_peak:stop_index_peak]
voltage_baseline = voltages[start_index_baseline:stop_index_baseline]
voltage_peak = voltages[start_index_peak:stop_index_peak]
median_baseline = np.median(voltage_baseline)
median_peak = np.median(voltage_peak)
# sanity check
if not (median_baseline > trigger_level and median_peak < trigger_level and trigger_level * 1e3 >= trigger_level_offset):
logging.warning('Skipping PlsrDAC=%d because the trigger level of %.1f mV is too low.', plsr_dac, trigger_level * 1e3)
data_array['voltage_step'][index] = np.NaN
continue
data_array['PlsrDAC'][index] = plsr_dac
data_array['voltage_step'][index] = median_baseline - median_peak
# Plot waveform + fit
fig = Figure()
FigureCanvas(fig)
ax = fig.add_subplot(111)
ax.plot(times * 1e9, voltages * 1e3, label='PlsrDAC Pulse')
ax.axhline(y=trigger_level * 1e3, linewidth=2, linestyle="--", color='r', label='Trigger (%0.fmV)' % (trigger_level * 1e3))
ax.plot(times_baseline * 1e9, np.repeat(median_baseline * 1e3, times_baseline.size), '-', linewidth=2, label='Baseline (%.3fmV)' % (median_baseline * 1e3))
ax.plot(times_peak * 1e9, np.repeat(median_peak * 1e3, times_peak.size), '-', linewidth=2, label='Peak (%.3fmV)' % (median_peak * 1e3))
ax.set_title('PulserDAC=%d Waveform' % plsr_dac)
ax.set_xlabel('Time [ns]')
ax.set_ylabel('Voltage [mV]')
delta_string = '$\Delta=$%.3fmV' % (median_baseline * 1e3 - median_peak * 1e3)
handles, labels = ax.get_legend_handles_labels()
handles.append(mpatches.Patch(color='none', label=delta_string))
ax.legend(handles=handles, loc=4) # lower right
output_pdf.savefig(fig)
progress_bar.update(index)
data_table.append(data_array[np.isfinite(data_array['voltage_step'])]) # store valid data
# Plot, fit and store linear PlsrDAC transfer function
select = np.isfinite(data_array['voltage_step'])
x = data_array[select]['PlsrDAC']
y = data_array[select]['voltage_step']
slope_fit, slope_err, plateau_fit, plateau_err = plot_pulser_dac(x, y, output_pdf=output_pdf, title_suffix="(DC %d)" % (enable_double_columns[0],), atol_first_dev=1.0 * 1e-04, atol_second_dev=2.0 * 1e-05)
# Store result in file
self.register.calibration_parameters['Vcal_Coeff_0'] = np.nan_to_num(slope_fit[0] * 1000.0) # store in mV
self.register.calibration_parameters['Vcal_Coeff_1'] = np.nan_to_num(slope_fit[1] * 1000.0) # store in mV/DAC
progress_bar.finish()
if __name__ == "__main__":
    # Entry point: run the transient PlsrDAC calibration through pyBAR's
    # run manager, configured by the local configuration.yaml.
    with RunManager('configuration.yaml') as runmngr:
        runmngr.run_run(PlsrDacTransientCalibration)
| {
"repo_name": "SiLab-Bonn/pyBAR",
"path": "pybar/scans/calibrate_plsr_dac_transient.py",
"copies": "1",
"size": "18061",
"license": "bsd-3-clause",
"hash": 5362335622730516000,
"line_mean": 60.852739726,
"line_max": 268,
"alpha_frac": 0.6287580976,
"autogenerated": false,
"ratio": 3.486679536679537,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46154376342795367,
"avg_score": null,
"num_lines": null
} |
"""A script that changes the voltage in a certain range and measures the current needed for IV curves. Maximum voltage and current limits
can be set for device protection.
"""
import logging
import time
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import numpy as np
import tables as tb
import progressbar
from pybar.fei4_run_base import Fei4RunBase
from pybar.run_manager import RunManager
class IVScan(Fei4RunBase):
    """Measure an IV curve by stepping the bias voltage and reading the
    current at each step.

    The scan aborts when the configured maximum voltage or leakage current
    is exceeded; afterwards the voltage is ramped to ``bias_voltage``.
    """

    _default_run_conf = {
        "broadcast_commands": False,
        "threaded_scan": False,
        "voltages": np.arange(-2, -101, -2).tolist(),  # voltage steps of the IV curve
        "max_leakage": 10e-6,  # scan aborts if current is higher
        "max_voltage": -20,  # for safety, scan aborts if voltage is higher
        "minimum_delay": 0.5,  # minimum delay between current measurements in seconds
        "bias_voltage": -10  # if defined ramp bias to bias voltage after scan is finished, has to be less than last scanned voltage
    }

    def configure(self):
        # No FE configuration needed for a pure sourcemeter scan.
        pass

    def scan(self):
        """Step through the configured voltages and record a current table."""
        logging.info('Measure IV for V = %s' % self.voltages)
        # np.float was removed in NumPy 1.24; it was always the builtin float.
        description = [('voltage', float), ('current', float)]
        data = self.raw_data_file.h5_file.create_table(self.raw_data_file.h5_file.root, name='IV_data', description=np.zeros((1, ), dtype=description).dtype, title='Data from the IV scan')
        progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(self.voltages), term_width=80)
        progress_bar.start()
        actual_voltage = None
        try:
            for index, voltage in enumerate(self.voltages):
                if self.stop_run.is_set():
                    break
                if voltage > 0:
                    # BUG FIX: the exception was constructed but never raised,
                    # so positive voltages were applied to the device.
                    raise RuntimeError('Voltage has to be negative! Abort to protect device.')
                if self.abort_run.is_set():
                    break
                if abs(voltage) <= abs(self.max_voltage):
                    self.dut['Sourcemeter'].set_voltage(voltage)
                    actual_voltage = voltage
                    time.sleep(self.minimum_delay)
                else:
                    logging.info('Maximum voltage with %f V reached, abort', voltage)
                    break
                current = float(self.dut['Sourcemeter'].get_current().split(',')[1])
                if abs(current) > abs(self.max_leakage):
                    logging.info('Maximum current with %e I reached, abort', current)
                    break
                logging.info('V = %f, I = %e', voltage, current)
                max_repeat = 50
                for i in range(max_repeat):  # repeat current measurement until stable (current does not increase)
                    time.sleep(self.minimum_delay)
                    actual_current = float(self.dut['Sourcemeter'].get_current().split(',')[1])
                    if abs(actual_current) > abs(self.max_leakage):
                        logging.info('Maximum current with %e I reached, abort', actual_current)
                        break
                    if (abs(actual_current) < abs(current)):  # stable criterion
                        break
                    current = actual_current
                if i == max_repeat - 1:  # true if the leakage always increased
                    raise RuntimeError('Leakage current is not stable')
                else:
                    a = np.array([(voltage, current)], dtype=description)
                    data.append(a)
                progress_bar.update(index)
            progress_bar.finish()
            data.flush()
        finally:
            # ramp down to the configured bias voltage even on abort/error
            if self.bias_voltage and self.bias_voltage <= 0 and actual_voltage is not None:
                logging.info('Set bias voltage from %f V to %f V', actual_voltage, self.bias_voltage)
                progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(range(actual_voltage, self.bias_voltage + 1, 2)), term_width=80)
                progress_bar.start()
                for index, voltage in enumerate(range(actual_voltage, self.bias_voltage + 1, 2)):  # ramp until bias
                    time.sleep(self.minimum_delay)
                    self.dut['Sourcemeter'].set_voltage(voltage)
                    progress_bar.update(index)
                progress_bar.finish()

    def analyze(self):
        """Plot the recorded IV curve from the scan's HDF5 output."""
        logging.info('Analyze and plot results')
        with tb.open_file(self.output_filename + '.h5', 'r+') as in_file_h5:
            data = in_file_h5.root.IV_data[:]
        # Plot and fit result
        x, y = data['voltage'], data['current'] * 1e6
        fig = Figure()
        FigureCanvas(fig)
        ax = fig.add_subplot(111)
        ax.plot(x, y, '.-', label='data')
        ax.set_title('IV curve')
        ax.set_ylabel('Current [uA]')
        ax.set_xlabel('Voltage [V]')
        ax.grid(True)
        ax.legend(loc=0)
        fig.savefig(self.output_filename + '.pdf')
if __name__ == "__main__":
    # Entry point: run the IV scan through pyBAR's run manager,
    # configured by the local configuration.yaml.
    with RunManager('configuration.yaml') as runmngr:
        runmngr.run_run(IVScan)
| {
"repo_name": "SiLab-Bonn/pyBAR",
"path": "pybar/scans/scan_iv.py",
"copies": "1",
"size": "5354",
"license": "bsd-3-clause",
"hash": 3378317442352461000,
"line_mean": 47.6727272727,
"line_max": 257,
"alpha_frac": 0.5698543145,
"autogenerated": false,
"ratio": 4.096403978576894,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5166258293076893,
"avg_score": null,
"num_lines": null
} |
"""A script that contains all functions to do RNA-seq epistasis analysis."""
# important stuff:
import pandas as pd
import numpy as np
# Graphics
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.odr as odr
# labeller:
import gvars
from scipy.stats import gaussian_kde
from matplotlib import rc
rc('text', usetex=True)
rc('text.latex', preamble=r'\usepackage{cmbright}')
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
# JB's favorite Seaborn settings for notebooks
rc = {'lines.linewidth': 2,
'axes.labelsize': 18,
'axes.titlesize': 18,
'axes.facecolor': 'DFDFE5'}
sns.set_context('paper', rc=rc)
sns.set_style("dark")
mpl.rcParams['xtick.labelsize'] = 16
mpl.rcParams['ytick.labelsize'] = 16
mpl.rcParams['legend.fontsize'] = 16
genvar = gvars.genvars()
epi = gvars.epistasis()
def label(code1, code2):
    """Return a LaTeX 'X $>$ Y' epistasis label for two genotype code letters."""
    first = genvar.fancy_mapping[code1]
    second = genvar.fancy_mapping[code2]
    return '{0} $>$ {1}'.format(first, second)
def find_overlap(genotypes, df, q=0.1, col='code'):
    """Return the target_ids that are differentially expressed in all genotypes.

    A gene counts as shared when it appears with qval < q in every genotype
    listed in `genotypes` (matched against the `col` column of `df`).
    """
    wanted = len(genotypes)
    # keep only significant rows belonging to the requested genotypes
    sig = df[df[col].isin(genotypes) & (df.qval < q)]
    # a gene qualifies when every requested genotype is represented
    return [target for target, hits in sig.groupby('target_id')
            if len(hits[col].unique()) == wanted]
def find_STP(single_muts, double_mut, df, q=0.1):
    """
    Finds Shared Transcriptomic Phenotype among 2 single and a double mutant.

    Given 3 genotypes, find shared DE genes and return sliced dataframes.

    Params:
    single_muts - a list containing exactly two genotype codes
    double_mut - a code for a double mutant
    df - a tidy dataframe. must contain columns 'target_id' and 'code'
    q - q-value threshold forwarded to find_overlap

    Output:
    x, y, xy - Dataframes containing DE genes and all relevant information
    (Betas, q-values, etc...). First dataframe corresponds to single_muts[0],
    second dataframe corresponds to genotype single_muts[1] and third is the
    double mutant information.
    """
    if type(single_muts) is not list:
        raise ValueError('single_muts must be of type list')
    if type(double_mut) is not str:
        raise ValueError('double_mut must be of type str')
    # find the overlapping gene list
    # BUG FIX: `q` was accepted but never forwarded, so a caller-supplied
    # threshold was silently ignored and find_overlap's default used instead.
    genes = find_overlap(single_muts + [double_mut], df, q=q)
    # extract the dataframes
    in_genes = df.target_id.isin(genes)
    x = df[in_genes & (df.code == single_muts[0])]
    y = df[in_genes & (df.code == single_muts[1])]
    xy = df[in_genes & (df.code == double_mut)]
    # return the dataframes
    return x, y, xy
def f(B, x):
    """Straight line through the origin, y = B * x (the ODR model function)."""
    return B * x
def perform_odr(add, dev, wadd, wdev):
    """
    A wrapper to calculate an ODR regression.

    params:
    -------
    add, dev - x and y axis of the regression
    wadd, wdev - standard deviations

    returns:
    an ODR output object (slope in .beta[0])
    """
    # linear model through the origin (inlined: y = B * x)
    linear_model = odr.Model(lambda B, x: B * x)
    regression_data = odr.RealData(add, dev, sx=wadd, sy=wdev)
    fit = odr.ODR(regression_data, linear_model, beta0=[0])
    return fit.run()
def ODR(singles, double, epistasis):
    """Find the ODR in epistasis plot between single muts and a double mut.

    Params:
    singles - list of exactly two single-mutant dataframes with columns
              `b` and `se_b`
    double - double-mutant dataframe with the same columns
    epistasis - one of 'actual', 'xy=x', 'xy=y' (case-insensitive)

    Returns the scipy.odr output object of the requested fit.
    """
    # errors:
    if len(singles) != 2:
        raise ValueError('`singles` must be a list with two dataframes!')
    if type(double) is not pd.DataFrame:
        raise ValueError('`double` must be a dataframe!')
    try:
        epistasis = epistasis.lower()
    except AttributeError:
        # was a bare `except:`, which also swallowed KeyboardInterrupt etc.;
        # only objects without .lower() should trigger this error
        raise ValueError('epistasis must be a string!')
    if epistasis not in ['actual', 'xy=x', 'xy=y']:
        raise ValueError('epistasis must be one of `actual`, `xy=x`, `xy=y`')
    # define the X-coordinate as the additive model of interaction
    X = singles[0].b.values + singles[1].b.values
    # fit an ODR model
    wadd = np.sqrt(singles[1].se_b.values**2 + singles[0].se_b.values**2)
    if epistasis == 'actual':
        # deviation standard error: all three terms added in quadrature
        wdev = double.se_b.values**2
        for df in singles:  # the enumerate index was unused
            wdev += df.se_b.values**2
        wdev = np.sqrt(wdev)
        # calculate:
        output = perform_odr(X, double.b.values - X, wadd=wadd, wdev=wdev)
    if epistasis == 'xy=x':
        # if XY = X, then XY - X - Y = -Y
        output = perform_odr(X, -singles[1].b.values, wadd=wadd,
                             wdev=singles[1].se_b.values)
    if epistasis == 'xy=y':
        # if XY = Y, then XY - X - Y = -X
        output = perform_odr(X, -singles[0].b.values, wadd=wadd,
                             wdev=singles[0].se_b.values)
    return output
def plot_epistasis_regression(X, slope, **kwargs):
    """Overlay the fitted ODR line y = slope * x across the range of X."""
    # pad the plotted range slightly beyond the data
    grid = np.linspace(X.min() - 0.1, X.max() + 0.1, 1000)
    plt.plot(grid, grid * slope, **kwargs)
def draw_bs_sample(n):
    """Return n indices drawn with replacement from range(n) (bootstrap draw)."""
    indices = np.arange(0, n)
    return np.random.choice(indices, size=n)
def bootstrap(bframe, sebframe, epistasis='actual', nsim=1000):
    """
    Perform non-parametric bootstrapping for an epistasis ODR.

    Given a list of three numpy vectors containing betas and a separate list of
    vectors containing their standard errors, fit a model according to the
    `epistasis` parameter indicated and bootstrap it. The vectors MUST
    be provided in the order [X, Y, XY], where X is the first genotype, Y is
    the second genotype and XY is the double mutant.

    Params:
    bframe - a list of numpy vectors containing the betas for each genotype
    sebframe - a list of numpy vectors containing the se_b for each genotype
    epistasis - kind of model to simulate. One of:
               'actual', 'suppress', 'xy=x+y', 'xy=x', 'xy=y','xy=x=y'.
    nsim - number of iterations to be performed. Must be >0

    Output:
    s - numpy vector of the bootstrapped ODR slopes
    """
    nsim = int(nsim)
    # unpack
    xb, yb, xyb = bframe
    xseb, yseb, xyseb = sebframe
    s = np.zeros(nsim)
    # draw bootstrap repetitions
    for i in range(nsim):
        # sample data, keeping tuples paired:
        ind = draw_bs_sample(len(xb))
        currx = xb[ind]
        curry = yb[ind]
        currxy = xyb[ind]
        currsex = xseb[ind]
        currsey = yseb[ind]
        currsexy = xyseb[ind]
        # different bootstraps to do:
        # for the actual data, do a non-parametric bootstrap
        wadd = np.sqrt(currsex**2 + currsey**2)
        if epistasis == 'actual':
            X = currx + curry
            Y = currxy - X
            wdev = np.sqrt(wadd**2 + currsexy**2)
        elif epistasis == 'xy=x':
            X = currx + curry
            Y = -curry
            wdev = currsey
        elif epistasis == 'xy=y':
            X = currx + curry
            Y = -currx
            wdev = currsex
        # for all others, do a parametric bootstrap
        # because we know what the slope should be,
        # but we need to generate a structure to test
        # against. Non-parametric bootstrapping will
        # yield perfect lines every time.
        elif epistasis == 'xy=x+y':
            X = currx + curry
            Y = np.random.normal(0, wadd, len(X))
            wdev = wadd
        elif epistasis == 'xy=x=y':
            # flip a coin:
            # BUG FIX: np.random.randint(0, 1) always returns 0 (the upper
            # bound is exclusive), so the Y branch below was never taken.
            # Use (0, 2), as the 'suppress' branch already does.
            coin = np.random.randint(0, 2)
            # half the time use the X data
            # half the time use the Y
            if coin == 0:
                wadd = np.sqrt(2*currsex**2)
                wdev = currsex
                X = currx + np.random.normal(0, wadd, len(curry))
                Y = -1/2*currx + np.random.normal(0, wdev, len(currx))
            else:
                wadd = np.sqrt(2)*currsey
                wdev = currsey
                X = curry + np.random.normal(0, wadd, len(curry))
                Y = -1/2*curry + np.random.normal(0, wdev, len(curry))
        elif epistasis == 'suppress':
            # flip a coin:
            coin = np.random.randint(0, 2)
            # half the time use the X data
            # half the time use the Y
            if coin == 0:
                wadd = np.sqrt(2)*currsex
                wdev = currsey
                X = curry + np.random.normal(0, wadd, len(curry))
                Y = -curry + np.random.normal(0, wdev, len(curry))
            else:
                wadd = np.sqrt(2)*currsex
                wdev = currsex
                X = currx + np.random.normal(0, wadd, len(currx))
                Y = -currx + np.random.normal(0, wdev, len(currx))
        # do calcs and store in vectors
        output = perform_odr(X, Y, wadd=wadd, wdev=wdev)
        # extract the slope from the output and store it
        s[i] = output.beta[0]
    return s
def bootstrap_regression(singles, double, df, epistasis='actual', nsim=100):
    """
    Perform a bootstrap regression for the desired epistatic model.

    Params:
    singles - a list of 2 genotypes that make up the double mutant
    double - a string containing the ID of the double mutant.
    df - a tidy dataframe. must have columns 'target_id', 'b', 'se_b', 'qval'
    'code', and 'genotype'
    epistasis - kind of model to simulate. One of:
               'actual', 'suppress', 'xy=x+y', 'xy=x', 'xy=y','xy=x=y'.
    nsim - number of simulations to perform

    Output:
    numpy vector of bootstrapped ODR slope values
    """
    # slice out the shared transcriptomic phenotype for all three genotypes
    x, y, xy = find_STP(singles, double, df)
    # gather the betas and their standard errors in [X, Y, XY] order
    betas = [frame.b.values for frame in (x, y, xy)]
    errors = [frame.se_b.values for frame in (x, y, xy)]
    return bootstrap(betas, errors, epistasis=epistasis, nsim=int(nsim))
def epiplot(X, Y, Y_se, **kwargs):
    """Scatter the epistasis plot (additive effect vs. deviation).

    Params:
    X - predicted log-additive effect per gene
    Y - observed deviation from the log-additive effect
    Y_se - standard error of Y (sets the marker size)
    Keyword params:
    plot_unbranched - if True, draw the y = -x/2 unbranched-pathway line
    beta - ODR slope to overlay as a fit line; None (default) draws nothing
    s0, cmap, ax - appearance options / target axis

    Returns the matplotlib axis used.
    """
    plot_unbranched = kwargs.pop('plot_unbranched', False)
    # BUG FIX: the default used to be np.nan and `if beta:` is True for NaN,
    # so a meaningless NaN "data fit" line (plus legend entry) was drawn
    # whenever beta was omitted. Default to None and check finiteness.
    beta = kwargs.pop('beta', None)
    s0 = kwargs.pop('s0', 15)
    cmap = kwargs.pop('cmap', 'viridis')
    ax = kwargs.pop('ax', None)
    # Calculate the point density
    points = np.vstack([X, Y])
    z = gaussian_kde(points)(points)
    # plot:
    if ax is None:
        fig, ax = plt.subplots()
    if len(X) > 50:
        ax.scatter(X, Y, c=z, s=s0/Y_se,
                   edgecolor='', cmap=cmap, alpha=0.5)
    else:
        ax.scatter(X, Y, s=s0/np.sqrt(Y_se),
                   color='#33a02c', alpha=.9)
    if plot_unbranched:
        smoothX = np.linspace(X.min() - 0.5, X.max() + 0.5, 1000)
        plt.plot(smoothX, -1/2*smoothX, color='#1f78b4', ls='--',
                 label='Unbranched Pathway')
    if beta is not None and np.all(np.isfinite(beta)):
        plot_epistasis_regression(X, beta, ls='-', lw=2.3,
                                  color='#33a02c', label='data fit')
    plt.xlabel(r'Predicted log-Additive Effect')
    plt.ylabel(r'Deviation from log-Additive Effect')
    plt.legend()
    return ax
def make_epiplot(singles, double, df, **kwargs):
    """
    Draw an epistasis plot of the data.

    Params:
    singles - a list of 2 genotypes that make up the double mutant
    double - a string containing the ID of the double mutant.

    Output:
    x - tidy dataframe containing the DE gene data for singles[0]
    y - tidy dataframe containing the DE gene data for singles[1]
    xy - tidy dataframe containing the DE gene data for the double mutant
    ax - axis containing the plot
    """
    x, y, xy = find_STP(singles, double, df)
    fit = ODR([x, y], xy, 'actual')
    # transform coordinates into (additive effect, deviation) space:
    additive = x.b.values + y.b.values
    deviation = xy.b.values - additive
    deviation_se = np.sqrt(x.se_b.values**2 + y.se_b.values**2 +
                           xy.se_b.values**2)
    ax = epiplot(additive, deviation, deviation_se,
                 plot_unbranched=True, beta=fit.beta)
    return x, y, xy, ax
def calculate_all_bootstraps(x, y, xy, df, nsim=5000):
    """
    Bootstrap the epistasis coefficient of every model for one mutant trio.

    Params:
    x, y - genotype codes of the two single mutants
    xy - genotype code of the double mutant
    df - tidy dataframe with the DE data
    nsim - bootstrap iterations per model

    Output:
    epicoef - dict mapping each model name to its bootstrapped slope vector
    """
    return {model: bootstrap_regression([x, y], xy, df,
                                        epistasis=model, nsim=nsim)
            for model in epi.models}
def plot_bootstraps(x, y, epicoef, **kwargs):
    """Make KDE plots of the bootstrapped epistasis coefficients.

    Params:
    x, y - genotype code letters of the two single mutants (used for labels)
    epicoef - dict mapping each of the six model names to its bootstrapped
              slope vector; every model must have at least 10 draws

    Returns the matplotlib axis, with one KDE per model and a dashed vertical
    line at the mean of the observed ('actual') slopes.
    """
    # make dictionaries for plotting
    colors = {'actual': '#33a02c', 'xy=x': 'blue', 'xy=y': 'k',
              'xy=x=y': '#1f78b4', 'xy=x+y': '#ff7f00', 'suppress': '#e31a1c'
              }
    labels = {'actual': 'data', 'xy=x': label(x, y),
              'xy=y': label(y, x), 'xy=x=y': 'Unbranched',
              'xy=x+y': 'log-Additive', 'suppress': 'Suppression'
              }
    # checks and balances
    if type(epicoef) is not dict:
        raise ValueError('epicoef must be a dictionary')
    epistasis_choice = ['actual', 'xy=x', 'xy=y', 'xy=x=y', 'xy=x+y',
                        'suppress']
    for epistasis in epistasis_choice:
        if epistasis.lower() not in epicoef.keys():
            warning = 'epicoef must contain keys for all epistasis models'
            raise ValueError(warning)
        if len(epicoef[epistasis.lower()]) < 10:
            warning = 'too few bootstraps. Please perform >100' + \
                      'bootstraps per test'
            raise ValueError(warning)
    fig, ax = plt.subplots()
    for model, s in epicoef.items():
        try:
            sns.kdeplot(data=s, label=labels[model.lower()],
                        color=colors[model.lower()], **kwargs)
        except KeyError:
            # was a bare `except:` followed by a bare `next`, which is a
            # no-op expression statement (not `continue`); only an unknown
            # model key should be skipped here
            print('{0} did not have a label'.format(model))
    # plot a vertical line wherever the actual data mean is
    plt.gca().axvline(epicoef['actual'].mean(), color='#33a02c', ls='--', lw=3)
    plt.xlabel('Epistasis Coefficient')
    plt.ylabel('Cumulative Density Function')
    return ax
def permutation_test(s):
    """Per alternative model, compute the paired differences between the
    bootstrapped slopes of the data and of that model."""
    # renamed the loop variable: `epi` shadowed the module-level gvars object
    alternatives = ['xy=x', 'xy=y', 'xy=x=y', 'xy=x+y',
                    'suppress']
    diff = {}
    for model in alternatives:
        paired = [actual - other
                  for actual, other in zip(s['actual'], s[model])]
        diff[model] = paired
    return diff
def message(name, pval, alpha=0.01):
    """Format a one-line verdict for hypothesis `name` given its p-value."""
    if pval < alpha:
        template = '{0} can be rejected (pval <= {1:.2g})'
    else:
        template = '{0} cannot be rejected (pval = {1:.2g})'
    return template.format(name, pval)
def calculate_pval(s, diff):
    """Given `s` and `diff`, print out the p-values for each comparison.

    Params:
    s - dict of bootstrapped slope vectors (numpy arrays), incl. 'actual'
    diff - dict of paired differences as returned by permutation_test
    """
    for key, values in diff.items():
        # BUG FIX: permutation_test returns plain Python lists, but the
        # boolean masking below (array[array > 0]) requires an ndarray.
        array = np.asarray(values)
        # one-sided test in the direction of the observed mean difference
        if s[key].mean() > s['actual'].mean():
            pval = len(array[array > 0]) / len(array)
        else:
            pval = len(array[array < 0]) / len(array)
        # never report exactly zero; bound by the bootstrap resolution
        if pval == 0:
            p = 1 / (len(array) / 10)
        else:
            p = pval
        mess = message(key, p)
        print(mess)
| {
"repo_name": "WormLabCaltech/mprsq",
"path": "src/epistasis.py",
"copies": "1",
"size": "15460",
"license": "mit",
"hash": 4602572335771810300,
"line_mean": 30.2955465587,
"line_max": 79,
"alpha_frac": 0.5752910737,
"autogenerated": false,
"ratio": 3.2865646258503403,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.93603796536664,
"avg_score": 0.0002952091767881241,
"num_lines": 494
} |
"""A script that contains all genotype variable information for mprsq."""
class genvars:
    """A class that contains important variables for the mprsq project."""

    def __init__(self):
        """Initialize the class object with all the necessary variables.

        All string dicts that carry LaTeX markup use raw strings: '\\e' in a
        normal literal is an invalid escape sequence (SyntaxWarning today,
        SyntaxError in future Pythons); the resulting bytes are identical.
        """
        # double-mutant code -> code of the related single mutant
        self.double_mapping = {'bd': 'a', 'bc': 'f'}
        # fancy mapping, for use in graphs
        self.fancy_mapping = {'a': r'\emph{egl-9;vhl-1}',
                              'f': r'\emph{egl-9 hif-1}',
                              'b': r'\emph{egl-9}',
                              'c': r'\emph{hif-1}',
                              'd': r'\emph{vhl-1}',
                              'e': r'\emph{rhy-1}',
                              'g': r'\emph{fog-2}'
                              }
        # mapping, for use in printing or networkx
        self.mapping = {'a': 'egl-9;vhl-1',
                        'b': 'egl-9',
                        'c': 'hif-1',
                        'd': 'vhl-1',
                        'e': 'rhy-1',
                        'f': 'egl-9 hif-1',
                        'g': 'fog-2'
                        }
        # sort order for the mutants, for plotting
        self.sort_muts = {'a': 6,
                          'b': 2,
                          'c': 4,
                          'd': 3,
                          'e': 1,
                          'f': 7,
                          'g': 5
                          }
        # sort pairs, for plotting pairwise correlations
        self.sort_pairs = {'eb': 1, 'be': 1,
                           'ed': 2, 'de': 2,
                           'ec': 3, 'ce': 3,
                           'eg': 4, 'ge': 4,
                           'bd': 5, 'db': 5,
                           'cb': 6, 'bc': 6,
                           'bg': 7, 'gb': 7,
                           'cd': 8, 'dc': 8,
                           'dg': 9, 'gd': 9,
                           'cg': 10, 'gc': 10
                           }
        # decode pairs for plotting pairwise correlations
        # raw strings fix the invalid '\e' escape (see docstring above)
        self.decode_pairs = {'eb': r'\emph{rhy-1}, \emph{egl-9}',
                             'be': r'\emph{rhy-1}, \emph{egl-9}',
                             'ed': r'\emph{rhy-1}, \emph{vhl-1}',
                             'de': r'\emph{rhy-1}, \emph{vhl-1}',
                             'ec': r'\emph{rhy-1}, \emph{hif-1}',
                             'ce': r'\emph{rhy-1}, \emph{hif-1}',
                             'eg': r'\emph{rhy-1}, \emph{fog-2}',
                             'ge': r'\emph{rhy-1}, \emph{fog-2}',
                             'bd': r'\emph{egl-9}, \emph{vhl-1}',
                             'db': r'\emph{egl-9}, \emph{vhl-1}',
                             'cb': r'\emph{egl-9}, \emph{hif-1}',
                             'bc': r'\emph{egl-9}, \emph{hif-1}',
                             'bg': r'\emph{egl-9}, \emph{fog-2}',
                             'gb': r'\emph{egl-9}, \emph{fog-2}',
                             'cd': r'\emph{vhl-1}, \emph{hif-1}',
                             'dc': r'\emph{vhl-1}, \emph{hif-1}',
                             'dg': r'\emph{vhl-1}, \emph{fog-2}',
                             'gd': r'\emph{vhl-1}, \emph{fog-2}',
                             'cg': r'\emph{hif-1}, \emph{fog-2}',
                             'gc': r'\emph{hif-1}, \emph{fog-2}'
                             }
        # plot order for all qpcr plots
        self.plot_order = {r'\emph{egl-9;vhl-1}': 4,
                           r'\emph{egl-9 hif-1}': 5,
                           r'\emph{egl-9}': 1,
                           r'\emph{hif-1}': 3,
                           r'\emph{vhl-1}': 2,
                           r'\emph{rhy-1}': 0,
                           r'\emph{fog-2}': 6
                           }
        # plot colors for all qpcr plots
        self.plot_color = {r'\emph{egl-9;vhl-1}': '#e41a1c',
                           r'\emph{egl-9 hif-1}': '#377eb8',
                           r'\emph{egl-9}': '#4daf4a',
                           r'\emph{hif-1}': '#984ea3',
                           r'\emph{vhl-1}': '#ff7f00',
                           r'\emph{rhy-1}': '#ffff33'
                           }
class epistasis:
    """
    Container for the epistasis model identifiers used throughout mprsq.

    Attributes:
    models - the list of supported epistasis model names
    """

    def __init__(self):
        """Populate the list of supported epistasis models."""
        self.models = 'actual xy=x xy=y xy=x=y xy=x+y suppress'.split()
| {
"repo_name": "WormLabCaltech/mprsq",
"path": "src/gvars.py",
"copies": "1",
"size": "4550",
"license": "mit",
"hash": 2046580166803875000,
"line_mean": 40.3636363636,
"line_max": 75,
"alpha_frac": 0.3186813187,
"autogenerated": false,
"ratio": 3.5271317829457365,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9317819220526855,
"avg_score": 0.005598776223776224,
"num_lines": 110
} |
# a script that converts word file to txt files
# requires word application on Windows machine
# requirement:
# 1. Windows platform
# 2. python 2.7
# 3. pywin32, download from http://sourceforge.net/projects/pywin32/
# 4. word application installed on running machine
from win32com.client import constants, Dispatch
import pythoncom
import glob
import os
from zipfile import ZipFile
# convert the word file to a text file.
# @arg wordapp: The word IDispatch object
# @arg wordfile: The word file name
# @returns: The txt file name
def convert_to_text(wordapp, wordfile):
name, ext = os.path.splitext(wordfile)
if ext != '.doc' and ext != '.docx':
return None
txtfile = name + '.txt'
print txtfile
wordapp.Documents.Open(os.path.abspath(wordfile))
wdFormatTextLineBreaks = 3
wordapp.ActiveDocument.SaveAs(os.path.abspath(txtfile),
FileFormat=wdFormatTextLineBreaks)
wordapp.ActiveDocument.Close()
return txtfile
def next_doc():
    """Yield every .doc file, then every .docx file, in the current directory."""
    for pattern in ('*.doc', '*.docx'):
        for match in glob.glob(pattern):
            yield match
# convert all doc/docx files and zip all output txt files as the zipfilename
def convert_and_zip(zipfilename):
word = Dispatch("Word.Application")
with ZipFile(zipfilename, 'w') as fzip:
for doc in next_doc():
print 'converting ', doc, '...'
txtfile = convert_to_text(word, doc)
if txtfile:
fzip.write(txtfile)
word.Quit()
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/578121_Converts_doc_files_intext_files_Windows/recipe-578121.py",
"copies": "1",
"size": "1578",
"license": "mit",
"hash": -5493638667337446000,
"line_mean": 32.5744680851,
"line_max": 76,
"alpha_frac": 0.6679340938,
"autogenerated": false,
"ratio": 3.704225352112676,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4872159445912676,
"avg_score": null,
"num_lines": null
} |
# A script that finds occurrences of the .. tags:: directive
# and sets up the structure of the tags directory. One file
# is created for each subject tag, that file contains links to
# each instance of the tag throughout the docs.
import io
import os
import re
import shutil

from six import PY3
def make_tagdir():
    """Create (recreating if present) the ../tags directory and return its path."""
    # renamed local: `dir` shadowed the builtin of the same name
    base_dir = os.path.dirname(__file__)
    tagdir = os.path.join(base_dir, "../tags")
    if os.path.isdir(tagdir):
        shutil.rmtree(tagdir)  # start from a clean slate on every run
    os.mkdir(tagdir)
    return tagdir
def make_tagfiles(docdirs, tagdir):
    """Scan every file under `docdirs` for `.. tags::` directives and write,
    per tag, an .rst file in `tagdir` containing toctree links to each
    tagged source file.
    """
    for docdir in docdirs:
        for dirpath, dirnames, filenames in os.walk(docdir):
            for filename in filenames:
                # The path to the file being read for tags
                sourcefile = os.path.join(dirpath, filename)
                # io.open gives identical text semantics on Python 2 and 3,
                # replacing the former six.PY3 branch.
                with io.open(sourcefile, 'r', encoding="latin-1") as textfile:
                    # The text of the entire sourcefile
                    filetext = textfile.read()
                # BUG FIX: the old pattern ".. tags::.*$" had no re.MULTILINE
                # flag, so `$` only matched at the end of the file and
                # directives anywhere else were silently skipped. The capture
                # group holds the tag list.
                matches = re.findall(r"^\.\. tags::(.*)$", filetext, flags=re.M)
                for match in matches:
                    # BUG FIX: str.lstrip(".. tags::") strips a *character
                    # set*, which mangled tags starting with 's', 't', 'a',
                    # 'g', etc. (e.g. 'style' -> 'yle'); strip() on the
                    # captured group removes exactly the surrounding space.
                    taglist = match.strip().split(", ")
                    for tag in taglist:
                        filepath = os.path.join(tagdir, (tag + ".rst"))
                        # If the tagfile doesn't exist, let's put in a header
                        if not os.path.exists(filepath):
                            tagfileheader = """
=========================
%s
=========================
.. toctree::
""" % tag
                            # Write the header for this tag's file.
                            with open(filepath, 'a') as tagfile:
                                tagfile.write(tagfileheader)
                        # Write a link into an existing tagfile.
                        with open(filepath, 'a') as tagfile:
                            tagfile.write(" ../%s\n" % (sourcefile))
def make_tagindex(tagdir):
    """Append a glob-based index.rst to `tagdir` listing every tag page."""
    # Once all the files exist, create a simple index.rst file
    indexfile = tagdir + "/index.rst"
    # NOTE(review): os.walk yields one tuple per directory, so this appends
    # one header per directory under tagdir -- exactly once while tagdir
    # stays flat, but duplicated if subdirectories ever appear. Confirm
    # tagdir is intended to remain flat.
    for filepath, dirnames, filenames in os.walk(tagdir):
        with open(indexfile, 'a') as index:
            index.write("""
:orphan:
================
Tags in OpenLEGO
================
.. toctree::
:maxdepth: 1
:glob:
./*
""")
def tag():
    """Regenerate the tags directory from all ``.. tags::`` directives."""
    # Directories that are scanned for tagged documents.
    source_dirs = ['examples']
    target = make_tagdir()
    make_tagfiles(source_dirs, target)
    make_tagindex(target)
if __name__ == "__main__":
    # Allow running the tag-preprocessing step as a standalone script.
    tag()
| {
"repo_name": "daniel-de-vries/OpenLEGO",
"path": "openlego/docs/_utils/preprocess_tags.py",
"copies": "1",
"size": "3106",
"license": "apache-2.0",
"hash": 1140347213114532000,
"line_mean": 30.693877551,
"line_max": 79,
"alpha_frac": 0.5244687701,
"autogenerated": false,
"ratio": 4.331938633193864,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5356407403293864,
"avg_score": null,
"num_lines": null
} |
'''A script that gathers analytical data regarding the automata.'''
from lrp import Linear_Reward_Penalty as LRP
from mse import MSE
from environment import Environment
from pinger import Pinger
import numpy as np
# import matplotlib.pyplot as plt
import plotly.plotly as py
import plotly.graph_objs as go
# import tune_lrp as tune
# Define the number of discrete depths between the surface and seabed.
num_actions = 5
n = 10000  # experiments per ensemble
interval = 1
time_between = (n / interval) - 1  # iterations between environment switches
# Metrics
# ============================================================================
# The number of actions until the automata converges on an environment.
converge = np.zeros(5)
correct_actions = np.zeros(5)    # correct predictions, one slot per environment
incorrect_actions = np.zeros(5)  # incorrect predictions, one slot per environment
total_dist = np.zeros(5)         # accumulated prediction distance per environment
# Define the environment with the number of discrete depths for the detectable
# object.
env = Environment(num_actions)
# Define the LRI automata with the same number of actions. This number does
# not correspond to the number of receivers on the array. It is merely the
# representation of the array's ability to detect the object at that depth.
lrp = LRP(num_actions)  # The learning automata.
# The most probable depth that the object exists at, as calculated by the
# learner.
bestdepth = np.zeros(num_actions)
# Define the Markovian Switching Environment that will feed probabilities to
# the Pinger object.
# Each row is one environment: a probability per discrete depth/action.
Es = [[0.48796, 0.024438, 0.067891, 0.41971, 0.00],
      [0.021431, 0.071479, 0.40562, 0.50147, 0.00],
      [0.018288, 0.083153, 0.50582, 0.39274, 0.00],
      [0.48455, 0.015527, 0.18197, 0.31795, 0.00],
      [0.01675, 0.58845, 0.11313, 0.28167, 0.00]]
mse = MSE(Es)
det_obj = Pinger(mse.env_now())  # Create the detectable object.
for k in range(len(mse.envs)):
# Generate an ensemble of n experiments
det_obj.set_env(mse.env_now())
# lrp.a = tune.find_optimal_a(lrp, env, det_obj)
# print("Optimized value for a is: " + str(lrp.a))
lrp.a = 0.99999999999999
lrp.b = 0.5
bestdepth = np.zeros(num_actions)
current_best = 0
conv_per_k = []
for j in range(n):
# reset the action probabilities.
# lrp.reset_actions()
count = 0
# lrp.b = tune.find_optimal_b(lrp, env, det_obj)
# Run a single experiment. Terminate if it reaches 10000 iterations.
while(True and count < 10000):
# Define m as the next action predicting the depth of the object.
m = lrp.next_action()
# Define req as the next detectable object depth.
req = det_obj.request()
# reward if m = req.
resp = env.response(m, req)
if(not resp):
lrp.do_reward(m)
else:
lrp.do_penalty(m)
converge[k] += count + 1
if(max(lrp.p) > 0.98):
# The best depth counting from 0.
# Break at 98% convergence to a single depth.
bestdepth[np.argmax(lrp.p)] += 1
break
count += 1
conv_per_k.append(count)
if (current_best != np.argmax(bestdepth)):
current_best = np.argmax(bestdepth)
total_dist[k] += 14
# Plot conv per k
converge[k] = converge[k] / (n + 1)
print("The convergence vector is: " + str(converge[k]))
print("The desired vector is now: " + str(mse.env_now()))
print("The learned vector is: " + str(bestdepth / sum(bestdepth)))
print("The rate of convergence is: " + str(converge[k]))
print("Best depth is: " + str(np.argmax(bestdepth) * 14 + 14) + "m. " +
"The desired depth is: " + str(np.argmax(mse.env_now()) * 14 + 14) +
"m.")
print("*************************************************************")
mse.next_env()
print("The distance covered by the automata before convergence is: " +
str(total_dist))
# n, bins, patches = plt.hist(converge, 5, facecolor='g')
# plt.xlabel('Environment')
# plt.ylabel('Average Actions per 100000 Trials')
# plt.title('Actions Required for 95% Accurate Convergence')
# # plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
# plt.axis([1, 5, 0, 16])
# plt.grid(True)
# plt.show()
# data = [go.Bar(
# x=['Environment 1', 'Environment 2', 'Environment 3',
# 'Environment 4',
# 'Environment 5'],
# y=converge.tolist())]
# py.iplot(data, filename='basic-bar')
| {
"repo_name": "0xSteve/detection_learning",
"path": "P_model/Visualizations/UUAV_depth_finding/analytics.py",
"copies": "1",
"size": "4407",
"license": "apache-2.0",
"hash": -671019840492716300,
"line_mean": 38.7027027027,
"line_max": 78,
"alpha_frac": 0.6065350579,
"autogenerated": false,
"ratio": 3.3035982008995504,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.441013325879955,
"avg_score": null,
"num_lines": null
} |
"""A script that generates a TSV file of games to import into Team Cowboy.
Games are imported from any Sportszone web site (e.g. http://www.gshockey.com/).
If Team Cowboy API credentials are provided then Team Cowboy is queried for
existing games in the same date range as the Sportszone schedule. Any duplicate
games are removed from the list of games to export.
"""
import csv
import datetime
import getpass
import gflags
import logging
import pytz
import pytz.reference
import sportszone
import sys
import teamcowboy
import time
import urlparse
# Command-line flags.  gflags.FLAGS is the global registry; each DEFINE_*
# call below registers one option on it.
FLAGS = gflags.FLAGS
gflags.DEFINE_string('url', None,
                     ('The URL of a schedule page. If this is provided, then '
                      'the other Sportszone parameters are inferred from it.'))
# Individual Sportszone parameters; only needed when --url is not given.
gflags.DEFINE_string('sportszone_url', None, 'The base Sportszone URL.')
gflags.DEFINE_integer('league_id', None, 'The Sportszone league ID.')
gflags.DEFINE_integer('team_id', None, 'The Sportszone team ID.')
gflags.DEFINE_integer('season_id', None, 'The Sportszone season ID.')
gflags.DEFINE_string('output_file', 'schedule.tsv', 'The output file.')
# Jersey colors written into the exported TSV.
gflags.DEFINE_string('home_color', 'White', 'The color of the home jerseys.')
gflags.DEFINE_string('away_color', 'Black', 'The color of the away jerseys.')
# Repeatable flag; each value is "SportszoneName=TeamCowboyName".
gflags.DEFINE_multistring(
    'arena_map', [], 'A map from Sportszone to Team Cowboy arena names.')
# Team Cowboy API credentials; all optional (without them no de-duplication
# against existing Team Cowboy events is performed).
gflags.DEFINE_string(
    'team_cowboy_public_key', None, 'Team Cowboy API public key.')
gflags.DEFINE_string(
    'team_cowboy_private_key', None, 'Team Cowboy API private key.')
gflags.DEFINE_string('team_cowboy_username', None, 'Team Cowboy username.')
gflags.DEFINE_string('team_cowboy_password', None,
                     ('Team Cowboy password. If this is not provided, then a '
                      'secure prompt will be presented.'))
gflags.DEFINE_string('team_cowboy_team_name', None, 'Team Cowboy team name.')
def _precondition(cond, msg):
"""Asserts the truth of a precondition or fails.
Args:
cond: The result of the precondition test.
msg: A message to display upon failure.
"""
if not cond:
logging.error(msg)
print '%s\\nUsage: %s ARGS\\n%s' % (msg, sys.argv[0], FLAGS)
sys.exit(1)
def _create_arena_map():
  """Creates a map from Sportszone to Team Cowboy arena names from flag values.

  Each --arena_map value must look like "SportszoneName=TeamCowboyName";
  malformed values are logged and skipped.

  Returns:
    A map of arena names.
  """
  arena_names = {}
  for mapping in FLAGS.arena_map:
    pieces = mapping.split('=')
    if len(pieces) == 2:
      source_name, target_name = pieces
      arena_names[source_name] = target_name
    else:
      logging.warning('Ignoring invalid arena_map argument: %s', mapping)
  return arena_names
def _team_cowboy_games(start_dt, end_dt, num_games):
  """Gets a batch of games from Team Cowboy in the given date range.

  Args:
    start_dt: The start date of the schedule (fed to time.mktime below, so
      presumably a time.struct_time -- confirm against sportszone parsing).
    end_dt: The end date of the schedule (same type as start_dt).
    num_games: The number of games to include in the result set.

  Returns:
    A list of games in Team Cowboy.  Empty when the Team Cowboy flags are
    not all provided or the configured team name is not found.
  """
  # Without full API credentials and a team name, no query is possible.
  if not (FLAGS.team_cowboy_public_key and
          FLAGS.team_cowboy_private_key and
          FLAGS.team_cowboy_username and
          FLAGS.team_cowboy_team_name):
    return []
  password = FLAGS.team_cowboy_password
  if not password:
    # Fall back to a secure interactive prompt.
    password = getpass.getpass(
        'Enter the Team Cowboy password for %s: '
        % FLAGS.team_cowboy_username)
  tc = teamcowboy.TeamCowboy(
      FLAGS.team_cowboy_public_key, FLAGS.team_cowboy_private_key)
  token = tc.auth_get_user_token(FLAGS.team_cowboy_username, password)
  teams = tc.user_get_teams(token)
  # Convert the struct_time bounds to the 'YYYY-MM-DD HH:MM:SS' strings the
  # Team Cowboy API expects.
  start_date_time = datetime.datetime.fromtimestamp(
      time.mktime(start_dt)).strftime('%Y-%m-%d %H:%M:%S')
  end_date_time = datetime.datetime.fromtimestamp(
      time.mktime(end_dt)).strftime('%Y-%m-%d %H:%M:%S')
  # Find the configured team and fetch its events in the date range.
  for team in teams:
    if team['name'] == FLAGS.team_cowboy_team_name:
      team_id = team['teamId']
      return tc.team_get_events(
          token, team_id, filter_type='specificDates',
          start_date_time=start_date_time, end_date_time=end_date_time,
          qty=num_games)
  return []
def _sportszone_games():
  """Gets games from Sportszone.

  Explicit flags take precedence; any parameter still missing is parsed
  out of the --url query string when that flag is given.

  Returns:
    A list of games found on Sportszone.
  """
  sportszone_url = FLAGS.sportszone_url
  league_id = FLAGS.league_id
  team_id = FLAGS.team_id
  season_id = FLAGS.season_id
  if FLAGS.url:
    url = urlparse.urlparse(FLAGS.url)
    qs = urlparse.parse_qs(url.query)
    if not sportszone_url:
      # Rebuild the base URL without the query string.
      sportszone_url = '%s://%s%s' % (url.scheme, url.netloc, url.path)
    # NOTE(review): when a query parameter is absent, the fallback is
    # int(str(None)) == int('None'), which raises ValueError before the
    # _precondition checks below can report a friendly error -- confirm.
    if not league_id:
      league_id = int(qs.get('LeagueID', [str(league_id)])[0])
    if not team_id:
      team_id = int(qs.get('TeamID', [str(team_id)])[0])
    if not season_id:
      season_id = int(qs.get('SeasonID', [str(season_id)])[0])
  _precondition(sportszone_url, 'A Sportszone URL is required.')
  _precondition(league_id, 'A Sportszone league ID is required.')
  _precondition(team_id, 'A Sportszone team ID is required.')
  _precondition(season_id, 'A Sportszone season ID is required.')
  sz = sportszone.Sportszone(sportszone_url, league_id)
  return sz.get_schedule(team_id, season_id)
def _tz():
  """Returns the local timezone, formatted for import into Team Cowboy.

  Team Cowboy does not specify the exact format it prefers for timezones and
  some values do not work such as 'America/Dawson' or 'US/Pacific-New'. For
  now we restrict the set to those timezone names starting with 'US/' and
  keep the first value we see for each abbreviation.

  Returns:
    A formatted timezone string.
  """
  by_abbreviation = {}
  for zone_name in pytz.all_timezones:
    zone = pytz.timezone(zone_name)
    # Abbreviation (e.g. 'PST') of this zone at the current moment.
    abbreviation = zone.localize(datetime.datetime.now()).strftime('%Z')
    if str(zone).startswith('US/') and abbreviation not in by_abbreviation:
      by_abbreviation[abbreviation] = zone
  local_abbreviation = pytz.reference.LocalTimezone().tzname(
      datetime.datetime.now())
  return by_abbreviation[local_abbreviation]
def _write_tsv(games, arena_map):
  """Writes a list of games to a TSV file suitable for importing to Team Cowboy.

  Args:
    games: A list of games to write.
    arena_map: A map from Sportszone to Team Cowboy arena names.
  """
  with open(FLAGS.output_file, 'wb') as tsvfile:
    writer = csv.writer(tsvfile, delimiter='\t')
    # Header row expected by the Team Cowboy importer.
    writer.writerow([
        'Event Type', 'Start Date', 'Start Time', 'End Date', 'End Time',
        'Timezone ID', 'Home or Away', 'Opponent/Event Title', 'Location Name',
        'Shirt Color', 'Opponent Shirt Color', 'Allow RSVPs', 'Send Reminders',
        'Notes/Comments'
    ])
    # Values shared by every exported game.
    allow_rsvps = 'Yes'
    event_type = 'game'
    notes_comments = ''
    send_reminders = 'Yes'
    timezone = _tz()
    for game in games:
      # game.game_datetime goes through time.mktime, so it is presumably a
      # time.struct_time -- confirm against the Sportszone parser.
      dt = datetime.datetime.fromtimestamp(time.mktime(game.game_datetime))
      start_date = dt.strftime('%Y-%m-%d')
      start_time = dt.strftime('%I:%M %p')
      # Every game is exported with a one-hour duration.
      dt += datetime.timedelta(hours=1)
      end_date = dt.strftime('%Y-%m-%d')
      end_time = dt.strftime('%I:%M %p')
      home_away = game.home_away.title()
      # Translate the rink name when a mapping was provided; otherwise keep
      # the Sportszone name unchanged.
      arena = arena_map.get(game.arena, game.arena)
      if game.home_away == 'HOME':
        shirt_color = FLAGS.home_color
        opponent_shirt_color = FLAGS.away_color
      else:
        shirt_color = FLAGS.away_color
        opponent_shirt_color = FLAGS.home_color
      writer.writerow([
          event_type, start_date, start_time, end_date, end_time, timezone,
          home_away, game.opponent, arena, shirt_color.title(),
          opponent_shirt_color.title(), allow_rsvps, send_reminders,
          notes_comments
      ])
def main(argv):
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, sys.argv[0], FLAGS)
sys.exit(1)
sz_games = _sportszone_games()
if not sz_games:
return
start_dt = sz_games[0].game_datetime
end_dt = sz_games[len(sz_games) - 1].game_datetime
tc_games = _team_cowboy_games(start_dt, end_dt, len(sz_games))
# We perform a simple test to find matching games: if the start date/time is
# the same, then we assume the entries represent the same games.
tc_games_by_dt = {}
for i in tc_games:
key = time.strptime(
i['dateTimeInfo']['startDateTimeLocal'], '%Y-%m-%d %H:%M:%S')
tc_games_by_dt[key] = i
games = [i for i in sz_games
if i.game_datetime not in tc_games_by_dt]
_write_tsv(games, _create_arena_map())
if __name__ == '__main__':
main(sys.argv)
| {
"repo_name": "kjiwa/sportszone-exporter",
"path": "__main__.py",
"copies": "1",
"size": "8265",
"license": "mit",
"hash": 1481966509775941600,
"line_mean": 30.7884615385,
"line_max": 80,
"alpha_frac": 0.6592861464,
"autogenerated": false,
"ratio": 3.194820255121763,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4354106401521763,
"avg_score": null,
"num_lines": null
} |
"A script that generates the Signpost's Featured Content Report."
import getpass
import pywikibot
import re
from wikitools.wiki import Wiki as WikitoolsWiki
from wikitools.page import Page as WikitoolsPage
# Regex for the bolded category headings on [[Wikipedia:Goings-on]]
# ("'''[[Wikipedia:Featured X|...]] that gained featured status'''");
# group 1 captures the category name, e.g. "articles".
WP_GO_HEADING = (
    r"'''\[\[Wikipedia:Featured (.+?)\|.+?\]\] that gained featured status'''")
# Regex for one list item: "[[name(|label)]] (date)".  Groups: name,
# optional "|label" (with the label itself in group 3), and the date.
WP_GO_ITEM = r"\[\[(.+?)(\|(.+?))?\]\] \((.+?)\)"
# Wikilinked phrases used when rendering each category's report section.
FC_LINKS = {"articles": "[[WP:FA|featured article]]s",
            "lists": "[[WP:FL|featured list]]s",
            "pictures": "[[WP:FP|featured picture]]s"}
def main():
    """Build the Signpost featured-content report from [[WP:Goings-on]]
    and write it to the bot's sandbox page."""
    wiki = pywikibot.Site("en", "wikipedia")
    wiki.login()
    global wikitools_wiki
    wikitools_login()
    wpgo = pywikibot.Page(wiki, "Wikipedia:Goings-on")
    wpgo_content = wpgo.get()
    new_fc = wpgo_content[wpgo_content.find("==New featured content=="):]
    # Trim it down to just the list of featured content
    new_fc = new_fc[:new_fc.find("|-") - 2]
    # Remove the section heading
    new_fc = new_fc[len("==New featured content=="):]
    # Create fc_cats, which looks like this: {type: [title of content]}
    fc_cats = dict()
    for fc_cat in re.finditer(WP_GO_HEADING, new_fc):
        fc_cat_name = fc_cat.groups()[0]
        # Slice from this heading to the next one (or to the end).
        fc_cat_raw_list = new_fc[fc_cat.start():]
        fc_cat_raw_list = fc_cat_raw_list[len(fc_cat_name) + 1:]
        next_heading = re.search(WP_GO_HEADING, fc_cat_raw_list)
        if next_heading:
            fc_cat_raw_list = fc_cat_raw_list[:next_heading.start()]
        fc_cat_raw_list = fc_cat_raw_list.strip()
        # Now that we have just the list, parse out the items
        for fc_item in re.finditer(WP_GO_ITEM, fc_cat_raw_list):
            name, _, label, date = fc_item.groups()
            print u"{} (a {}) was promoted on {}".format(label if label else name, fc_cat_name[:-1], date)
            fc_cats[fc_cat_name] = fc_cats.get(fc_cat_name, []) + [(name,
                                                                    label,
                                                                    date)]
    # Get notification metadata
    for fc_cat, fc_items in fc_cats.items():
        def add_metadata(fc_item):
            # Extend (name, label, date) with the nomination page link.
            name, label, date = fc_item
            nom_link = "Wikipedia:Featured " + fc_cat[:-1] + " candidates/"
            if fc_cat == "pictures":
                # Strip surrounding '' italics markup if present.
                nom_link += label[2:-2] if "''" in label else label
                #if not WikitoolsPage(wikitools_wiki, title=nom_link).exists:
                if not wiki.page_exists(nom_link):
                    print(nom_link + " DOESN'T EXIST")
            else:
                nom_link += name[2:-2] if "''" in name else name
                # NOTE(review): "/archive1" is taken to apply only to
                # non-picture nominations -- confirm the intended nesting.
                nom_link += "/archive1"
            return (name, label, date, nom_link)
        # NOTE(review): Python 2 map() returns a list here; under Python 3
        # this would be a one-shot iterator.
        fc_cats[fc_cat] = map(add_metadata, fc_items)
    # Build "report"
    report = ""
    for fc_cat, fc_items in fc_cats.items():
        report += "\n\n===Featured {}===".format(fc_cat)
        report += "\n{} {} were promoted this week.".format(len(fc_items),
                                                            FC_LINKS[fc_cat])
        for fc_item in fc_items:
            name, label, date, nom_link = fc_item
            piped = "|" + label if label else ""
            report += u"\n* '''[[{}{}]]''' <small>([[{}|nominated]] by [[User:Example|Example]])</small> Description.".format(name, piped, nom_link)
    report = report.strip()
    # Write report to Wikipedia
    report_page = WikitoolsPage(wikitools_wiki, title="User:APersonBot/sandbox")
    print("Editing report page...")
    result = report_page.edit(text=report.encode("ascii", "ignore"),
                              bot=True,
                              summary="Test FC report")
    if result[u"edit"][u"result"] == u"Success":
        print "Success!"
    else:
        print "Error! Couldn't write report - result: {}".format(str(result))
def wikitools_login():
    """Prompt for credentials until a wikitools login to enwiki succeeds.

    Stores the authenticated Wiki object in the module-level
    ``wikitools_wiki`` global.
    """
    global wikitools_wiki
    wikitools_wiki = WikitoolsWiki("http://en.wikipedia.org/w/api.php")
    logged_in = False
    while not logged_in:
        username = raw_input("Username: ")
        prompt = "Password for " + username + " on enwiki: "
        password = getpass.getpass(prompt)
        print("Logging in to enwiki as " + username + "...")
        wikitools_wiki.login(username, password)
        logged_in = wikitools_wiki.isLoggedIn()
        if not logged_in:
            print("Error logging in. Try again.")


if __name__ == "__main__":
    main()
| {
"repo_name": "APerson241/EnterpriseyBot",
"path": "fcreporter/fcreporter.py",
"copies": "2",
"size": "4372",
"license": "mit",
"hash": 840095833037450100,
"line_mean": 41.4466019417,
"line_max": 148,
"alpha_frac": 0.5475754803,
"autogenerated": false,
"ratio": 3.450670876085241,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4998246356385241,
"avg_score": null,
"num_lines": null
} |
"""A script that generates the SRA submission files for our project."""
# -*- coding: utf-8 -*-
import pandas as pd
import argparse as arg
import os
import numpy
parser = arg.ArgumentParser(description='Generate the SRA metadata files')
parser.add_argument('biosample', type=str,
help='path to the biosample file')
parser.add_argument('library_map', type=str,
help='The library ID to folder name mapping')
parser.add_argument('dir', type=str, help='Path to the read folders')
parser.add_argument('output', type=str, help='filename to write to')
args = parser.parse_args()
biosample_info = pd.read_csv(args.biosample, sep='\t')
library_map = pd.read_csv(args.library_map, sep='\t', comment='#')
# biosample_info = pd.read_csv('sra_biosamples.txt', sep='\t')
# library_map = pd.read_csv('library_to_ID.txt', sep='\t', comment='#')
column_names = ['library_strategy', 'library_source', 'library_selection',
'library_layout', 'platform', 'instrument_model',
'design_description', 'filetype', 'filename']
# biosample_info =biosample_info[['bioproject_accession','biosample_accession',
# 'library_ID', 'title']]
proj_to_file = {}
for project in os.listdir(args.dir):
if ~(library_map.project_name == project).any():
continue
if project == '.DS_Store':
continue
for fname in os.listdir(args.dir + project):
if '.fastq' in fname:
if project in proj_to_file.keys():
proj_to_file[project] += [fname]
else:
proj_to_file[project] = [fname]
with open(args.output, 'w') as file:
columns = ['bioproject_accession', 'biosample_accession', 'library_ID',
'title'] + column_names
columns = '\t'.join(columns)
file.write(columns + '\n')
for genotype in library_map.genotype.unique():
line = ''
ind = (library_map.genotype == genotype)
project_ids = library_map[ind].project_name.unique()
for project in project_ids:
for f in proj_to_file[project]:
line += str(f) + '\t'
line = line[:-1]
ind = biosample_info.library_ID == genotype
x = biosample_info[ind].values[0].tolist()
x = [str(i) for i in x]
x = '\t'.join(x) + '\t' + line + '\n'
file.write(x)
| {
"repo_name": "WormLabCaltech/mprsq",
"path": "sra_submission/make_sra_metadata_file.py",
"copies": "1",
"size": "2373",
"license": "mit",
"hash": 4050353250908966400,
"line_mean": 38.55,
"line_max": 79,
"alpha_frac": 0.5992414665,
"autogenerated": false,
"ratio": 3.469298245614035,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9568539712114035,
"avg_score": 0,
"num_lines": 60
} |
#a script that locks a certain file - it doesn't check (currently)
#the lockfile relevance, lockfiles are now implementd as file_naem.lock
from random import random
from time import sleep
from os import remove
from os.path import exists,dirname,join,isdir
# Suffix appended to a file path (or file name placed inside a directory)
# to mark it as locked.
lock_ext='.lock'
# Default number of seconds to wait for a lock before raising deadlockError.
timeout = 10
class deadlockError(Exception):
    """Raised when a lockfile is still present after the deadlock timeout.

    Bug fixes versus the original:
      * __str__ used a print statement and implicitly returned None, so
        str(exception) raised TypeError instead of producing a message.
      * the numeric wait time was concatenated directly to strings, which
        raised TypeError; it is now converted with str().
    """

    def __init__(s, filename, lockfile_ext, trieddir, time):
        # Path that was being locked.
        s.file = filename
        # Lockfile suffix that was looked for.
        s.lockfile_ext = lockfile_ext
        # True when a directory lock was attempted, False for a file lock.
        s.isdir = trieddir
        # Accumulated seconds spent waiting before giving up.
        s.secs = time

    def __str__(s):
        if s.isdir:
            final = join(s.file, s.lockfile_ext)
        else:
            final = s.file + s.lockfile_ext
        return ("couldn't find '" + final + "' for " + str(s.secs) +
                " seconds, params: " +
                str((s.file, s.lockfile_ext, s.isdir, s.secs)))
def lockfile(file,content=None,lockfile_ext=lock_ext,deadlock_timeout=timeout):
    """Acquire a lock on *file* by creating <file><lockfile_ext>.

    Spins with short random sleeps while either the file's own lockfile or
    a lockfile in the file's directory exists; raises deadlockError once
    *deadlock_timeout* seconds of waiting have accumulated.

    Args:
        file: path of the file to lock.
        content: optional text written into the lockfile.
        lockfile_ext: lockfile suffix (defaults to module-level lock_ext).
        deadlock_timeout: seconds to wait before giving up.
    """
    waited = 0.0
    lockpath = file + lockfile_ext
    # A lock on the containing directory also blocks this file; the path is
    # loop-invariant, so compute it once.
    dir_lockpath = join(dirname(file), lockfile_ext)
    while exists(lockpath) or exists(dir_lockpath):
        t = random() / 10
        sleep(t)
        waited += t
        if waited > deadlock_timeout:
            raise deadlockError(file, lockfile_ext, False, waited)
    # 'with' guarantees the handle is closed even if the write fails.
    with open(lockpath, 'w') as f:
        if content is not None:
            f.write(content)
def releasefile(file,lockfile_ext='.lock'):
    """Release the lock on *file* by deleting its lockfile."""
    lockpath = file + lockfile_ext
    remove(lockpath)
def lockdir(dir,content=None,lockfile_ext=lock_ext,deadlock_timeout=timeout):
    """Acquire a lock on directory *dir* by creating <dir>/<lockfile_ext>.

    Spins with short random sleeps while the directory's lockfile exists;
    raises deadlockError once *deadlock_timeout* seconds of waiting have
    accumulated.

    Args:
        dir: path of the directory to lock.
        content: optional text written into the lockfile.
        lockfile_ext: lockfile name inside the directory.
        deadlock_timeout: seconds to wait before giving up.
    """
    waited = 0.0
    lockpath = join(dir, lockfile_ext)
    while exists(lockpath):
        t = random() / 10
        sleep(t)
        waited += t
        if waited > deadlock_timeout:
            raise deadlockError(dir, lockfile_ext, True, waited)
    # 'with' guarantees the handle is closed even if the write fails.
    with open(lockpath, 'w') as f:
        if content is not None:
            f.write(content)
def releasedir(dir,lockfile_ext=lock_ext):
    """Release the lock on directory *dir* by deleting its lockfile."""
    lockpath = join(dir, lockfile_ext)
    remove(lockpath)
def islocked(file,lockfile_ext=lock_ext):
    """Return whether *file* (a file or a directory) is currently locked."""
    if isdir(file):
        # A directory is locked iff its own lockfile exists.
        return exists(join(file, lockfile_ext))
    # A file is locked either by its own lockfile...
    if exists(file + lockfile_ext):
        return True
    # ...or by a lockfile on its containing directory.
    return exists(join(dirname(file), lockfile_ext))
def lock(file,content=None,lockfile_ext=lock_ext,deadlock_timeout=timeout):
    """Lock *file*, dispatching to lockdir or lockfile as appropriate."""
    if isdir(file):
        lockdir(file, content, lockfile_ext, deadlock_timeout)
    else:
        lockfile(file, content, lockfile_ext, deadlock_timeout)
def release(file,lockfile_ext=lock_ext):
    """Release *file*, dispatching to releasedir or releasefile."""
    if isdir(file):
        releasedir(file, lockfile_ext)
    else:
        releasefile(file, lockfile_ext)
"""
lock(r'c:\try.exe')
print islocked(r'c:\try.exe')
release(r'c:\try.exe')
print islocked(r'c:\try.exe')
lock('c:\\')
print islocked('c:\\')
release('c:\\')
print islocked('c:\\')
"""
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/193488_lockfile_and_directory_module/recipe-193488.py",
"copies": "1",
"size": "2462",
"license": "mit",
"hash": -7359080534037621000,
"line_mean": 35.2058823529,
"line_max": 91,
"alpha_frac": 0.6758732738,
"autogenerated": false,
"ratio": 3.222513089005236,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.925481724780578,
"avg_score": 0.028713822999891017,
"num_lines": 68
} |
# A script that parses the wikipedia page into JSON.
# run wget http://en.wikipedia.org/wiki/List_of_mobile_country_codes
# then this..
import json
import os
from BeautifulSoup import BeautifulSoup
from mobile_codes import MNCOperator
def parse_wikipedia():
    """Parse the saved wikipedia page into a list of MNCOperator records."""
    with open('List_of_mobile_country_codes', 'r') as htmlfile:
        soup = BeautifulSoup(htmlfile)
    operators = []
    for table in soup.findAll('table', attrs={'class': 'wikitable'}):
        for row in table.findAll('tr'):
            # First four cells: MCC, MNC, brand, operator.
            mcc, mnc, brand, operator = row.findChildren()[:4]
            # Skip header rows and rows with no MCC value.
            if mcc.text in ['MCC', '']:
                continue
            record = MNCOperator(operator=operator.text, brand=brand.text,
                                 mcc=mcc.text, mnc=mnc.text)
            operators.append(record)
    return operators
def parse_itu():
    """Load the ITU operator list from source_data/itu.json."""
    itu_path = os.path.join('source_data', 'itu.json')
    with open(itu_path, 'rb') as jsonfile:
        raw = jsonfile.read()
    return json.loads(raw.decode())
def merge_wiki_itu():
    """Merge wikipedia and ITU operators keyed by (mcc, mnc).

    ITU entries are applied second, so they override wikipedia entries
    that share the same (mcc, mnc) key.
    """
    merged_operators = {}
    for source in (parse_wikipedia(), parse_itu()):
        for operator in source:
            operator_key = operator.mcc, operator.mnc
            merged_operators[operator_key] = operator
    return merged_operators.values()
def write_operators(operators):
    """Serialize the operator list as JSON under mobile_codes/json/."""
    out_path = os.path.join('mobile_codes', 'json', 'operators.json')
    with open(out_path, 'wb') as outfile:
        outfile.write(json.dumps(operators))


if __name__ == '__main__':
    write_operators(merge_wiki_itu())
| {
"repo_name": "andymckay/mobile-codes",
"path": "parse.py",
"copies": "2",
"size": "1685",
"license": "mit",
"hash": -4015037618027713000,
"line_mean": 27.5593220339,
"line_max": 73,
"alpha_frac": 0.6112759644,
"autogenerated": false,
"ratio": 3.795045045045045,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5406321009445045,
"avg_score": null,
"num_lines": null
} |
# A script that processes HTSeq-count output for easy input into R.
# Fetches the sample name from the cluster output
# Saves the six lines of basic statistics generated by HTSeq at the bottom of the file in a .txt file
import re
import os
import glob
import argparse
import sys
def create_parser():
    """Build and return the command-line argument parser."""
    arg_parser = argparse.ArgumentParser()
    help_text = '''The input folder path, containing the data. '''
    arg_parser.add_argument('-i', '--input', dest='input_path',
                            required=True, help=help_text)
    return arg_parser
def tidy_htseq(my_file, path):
    """Split one HTSeq-count cluster-output file into counts and stats files.

    The sample name is taken from the "Input for .../<sample>.<ext>" line.
    Gene-count lines (``name\\tcount``) are appended to
    <path>/<sample>_htseq-counts.txt, while HTSeq's summary lines (those
    starting with ``__``) are appended to <path>/<sample>_stats.txt.

    Fixes versus the original: the input handle is now closed via ``with``
    even on error, the duplicated open/write/close branches are merged, and
    output paths are built with os.path.join instead of string + inside a
    single-argument os.path.join call.

    Args:
        my_file: path of the HTSeq cluster output file.
        path: directory in which the per-sample output files are created.

    Note: data lines are assumed to appear only after the "Input for" line;
    a data line seen earlier raises NameError because ``sample`` is unset
    (unchanged from the original behavior).
    """
    with open(my_file, "r") as f:
        for line in f:
            if line.startswith('Input for'):
                # e.g. "Input for /some/dir/sample.bam" -> "sample".
                _, file_name = line.rsplit("/", 1)
                sample = file_name.split(".", 1)[0]
            # Only lines of the form "<word>\t<digits>" carry data.
            if re.search(r'\w+\t\d*', line):
                if re.search(r'^__\w+\t\d*', line):
                    # HTSeq summary statistics (e.g. __no_feature).
                    out_name = os.path.join(path, sample + "_stats.txt")
                else:
                    out_name = os.path.join(path, sample + "_htseq-counts.txt")
                # Append so repeated runs/chunks accumulate per sample.
                with open(out_name, "a") as out:
                    out.write(line)
    print ("All done with %s" % sample)
def find_moi(input_path):
    """Locate every cluster output file (*.sh.*) and tidy each one."""
    pattern = os.path.join(input_path + "/*.sh.*")
    count_files = glob.glob(pattern)
    print ("Found %d count files" % len(count_files))
    for counts_path in count_files:
        tidy_htseq(counts_path, input_path)
def main(argv=None):
    """Program wrapper

    :param argv: argument list; defaults to sys.argv[1:] when None.
    """
    cli_args = sys.argv[1:] if argv is None else argv
    parser = create_parser()
    parsed = parser.parse_args(cli_args)
    find_moi(parsed.input_path)
    return 0


if __name__ == '__main__':
    import doctest
    doctest.testmod()
    sys.exit(main())
| {
"repo_name": "Joannacodes/science",
"path": "htseq-2-R.py",
"copies": "2",
"size": "1880",
"license": "apache-2.0",
"hash": 1972307393639351300,
"line_mean": 27.0597014925,
"line_max": 101,
"alpha_frac": 0.5776595745,
"autogenerated": false,
"ratio": 3.6153846153846154,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5193044189884616,
"avg_score": null,
"num_lines": null
} |
"""A script that processes the input CSV files and copies them into a SQLite database."""
import csv
import sqlite3
import os
import sys
import click
import time
__version__ = '2.1.0'
def write_out(msg):
    """Print *msg* only when verbose mode is enabled (write_out.verbose)."""
    if not write_out.verbose:
        return
    print(msg)
class CsvOptions:
    """Bag of settings that control how CSV files are read and converted."""

    def __init__(self,
                 typing_style="quick",
                 drop_tables=False,
                 delimiter=",",
                 encoding="utf8",
                 bracket_style="all"):
        # 'full', 'quick' or 'none' column-type detection.
        self.typing_style = typing_style
        # Whether pre-existing tables are dropped before creation.
        self.drop_tables = drop_tables
        # CSV field separator.
        self.delimiter = delimiter
        # Input file encoding.
        self.encoding = encoding
        # 'all' wraps column names in [brackets]; 'none' leaves them bare.
        self.bracket_style = bracket_style
class CsvFileInfo:
    """Wraps one CSV file: infers column types and copies the rows into a
    SQLite table named after the file (use as a context manager)."""

    def __init__(self, path, options = None):
        # Location of the CSV file on disk.
        self.path = path
        # Filled in by determine_types().
        self.columnNames = None
        self.columnTypes = None
        # Opened lazily in __enter__.
        self.csvfile = None
        self.reader = None
        self.options = options
        if not options:
            self.options = CsvOptions()
        # Column-name bracketing characters ("[", "]") or empty strings.
        # NOTE(review): this reads the *argument* `options`, not
        # `self.options`, so constructing with options=None raises
        # AttributeError here despite the fallback just above -- confirm.
        self.lb, self.rb = ("[", "]") if options.bracket_style == "all" else ("", "")

    def get_table_name(self):
        """Table name = file name without directory or extension."""
        return os.path.splitext(os.path.basename(self.path))[0]

    def get_minimal_type(self, value):
        """Return the narrowest SQLite type that can hold *value*
        ('integer', 'real' or 'text')."""
        try:
            int(value)
            return "integer"
        except ValueError:
            pass
        try:
            float(value)
            return "real"
        except ValueError:
            pass
        return "text"

    def __enter__(self):
        # Open the file and build the csv reader with the configured
        # encoding and delimiter.
        self.csvfile = open(self.path, encoding=self.options.encoding)
        self.reader = csv.reader(self.csvfile, delimiter=self.options.delimiter)
        return self

    def __exit__(self, *args):
        # Close the underlying file if it was opened.
        if self.csvfile:
            self.csvfile.close()

    def get_restarted_reader(self):
        """Rewind the file so the reader starts again from the header."""
        self.csvfile.seek(0)
        return self.reader

    def determine_types(self):
        """Read the file and populate columnNames / columnTypes.

        With typing_style 'none' every column is text.  Otherwise columns
        start as 'integer' and are only ever widened (integer -> real ->
        text) as non-conforming values are encountered; 'quick' inspects
        only the first data row, 'full' scans the whole file.
        """
        write_out("Determining types")
        rdr = self.get_restarted_reader()
        # First row holds the column names.
        self.columnNames = [name for name in next(rdr)]
        cols = len(self.columnNames)
        if self.options.typing_style == 'none':
            self.columnTypes = ["text"] * cols
            return
        self.columnTypes = ["integer"] * cols
        for row in rdr:
            for col in range(cols):
                if self.columnTypes[col] == "text":
                    # Already the widest type; nothing to refine.
                    continue
                col_type = self.get_minimal_type(row[col])
                if self.columnTypes[col] != col_type:
                    # Widen only: text beats everything, real beats integer.
                    if col_type == "text" or \
                            (col_type == "real" and self.columnTypes[col] == "integer"):
                        self.columnTypes[col] = col_type
            if self.options.typing_style == 'quick':
                break

    def save_to_db(self, connection):
        """Create the table (optionally dropping an existing one) and
        bulk-insert all data rows in batches of 10000.

        Returns the total number of rows inserted.
        """
        write_out("Writing table " + self.get_table_name())
        cols = len(self.columnNames)
        if self.options.drop_tables:
            try:
                write_out("Dropping table " + self.get_table_name())
                connection.execute('drop table [{tableName}]'.format(tableName=self.get_table_name()))
            except:
                # NOTE(review): bare except silently ignores *any* failure,
                # not just "table does not exist".
                pass
        # Build the CREATE TABLE statement from the inferred schema.
        createQuery = 'create table [{tableName}] (\n'.format(tableName=self.get_table_name()) \
            + ',\n'.join("\t%s%s%s %s" % (self.lb, i[0], self.rb, i[1]) for i in zip(self.columnNames, self.columnTypes)) \
            + '\n);'
        write_out(createQuery)
        connection.execute(createQuery)
        linesTotal = 0
        currentBatch = 0
        reader = self.get_restarted_reader()
        buf = []
        # Batch size for executemany.
        maxL = 10000
        next(reader) #skip headers
        for line in reader:
            buf.append(line)
            currentBatch += 1
            if currentBatch == maxL:
                write_out("Inserting {0} records into {1}".format(maxL, self.get_table_name()))
                connection.executemany('insert into [{tableName}] values ({cols})'
                                       .format(tableName=self.get_table_name(), cols=','.join(['?'] * cols)),
                                       buf)
                linesTotal += currentBatch
                currentBatch = 0
                buf = []
        if len(buf) > 0:
            # Flush the final partial batch.
            write_out("Flushing the remaining {0} records into {1}".format(len(buf), self.get_table_name()))
            connection.executemany('insert into [{tableName}] values ({cols})'
                                   .format(tableName=self.get_table_name(), cols=','.join(['?'] * cols)),
                                   buf)
            linesTotal += len(buf)
        return linesTotal
@click.command()
@click.option("--file", "-f",
              type=click.Path(exists=True),
              help="A file to copy into the database. \nCan be specified multiple times. \n"
                   "All the files are processed, including file names piped from standard input.",
              multiple=True)
@click.option("--output", "-o", help="The output database path",
              type=click.Path(),
              default=os.path.basename(os.getcwd()) + ".db")
@click.option('--typing', "-t",
              type=click.Choice(['full', 'quick', 'none']),
              help="""Determines whether the script should guess the column type (int/float/string supported).
    quick: only base the types on the first line
    full: read the entire file
    none: no typing, every column is string""",
              default='quick')
@click.option("--drop-tables/--no-drop-tables", "-D",
              help="Determines whether the tables should be dropped before creation, if they already exist"
                   " (BEWARE OF DATA LOSS)",
              default=False)
@click.option("--verbose", "-v",
              is_flag=True,
              help="Determines whether progress reporting messages should be printed",
              default=False)
@click.option("--delimiter", "-x",
              help="Choose the CSV delimiter. Defaults to comma. Hint: for tabs, in Bash use $'\\t'.",
              default=",")
@click.option("--encoding", "-e",
              help="Choose the input CSV's file encoding. Use the string identifier Python uses to specify encodings, e.g. 'windows-1250'.",
              default="utf8")
@click.option('--bracket-style',
              type=click.Choice(['all', 'none']),
              help="""Determines whether all the column names should be wrapped in brackets, or none of them should be.
    Keep in mind that if you select 'none', it is up to you to ensure the CSV's column names are also valid SQLite column names.
    all: wrap all.
    none: no brackets""",
              default='all')
def start(file, output, typing, drop_tables, verbose, delimiter, encoding, bracket_style):
    """A script that processes the input CSV files and copies them into a SQLite database.

    Each file is copied into a separate table. Column names are taken from the headers (first row) in the csv file.

    If file names are passed both via the --file option and standard input, all of them are processed.

    For example in PowerShell, if you want to copy all the csv files in the current folder
    into a database called "out.db", type:

        ls *.csv | % FullName | csv-to-sqlite -o out.db
    """
    # Propagate the verbosity flag to the module-level logger function.
    write_out.verbose = verbose
    files = list(file)
    # Also accept file names piped in via stdin, one per line.
    if not sys.stdin.isatty():
        files.extend(list(sys.stdin))
    if not files:
        print("No files were specified. Exiting.")
        return
    options = CsvOptions(typing_style=typing, drop_tables=drop_tables, delimiter=delimiter, encoding=encoding, bracket_style=bracket_style)
    write_csv(files, output, options)
def write_csv(files, output, options):
    """Copy every CSV file in *files* into the SQLite database *output*.

    Each file becomes its own table.  A failure on one file is reported
    and the remaining files are still processed.  Returns the total number
    of rows inserted across all tables.
    """
    write_out("Output file: " + output)
    conn = sqlite3.connect(output)
    write_out("Typing style: " + options.typing_style)
    totalRowsInserted = 0
    startTime = time.perf_counter()
    with click.progressbar(files) as _files:
        # In verbose mode iterate the plain list so log lines don't fight
        # with the progress bar rendering.
        actual = files if write_out.verbose else _files
        for file in actual:
            try:
                file = file.strip()
                write_out("Processing " + file)
                with CsvFileInfo(file, options) as info:
                    info.determine_types()
                    totalRowsInserted += info.save_to_db(conn)
            except Exception as exc:
                # One bad file must not abort the whole run.
                print("Error on table {0}: \n {1}".format(file, exc))
    print("Written {0} rows into {1} tables in {2:.3f} seconds".format(totalRowsInserted, len(files), time.perf_counter() - startTime))
    conn.commit()
    return totalRowsInserted
if __name__ == "__main__":
start()
else:
write_out.verbose = False
| {
"repo_name": "zblesk/csv-to-sqlite",
"path": "csv_to_sqlite.py",
"copies": "1",
"size": "8543",
"license": "mit",
"hash": -4193641018838773000,
"line_mean": 38.1880733945,
"line_max": 140,
"alpha_frac": 0.5712279059,
"autogenerated": false,
"ratio": 4.107211538461539,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5178439444361539,
"avg_score": null,
"num_lines": null
} |
"""A script that provides custom input vectors that can be used during active
scans.
The option Enable Script Input Vectors must be enabled before starting the
scans.
Note that new scripts will initially be disabled, right click the script in the
Scripts tree and select Enable Script.
"""
import urllib
def parseParameters(helper, msg):
    """Parses/extracts the parameters from the HTTP message (request), to be
    later tested by the scanners.

    Args:
        helper (VariantCustom): Helper class that provides functions to add new
            parameters and process its values.
        msg (HttpMessage): The HTTP message (request) that will be scanned.
    """
    # Extract the attributes of a custom header...
    header = msg.getRequestHeader().getHeader('My-Custom-Header')
    if not header:
        return
    attributes = header.strip().split(';')
    for attribute in attributes:
        attribute = attribute.strip()
        if not attribute:
            continue
        # Split on the first '=' only, so percent-encoded values that contain
        # '=' themselves are not truncated; skip malformed attributes with no
        # '=' at all (previously these raised IndexError).
        data = attribute.split('=', 1)
        if len(data) != 2:
            continue
        name = data[0]
        value = urllib.unquote(data[1])
        helper.addParamHeader(name, value)
def setParameter(helper, msg, param, value, escaped):
    """Sets the new value (attack) of a parameter, called by the scanners
    during the active scan.

    Args:
        helper (VariantCustom): Helper class that provides functions to get the
            parameters and process its values.
        msg (HttpMessage): The HTTP message where the value should be injected.
        param (String): The name of the parameter.
        value (String): The value to inject.
        escaped (bool): True if the value is already escaped, False otherwise.
    """
    # Rebuild the header with the attack...
    # Each attribute is rendered as "name=value; " (trailing separator kept,
    # matching the format produced by parseParameters' source header).
    parts = []
    for parameter in helper.getParamList():
        if parameter.getName() == param:
            attr_value = urllib.quote(value)
        else:
            attr_value = parameter.getValue()
        parts.append(parameter.getName() + '=' + attr_value + '; ')
    msg.getRequestHeader().setHeader('My-Custom-Header', ''.join(parts))
| {
"repo_name": "zapbot/zap-extensions",
"path": "addOns/jython/src/main/zapHomeFiles/scripts/templates/variant/Input Vector default template.py",
"copies": "7",
"size": "2067",
"license": "apache-2.0",
"hash": 3039964650618017300,
"line_mean": 32.8852459016,
"line_max": 79,
"alpha_frac": 0.6574746009,
"autogenerated": false,
"ratio": 4.634529147982063,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8792003748882062,
"avg_score": null,
"num_lines": null
} |
"""A script that runs a fast threshold scan for different parameter (e.g. GDAC, TDACVbp) to get a threshold calibration.
To save time the PlsrDAC start position is the start position determined from the previous threshold scan. So the
scan parameter values should be chosen in a ways that the threshold increases for each step.
After the data taking the data is analyzed and the calibration is written into a h5 file.
"""
import logging
import os
import ast
from matplotlib.backends.backend_pdf import PdfPages
import tables as tb
import numpy as np
import progressbar
from pybar_fei4_interpreter import data_struct
from pybar.run_manager import RunManager
from pybar.scans.scan_threshold_fast import FastThresholdScan
from pybar.analysis import analysis_utils
from pybar.analysis.plotting.plotting import plot_three_way, plot_scurves, plot_scatter
from pybar.analysis.analyze_raw_data import AnalyzeRawData
def create_threshold_calibration(scan_base_file_name, create_plots=True):  # Create calibration function, can be called stand alone
    """Analyze the raw data of a fast threshold scan per scan-parameter step and
    write a per-pixel threshold calibration into '<scan_base>_calibration.h5'
    (both as histograms/arrays and as tables), optionally with PDF plots.

    NOTE(review): Python 2 code — uses ``basestring`` and indexes
    ``dict.values()`` directly; do not run under Python 3 without porting.
    """
    def analyze_raw_data_file(file_name):
        # Interpret one raw data file; skipped when the '_interpreted.h5'
        # output already exists so reruns are cheap.
        if os.path.isfile(os.path.splitext(file_name)[0] + '_interpreted.h5'):  # skip analysis if already done
            logging.warning('Analyzed data file ' + file_name + ' already exists. Skip analysis for this file.')
        else:
            with AnalyzeRawData(raw_data_file=file_name, create_pdf=False) as analyze_raw_data:
                analyze_raw_data.create_tot_hist = False
                analyze_raw_data.create_tot_pixel_hist = False
                analyze_raw_data.create_fitted_threshold_hists = True
                analyze_raw_data.create_threshold_mask = True
                analyze_raw_data.interpreter.set_warning_output(False)  # RX errors would fill the console
                analyze_raw_data.interpret_word_table()
    def store_calibration_data_as_table(out_file_h5, mean_threshold_calibration, mean_threshold_rms_calibration, threshold_calibration, parameter_values):
        # Row-wise table representation: one row per (column, row, parameter
        # step) for the per-pixel data, one row per step for the means.
        logging.info("Storing calibration data in a table...")
        filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False)
        mean_threshold_calib_table = out_file_h5.create_table(out_file_h5.root, name='MeanThresholdCalibration', description=data_struct.MeanThresholdCalibrationTable, title='mean_threshold_calibration', filters=filter_table)
        threshold_calib_table = out_file_h5.create_table(out_file_h5.root, name='ThresholdCalibration', description=data_struct.ThresholdCalibrationTable, title='threshold_calibration', filters=filter_table)
        for column in range(80):
            for row in range(336):
                for parameter_value_index, parameter_value in enumerate(parameter_values):
                    threshold_calib_table.row['column'] = column
                    threshold_calib_table.row['row'] = row
                    threshold_calib_table.row['parameter_value'] = parameter_value
                    threshold_calib_table.row['threshold'] = threshold_calibration[column, row, parameter_value_index]
                    threshold_calib_table.row.append()
        for parameter_value_index, parameter_value in enumerate(parameter_values):
            mean_threshold_calib_table.row['parameter_value'] = parameter_value
            mean_threshold_calib_table.row['mean_threshold'] = mean_threshold_calibration[parameter_value_index]
            mean_threshold_calib_table.row['threshold_rms'] = mean_threshold_rms_calibration[parameter_value_index]
            mean_threshold_calib_table.row.append()
        threshold_calib_table.flush()
        mean_threshold_calib_table.flush()
        logging.info("done")
    def store_calibration_data_as_array(out_file_h5, mean_threshold_calibration, mean_threshold_rms_calibration, threshold_calibration, parameter_name, parameter_values):
        # Array representation of the same data; scan parameter values and axis
        # labels are attached as HDF5 attributes.
        logging.info("Storing calibration data in an array...")
        filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False)
        mean_threshold_calib_array = out_file_h5.create_carray(out_file_h5.root, name='HistThresholdMeanCalibration', atom=tb.Atom.from_dtype(mean_threshold_calibration.dtype), shape=mean_threshold_calibration.shape, title='mean_threshold_calibration', filters=filter_table)
        mean_threshold_calib_rms_array = out_file_h5.create_carray(out_file_h5.root, name='HistThresholdRMSCalibration', atom=tb.Atom.from_dtype(mean_threshold_calibration.dtype), shape=mean_threshold_calibration.shape, title='mean_threshold_rms_calibration', filters=filter_table)
        threshold_calib_array = out_file_h5.create_carray(out_file_h5.root, name='HistThresholdCalibration', atom=tb.Atom.from_dtype(threshold_calibration.dtype), shape=threshold_calibration.shape, title='threshold_calibration', filters=filter_table)
        mean_threshold_calib_array[:] = mean_threshold_calibration
        mean_threshold_calib_rms_array[:] = mean_threshold_rms_calibration
        threshold_calib_array[:] = threshold_calibration
        mean_threshold_calib_array.attrs.dimensions = ['column', 'row', parameter_name]
        mean_threshold_calib_rms_array.attrs.dimensions = ['column', 'row', parameter_name]
        threshold_calib_array.attrs.dimensions = ['column', 'row', parameter_name]
        mean_threshold_calib_array.attrs.scan_parameter_values = parameter_values
        mean_threshold_calib_rms_array.attrs.scan_parameter_values = parameter_values
        threshold_calib_array.attrs.scan_parameter_values = parameter_values
        logging.info("done")
    def mask_columns(pixel_array, ignore_columns):
        # Mask the columns that were not scanned so they do not bias the
        # per-step mean/RMS statistics or the plots.
        idx = np.array(ignore_columns) - 1  # from FE to Array columns
        m = np.zeros_like(pixel_array)
        m[:, idx] = 1
        return np.ma.masked_array(pixel_array, m)
    raw_data_files = analysis_utils.get_data_file_names_from_scan_base(scan_base_file_name)
    first_scan_base_file_name = scan_base_file_name if isinstance(scan_base_file_name, basestring) else scan_base_file_name[0]  # multiple scan_base_file_names for multiple runs
    with tb.open_file(first_scan_base_file_name + '.h5', mode="r") as in_file_h5:  # deduce scan parameters from the first (and often only) scan base file name
        ignore_columns = in_file_h5.root.configuration.run_conf[:][np.where(in_file_h5.root.configuration.run_conf[:]['name'] == 'ignore_columns')]['value'][0]
        parameter_name = in_file_h5.root.configuration.run_conf[:][np.where(in_file_h5.root.configuration.run_conf[:]['name'] == 'scan_parameters')]['value'][0]
        # Run-conf values are stored as their repr strings; literal_eval turns
        # them back into Python objects.
        ignore_columns = ast.literal_eval(ignore_columns)
        parameter_name = ast.literal_eval(parameter_name)[1][0]
    calibration_file = first_scan_base_file_name + '_calibration'
    for raw_data_file in raw_data_files:  # analyze each raw data file, not using multithreading here, it is already used in s-curve fit
        analyze_raw_data_file(raw_data_file)
    files_per_parameter = analysis_utils.get_parameter_value_from_file_names([os.path.splitext(file_name)[0] + '_interpreted.h5' for file_name in raw_data_files], parameter_name, unique=True, sort=True)
    logging.info("Create calibration from data")
    mean_threshold_calibration = np.empty(shape=(len(raw_data_files),), dtype='<f8')
    mean_threshold_rms_calibration = np.empty(shape=(len(raw_data_files),), dtype='<f8')
    threshold_calibration = np.empty(shape=(80, 336, len(raw_data_files)), dtype='<f8')
    if create_plots:
        logging.info('Saving calibration plots in: %s', calibration_file + '.pdf')
        output_pdf = PdfPages(calibration_file + '.pdf')
    progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(files_per_parameter.items()), term_width=80)
    progress_bar.start()
    parameter_values = []
    for index, (analyzed_data_file, parameters) in enumerate(files_per_parameter.items()):
        # NOTE(review): parameters.values()[0][0] relies on Py2 list-like
        # dict views; presumably one parameter value per file — confirm.
        parameter_values.append(parameters.values()[0][0])
        with tb.open_file(analyzed_data_file, mode="r") as in_file_h5:
            occupancy_masked = mask_columns(pixel_array=in_file_h5.root.HistOcc[:], ignore_columns=ignore_columns)  # mask the not scanned columns for analysis and plotting
            thresholds_masked = mask_columns(pixel_array=in_file_h5.root.HistThresholdFitted[:], ignore_columns=ignore_columns)
            if create_plots:
                plot_three_way(hist=thresholds_masked, title='Threshold Fitted for ' + parameters.keys()[0] + ' = ' + str(parameters.values()[0][0]), filename=output_pdf)
                plsr_dacs = analysis_utils.get_scan_parameter(meta_data_array=in_file_h5.root.meta_data[:])['PlsrDAC']
                plot_scurves(occupancy_hist=occupancy_masked, scan_parameters=plsr_dacs, scan_parameter_name='PlsrDAC', filename=output_pdf)
            # fill the calibration data arrays
            mean_threshold_calibration[index] = np.ma.mean(thresholds_masked)
            mean_threshold_rms_calibration[index] = np.ma.std(thresholds_masked)
            threshold_calibration[:, :, index] = thresholds_masked.T
        progress_bar.update(index)
    progress_bar.finish()
    with tb.open_file(calibration_file + '.h5', mode="w") as out_file_h5:
        store_calibration_data_as_array(out_file_h5=out_file_h5, mean_threshold_calibration=mean_threshold_calibration, mean_threshold_rms_calibration=mean_threshold_rms_calibration, threshold_calibration=threshold_calibration, parameter_name=parameter_name, parameter_values=parameter_values)
        store_calibration_data_as_table(out_file_h5=out_file_h5, mean_threshold_calibration=mean_threshold_calibration, mean_threshold_rms_calibration=mean_threshold_rms_calibration, threshold_calibration=threshold_calibration, parameter_values=parameter_values)
    if create_plots:
        plot_scatter(x=parameter_values, y=mean_threshold_calibration, title='Threshold calibration', x_label=parameter_name, y_label='Mean threshold', log_x=False, filename=output_pdf)
        plot_scatter(x=parameter_values, y=mean_threshold_calibration, title='Threshold calibration', x_label=parameter_name, y_label='Mean threshold', log_x=True, filename=output_pdf)
        output_pdf.close()
class ThresholdCalibration(FastThresholdScan):
    ''' Threshold calibration scan

    Repeats a fast threshold scan for each value of a second scan parameter
    (GDAC by default) and turns the results into a threshold calibration via
    create_threshold_calibration().
    '''
    _default_run_conf = FastThresholdScan._default_run_conf.copy()
    # Second scan parameter: ~10 GDAC points spaced logarithmically from ~50 to 10000.
    _default_run_conf['scan_parameters'] = [('PlsrDAC', (0, None)), ('GDAC', np.unique(np.logspace(1.7, 4.0, 10).astype(np.int)).tolist())]
    _default_run_conf.update({
        "ignore_columns": (1, 78, 79, 80),
        'reset_rx_on_error': True,  # long scans have a high propability for ESD related data transmission errors; recover and continue here
        "create_plots": True,
    })
    def scan(self):
        """Run one fast threshold scan per value of the second scan parameter."""
        logging.info('Taking threshold data at following ' + self.scan_parameters._fields[1] + ' values: %s', str(self.scan_parameters[1]))
        for index, parameter_value in enumerate(self.scan_parameters[1]):
            if self.scan_parameters._fields[1] == 'GDAC':  # if scan parameter = GDAC needs special registers set function
                self.register_utils.set_gdac(parameter_value)
            else:
                self.register.set_global_register_value(self.scan_parameters._fields[1], parameter_value)
            if index == 0:
                actual_scan_parameters = {'PlsrDAC': self.scan_parameters.PlsrDAC, self.scan_parameters._fields[1]: parameter_value}
            else:
                self.minimum_data_points = self.data_points  # Take settings from last fast threshold scan for speed up
                actual_scan_parameters = {'PlsrDAC': (self.scan_parameter_start, None), self.scan_parameters._fields[1]: parameter_value}  # Start the PlsrDAC at last start point to save time
            self.set_scan_parameters(**actual_scan_parameters)
            super(ThresholdCalibration, self).scan()
        logging.info("Finished!")
    # NOTE(review): mutable default argument new_file=['GDAC'] — harmless here
    # because it is only passed through, never mutated.
    def handle_data(self, data, new_file=['GDAC'], flush=True):  # Create new file for each scan parameter change
        super(ThresholdCalibration, self).handle_data(data=data, new_file=new_file, flush=flush)
    def analyze(self):
        """Build the calibration file (and plots) from the scan's output files."""
        create_threshold_calibration(self.output_filename, create_plots=self.create_plots)
if __name__ == "__main__":
    # Run the calibration scan with the local run configuration when executed directly.
    with RunManager('configuration.yaml') as runmngr:
        runmngr.run_run(ThresholdCalibration)
| {
"repo_name": "SiLab-Bonn/pyBAR",
"path": "pybar/scans/calibrate_threshold.py",
"copies": "1",
"size": "12584",
"license": "bsd-3-clause",
"hash": -4561077390620466000,
"line_mean": 69.5,
"line_max": 293,
"alpha_frac": 0.6952479339,
"autogenerated": false,
"ratio": 3.712094395280236,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9892251284076019,
"avg_score": 0.0030182090208432755,
"num_lines": 176
} |
""" A script that send an SMS alert about a predefined TimeTabel"""
# imports
from twilio.rest import TwilioRestClient
from datetime import datetime
import time
# Day definition
# fill your time and class according to the template below
# day = {Time: class}
# Time = float (HH.MM as a number), class = String
mon = {}
tue = {}
wed = {}
thu = {}
fri = {}
sat = {}
# e.g.: mon = {9.00: 'Drink tea', 9.10: '@OrangeChaten, webhook works', 10.00: 'Review code'}
# week, a dictionary holding the values weekDay and day
# weekDay = day number indexed from zero (Monday == 0), day = dictionary defined above
week = {0:mon,1:tue,2:wed,3:thu,4:fri,5:sat}
# TwilioRestClient definition
# Put your Credentials here
# Go to "https://www.twilio.com/console/" to get your ACCOUNT_SID and AUTH_TOKEN
ACCOUNT_SID = "YOUR ACCOUNT_SID"
AUTH_TOKEN = "YOUR AUTH_TOKEN"
myNumber = 'YOUR PHONE NUMBER'
twNumner = 'TWILIO PHONE NUMBER'
# initialise client
client = TwilioRestClient(ACCOUNT_SID, AUTH_TOKEN)
# function to send an sms
# refer "https://www.twilio.com/docs/api?filter-product=sms&filter-platform=mobile" for more details
def sms_twilio(msg='Test sucesses'):
    """Send *msg* as an SMS from the Twilio number to the user's number.

    Uses the module-level ``client``, ``myNumber`` and ``twNumner`` settings.
    See "https://www.twilio.com/docs/api?filter-product=sms&filter-platform=mobile".
    """
    client.messages.create(to=myNumber, from_=twNumner, body=msg)
# event loop
while True:
    # Get the current weekday index and time.
    # BUG FIX: strftime('%d') is the day of the *month* (1-31), but the
    # ``week`` dict is keyed by weekday index 0-5, so the lookup could
    # essentially never match. datetime.weekday() returns Monday=0 .. Sunday=6,
    # matching the keys of ``week``.
    weekDay = datetime.now().weekday()
    Time = float(datetime.now().strftime('%H.%M'))
    # try sending a message unless a KeyError occurs,
    # i.e. only send a message if the time is a key in the day's dictionary
    # (Sunday has no entry in ``week`` and is silently skipped the same way).
    try:
        sendMessage = 'You have (to)'+week[weekDay][Time]
        sms_twilio(sendMessage)
    except KeyError:
        pass
    # sleep for a minute, so that when the test case matches, it sends only "one" message
    time.sleep(60)
# a delta of about 10 seconds will be present (from actual time) depending on when you run the script,
# so make sure your times are a few minutes ahead of schedule.
| {
"repo_name": "Mik-the-koder/timeTable",
"path": "bla.py",
"copies": "1",
"size": "2035",
"license": "mit",
"hash": 99200525214457840,
"line_mean": 31.9166666667,
"line_max": 165,
"alpha_frac": 0.6820638821,
"autogenerated": false,
"ratio": 3.1550387596899223,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4337102641789922,
"avg_score": null,
"num_lines": null
} |
"""A Script that tags your movie files.
Run the script in a folder containing the mp4/mkv movie files with their
filename as the movie's title.
This script might seem a little messy and ugly and I know maybe there is
better and effecient way to do some of the tasks.
but I am unaware of them at the moment and am a begginer in Python and
this is my first, or maybe second python script.
"""
import os
import subprocess
import urllib
import shlex
import linecache
import sys
from json import JSONDecoder
import tmdbsimple as tmdb
from imdbpie import Imdb
from mutagen.mp4 import MP4, MP4Cover
# The following subtitle codecs are ingored if found in the file as they are
# not supported by the mp4 container. These are mainly picture-based subtitles
sub_codec_blacklist = ("dvdsub", "dvd_subtitle", "pgssub", "hdmv_pgs_subtitle")
def collect_stream_metadata(filename):
    """Run ffprobe on the media file *filename* and return its JSON output.

    The returned value is a dict; its 'streams' key holds one metadata entry
    per stream present in the file.
    """
    probe_cmd = 'ffprobe -i "{}" -show_streams -of json'.format(filename)
    probe = subprocess.run(shlex.split(probe_cmd),
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE,
                           universal_newlines=True)
    return JSONDecoder().decode(probe.stdout)
def PrintException():
    """Print the file name, line number, source line and exception object of
    the exception that is currently being handled."""
    _, exc_obj, tb = sys.exc_info()
    frame = tb.tb_frame
    line_no = tb.tb_lineno
    file_name = frame.f_code.co_filename
    linecache.checkcache(file_name)
    source_line = linecache.getline(file_name, line_no, frame.f_globals)
    print('\nEXCEPTION IN ({}, LINE {} "{}"): {}'.format(
        file_name, line_no, source_line.strip(), exc_obj))
# Setting the API key for usage of TMDB API
# NOTE(review): hard-coded API key checked into source control — consider
# loading it from an environment variable instead.
tmdb.API_KEY = 'b888b64c9155c26ade5659ea4dd60e64'
def collect_files(file_type):
    """Return the names of all files in the current working directory whose
    name ends with *file_type*.

    e.g. collect_files('txt') returns a list of all txt files.
    """
    return [name for name in os.listdir(os.getcwd())
            if name.endswith(file_type)]
def get_common_files(mediafile_list, srtfile_list):
    """
    Return a sorted list of filenames that appear in both mediafile_list and
    srtfile_list, comparing names with their 4-character extension stripped.
    The returned names carry the media files' extension.

    Returns [] when either list is empty (previously mediafile_list[0]
    raised IndexError on an empty media list).
    """
    if not mediafile_list or not srtfile_list:
        return []
    # All media files passed in share one extension (e.g. '.mp4' or '.mkv').
    media_type = mediafile_list[0][-4:]
    media_names = {name[:-4] for name in mediafile_list}
    subtitle_names = {name[:-4] for name in srtfile_list}
    common_files = [name + media_type for name in media_names & subtitle_names]
    common_files.sort()
    return common_files
def remove_common_files(list1, list2):
    """
    Return a sorted list of the elements of list1 that do not occur in list2 —
    i.e. the subset of list1 with the common elements removed.
    """
    unique_to_first = set(list1) - set(list2)
    return sorted(unique_to_first)
def start_process(filenames, mode):
    """
    This is the main funtion of the script
    where it does its main processing.\n
    filenames is the list of files to be processed\n
    mode = 1,2,3 or 4\n
    1 means mp4 to tagged mp4\n
    2 means mp4 with sub to subbed and tagged mp4\n
    3 means mkv to tagged mp4\n
    4 means mkv with sub to subbed and tagged mp4

    For each file: looks up the title on IMDb (prompting the user when no
    movie result is found), remuxes with ffmpeg if needed, fetches a poster
    from TMDb unless a '<name>.jpg' already exists, and writes year/title/
    comment/genre tags plus the cover into the resulting mp4.  Failures are
    collected in the module-level ``errored_files`` list.
    """
    for filename in filenames:
        try:
            # Filename without its 4-character extension is used as the search title.
            title = filename[:-4]
            stream_md = collect_stream_metadata(filename)
            streams_to_process = []
            dvdsub_exists=False
            # Keep every stream whose codec is not a blacklisted (picture-based)
            # subtitle codec; those cannot go into an mp4 container.
            for stream in stream_md['streams']:
                if not stream['codec_name'] in sub_codec_blacklist:
                    streams_to_process.append(stream['index'])
                else:
                    dvdsub_exists=True
            print('\nSearching IMDb for "{}"'.format(title))
            imdb = Imdb()
            movie_results = []
            results = imdb.search_for_title(title)
            for result in results:
                if result['type'] == "feature":
                    movie_results.append(result)
            # Keep prompting until at least one feature-film result is found.
            if not movie_results:
                while not movie_results:
                    title = input('\nNo results for "' + title +
                                  '" Enter alternate/correct movie title >> ')
                    results = imdb.search_for_title(title)
                    for result in results:
                        if result['type'] == "feature":
                            movie_results.append(result)
            # The most prominent result is the first one
            # mpr - Most Prominent Result
            mpr = movie_results[0]
            print('\nFetching data for {} ({})'.format(mpr['title'],
                                                       mpr['year']))
            # imdb_movie is a dict of info about the movie
            imdb_movie = imdb.get_title(mpr['imdb_id'])
            imdb_movie_title = imdb_movie['base']['title']
            imdb_movie_year = imdb_movie['base']['year']
            imdb_movie_id = mpr['imdb_id']
            imdb_movie_rating = imdb_movie['ratings']['rating']
            # Fall back to the first plot summary when no outline exists.
            if not 'outline' in imdb_movie['plot']:
                imdb_movie_plot_outline = (imdb_movie['plot']['summaries'][0]
                                           ['text'])
                print("\nPlot outline does not exist. Fetching plot summary "
                      "instead.\n\n")
            else:
                imdb_movie_plot_outline = imdb_movie['plot']['outline']['text']
            # Composing a string to have the rating and the plot of the
            # movie which will go into the 'comment' metadata of the
            # mp4 file.
            imdb_rating_and_plot = str('IMDb rating ['
                                       + str(float(imdb_movie_rating))
                                       + '/10] - '
                                       + imdb_movie_plot_outline)
            imdb_movie_genres = imdb.get_title_genres(imdb_movie_id)['genres']
            # Composing the 'genre' string of the movie.
            # I use ';' as a delimeter to searate the multiple genre values
            genre = ';'.join(imdb_movie_genres)
            newfilename = (imdb_movie_title
                           + ' ('
                           + str(imdb_movie_year)
                           + ').mp4')
            # We don't want the characters not allowed in a filename
            newfilename = (newfilename
                           .replace(':', ' -')
                           .replace('/', ' ')
                           .replace('?', ''))
            command = ""
            # Build "-map 0:N" arguments for every stream that survived the
            # subtitle-codec blacklist above.
            stream_map = []
            for f in streams_to_process:
                stream_map.append("-map 0:{}".format(f))
            stream_map_str = ' '.join(stream_map)
            if mode == 1:
                # it is required to rename it as its already an mp4 file that
                # wasn't proccessed by ffmpeg
                os.rename(filename, newfilename)
            if mode == 2 or mode == 4:
                # Mux the sidecar .srt into the output as mov_text subtitles.
                command = ('ffmpeg -i "'
                           + filename
                           + '" -sub_charenc UTF-8 -i "'
                           + filename[:-4]
                           + '.srt" '
                           + stream_map_str
                           + ' -map 1 -c copy -c:s mov_text '
                           '"' + newfilename + '"')
                subprocess.run(shlex.split(command))
            if mode == 3:
                # Remux mkv -> mp4, converting any text subtitles to mov_text.
                command = ('ffmpeg -i '
                           + '"' + filename + '" '
                           + stream_map_str
                           + ' -c copy -c:s mov_text '
                           '"' + newfilename + '"')
                subprocess.run(shlex.split(command))
            if dvdsub_exists:
                print("\nRemoved DVD Subtitles due to uncompatibility with "
                      "mp4 file format")
            # The poster is fetched from tmdb only if there is no file
            # named " filename + '.jpg' " in the working directory
            # this way user can provide their own poster image to be used
            poster_filename = filename[:-4] + '.jpg'
            if not os.path.isfile(poster_filename):
                print('\nFetching the movie poster...')
                tmdb_find = tmdb.Find(imdb_movie_id)
                tmdb_find.info(external_source = 'imdb_id')
                path = tmdb_find.movie_results[0]['poster_path']
                complete_path = r'https://image.tmdb.org/t/p/w780' + path
                uo = urllib.request.urlopen(complete_path)
                with open(poster_filename, "wb") as poster_file:
                    poster_file.write(uo.read())
                    poster_file.close()
            # Attach the cover art and the text tags to the output file.
            video = MP4(newfilename)
            with open(poster_filename, "rb") as f:
                video["covr"] = [MP4Cover(
                    f.read(),
                    imageformat=MP4Cover.FORMAT_JPEG)]
            video['\xa9day'] = str(imdb_movie_year)
            video['\xa9nam'] = imdb_movie_title
            video['\xa9cmt'] = imdb_rating_and_plot
            video['\xa9gen'] = genre
            print('\nAdding poster and tagging file...')
            try:
                video.save()
            # I have encounterd this error in pevious version
            # of script, now I handle it by removing the metadata
            # of the file. That seems to solve the probelem
            except OverflowError:
                remove_meta_command = ('ffmpeg -i "' + newfilename
                                       + '" -codec copy -map_metadata -1 "'
                                       + newfilename[:-4] + 'new.mp4"')
                subprocess.run(shlex.split(remove_meta_command))
                video_new = MP4(newfilename[:-4] + 'new.mp4')
                with open(poster_filename, "rb") as f:
                    video_new["covr"] = [MP4Cover(
                        f.read(),
                        imageformat=MP4Cover.FORMAT_JPEG)]
                video_new['\xa9day'] = str(imdb_movie_year)
                video_new['\xa9nam'] = imdb_movie_title
                video_new['\xa9cmt'] = imdb_rating_and_plot
                video_new['\xa9gen'] = genre
                print('\nAdding poster and tagging file...')
                try:
                    video_new.save()
                    # The retagged copy goes to 'auto fixed files'; the
                    # original (failed) output is removed.
                    if not os.path.exists('auto fixed files'):
                        os.makedirs('auto fixed files')
                    os.rename(newfilename[:-4]
                              + 'new.mp4', 'auto fixed files\\'
                              + newfilename[:-4] + '.mp4')
                    os.remove(newfilename)
                except OverflowError:
                    errored_files.append(filename
                                         + (' - Could not save even after'
                                            'striping metadata'))
                    continue
            os.remove(poster_filename)
            print('\n' + filename
                  + (' was proccesed successfuly!\n\n===================='
                     '======================================'))
        except Exception as e:
            # Record the failure and move on to the next file.
            print('\nSome error occured while processing '
                  + filename
                  + '\n\n====================================================')
            errored_files.append(filename + ' - ' + str(e))
            PrintException()
# Module-level driver: gather mp4/mkv/srt files from the working directory,
# pair media files with their subtitles, then dispatch each group to
# start_process() with the matching mode.
mp4_filenames = []
mkv_filenames = []
srt_filenames = []
mp4_with_srt_filenames = []
mkv_with_srt_filenames = []
# Collects "<filename> - <error>" strings for the summary printed at the end.
errored_files = []
mp4_filenames = collect_files('mp4')
mkv_filenames = collect_files('mkv')
srt_filenames = collect_files('srt')
# We check whether there are mp4 files and if yes, are there any
# srt files? if yes, then get the mp4 files that have srts associated with them
# then if there are mp4 files that have srt files associated with them then
# remove the others as they are to be proccessed separately
if not len(mp4_filenames) == 0:
    if not len(srt_filenames) == 0:
        mp4_with_srt_filenames = get_common_files(mp4_filenames,
                                                  srt_filenames)
        if not len(mp4_with_srt_filenames) == 0:
            mp4_filenames = remove_common_files(mp4_filenames,
                                                mp4_with_srt_filenames)
if not len(mkv_filenames) == 0:
    if not len(srt_filenames) == 0:
        mkv_with_srt_filenames = get_common_files(mkv_filenames, srt_filenames)
        if not len(mkv_with_srt_filenames) == 0:
            mkv_filenames = remove_common_files(mkv_filenames,
                                                mkv_with_srt_filenames)
# This is where the main process of conversion takes place.
# We simply check the file lists are not empty and then execute the main task
# depending on what type it is according to mode in the funtion "start_process"
if not len(mp4_filenames) == 0:
    start_process(mp4_filenames, 1)
if not len(mp4_with_srt_filenames) == 0:
    start_process(mp4_with_srt_filenames, 2)
if not len(mkv_filenames) == 0:
    start_process(mkv_filenames, 3)
if not len(mkv_with_srt_filenames) == 0:
    start_process(mkv_with_srt_filenames, 4)
if (len(mp4_filenames) == 0 and len(mkv_filenames) == 0
        and len(mp4_with_srt_filenames) == 0
        and len(mkv_with_srt_filenames) == 0):
    print('There were no MP4 or MKV files found in the directory')
else:
    # Checks if there were any files that caused the Overflow Error,
    # if yes then prints them out.
    if len(errored_files) == 0:
        print('\n\n\nAll files proccessed successfuly!')
    else:
        print('\n\n\nThe files that were not proccessed: \n')
        for er in errored_files:
            print(er)
| {
"repo_name": "prithugoswami/auto-movie-tagger",
"path": "amt.py",
"copies": "1",
"size": "14949",
"license": "mit",
"hash": -3343518404846245000,
"line_mean": 39.185483871,
"line_max": 79,
"alpha_frac": 0.5053849756,
"autogenerated": false,
"ratio": 4.373610298420129,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5378995274020129,
"avg_score": null,
"num_lines": null
} |
"""A script that takes external trigger scan data where the TDC + TDC time stamp were activated and creates
time walk plots from the data.
"""
import logging
from matplotlib import pyplot as plt
from matplotlib import cm
import tables as tb
import numpy as np
from scipy.interpolate import interp1d
import progressbar
from pybar.analysis import analysis_utils
def plsr_dac_to_charge(plsr_dac):
    """Convert a PlsrDAC setting into charge (electrons) with the linear calibration."""
    return 2777.63 + 72.16 * plsr_dac
def get_charge(max_tdc, tdc_calibration_values, tdc_pixel_calibration):
    '''Interpolate the per-pixel TDC calibration onto the TDC values 0 .. max_tdc - 1.

    Pixels whose calibration is all zero or contains no finite value are left
    at zero charge.  Out-of-range TDC values evaluate to 0 (fill_value).
    '''
    tdc_axis = np.arange(max_tdc)
    charge_calibration = np.zeros(shape=(80, 336, max_tdc))
    for col in range(80):
        for row in range(336):
            pixel_cal = tdc_pixel_calibration[col, row, :]
            # Skip pixels without a usable calibration.
            if not (np.any(pixel_cal != 0) and np.any(np.isfinite(pixel_cal))):
                continue
            valid = np.isfinite(pixel_cal)  # select valid calibration steps
            fit = interp1d(x=pixel_cal[valid], y=tdc_calibration_values[valid],
                           kind='slinear', bounds_error=False, fill_value=0)
            charge_calibration[col, row, :] = fit(tdc_axis)
    return charge_calibration
def get_charge_calibration(calibation_file, max_tdc):
    '''Open the hit-or calibration file and return the per-pixel charge calibration.'''
    with tb.open_file(calibation_file, mode="r") as calib_h5:
        # Index 1 of the last axis holds the TDC calibration values.
        pixel_calibration = calib_h5.root.HitOrCalibration[:, :, :, 1]
        parameter_values = calib_h5.root.HitOrCalibration.attrs.scan_parameter_values[:]
        return get_charge(max_tdc, parameter_values, pixel_calibration)
def get_time_walk_hist(hit_file, charge_calibration, event_status_select_mask, event_status_condition, hit_selection_conditions, max_timesamp, max_tdc, max_charge):
    """Build a 2D (charge, TDC time stamp) histogram from the cluster hits in *hit_file*.

    Hits are first filtered by TDC range and event status, then by each entry
    of *hit_selection_conditions*; TDC values are converted to charge via
    *charge_calibration* and plsr_dac_to_charge().  Returns the summed
    histogram and its bin edges.

    NOTE(review): the histogram and n_selected_hits accumulate over ALL
    selection conditions into one array — verify this is intended rather than
    one histogram per condition.
    """
    with tb.open_file(hit_file, 'r') as in_file_h5:
        cluster_hit_table = in_file_h5.root.ClusterHits
        logging.info('Select hits and create TDC histograms for %d cut conditions', len(hit_selection_conditions))
        progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=cluster_hit_table.shape[0], term_width=80)
        progress_bar.start()
        n_hits, n_selected_hits = 0, 0
        timewalk = np.zeros(shape=(200, max_timesamp), dtype=np.float32)
        # Iterate the table in event-aligned chunks to bound memory use.
        for cluster_hits, _ in analysis_utils.data_aligned_at_events(cluster_hit_table, chunk_size=10000000):
            n_hits += cluster_hits.shape[0]
            selected_events_cluster_hits = cluster_hits[np.logical_and(cluster_hits['TDC'] < max_tdc, (cluster_hits['event_status'] & event_status_select_mask) == event_status_condition)]
            for _, condition in enumerate(hit_selection_conditions):
                selected_cluster_hits = analysis_utils.select_hits(selected_events_cluster_hits, condition)
                n_selected_hits += selected_cluster_hits.shape[0]
                column_index, row_index, tdc, tdc_timestamp = selected_cluster_hits['column'] - 1, selected_cluster_hits['row'] - 1, selected_cluster_hits['TDC'], selected_cluster_hits['TDC_time_stamp']
                # Charge values for each Col/Row/TDC tuple from per pixel charge calibration
                # and PlsrDAC calibration in electrons
                charge_values = plsr_dac_to_charge(charge_calibration[column_index, row_index, tdc]).astype(np.float32)
                actual_timewalk, xedges, yedges = np.histogram2d(charge_values, tdc_timestamp, bins=timewalk.shape, range=((0, max_charge), (0, max_timesamp)))
                timewalk += actual_timewalk
            progress_bar.update(n_hits)
        progress_bar.finish()
        logging.info('Selected %d of %d hits = %1.1f percent', n_selected_hits, n_hits, float(n_selected_hits) / float(n_hits) * 100.0)
        return timewalk, xedges, yedges
def plot_timewalk(hist, xedges, yedges, title, max_charge, max_time_walk=50):
    """Plot mean time walk vs. charge (with RMS band) from a 2D timewalk histogram.

    The right-hand axis shows, per 5 ns step, the percentage of hits whose
    time walk is at most that value relative to the zero-timewalk reference.

    NOTE(review): ``yedges`` is scaled in place (caller's array is modified).
    """
    yedges *= 1.5625  # One TDC time stamp are 1/640 MHZ = 1.5625 ns
    # Bin centers of both histogram axes.
    timewalks = (yedges[0:-1] + yedges[1:]) / 2.
    charges = (xedges[0:-1] + xedges[1:]) / 2.
    # NOTE(review): this helper appears unused below — the mean is computed
    # inline with np.dot in the loop.
    def get_mean_from_histogram(counts, bin_positions):
        return np.dot(counts, np.array(bin_positions)) / np.sum(counts).astype('f4')
    # Rebin for more smooth time walk means
    cmap = cm.get_cmap('jet')
    cmap.set_bad('w', 1.0)
    hist = np.ma.masked_where(hist == 0, hist)
    mean, std = [], []
    for one_slice in hist:  # Clearly not the fastest way to calc mean + RMS from a 2D array, but one can understand it...
        mean.append(np.dot(one_slice, timewalks) / np.sum(one_slice))
        try:
            std.append(np.ma.std(np.ma.repeat(timewalks, one_slice, axis=0)))
        except TypeError:
            std.append(-1)
    mean, std = np.array(mean), np.array(std)
    mean = np.ma.masked_invalid(mean)
    std = np.ma.array(std, mask=mean.mask)
    # Time walk is relative, define lowest timewalk as
    # minimum mean time walk + 2 RMS of time walk spread of the pixels (here mean of 50 highest charge bins)
    zero_timewalk = np.ma.min(mean[-50:]) + 1 * np.ma.mean(std[-50:])
    percentages = []  # Percentages of hits for 5/10/15/... ns timewalk
    n_hits = np.ma.sum(hist)
    for timewalk_bin in range(0, max_time_walk + 1, 5):
        percentages.append(np.round(float(np.ma.sum(hist[:, np.where(timewalks <= zero_timewalk + timewalk_bin)])) / n_hits * 100.0, 1))
    mean -= zero_timewalk  # Time walk is relative
    plt.plot(charges, mean, '-', label='Mean')
    plt.yticks(np.arange(0, max_time_walk + 1, 5))  # One tick every 5 ns
    plt.fill_between(charges, mean - np.array(std), mean + np.array(std), color='gray', alpha=0.5, facecolor='gray', label='RMS')
    plt.ylim((0, max_time_walk))
    plt.xlim((0, max_charge))
    plt.title(title)
    plt.xlabel('Charge [e]')
    plt.ylabel('Time walk per %1.1f electrons [ns]' % (charges[1] - charges[0]))
    plt.legend(loc=0)
    plt.grid()
    # Secondary axis: cumulative hit percentage up to each time walk value.
    ax2 = plt.gca().twinx()
    ax2.set_yticks(np.arange(0, max_time_walk + 1, 5))  # One tick every 5 ns
    ax2.set_xlim((0, max_charge))
    ax2.set_ylim((0, max_time_walk))
    ax2.set_yticklabels(percentages)
    ax2.set_ylabel('Pixel hits up to corresponding time walk [%]')
    ax2.plot()
    plt.show()
if __name__ == '__main__':
    # TDC scan data and hit or calibration file
    hit_file = r'15_cmos_passive_1_ext_trigger_scan_interpreted.h5'
    calibration_file = r'14_cmos_passive_1_hit_or_calibration_calibration.h5'
    # Select pixel and TDC region
    col_span = [1, 80]  # Pixel column range to use for time walk analysis
    row_span = [1, 336]  # Pixel row range to use for time walk analysis
    max_tdc = 500
    max_timesamp = 2000
    # Event and hit cuts
    # BUG FIX: a trailing comma previously made this a 1-tuple, which breaks
    # the bitwise AND with the event status in get_time_walk_hist.
    event_status_select_mask = 0b0000111111111111  # the event status bits to cut on
    event_status_condition = 0b0000000100000000
    hit_selection = '(column > %d) & (column < %d) & (row > %d) & (row < %d)' % (col_span[0] + 1, col_span[1] - 1, row_span[0] + 5, row_span[1] - 5)  # deselect edge pixels for better cluster size cut
    hit_selection_conditions = ['(n_cluster==1) & (cluster_size == 1) & (relative_BCID >= 1) & (relative_BCID <= 3) & ((tot > 12) | ((TDC * 1.5625 - tot * 25 < 100) & (tot * 25 - TDC * 1.5625 < 100))) & %s' % hit_selection]
    # Create charge calibration from hit or calibration
    charge_calibration = get_charge_calibration(calibration_file, max_tdc)
    max_charge = plsr_dac_to_charge(np.amax(charge_calibration))  # Corresponds to max TDC, just needed for plotting
    # Create and plot time walk histogram
    timewalk_hist, xedges, yedges = get_time_walk_hist(hit_file,
                                                       charge_calibration,
                                                       event_status_select_mask,
                                                       event_status_condition,
                                                       hit_selection_conditions,
                                                       max_timesamp,
                                                       max_tdc,
                                                       max_charge)
    plot_timewalk(timewalk_hist, xedges, yedges, title='Time walk', max_charge=max_charge)
| {
"repo_name": "SiLab-Bonn/pyBAR",
"path": "pybar/scans/analyze_timewalk.py",
"copies": "1",
"size": "8651",
"license": "bsd-3-clause",
"hash": -5986124416934125000,
"line_mean": 52.0736196319,
"line_max": 223,
"alpha_frac": 0.6369205872,
"autogenerated": false,
"ratio": 3.4166666666666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45535872538666666,
"avg_score": null,
"num_lines": null
} |
"""A script to apply a page parser to an HTML file.
Used for easier debugging and visual checks."""
import argparse
from jobtechs.parser import NETLOC_TO_PARSER_MAP, TermsExtractor
def main():
    """Parse CLI arguments and run the selected page parser on an HTML file."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('parser_netloc', choices=list(NETLOC_TO_PARSER_MAP.keys()))
    arg_parser.add_argument(
        'infile', type=argparse.FileType('r'),
        help='An HTML-file we are trying to apply the parser to.')
    arg_parser.add_argument(
        '--techs-file', default='techs.txt',
        help='A file where the searched techs are listed: each tech on a separate line.')
    args = arg_parser.parse_args()

    html = args.infile.read()
    page_parser = NETLOC_TO_PARSER_MAP[args.parser_netloc]()
    extractor = TermsExtractor(args.techs_file)

    res, err = page_parser.parse_page('', html, extractor)
    if err:
        print(err)
        return
    print(res.url)
    print(res.company)
    print(*res.techs, sep=', ')
    print(res.site)


if __name__ == '__main__':
    main()
| {
"repo_name": "newtover/process_jobs",
"path": "jobtechs/scripts/apply_parser.py",
"copies": "1",
"size": "1093",
"license": "mit",
"hash": 8862534204941144000,
"line_mean": 30.2285714286,
"line_max": 89,
"alpha_frac": 0.6431838975,
"autogenerated": false,
"ratio": 3.607260726072607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4750444623572607,
"avg_score": null,
"num_lines": null
} |
""" A script to build specific fasta databases """
import sys
import logging
#===================================== Iterator ===============================
class Sequence:
    """Container for one FASTA record: a header line plus its residue string."""

    def __init__(self):
        # Both fields start empty and are filled in by the reader.
        self.header, self.sequence = "", ""
class FASTAReader:
    """
    FASTA db iterator. Returns a single FASTA sequence object.
    """
    # NOTE(review): implements the Python 2 iterator protocol (`next`); under
    # Python 3 this class would also need a `__next__` alias.
    def __init__(self, fasta_name):
        # The file handle stays open for the lifetime of the reader.
        self.fasta_file = open(fasta_name)
    def __iter__(self):
        return self
    def next(self):
        ''' Iteration '''
        # Skip forward to the next header line ('>'); EOF ends the iteration.
        while True:
            line = self.fasta_file.readline()
            if not line:
                raise StopIteration
            if line[0] == '>':
                break
        seq = Sequence()
        seq.header = line.rstrip().replace('\n','').replace('\r','')
        # Accumulate sequence lines until the next header or EOF. The position
        # is remembered (tell) before each read so that a header line can be
        # pushed back (seek) for the following next() call.
        while True:
            tail = self.fasta_file.tell()
            line = self.fasta_file.readline()
            if not line:
                break
            if line[0] == '>':
                self.fasta_file.seek(tail)
                break
            seq.sequence = seq.sequence + line.rstrip().replace('\n','').replace('\r','')
        return seq
#==============================================================================
def target_match(target, search_entry):
    """Return the first target found (case-insensitively) in search_entry, else None."""
    haystack = search_entry.upper()
    for candidate in target:
        if candidate.upper() in haystack:
            return candidate
    return None
def main():
    ''' the main function'''
    # Usage: filter_by_an_id.py <target-id file> <input FASTA> <output FASTA>
    logging.basicConfig(filename='filter_fasta_log',
                        level=logging.INFO,
                        format='%(asctime)s :: %(levelname)s :: %(message)s',)
    used_sequences = set()  # sequence bodies already written (duplicate detection)
    work_summary = {'wanted': 0, 'found':0, 'duplicates':0}
    targets = []
    # Read the list of wanted sequence identifiers, one per line.
    f_target = open(sys.argv[1])
    for line in f_target.readlines():
        targets.append(line.strip())
    f_target.close()
    logging.info('Read target file and am now looking for %d %s', len(targets), 'sequences.')
    work_summary['wanted'] = len(targets)
    homd_db = FASTAReader(sys.argv[2])
    output = open(sys.argv[3], "w")
    try:
        for entry in homd_db:
            target_matched_results = target_match(targets, entry.header)
            if target_matched_results:
                work_summary['found'] += 1
                # Each target is matched at most once.
                targets.remove(target_matched_results)
                if entry.sequence in used_sequences:
                    work_summary['duplicates'] += 1
                else:
                    used_sequences.add(entry.sequence)
                    output.write(entry.header)
                    output.write('\n')
                    output.write(entry.sequence)
                    output.write('\n')
    finally:
        output.close()
    logging.info('Completed filtering')
    # NOTE(review): dict.iteritems() is Python 2 only, consistent with the
    # Python 2 iterator style used elsewhere in this script.
    for parm, count in work_summary.iteritems():
        logging.info('%s ==> %d', parm, count)
if __name__ == "__main__":
    main()
| {
"repo_name": "jmchilton/TINT",
"path": "projects/TropixGalaxy/resources/client/edu/umn/msi/tropix/galaxy/client/proteomics/filter_by_an_id.py",
"copies": "1",
"size": "2992",
"license": "epl-1.0",
"hash": 7950386859765457000,
"line_mean": 29.8453608247,
"line_max": 99,
"alpha_frac": 0.4943181818,
"autogenerated": false,
"ratio": 4.3615160349854225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.013815364460755953,
"num_lines": 97
} |
"""A script to calculate monasca data size. CAUTION: It is very time consuming in a real environment!"""
import datetime
import os
from monascaclient import client
from monascaclient import ksclient
# Keystone credentials are taken from the standard OpenStack environment variables.
auth_url = os.environ.get('OS_AUTH_URL')
username = os.environ.get('OS_USERNAME')
password = os.environ.get('OS_PASSWORD')
project_name = os.environ.get('OS_PROJECT_NAME')
def mon_client():
    """Build an authenticated monasca API client from the OS_* environment settings."""
    ks_args = {
        'username': username,
        'password': password,
        'auth_url': auth_url,
        'project_name': project_name,
        # publicURL is the default but may require https and is slower than adminURL
        'endpoint_type': 'adminURL'
    }
    keystone = ksclient.KSClient(**ks_args)
    return client.Client('2_0', keystone.monasca_url, token=keystone.token)
m = mon_client().metrics
# Collect the distinct metric names (list_names can repeat names per dimension set).
unique_metrics = {}
for metric in m.list_names():
    unique_metrics[metric['name']] = 1
start = datetime.datetime.now() - datetime.timedelta(days=60)
aggregate = {}  # metric name -> total measurement count over the last 60 days
for metric in sorted(unique_metrics.keys()):
    print metric
    # A single huge period so each metric yields exactly one "count" statistic.
    statistics = m.list_statistics(name=metric,
                                   merge_metrics="True",
                                   statistics="count",
                                   period="1000000000",
                                   start_time=start.isoformat())
    for stat in statistics:
        key = stat['name']
        count = stat['statistics'][0][1]
        print " {} -> {}".format(key, count)
        if key in aggregate:
            aggregate[key] += count
        else:
            aggregate[key] = count
print "total measurements = {}".format(sum(aggregate.values()))
# Count metric definitions (name + dimension combinations).
metric_count = 0
for metric in sorted(unique_metrics.keys()):
    metric_count += len(m.list(name=metric))
print "total metrics =", metric_count
| {
"repo_name": "zqfan/openstack",
"path": "monasca/monasca_data_size.py",
"copies": "1",
"size": "1805",
"license": "apache-2.0",
"hash": -7113312868030885000,
"line_mean": 30.6666666667,
"line_max": 105,
"alpha_frac": 0.6088642659,
"autogenerated": false,
"ratio": 3.8322717622080678,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9933715080965173,
"avg_score": 0.0014841894285787035,
"num_lines": 57
} |
"""A script to calculate TG-51 dose using pylinac classes and following the TG-51 photon form"""
from pylinac.calibration import tg51
# Measurement conditions / chamber data used throughout the worksheet.
ENERGY = 6  # beam energy [MV]
TEMP = 22.1  # temperature [deg C]
PRESS = tg51.mmHg2kPa(755.0)  # pressure, converted from mmHg to kPa
CHAMBER = '30013'  # PTW
P_ELEC = 1.000  # electrometer correction factor
ND_w = 5.443  # Gy/nC
MU = 200  # delivered monitor units
# NOTE(review): this constant is defined but not passed below; 66.5 is given
# to TG51Photon as a literal instead.
CLINICAL_PDD = 66.5
tg51_6x = tg51.TG51Photon(
    unit='TrueBeam1',
    chamber=CHAMBER,
    temp=TEMP, press=PRESS,
    n_dw=ND_w, p_elec=P_ELEC,
    measured_pdd10=66.4, lead_foil=None,
    clinical_pdd10=66.5, energy=ENERGY,
    voltage_reference=-300, voltage_reduced=-150,
    m_reference=(25.65, 25.66, 25.65),
    m_opposite=(25.64, 25.65, 25.65),
    m_reduced=(25.64, 25.63, 25.63),
    mu=MU, tissue_correction=1.0
)
# Done!
print(tg51_6x.dose_mu_dmax)
# examine other parameters
print(tg51_6x.pddx)
print(tg51_6x.kq)
print(tg51_6x.p_ion)
# change readings if you adjust output
tg51_6x.m_reference_adjusted = (25.44, 25.44, 25.43)
# print new dose value
print(tg51_6x.dose_mu_dmax_adjusted)
# generate a PDF for record-keeping
tg51_6x.publish_pdf('TB1 6MV TG-51.pdf', notes=['My notes', 'I used Pylinac to do this; so easy!'], open_file=False)
| {
"repo_name": "jrkerns/pylinac",
"path": "docs/source/code_snippets/tg51_class.py",
"copies": "1",
"size": "1119",
"license": "mit",
"hash": 8672360867416695000,
"line_mean": 25.6428571429,
"line_max": 116,
"alpha_frac": 0.6827524576,
"autogenerated": false,
"ratio": 2.293032786885246,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3475785244485246,
"avg_score": null,
"num_lines": null
} |
"""A script to calculate TG-51 dose using pylinac functions and following the TG-51 photon form"""
from pylinac.calibration import tg51
# Measurement conditions / chamber data for the worksheet.
ENERGY = 6  # beam energy [MV]
TEMP = 22.1  # temperature [deg C]
PRESS = tg51.mmHg2kPa(755.0)  # pressure, converted from mmHg to kPa
CHAMBER = '30013'  # PTW
P_ELEC = 1.000  # electrometer correction factor
ND_w = 5.443  # Gy/nC
MU = 200  # delivered monitor units
CLINICAL_PDD = 66.5  # clinical percent depth dose at 10 cm
# Section 4 (beam quality)
# since energy is 6MV, PDDx == PDD, but we'll run it through anyway just for show
pdd10x = tg51.pddx(pdd=66.4, energy=ENERGY)
# Section 5 (kQ)
kq = tg51.kq_photon_pddx(chamber=CHAMBER, pddx=pdd10x)
# Alternatively, get kQ from TPR (way quicker to measure, without needing to measure TPR!)
# NOTE(review): this second assignment overwrites the PDDx-based kQ above;
# both routes are shown for illustration and only the TPR-based value is used.
tpr = tg51.tpr2010_from_pdd2010(pdd2010=(38.0/66.4))
kq = tg51.kq_photon_tpr(chamber=CHAMBER, tpr=tpr)
# Section 6 (Temp/Press)
p_tp = tg51.p_tp(temp=TEMP, press=PRESS)
# Section 7 (polarity)
m_reference = (25.66, 25.67, 25.66)
m_opposite = (25.67, 25.67, 25.68)
p_pol = tg51.p_pol(m_reference=m_reference, m_opposite=m_opposite)
# Section 8 (ionization)
m_reduced = (25.61, 25.62)
p_ion = tg51.p_ion(voltage_reference=300, voltage_reduced=150, m_reference=m_reference, m_reduced=m_reduced)
# Section 9 (M corrected)
m_corr = tg51.m_corrected(p_ion=p_ion, p_tp=p_tp, p_elec=P_ELEC, p_pol=p_pol, m_reference=m_reference)
# Section 10 (dose to water @ 10cm)
dose_10 = m_corr*kq*ND_w
dose_10_per_mu = dose_10 / MU
# Section 11 (dose/MU to water @ dmax)
# NOTE(review): CLINICAL_PDD is in percent (66.5); confirm whether the TG-51
# form intends division by the fractional PDD (0.665) here.
dose_ddmax = dose_10_per_mu / CLINICAL_PDD
# Done!
print(dose_ddmax)
| {
"repo_name": "jrkerns/pylinac",
"path": "docs/source/code_snippets/tg51_function.py",
"copies": "1",
"size": "1425",
"license": "mit",
"hash": -2801244462761614000,
"line_mean": 29.3191489362,
"line_max": 108,
"alpha_frac": 0.7010526316,
"autogenerated": false,
"ratio": 2.2161741835147746,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.34172268151147744,
"avg_score": null,
"num_lines": null
} |
"""A script to calculate TRS-398 dose using pylinac classes and following the TRS-398 photon form"""
from pylinac.calibration import trs398
# Measurement conditions / chamber data used throughout the worksheet.
ENERGY = 6  # beam energy [MV]
TEMP = 22.1  # temperature [deg C]
PRESS = trs398.mmHg2kPa(755.0)  # pressure, converted from mmHg to kPa
CHAMBER = '30013'  # PTW
K_ELEC = 1.000  # electrometer calibration factor
ND_w = 5.443  # Gy/nC
MU = 200  # delivered monitor units
CLINICAL_PDD = 66.5  # clinical percent depth dose at zref
trs398_6x = trs398.TRS398Photon(
    unit='TrueBeam1',
    setup='SSD',
    chamber=CHAMBER,
    temp=TEMP, press=PRESS,
    n_dw=ND_w,
    clinical_pdd_zref=CLINICAL_PDD,
    tpr2010=(38.2/66.6),
    energy=ENERGY,
    fff=False,
    k_elec=K_ELEC,
    voltage_reference=-300, voltage_reduced=-150,
    m_reference=(25.65, 25.66, 25.65),
    m_opposite=(25.64, 25.65, 25.65),
    m_reduced=(25.64, 25.63, 25.63),
    mu=MU, tissue_correction=1.0
)
# Done!
print(trs398_6x.dose_mu_zmax)
# examine other parameters
print(trs398_6x.kq)
print(trs398_6x.k_s)
print(trs398_6x.k_tp)
# change readings if you adjust output
trs398_6x.m_reference_adjusted = (25.44, 25.44, 25.43)
# print new dose value
print(trs398_6x.dose_mu_zmax_adjusted)
# generate a PDF for record-keeping
trs398_6x.publish_pdf('TB1 6MV TRS-398.pdf', notes=['My notes', 'I used Pylinac to do this; so easy!'], open_file=False)
| {
"repo_name": "jrkerns/pylinac",
"path": "docs/source/code_snippets/trs398_class.py",
"copies": "1",
"size": "1182",
"license": "mit",
"hash": -2684705553313882000,
"line_mean": 24.6956521739,
"line_max": 120,
"alpha_frac": 0.6785109983,
"autogenerated": false,
"ratio": 2.299610894941634,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8476147387490665,
"avg_score": 0.00039490115019407085,
"num_lines": 46
} |
''' A script to change some 'c' code style and errors.
I should refactor this :\ '''
import os,sys
# C type keywords recognised when checking whether main() declares a return type.
types = ["void", "int", "unsigned int", "long int", "char", "unsigned char", "float", "double"]
tokens = []  # stripped source tokens (one per input line), filled by tokenize()
white_space = []  # leading/trailing whitespace widths, two entries per token
# Rules
#----------------------
def fflush_before_scanf(i, elements):
    """True when token i contains fflush(stdin) and the next token is a scanf."""
    if i + 1 >= len(elements):
        return False
    return "fflush(stdin)" in elements[i] and "scanf" in elements[i + 1]
def is_fflush_stdin(element):
    """True when the token contains a call to fflush(stdin)."""
    return "fflush(stdin)" in element
def scanf_str_has_ampersand(element):
    """True for a scanf token that reads %s but still passes an & argument."""
    if "scanf" not in element:
        return False
    return "%s" in element and "&" in element
def main_not_specified_type(i, elements):
    """Return 1 when token i is a main() without a return type, else 0.

    Looks for any known C type (module-level `types` list) either in the
    previous token or directly before 'main' within the same token.
    """
    if i <= 0 or "main" not in elements[i]:
        return 0
    for type_name in types:
        if type_name in elements[i - 1] or (type_name + " main") in elements[i]:
            return 0  # main already has a type and is ok
    return 1  # type missing: caller will add 'int' and a return 0;
#---------------------------
def replace(line_set):
    """Apply the fixer rules to the given lines, rewriting the module-level `tokens`."""
    tokenize(line_set)
    i=0  # NOTE(review): dead assignment; immediately shadowed by the loop variable
    # NOTE(review): `tokens` is mutated (del/insert) while being enumerated;
    # the first two rules keep the length stable, but the main() rule grows
    # the list and shifts later indices -- verify this is intended.
    for i,token in enumerate(tokens):
        # Rule: fflush(stdin) right before a scanf -> getchar(); after it.
        if(fflush_before_scanf(i,tokens)):
            del tokens[i] #fflush(stdin)
            tokens.insert( i+1, "getchar();" )
            continue
        # Rule: any other fflush(stdin) -> getchar(); in place.
        elif(is_fflush_stdin(token)):
            del tokens[i]
            tokens.insert( i, "getchar();" )
            continue
        # Rule: scanf("%s", &buf) -> scanf("%s", buf): drop the ampersand.
        if(scanf_str_has_ampersand(token)):
            replace = token.replace( "&", "" )
            del tokens[i]
            tokens.insert( i, replace )
            continue
        # Rule: give main an explicit int type and append a return 0;
        if(main_not_specified_type(i, tokens)):
            del tokens[i]
            tokens.insert( i, token.replace("main", "int main") )
            tokens.insert( len(tokens)-1, "return 0;")
            white_space.insert( len(white_space)-2, white_space[len(white_space)-4] )
            white_space.insert( len(white_space)-1, 0)
            continue
def tokenize(line_set):
    """Split lines into stripped tokens plus per-line whitespace bookkeeping.

    For every line, appends the stripped text to `tokens` and two widths to
    `white_space`: the leading whitespace width, then the trailing width.
    """
    for i,line in enumerate(line_set):
        token = line.strip()
        tokens.append(token)
        if line.startswith(" "):
            # lstrip() keeps the trailing whitespace (incl. newline) -> trailing width;
            # rstrip() keeps the leading whitespace -> leading width.
            white_right = len(line.lstrip()) - len(token)
            white_left = len(line.rstrip()) - len(token)
            #print "LeftW:"+str(white_left)+" RightW:"+str(white_right)
        else:
            white_left = 0
            white_right = 0
        white_space.append(white_left)
        white_space.append(white_right)
        #print "Token:"+line.strip()
def fix_file( filename, result_filename ):
if (filename.split("."))[1] <> "c":
print "Seems that the file is not C/C++ source code\nOutput may be invalid...\n"
return False
result_filename = result + ".result"
try:
source_file = open( filename, "r" )
if os.path.exists( result_filename ):
print ( "The file {} exists".format(result) + "...")
print ( "Saving result as:" + result_filename )
result_file = open( result_filename, "w" )
else:
result_file = open( result, "w" )
lines=[]
for line in source_file:
#print "Read Token:" + line.strip()
lines.append(line)
replace(lines)
r=0
for i, token in enumerate(tokens):
result_file.write(" " * white_space[r] + token + (" " * white_space[r+1]) + "\n" )
r+=2
source_file.close()
result_file.close()
return True
except IOError:
print "Invalid or inexistant file...\nSpecify a valid source code file!...\n"
def main():
    """CLI entry point: garin_fixer.py <source.c> <result.c>."""
    if len(sys.argv) < 3 or not sys.argv[2]:
        print ("Try with \"garin_fixer.py source_code.c result_source_code.c\"")
        return
    fix_file(sys.argv[1], sys.argv[2])


if __name__ == '__main__':
    main()
| {
"repo_name": "someoneigna/python-projects",
"path": "garin_fixer/garin_fixer.py",
"copies": "1",
"size": "3730",
"license": "mit",
"hash": 5687201746793003000,
"line_mean": 27.9147286822,
"line_max": 98,
"alpha_frac": 0.5345844504,
"autogenerated": false,
"ratio": 3.5355450236966823,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45701294740966825,
"avg_score": null,
"num_lines": null
} |
"""A script to check and report the size of the grammars."""
import inspect
import os
import importlib
from darglint.parse.grammar import (
BaseGrammar,
)
def convert_filename_to_module(filename):
    """Turn a relative '.py' path into a dotted module path."""
    stem = filename[:-3]  # strip the trailing '.py'
    return stem.replace('/', '.')
def get_python_modules_in_grammars():
    """Return (filename, module path) pairs for python files in the grammars package."""
    rel_dir = 'darglint/parse/grammars'
    basepath = os.path.join(os.getcwd(), rel_dir)
    modules = []
    for entry in os.listdir(basepath):
        if not entry.endswith('.py'):
            continue
        module_path = convert_filename_to_module(os.path.join(rel_dir, entry))
        modules.append((entry, module_path))
    return modules
def get_grammars(module):
    """Return every BaseGrammar subclass found in the given module (excluding the base itself)."""
    grammars = []
    for _name, candidate in inspect.getmembers(module, inspect.isclass):
        if candidate is not BaseGrammar and issubclass(candidate, BaseGrammar):
            grammars.append(candidate)
    return grammars
def get_productions_in_grammar(grammar):
    """Count the productions declared on a grammar class."""
    productions = grammar.productions
    return len(productions)
if __name__ == '__main__':
    modules = get_python_modules_in_grammars()
    # Production counts accumulated per docstring style.
    count = {
        'google': 0,
        'sphinx': 0,
        'numpy': 0,
    }
    print('BY FILENAME')
    for grammar_type in count:
        # Grammar files are grouped by their filename prefix (e.g. 'google_*.py').
        for filename, filepath in filter(
            lambda x: x[0].startswith(grammar_type),
            modules
        ):
            mod = importlib.import_module(filepath)
            grammars = get_grammars(mod)
            amount = 0
            for grammar in grammars:
                amount += get_productions_in_grammar(
                    grammar
                )
            count[grammar_type] += amount
            print('{} {}'.format(filename.ljust(50), amount))
    print('\nTOTALS')
    for grammar in count:
        print('{}:\t{}'.format(grammar, count[grammar]))
| {
"repo_name": "terrencepreilly/darglint",
"path": "integration_tests/grammar_size.py",
"copies": "1",
"size": "1682",
"license": "mit",
"hash": 4287172595804345300,
"line_mean": 23.7352941176,
"line_max": 66,
"alpha_frac": 0.5558858502,
"autogenerated": false,
"ratio": 3.911627906976744,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9967513757176745,
"avg_score": 0,
"num_lines": 68
} |
"""A Script to compute the effective overlap between a laser and a star Airy disk,
when the starlight is dispersed, in a heterodyne laser frequency comb detection
scheme."""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import scipy.special as sp
#Start with a y(x) jinc function (i.e. an Airy disk) - plot the cross section.
x = 0.01*((np.arange(1000)+0.5) - 500)
y = 2*(sp.jn(1,x*np.pi)/(x*np.pi))
plt.clf()
plt.plot(x,y**2)
plt.plot(x-2,y**2)
plt.plot(x+2,y**2)
plt.plot(x+0.667,y**2,'--')
plt.plot(x-0.667,y**2,'--')
plt.plot([-1,-1,1,1],[-1,1,1,-1], ':', linewidth=2)
plt.axis((-2,2,-0.1,1))
plt.xlabel('Dispersion Axis')
plt.ylabel('Intensity')
#Now for some calculations
hw = 60 #Half-width
x = np.arange(2*hw)-hw
xy = np.meshgrid(x,x) #A 2D co-ordinate grid.
p = (xy[0]**2 + xy[1]**2) < (hw/12.0)**2 #A circle
im = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(p))) #A 2D Fourier transform of a circle (i.e. an image)
win = np.zeros((2*hw,2*hw))
win[hw-12:hw+12,hw-24:hw+24]=1 #A square window (i.e. a "pixel")
#Compute the maximum throughput into a pixel
print("Max Star Throughput: {0:6.2f}".format(np.sum(win*np.abs(im)**2)/np.sum(np.abs(im)**2)))
#Compute the minimum throughput into a pixel, when the dispersion places the image 2/3 of the way
#to the edge of the pixel, which we'll take as the limit of our electrical bandwidth.
print("Min Star Throughput: {0:6.2f}".format(np.sum(np.abs(np.roll(im,8,axis=0))**2*win)/np.sum(np.abs(im)**2)))
#Now compute the overlap between laser and starlight at this point - this is the real throughput
#at the edge of the electrical BW.
overlap = np.sum(np.real(np.roll(im,8,axis=0)*np.conj(im)*win))/np.sum(win*np.abs(im)**2)
print("Min Star Overlap: {0:6.2f}".format(overlap))
print("Effective min Throughput: {0:6.2f}".format(overlap**2)) | {
"repo_name": "mikeireland/pfi",
"path": "pfi/overlap.py",
"copies": "1",
"size": "1957",
"license": "mit",
"hash": -4490286485062641700,
"line_mean": 43.5,
"line_max": 112,
"alpha_frac": 0.6438426162,
"autogenerated": false,
"ratio": 2.6268456375838927,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37706882537838926,
"avg_score": null,
"num_lines": null
} |
# A script to convert an entire directory to a C array, in the "romfs" format
import os, sys
import re
import struct
# Shared state for the line-formatting helper _add_data().
_crtline = ' '  # text of the output line currently being built
_numdata = 0  # number of byte values already on the current line
_bytecnt = 0  # total number of bytes emitted so far
maxlen = 30  # maximum file-name length stored in the romfs image
def _add_data( data, outfile, moredata = True ):
global _crtline, _numdata, _bytecnt
_bytecnt = _bytecnt + 1
if moredata:
_crtline = _crtline + "0x%02X, " % data
else:
_crtline = _crtline + "0x%02X" % data
_numdata = _numdata + 1
if _numdata == 16 or not moredata:
outfile.write( _crtline + '\n' )
_crtline = ' '
_numdata = 0
# dirname - the directory where the files are located.
# outname - the name of the C output
# flist - list of files
# mode - preprocess the file system:
# "verbatim" - copy the files directly to the FS as they are
# "compile" - precompile all files to Lua bytecode and then copy them
# "compress" - keep the source code, but compress it with LuaSrcDiet
# compcmd - the command to use for compiling if "mode" is "compile"
# Returns True for OK, False for error
def mkfs( dirname, outname, flist, mode, compcmd ):
    """Pack the listed files into a C header containing a romfs byte array.

    See the comment block above for the parameter meanings. NOTE(review):
    `mode` and `compcmd` are not used in this implementation. Returns True
    for OK, False for error. Uses the Python 2 only `file()` builtin.
    """
    # Try to create the output files
    outfname = outname + ".h"
    try:
        outfile = file( outfname, "wb" )
    except:
        print( "Unable to create output file" )
        return False
    # Reset the _add_data() line-formatting state.
    global _crtline, _numdata, _bytecnt
    _crtline = ' '
    _numdata = 0
    _bytecnt = 0
    # Generate headers
    outfile.write( "// Generated by mkfs.py\n// DO NOT MODIFY\n\n" )
    outfile.write( "#ifndef __%s_H__\n#define __%s_H__\n\n" % ( outname.upper(), outname.upper() ) )
    outfile.write( "const unsigned char %s_fs[] = \n{\n" % ( outname.lower() ) )
    # Process all files
    for fname in flist:
        if len( fname ) > maxlen:
            print( "Skipping %s (name longer than %d chars)" % ( fname, maxlen ) )
            continue
        # Get actual file name
        realname = os.path.join( dirname, fname )
        # Ensure it actually is a file
        if not os.path.isfile( realname ):
            print( "Skipping %s ... (not found or not a regular file)" % fname )
            continue
        # Try to open and read the file
        try:
            crtfile = file( realname, "rb" )
        except:
            outfile.close()
            os.remove( outfname )
            print( "Unable to read %s" % fname )
            return False
        filedata = crtfile.read()
        crtfile.close()
        # Write name, size, id, numpars
        # Entry layout: ASCIIZ file name, then 16-bit little-endian size.
        for c in fname:
            _add_data( ord( c ), outfile )
        _add_data( 0, outfile ) # ASCIIZ
        size_l = len( filedata ) & 0xFF
        size_h = ( len( filedata ) >> 8 ) & 0xFF
        _add_data( size_l, outfile )
        _add_data( size_h, outfile )
        # Then write the rest of the file
        for c in filedata:
            _add_data( ord( c ), outfile )
        # Report
        print( "Encoded file %s (%d bytes)" % ( fname, len( filedata ) ) )
    # All done, write the final "0" (terminator)
    _add_data( 0, outfile, False )
    outfile.write( "};\n\n#endif\n" );
    outfile.close()
    print( "Done, total size is %d bytes" % _bytecnt )
    return True
| {
"repo_name": "simplemachines-italy/hempl",
"path": "mkfs.py",
"copies": "1",
"size": "2977",
"license": "mit",
"hash": -6253239516938278000,
"line_mean": 28.4752475248,
"line_max": 98,
"alpha_frac": 0.6090023514,
"autogenerated": false,
"ratio": 3.214902807775378,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8990361812024645,
"avg_score": 0.06670866943014667,
"num_lines": 101
} |
"""A script to create a plot of the number of issues in a project.
Uses GitHub's API v4 which uses graphql
https://developer.github.com/v4/
For more instructions see the the corresponding Jupyter notebook:
project_stats.ipynb
"""
import argparse
import datetime
from dateutil import parser as date_parser
import json
import logging
import numpy as np
import os
import pandas as pd
import pprint
import requests
def run_query(query, headers):
    """POST a GraphQL query to the GitHub v4 API and return the decoded JSON."""
    response = requests.post('https://api.github.com/graphql', json={'query': query}, headers=headers)
    if response.status_code != 200:
        raise Exception("Query failed to run by returning code of {}. {}".format(response.status_code, query))
    return response.json()
class ProjectStats(object):
    """Fetches and aggregates issue statistics for a GitHub project board.

    Walks all cards in the board's first column via the GraphQL (v4) API and
    builds a per-label time series of issue open/close events.
    """

    def __init__(self, project):
        # `project` is the search string used to locate the project board.
        self.query_template = None
        self.project = project

    def init_df(self, offset=0, size=300):
        """Initialize a dataframe of the specified size."""
        # Rows are pre-allocated and filled in later by fetch_data().
        return pd.DataFrame({
            "time": [datetime.datetime.now()] * size,
            "delta": np.zeros(size),
            "label": [""] * size,
        }, index=offset + np.arange(size))

    def grow_df(self, df, offset=0, size=300):
        # Append `size` fresh rows starting at index `offset`.
        return pd.concat([df, self.init_df(offset, size)])

    def main(self):
        self.fetch_data()
        self.compute_stats()

    def compute_stats(self):
        """Pivot the raw event rows into cumulative open/total counts per label."""
        # Compute a column to store total delta
        total_delta = np.max(np.row_stack([self.data["delta"].values,
                                           np.zeros(self.data.shape[0])]), axis=0)
        self.data["total_delta"] = total_delta
        self.stats = self.data.pivot_table(values=["delta", "total_delta"],
                                           index=['time'],
                                           columns=['label'], fill_value=0,
                                           aggfunc=np.sum)
        self.stats = self.stats.cumsum()
        self.stats = self.stats.rename(mapper={"delta": "open", "total_delta":"total"},
                                       axis='columns')

    def fetch_data(self):
        """Page through the board's cards and record issue open/close events.

        Populates self.data with one row per (event, label): event time,
        delta (+1 opened / -1 closed) and the label name.
        """
        logging.getLogger().setLevel(logging.INFO)
        # NOTE(review): this argument parser is created but never used.
        parser = argparse.ArgumentParser(
            description="Find issues that need attention.")
        # Create a gitor using an access token
        if not os.getenv("GITHUB_TOKEN"):
            logging.error("Environment variable GITHUB_TOKEN must be set")
            return
        # We need to look at ProjectCard and then ProjectCard item
        # https://developer.github.com/v4/object/projectcard/
        # TODO(jlewi): Take project as an argument
        self.query_template="""{{
  organization(login:"kubeflow") {{
    projects(last:1 search:"{project}") {{
      totalCount
      edges {{
        node {{
          name
          url
          columns(first:1 {columns_cursor}) {{
            totalCount
            pageInfo {{
              endCursor
              hasNextPage
            }}
            edges {{
              node {{
                cards(first:100 {cards_cursor}) {{
                  totalCount
                  pageInfo {{
                    endCursor
                    hasNextPage
                  }}
                  edges {{
                    node {{
                      content {{
                        __typename
                        ... on Issue {{
                          url
                          title
                          number
                          createdAt
                          closedAt
                          labels(last:15) {{
                            totalCount
                            edges {{
                              node {{
                                name
                              }}
                            }}
                          }}
                        }}
                      }}
                    }}
                  }}
                }}
              }}
            }}
          }}
        }}
      }}
    }}
  }}
}}
"""
        # Times at which issues were opened and closed
        # NOTE(review): opened/closed/issues/issue_numbers are never used below.
        opened = []
        closed = []
        headers = {"Authorization": "Bearer {0}".format(os.getenv("GITHUB_TOKEN"))}
        columns_cursor = None
        has_next_columns_page = True
        issues = []
        issue_numbers = []
        # Create a dataframe to store the results
        data = self.init_df()
        num_items = 0
        # We have two paginations to do:
        # over columns and over cards
        while has_next_columns_page:
            columns_cursor_text = ""
            if columns_cursor:
                columns_cursor_text = "after:\"{0}\"".format(columns_cursor)
            has_next_cards_page = True
            cards_cursor = None
            while has_next_cards_page:
                cards_cursor_text = ""
                if cards_cursor:
                    cards_cursor_text = "after:\"{0}\"".format(cards_cursor)
                query = self.query_template.format(project=self.project,
                                                   columns_cursor=columns_cursor_text,
                                                   cards_cursor=cards_cursor_text)
                result = run_query(query, headers=headers)  # Execute the query
                projects_connections = result["data"]["organization"]["projects"]
                if projects_connections["totalCount"] != 1:
                    raise ValueError("Total number of projects: Got {0} want 1".format(
                        projects_connections["totalCount"]))
                project = projects_connections["edges"][0]["node"]
                columns_connection = project["columns"]
                cards_connection = columns_connection["edges"][0]["node"]["cards"]
                cards_cursor = cards_connection["pageInfo"]["endCursor"]
                has_next_cards_page = cards_connection["pageInfo"]["hasNextPage"]
                # If we reached the end of cards for this column increment the columns_page
                # cards cursor
                if not has_next_cards_page:
                    has_next_columns_page = columns_connection["pageInfo"]["hasNextPage"]
                    columns_cursor = columns_connection["pageInfo"]["endCursor"]
                for e in cards_connection["edges"]:
                    n = e["node"]
                    c = n["content"]
                    if not c:
                        continue
                    # Cards can contain pull requests and these may not have labels
                    if not "labels" in c:
                        continue
                    labels_connections = c["labels"]
                    if labels_connections["totalCount"] > 15:
                        raise ValueError("Number of total labels exceeds the number "
                                         "fetched; need to add pagination")
                    labels = labels_connections["edges"]
                    label_names = []
                    for l in labels:
                        label_names.append(l["node"]["name"])
                    if not label_names:
                        label_names.append("nolabels")
                    # Each issue contributes up to two events (open/close) per label.
                    num_entries = len(label_names) * 2
                    if num_items + num_entries > data.shape[0]:
                        # Grow the dataframe
                        data = self.grow_df(data, offset=data.shape[0])
                    for f in ["createdAt", "closedAt"]:
                        if not c[f]:
                            continue
                        delta = 1
                        if f == "closedAt":
                            delta = -1
                        for l in label_names:
                            if delta > 0:
                                data["time"].at[num_items] = date_parser.parse(c["createdAt"])
                            else:
                                data["time"].at[num_items] = date_parser.parse(c["closedAt"])
                            data["delta"].at[num_items] = delta
                            data["label"].at[num_items] = l
                            num_items += 1
        # Trim the pre-allocated frame to the rows actually filled in.
        self.data = data[:num_items]
if __name__ == "__main__":
    import sys  # local import; the module does not import sys at top level
    # BUG FIX: ProjectStats.__init__ requires a `project` argument, so the
    # previous ProjectStats() call raised TypeError. Take the project board
    # search string from the command line (empty string matches the last board).
    c = ProjectStats(sys.argv[1] if len(sys.argv) > 1 else "")
c.main() | {
"repo_name": "kubeflow/community",
"path": "scripts/project_stats.py",
"copies": "1",
"size": "7517",
"license": "apache-2.0",
"hash": -3541369354438406000,
"line_mean": 29.9382716049,
"line_max": 117,
"alpha_frac": 0.5184249036,
"autogenerated": false,
"ratio": 4.509298140371926,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5527723043971926,
"avg_score": null,
"num_lines": null
} |
#A script to create custom POX flows
#http://github.com/abh15/pox-flowgen
#import random
#fname="poxscript_"+str(random.randint(1000,9999))+".py"
#uncomment above lines if each script with a new name is required
fname="controllerScript.py"
file=open(fname,'w')
file.close()
#----create a new empty file----
target=open(fname,'w')
target.write("\"\"\"\nScript created by POX custom flow generator (PCFG)\n\"\"\"\n")
target.write("from pox.core import core \nfrom pox.lib.addresses import IPAddr \nfrom pox.lib.addresses import EthAddr \nimport pox.openflow.libopenflow_01 as of")
target.write("\nlog = core.getLogger()\n")
#----print importing
def check(): #call match again or exit depending on if i/p is A or M
f=raw_input("Enter A to move to action\nM to stay in match menu\n>")
if f=="M":
print t
q=raw_input(">")
match(int(q))
else:
pass
#---check if want to stay in match menu i.e to create multiple matches-----
def check2(str1,str2): #call actions again or exit depending on if i/p is A or N
global msg
msg.append(str1+str2) #create/concat array of all used actions
f=raw_input("Enter A to stay in action\nF to create Flow\n>")
if f=="A":
print t2
q=raw_input(">")
actions(int(q))
else:
pass
#---check if want to stay in action menu i.e to create multiple actions----
def checkflows():
    """Count down the flows remaining for this switch and continue."""
    global fooflows
    fooflows -= 1
    if fooflows > 0:
        fl()           # more flows left for the current switch
    else:
        checkswitch()  # current switch done; see if another switch remains
#-------keeps in flow menu alive until all flows are specified---------
def checkswitch():
    """Count down remaining switches; start the next switch/flow cycle."""
    global foox
    foox -= 1
    if foox > 0:
        # Another switch to configure: select it, then its first flow.
        switch()
        fl()
#-------keeps in switch/flow menu until all switches/flows are specified---------
def match(k):
    """Prompt for one OpenFlow match field and write it into the script.

    ``k`` indexes the ``options`` dispatch table below.  Before
    dispatching, the flow's boilerplate is emitted: the switch DPID
    assignment, the ofp_flow_mod constructor and a zero cookie.  Each
    inner helper asks for one value, writes a ``<flow>msg.match.*`` (or
    ``msg.priority``) line into the generated script, then calls check()
    so more match fields can be added.  Relies on module globals:
    name (current flow prefix), target (output file), baz, sw_no, dpid.
    """
    #global name
    def inport():
        f=raw_input("Enter inport>")
        target.write(name+"msg.match.in_port="+str(f)+"\n")
        check() #check if more matching actions are going to be added
    def dltype():
        f=raw_input("Enter dltype>")
        target.write(name+"msg.match.dl_type="+str(f)+"\n")
        check()
    def nwtos():
        f=raw_input("Enter nwtos>")
        target.write(name+"msg.match.nw_tos="+str(f)+"\n")
        check()
    def nwproto():
        f=raw_input("Enter nwproto>")
        target.write(name+"msg.match.nw_proto="+str(f)+"\n")
        check()
    def nwsrc():
        f=raw_input("Enter nwsrc>")
        target.write(name+"msg.match.nw_src=IPAddr(\""+f+"\")\n")
        check()
    def nwdst():
        f=raw_input("Enter nwdst>")
        target.write(name+"msg.match.nw_dst=IPAddr(\""+f+"\")\n")
        check()
    def dlvlan():
        f=raw_input("Enter dlvlan>")
        target.write(name+"msg.match.dl_vlan="+str(f))
        target.write("\n")
        check()
    def dlvlanpcp():
        f=raw_input("Enter dlvlanpcp>")
        target.write(name+"msg.match.dl_vlan_pcp="+str(f))
        target.write("\n")
        check()
    def dlsrc():
        f=raw_input("Enter dlsrc>")
        target.write(name+"msg.match.dl_src = EthAddr(\""+f+"\")\n")
        check()
    def dldst():
        f=raw_input("Enter dldst>")
        target.write(name+"msg.match.dl_dst = EthAddr(\""+f+"\")\n")
        check()
    def tpsrc():
        f=raw_input("Enter tpsrc>")
        target.write(name+"msg.match.tp_src="+str(f))
        target.write("\n")
        check()
    def tpdst():
        f=raw_input("Enter tp dst>")
        target.write(name+"msg.match.tp_dst="+str(f))
        target.write("\n")
        check()
    def priority():
        f=raw_input("Enter priority>")
        target.write(name+"msg.priority="+str(f))
        target.write("\n")
        check()
    # Dispatch table: menu number (printed by fl()) -> emitter helper.
    options={1:inport,2:dltype,3:nwtos,4:nwproto,5:nwsrc,
    6:nwdst,7:dlvlan,8:dlvlanpcp,9:dlsrc,10:dldst,11:tpsrc,12:tpdst,13:priority} #func_dictionary
    # Flow boilerplate written once per match() entry from fl().
    target.write("\n#"+name+" Match structure\n")
    target.write(baz[int(sw_no)]+"="+str(dpid)+"\n") #write dpid
    target.write(name+"msg = of.ofp_flow_mod()\n")
    target.write(name+"msg.cookie = 0\n")
    options[k]() #call the k'th function depending upon user input
#----------matching structure---------
def actions(k):
    """Prompt for one OpenFlow action and append it to the generated script.

    ``k`` indexes the ``options`` dispatch table below.  Each inner
    helper asks for the action's parameter, writes a line such as
    ``<flow>out = of.ofp_action_output(port=...)`` into the generated
    script, then calls check2() to register the emitted variable name
    (so it ends up in the flow's ``msg.actions`` list) and ask whether
    to add more actions.  Relies on module globals: name (current flow
    prefix) and target (output file).

    Fixes vs. the original: the enqueue helper wrote a variable named
    ``<flow>flow0enqueue`` while registering ``<flow>enqueue``, and the
    dstPort helper registered lowercase ``dstport`` while writing
    ``dstPort`` — both made the generated script's ``msg.actions`` list
    reference undefined names.
    """
    def vlan_id():
        v = raw_input("Enter vlan id>")
        target.write(name + "vlan_id = of.ofp_action_vlan_vid (vlan_vid=" + str(v) + ")")
        target.write("\n")
        check2(name, "vlan_id")  # check if more actions are going to be added
    def stripvlan():
        v = raw_input("Enter stripvlan yes or no>")
        if v == "yes":
            target.write(name + "stripvlan = of.ofp_action_strip_vlan ()")
            target.write("\n")
        else:
            target.write("\n")
        # NOTE(review): when the answer is "no", check2 still registers
        # <flow>stripvlan in msg.actions even though no such variable was
        # written — TODO confirm intended behaviour before restructuring.
        check2(name, "stripvlan")
    def out():
        v = raw_input("Enter out port>")
        target.write(name + "out = of.ofp_action_output(port =" + str(v) + ")")
        target.write("\n")
        check2(name, "out")
    def vlanPriority():
        v = raw_input("Enter vlan priority>")
        target.write(name + "vlanPriority = of.ofp_action_vlan_pcp (vlan_pcp=" + str(v) + ")")
        target.write("\n")
        check2(name, "vlanPriority")
    def enqueue():
        v = raw_input("Enter enq>")
        # Fixed: emit <flow>enqueue so the name matches what check2 records
        # (the original emitted <flow>flow0enqueue, which was never used).
        target.write(name + "enqueue = of.ofp_action_enqueue (enqueue = " + str(v) + ")")
        target.write("\n")
        check2(name, "enqueue")
    def srcPort():
        v = raw_input("Enter srcport>")
        target.write(name + "srcPort = of.ofp_action_tp_port.set_src = (tp_port = " + str(v) + ")")
        target.write("\n")
        check2(name, "srcPort")
    def dstPort():
        v = raw_input("Enter destport>")
        target.write(name + "dstPort = of.ofp_action_tp_port.set_dst = (tp_port = " + str(v) + ")")
        target.write("\n")
        # Fixed: register "dstPort" (was lowercase "dstport", which never
        # matched the dstPort variable written above).
        check2(name, "dstPort")
    def srcMAC():
        v = raw_input("Enter src MAC add>")
        target.write(name + "srcMAC = of.ofp_action_dl_addr.set_src(EthAddr(\"" + str(v) + "\"))")
        target.write("\n")
        check2(name, "srcMAC")
    def dstMAC():
        v = raw_input("Enter dst MAC add>")
        target.write(name + "dstMAC = of.ofp_action_dl_addr.set_dst(EthAddr(\"" + str(v) + "\"))")
        target.write("\n")
        check2(name, "dstMAC")
    def srcIP():
        v = raw_input("Enter source IP>")
        target.write(name + "srcIP = of.ofp_action_nw_addr.set_src(IPAddr(\"" + str(v) + "\"))")
        target.write("\n")
        check2(name, "srcIP")
    def dstIP():
        v = raw_input("Enter dstIP>")
        target.write(name + "dstIP = of.ofp_action_nw_addr.set_dst(IPAddr(\"" + str(v) + "\"))")
        target.write("\n")
        check2(name, "dstIP")
    def tos():
        v = raw_input("Enter tos>")
        target.write(name + "tos = of.ofp_action_nw_tos (nw_tos = " + str(v) + ")")
        target.write("\n")
        check2(name, "tos")
    # Dispatch table: menu number (printed by fl()) -> emitter helper.
    options = {1: vlan_id, 2: stripvlan, 3: out, 4: vlanPriority, 5: enqueue,
               6: srcPort, 7: dstPort, 8: srcMAC, 9: dstMAC, 10: srcIP, 11: dstIP, 12: tos}
    options[k]()  # select action based on user input
#----------actions structure---------
# NOTE(review): `global` at module scope is a no-op; foox is already global.
global foox
x=raw_input("Enter no. of switches\n>")
foox=int(x)  # countdown of switches still to configure (see checkswitch)
y=[]    # flows-per-switch counts, used to emit the sendToDPID lines at the end
baz=[]  # generated switch variable names ("switch0", "switch1", ...)
msg=[]  # action variable names accumulated for the current flow (see check2)
bar=[]  # generated flow names ("flow<sw>_<i>"), appended by fl()
#------------main------------
def switch(): #get number of switches,flows & dpid
    """Interactively select a switch, set its DPID and its flow count.

    Sets the module globals sw_no, dpid, flows, fooflows and extends
    baz (switch names) and y (flows-per-switch, consumed by the final
    sendToDPID emission loop).
    """
    global fooflows
    global baz
    global y
    global sw_no
    global dpid
    global flows
    print "Select switch:\n"
    # NOTE(review): every call re-appends all switch names, so baz holds
    # duplicates after the first switch; match() only indexes baz by switch
    # number, so the duplicates appear harmless.
    for i in xrange(0,int(x)):
        print str(i)+":"+"\tswitch"+str(i)
        baz.append("switch"+str(i))
    sw_no=raw_input(">")
    defaultdpid=int(sw_no)+1
    tbp=raw_input("Enter DPID of switch(a hex no.) or D for default dpid\n>")
    # NOTE(review): oct() yields a leading-zero octal *string* (e.g. '012');
    # the generated `switchN=0...` assignment therefore relies on Python 2
    # octal-literal syntax — TODO confirm this matches the intended DPID.
    if (tbp=="D"):
        dpid=oct(int(str(defaultdpid),10))
    else:
        dpid=oct(int(tbp,16))
    flows=raw_input("Enter no of flows\n>")
    fooflows=int(flows) #used for checkswitch func, possibly buggy
    y.append(int(flows)) #create list of no. of flowmsgs per switch for sendToDPID msgs
#----------switch structure------------
def fl(): #display available match/actions & get them
    """Drive one flow's definition: pick the flow, then its match, then
    its actions, then hand control to checkflows() for the next flow.

    Sets the global ``name`` ("flow<sw>_<n>") used as the variable prefix
    in the generated script, and clears ``msg`` after emitting the flow's
    ``msg.actions=[...]`` line.
    """
    global msg
    global bar
    global fl_no
    global t
    global t2
    global q
    global name
    print "Select flow:\n" #display flows to choose from
    for i in xrange(0,int(flows)):
        print str(i)+":"+"\tflow"+str(sw_no)+"_"+str(i)
        bar.append("flow"+str(sw_no)+"_"+str(i))
    fl_no=raw_input(">")
    name="flow"+sw_no+"_"+fl_no
    # NOTE(review): the menu text says "12:tpdstn" — likely a typo for
    # "tpdst" (display only; the dispatch in match() is unaffected).
    t= "\n1:inport\n2:dltype\n3:nwtos\n4:nwproto\n5:nwsrc\n6:nwdst\n7:dlvlan\n8:dlvlanpcp\n9:dlsrc\n10:dldst\n11:tpsrc\n12:tpdstn\n13:Priority"
    print t #choose a match & call match func
    q=raw_input(">")
    match(int(q))
    #----------------end match
    t2="1:vlanid\n2:stripvlan\n3:outport\n4:vlanprior\n5:enqueue\n6:srcport\n7:dstport\n8:srcmac\n9:dstmac\n10:srcip\n11:dstip\n12:tos"
    print t2
    w=raw_input(">")
    target.write("# ACTIONS----------------\n")
    actions(int(w)) #choose an action & call action func
    target.write(name+"msg.actions="+str(msg).replace('\'','')+"\n") #write the msg array containing actions used
    msg=[]
    #--------end actions
    checkflows() #check for more flows
#-----------flows structure------------
# Kick off the first switch/flow iteration; the check* helpers recurse
# through the remaining flows and switches.
switch()
fl()
#call functions in first iteration
# Emit install_flows(): one sendToDPID per flow of each switch.
target.write("\ndef install_flows(): \n\tlog.info(\" ### Installing static flows... ###\")\n")
for i in xrange(0,int(x)):
    for j in xrange(0,y[i]):
        target.write("\tcore.openflow.sendToDPID(switch"+str(i)+",flow"+str(i)+"_"+str(j)+"msg)\n")
    # NOTE(review): this "installed" log line is written once per switch,
    # not once after all switches — TODO confirm that is intended.
    target.write("\tlog.info(\"### Static flows installed. ###\")\n")
#---print function to install flows-----
target.write("def launch (): \n\tlog.info(\"####Starting...####\")\n\tcore.callDelayed (15, install_flows)\n\tlog.info(\"### Waiting for switches to connect.. ###\")")
#---print the launch function-----
target.close() #save file
print "Done :)"
| {
"repo_name": "abh15/pox-flowgen",
"path": "pcfg.py",
"copies": "1",
"size": "8950",
"license": "mit",
"hash": 3857085243752984600,
"line_mean": 26.2036474164,
"line_max": 167,
"alpha_frac": 0.6339664804,
"autogenerated": false,
"ratio": 2.651851851851852,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3785818332251852,
"avg_score": null,
"num_lines": null
} |
"A script to demonstrate cross validation"
import numpy as np
import matplotlib.pyplot as plt
import regression
N=100                 # number of synthetic samples to generate
THETA_START=-1.9      # lower bound of the input range for the sin data
THETA_END=2.2         # upper bound of the input range for the sin data
NOISE_SIGMA=0.4       # std-dev of the additive noise on the targets
def fit_model(train_data, eval_data, model):
    """Train ``model`` on RBF-kernel features and return its eval MSE.

    ``train_data`` and ``eval_data`` are (inputs, targets) pairs; the
    kernel is always computed against the training inputs.
    """
    train_X, train_y = train_data
    eval_X, eval_y = eval_data
    # Kernel features are evaluated against the training inputs both for
    # fitting and for prediction.
    model.fit(regression.rbf_kernel(train_X, train_X), train_y)
    predictions = model.predict(regression.rbf_kernel(eval_X, train_X))
    return regression.mse(eval_y, predictions)
if __name__ == "__main__":
    # Fixed seed so the noisy synthetic data (and hence the plots) are
    # reproducible run to run.
    np.random.seed(1)
    x, y = regression.generate_sin_data(
        N, theta_start=THETA_START,
        theta_end=THETA_END, noise_sigma=NOISE_SIGMA)
    train_data, val_data, test_data = (
        regression.partition_data(x[:,np.newaxis], y, val_ratio=0.5))
    # Sweep 7 regularization strengths, log-spaced from 10 down to 1e-5.
    alpha_parameters = np.logspace(1, -5, 7)
    val_mse = []
    models = []
    for alpha in alpha_parameters:
        # Create model
        model = regression.LinearRegression(alpha=alpha)
        models.append(model)
        # Fit model and record accuracy
        mse = fit_model(train_data, val_data, model)
        val_mse.append(mse)
    ###
    # Now pick our best model and evaluate on test data
    ###
    argmin = np.argmin(val_mse)
    min_alpha = alpha_parameters[argmin]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xscale('log')
    # Reverse the x axis so regularization decreases left to right.
    ax.invert_xaxis()
    plt.plot(alpha_parameters, val_mse, 'b-')
    plt.axvline(min_alpha, color="r")
    plt.xlabel('alpha')
    plt.ylabel('MSE')
    plt.savefig('/tmp/cross-validation.png', bbox_inches="tight")
    # Refit a fresh model at the chosen alpha and score it on held-out data.
    model = regression.LinearRegression(alpha=min_alpha)
    # Now evaluate with the test data
    test_mse = fit_model(train_data, test_data, model)
    print("Using alpha: {}. Test MSE: {}".format(min_alpha, test_mse))
    # And plot results
    X_tr, y_tr = train_data
    x_plot = np.linspace(x.min(), x.max(), 200)
    K_plot = regression.rbf_kernel(x_plot[:,np.newaxis], X_tr)
    y_plot = model.predict(K_plot)
    regression.plot_figure(
        X_tr,
        y_tr,
        x_plot,
        y_plot,
        filename='/tmp/cross_validation_results.png')
    plt.show()
| {
"repo_name": "mfergie/regression-tutorial",
"path": "cross_validation.py",
"copies": "1",
"size": "2326",
"license": "mit",
"hash": -6792783401073928000,
"line_mean": 24.8444444444,
"line_max": 70,
"alpha_frac": 0.6182287188,
"autogenerated": false,
"ratio": 3.1732605729877217,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4291489291787722,
"avg_score": null,
"num_lines": null
} |
"""A script to download slack archives."""
from os.path import abspath, dirname, join
from splinter import Browser
from splinter.exceptions import ElementDoesNotExist
import yaml
HERE = dirname(abspath(__file__))
CONFIG_PATH = join(HERE, '..', 'parktain', 'config.yaml')
# safe_load only constructs plain YAML types, which is all a config file
# needs; yaml.load without an explicit Loader can execute arbitrary
# constructors and is deprecated in PyYAML >= 5.1.
with open(CONFIG_PATH, 'r') as ymlfile:
    slack = yaml.safe_load(ymlfile).get('slack')
def wait_for_download_completion(browser):
    """Crude wait for Chrome to finish downloading the export archive."""
    browser.visit("chrome://downloads/")
    # FIXME: Figure out what element needs to disappear/appear instead of
    # sleeping a fixed 30 seconds.
    import time
    time.sleep(30)
# Drive a real Chrome session through the Slack export flow: sign in,
# request an export if possible, then download the archive when ready.
with Browser('chrome') as browser:
    # Visit URL
    url = 'https://my.slack.com/services/export'
    browser.visit(url)
    browser.fill('domain', slack['domain'])
    browser.click_link_by_id('submit_team_domain')
    browser.fill('email', slack['email'])
    browser.fill('password', slack['password'])
    browser.click_link_by_id('signin_btn')
    try:
        button = browser.find_by_text('Start Export')[0]
        button.click()
    except ElementDoesNotExist:
        # No "Start Export" button — presumably an export was already
        # requested; fall through to the download step. TODO confirm.
        pass
    try:
        link = browser.find_link_by_partial_text('Ready for download')[0]
        link.click()
        wait_for_download_completion(browser)
    except ElementDoesNotExist:
        print('Could not download export file')
| {
"repo_name": "punchagan/parktain",
"path": "scripts/download_archive.py",
"copies": "1",
"size": "1259",
"license": "bsd-3-clause",
"hash": -1092621629142414500,
"line_mean": 28.9761904762,
"line_max": 73,
"alpha_frac": 0.6735504369,
"autogenerated": false,
"ratio": 3.780780780780781,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4954331217680781,
"avg_score": null,
"num_lines": null
} |
# A script to ensure that every HWP flat-field is normalized by its full fram MFM
import os
import glob
from astropy.stats import sigma_clipped_stats
import astroimage as ai
# Add the header handler to the BaseImage class
# from Mimir_header_handler import Mimir_header_handler
# ai.reduced.ReducedScience.set_header_handler(Mimir_header_handler)
ai.set_instrument('mimir')
# This is the location of all PPOL reduction directory
PPOL_dir = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\PPOL_reduced\\201611'
# Build the path to the S3_Asotrometry files
# NOTE(review): S3_dir is built but never used below in this script.
S3_dir = os.path.join(PPOL_dir, 'S3_Astrometry')
# This is the location where all pyPol data will be saved
pyPol_data = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\pyPol_Reduced\\201611\\'
# Build the path to the supersky directory
bkgImagesDir = os.path.join(pyPol_data, 'bkgImages')
# Find all the HWP background images
bkgImgFileList = glob.glob(os.path.join(bkgImagesDir, '*.fits'))
# Loop through all the files and renormalize each in place so every HWP
# flat is divided by its own sigma-clipped median.
numberOfFiles = len(bkgImgFileList)
for iFile, bkgImgFile in enumerate(bkgImgFileList):
    # Read in the file
    tmpImg = ai.reduced.ReducedScience.read(bkgImgFile)
    # Force normalization by the median
    _, median, _ = sigma_clipped_stats(tmpImg.data)
    tmpImg = tmpImg / (median*tmpImg.unit)
    # Resave file
    tmpImg.write(clobber=True)
    # Progress starts at 0.0% on the first file (iFile is zero-based).
    print('{0:3.1%} complete'.format(iFile/numberOfFiles), end='\r')
print('100% complete', end='\n\n')
print('Done!')
| {
"repo_name": "jmontgom10/Mimir_pyPol",
"path": "oldCode/03a2_normalize_HWP_flats.py",
"copies": "1",
"size": "1463",
"license": "mit",
"hash": 8886705029684487000,
"line_mean": 32.25,
"line_max": 81,
"alpha_frac": 0.7388926863,
"autogenerated": false,
"ratio": 2.997950819672131,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4236843505972131,
"avg_score": null,
"num_lines": null
} |
# A script to execute all the SQL DDL scripts which are needed in addition to what JPA does.
import pyodbc, os, sys, time
# Pick connection settings: built-in dev defaults when run with no
# arguments, otherwise exactly five positional arguments.
# NOTE(review): the else-branch unpack requires exactly 5 extra argv
# entries; calling with 1-4 arguments raises a ValueError. TODO confirm
# whether a usage message is wanted instead.
if (len(sys.argv) <= 1):
    databaseServer = 'SASERVER1\SQL2008R2'
    databaseName = 'imDev'
    userName = 'REMOVED'
    password = 'REMOVED'
    scriptPath = '.'
else:
    [scriptPath,databaseServer,databaseName,userName,password] = sys.argv[1:]
# The SQL directories below are relative to scriptPath, hence the chdir.
os.chdir(scriptPath)
dbConnectionString = "DRIVER={SQL Server};"
dbConnectionString += "SERVER=" + databaseServer + ";"
dbConnectionString += "DATABASE=" + databaseName + ";"
dbConnectionString += "UID=" + userName + ";"
dbConnectionString += "PWD=" + password
#print(dbConnectionString)
cnxn = pyodbc.connect(dbConnectionString)
# Logging switches consumed by executeSQL/executePath below.
printSQL = False            # include each script's SQL text in the log line
printNothing = True         # master mute switch for all output
printAllExceptions = False  # also report "already exists" style errors
tablePath = "../SQL/Tables/"
viewPath = "../SQL/Views/"
procPath = "../SQL/StoredProcs/"
functionPath = "../SQL/Functions/"
def executeSQL(fileName, sql):
    """Execute one DDL script against the shared pyodbc connection.

    Errors are swallowed by design — most scripts fail with "already
    exists" on re-runs; reporting is controlled by the module-level
    printNothing / printAllExceptions flags.  (fileName is currently
    unused but kept for the call sites.)
    """
    cursor = cnxn.cursor()
    try:
        cursor.execute(sql)
        cnxn.commit()
    except Exception:
        # Was a bare `except:` that also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the deliberate best-effort
        # behaviour for database errors only.
        err = sys.exc_info()
        if ((not printNothing) and printAllExceptions):
            print(err)
        elif ((not printNothing) and (-1 == str(err[1]).find("already"))):
            print(err)
    finally:
        # The original leaked the cursor whenever execute() raised.
        cursor.close()
def executePath(path):
    """Run every *.sql file found directly under ``path``."""
    for entry in os.listdir(path):
        if not entry.endswith(".sql"):
            continue
        with open(path + entry, "r") as script:
            sql = script.read()
        message = "Executing " + entry
        if printSQL:
            message += ":\n" + sql
        if not printNothing:
            print(message)
        executeSQL(entry, sql)
# Run each script directory in a fixed order, then close the connection.
executePath(tablePath)
executePath(viewPath)
executePath(procPath)
executePath(functionPath)
cnxn.close()
| {
"repo_name": "JoelBondurant/RandomCodeSamples",
"path": "python/deploySQL.py",
"copies": "1",
"size": "1680",
"license": "apache-2.0",
"hash": -4246797213937200600,
"line_mean": 23.8461538462,
"line_max": 92,
"alpha_frac": 0.6702380952,
"autogenerated": false,
"ratio": 3.0712979890310788,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.90939851788849,
"avg_score": 0.029510181069235888,
"num_lines": 65
} |
# A script to extract and plot significant LVs from PLS
# event-related and block result.mat files in all current
# MATLAB encodings. Not yet compatible with structural PLS
# results !
# Note: If this is your first time running R from within
# python, you'll need to execute the following commands:
# !conda install -c r rpy2
# from rpy2.robjects.packages import importr
# utils = importr('utils')
# utils.install_packages('ggplot2', repos='http://cran.rstudio.com/')
# utils.install_packages('wesanderson', repos='http://cran.rstudio.com/')
import os
import glob
import numpy as np
import scipy.io as sio
import h5py
import pandas as pd
import rpy2.robjects as robj
import rpy2.robjects.pandas2ri
from rpy2.robjects.packages import importr
###############################
### Setting default parameters:
alpha = 0.05
GroupLevels = ['Young','Old']
### If you do not have a preferred condition display
### order, set CondLevels = None
CondLevels = ['Past','Future','Other','Null','Control']
###############################
def set_group_lvls(nRepeat, GroupLevels, nGroup):
    """Build the per-row group labels for a PLS result.

    Returns a one-column DataFrame ('Group') in which each of the first
    ``nGroup`` names from ``GroupLevels`` is repeated ``nRepeat`` times,
    in order.
    """
    labels = [GroupLevels[g] for g in range(nGroup) for _ in range(nRepeat)]
    return pd.DataFrame(labels, columns=['Group'])
def event_related(data_array):
    """
    A function to extract significant latent variable attributes
    (Estimates, associated CIs, and significance levels) from
    event-related PLS results.
    """
    try:
        Estimate = np.array(data_array.get('boot_result')['orig_usc'])
        UL = np.array(data_array.get('boot_result')['ulusc'])
        LL = np.array(data_array.get('boot_result')['llusc'])
        Significance = np.array(data_array.get('perm_result')['s_prob'])
    except KeyError:
        # Some result files use the *_corr / sprob field names instead of
        # *_usc / s_prob; fall back when the first set is absent.
        # (Presumably an older PLS toolbox naming — TODO confirm.)
        Estimate = np.array(data_array.get('boot_result')['orig_corr'])
        UL = np.array(data_array.get('boot_result')['ulcorr'])
        LL = np.array(data_array.get('boot_result')['llcorr'])
        Significance = np.array(data_array.get('perm_result')['sprob'])
    return Estimate, UL, LL, Significance
def extract_hdf5(data_array,ftype):
    """
    A function to extract significant latent variables with their
    associated confidence intervals for PLS results with MATLAB
    encoding -v7.3 (HDF5 type encoding).

    Returns a DataFrame with Estimate/UL/LL columns per significant LV
    plus Condition and Group label columns.

    Fun fact: Matlab is really abusing the HDF5 format here. Each
    condition name is a collection of integers, referring to ASCII
    characters. The for loop for Condition.append reconstructs the
    condition name from these integers.
    Thanks to Andrew Collette for insight on this on the h5py
    Google group!
    """
    if ftype == 'block':
        Estimate = np.array(data_array.get('result')['boot_result']['orig_usc'])
        UL = np.array(data_array.get('result')['boot_result']['ulusc'])
        LL = np.array(data_array.get('result')['boot_result']['llusc'])
        Significance = np.array(data_array.get('result')['perm_result']['sprob'])
        nGroup = np.array(data_array.get('result')['num_subj_lst']).shape[1]
        nRepeat = Estimate.shape[1]/nGroup
    elif ftype == 'event':
        Estimate, UL, LL, Significance = event_related(data_array)
        nGroup = np.array(data_array.get('subj_group')).shape[0]
        nRepeat = Estimate.shape[1]/nGroup
    # Keep only the latent variables whose permutation p-value beats the
    # module-level alpha threshold.
    mask=Significance[0]<alpha
    sigLV = np.where(mask)
    sigEstimate=Estimate[sigLV].T
    sigUL=UL[sigLV].T
    sigLL=LL[sigLV].T
    Group = set_group_lvls(nRepeat,GroupLevels,nGroup)
    cond_array = data_array['cond_name']
    Condition = []
    for i in range(0, cond_array.shape[0]):
        # Each entry is an HDF5 reference to an array of ASCII codes.
        # (The generator expression's `i` shadows the loop's `i`, but the
        # generator has its own scope so the outer index is unaffected.)
        ascii_int = data_array[cond_array[i][0]]
        Condition.append(''.join(chr(i) for i in ascii_int[:]))
    Condition = pd.DataFrame(Condition, columns = ['Condition'])
    Condition = pd.concat([Condition] * nGroup, ignore_index=True)
    # Column names are 1-based LV indices (LV1, LV2, ...).
    colnames = (['Estimate_LV'+str(i) for i in sigLV[0]+1] +
                ['UL_LV'+str(i) for i in sigLV[0]+1] +
                ['LL_LV'+str(i) for i in sigLV[0]+1])
    df = pd.DataFrame(np.hstack((sigEstimate,sigUL,sigLL)))
    df.columns = colnames
    df = pd.concat([df,Condition,Group],axis = 1)
    return df
def extract_unicode(data_array,ftype):
    """
    A function to extract significant latent variables with their
    associated confidence intervals for PLS results with MATLAB
    encoding -v7 or earlier (loaded via scipy.io.loadmat).

    Returns a DataFrame with Estimate/UL/LL columns per significant LV
    plus Condition and Group label columns.
    """
    if ftype == 'block':
        # scipy.io wraps MATLAB structs in 1x1 object arrays, hence the
        # repeated [0,0] indexing.
        Estimate = data_array.get('result')['boot_result'][0,0]['orig_usc']
        UL = data_array.get('result')['boot_result'][0,0]['ulusc']
        LL = data_array.get('result')['boot_result'][0,0]['llusc']
        Significance = data_array.get('result')['perm_result'][0,0]['sprob']
        nGroup = data_array.get('result')['num_subj_lst'][0,0].shape[1]
        nRepeat = Estimate[0,0].shape[1]/nGroup
    elif ftype == 'event':
        Estimate, UL, LL, Significance = event_related(data_array)
        nGroup = data_array.get('subj_group').shape[1]
        nRepeat = Estimate[0,0].shape[1]/nGroup
    # Keep only the latent variables whose permutation p-value beats the
    # module-level alpha threshold.
    mask=Significance[0,0]<alpha
    sigLV = np.where(mask)[0]
    sigEstimate=pd.DataFrame(Estimate[0,0])[sigLV]
    sigUL=pd.DataFrame(UL[0,0])[sigLV]
    sigLL=pd.DataFrame(LL[0,0])[sigLV]
    Group = set_group_lvls(nRepeat,GroupLevels,nGroup)
    cond_array = data_array['cond_name']
    Condition = []
    for i in range(0, cond_array.shape[1]):
        Condition.extend(cond_array[0][i])
    Condition = pd.DataFrame(Condition, columns = ['Condition'])
    Condition = pd.concat([Condition] * nGroup, ignore_index=True)
    # Column names are 1-based LV indices (LV1, LV2, ...).
    colnames = (['Estimate_LV'+str(i) for i in sigLV+1] +
                ['UL_LV'+str(i) for i in sigLV+1] +
                ['LL_LV'+str(i) for i in sigLV+1])
    df = pd.concat([sigEstimate,sigUL,sigLL],axis=1)
    df.columns = colnames
    df = pd.concat([df,Condition,Group],axis = 1)
    return df
def plot_w_ggplot2(f,df,CondLevels,GroupLevels):
    """
    A function to create bar graphs for each of your significant
    latent variables in a given file. Can also be supplied with factor
    levels for preferred condition display. Make sure you have
    necessary packages installed in rpy2! See note at beginning of
    script for more information.

    One PNG is saved per significant LV, named after the result file
    plus the LV's Estimate column name.
    """
    # The R function below infers the number of significant LVs from the
    # frame layout (Estimate/UL/LL triplets plus Condition and Group) and
    # branches on whether there is one group (plain bars) or several
    # (dodged bars coloured by group).
    plot_func = robj.r("""
    library(ggplot2)
    library(wesanderson)
    library(tools)
    function(fname,pandasDF,condLvls,grpLvls){
    pandasDF$Condition <- factor(pandasDF$Condition,levels = condLvls)
    pandasDF$Group <- factor(pandasDF$Group,levels = grpLvls)
    nsigLVs = (ncol(pandasDF)-2)/3
    if (nlevels(pandasDF$Group)==1){
    for(i in 1:nsigLVs){
    ggsave(filename=paste(file_path_sans_ext(fname),colnames(pandasDF[i]),".png",sep=""),
    plot=ggplot(pandasDF, aes(x=Condition, y=pandasDF[i])) +
    geom_bar(width=.75,position=position_dodge(), stat="identity",
    size=.2, fill="#899DA4") +
    geom_errorbar(aes(ymin=pandasDF[i+(2*nsigLVs)],
    ymax=pandasDF[i+nsigLVs]),
    width=.1,
    position=position_dodge(.75),
    colour="black") +
    theme_minimal(base_size = 28, base_family = "Arial") +
    theme(axis.text.y = element_blank()) +
    theme(axis.title.y = element_blank()) +
    theme(axis.title.x = element_text(margin = margin(t= 22))))
    }
    } else if (nlevels(pandasDF$Group)>1){
    for(i in 1:nsigLVs){
    ggsave(filename=paste(file_path_sans_ext(fname),colnames(pandasDF[i]),".png",sep=""),
    plot=ggplot(pandasDF, aes(x=Condition, y=pandasDF[i], fill=Group)) +
    geom_bar(width=.75,position=position_dodge(), stat="identity",
    size=.2) +
    geom_errorbar(aes(ymin=pandasDF[i+(2*nsigLVs)],
    ymax=pandasDF[i+nsigLVs]),
    width=.1,
    position=position_dodge(.75),
    colour="black") +
    theme_minimal(base_size = 28, base_family = "Arial") +
    theme(axis.text.y = element_blank()) +
    theme(axis.title.y = element_blank()) +
    theme(axis.title.x = element_text(margin = margin(t= 22))) +
    scale_fill_manual(values=wes_palette("Royal1")))
    }
    }
    }
    """)
    # Convert the pandas frame and factor-level lists into R objects
    # before calling the R plotting closure.
    robj.pandas2ri.activate()
    df_R = robj.conversion.py2ri(df)
    CondLevels_R = robj.conversion.py2ri(CondLevels)
    GroupLevels_R = robj.conversion.py2ri(GroupLevels)
    plot_func(f,df_R,CondLevels_R,GroupLevels_R)
if __name__ == '__main__':
    # Process every PLS result file in the working directory.
    files = glob.glob('*result.mat')
    for f in files:
        if f.find('_BfMRIresult.mat') >= 0:
            ftype = 'block'
        elif f.find('_fMRIresult.mat') >= 0:
            ftype = 'event'
        else:
            # Unknown naming scheme: skip this file.  The original only
            # printed and fell through, leaving ftype undefined (NameError)
            # or stale from the previous file.
            print('Check file type, or give up all hope.')
            continue
        try:
            # MATLAB -v7 and earlier files load with scipy.io ...
            data_array = sio.loadmat(f)
            df = extract_unicode(data_array, ftype)
        except NotImplementedError:
            # ... while -v7.3 files are HDF5 and need h5py.
            data_array = h5py.File(f, 'r')
            df = extract_hdf5(data_array, ftype)
        plot_w_ggplot2(f, df, CondLevels, GroupLevels)
| {
"repo_name": "emdupre/PFOCv2",
"path": "import_and_display_PLS_results.py",
"copies": "1",
"size": "9646",
"license": "mit",
"hash": 3766693171098194400,
"line_mean": 39.8728813559,
"line_max": 105,
"alpha_frac": 0.5895708066,
"autogenerated": false,
"ratio": 3.491132826637713,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45807036332377127,
"avg_score": null,
"num_lines": null
} |
"""A script to fit tramlines etc for Ghost data.
"""
from __future__ import division, print_function
import pymfe
import astropy.io.fits as pyfits
import numpy as np
import matplotlib.pyplot as plt
import pdb
import shutil
import matplotlib.cm as cm
#plt.ion()
#Define the files in use (NB xmod.txt and wavemod.txt should be correct)
#arc_file = "/home/jbento/code/pymfe/data/ghost/blue/std/arcstd_blue.fits"
flat_file = "/home/jbento/code/pymfe/data/ghost/red/high/flathigh_red.fits"
#instantiate the ghostsim arm
ghost_format = pymfe.ghost.Arm('red',mode='high')
#Create an initial model of the spectrograph.
xx, wave, blaze= ghost_format.spectral_format()
#Get the data and normalize by median
flat_data = pyfits.getdata(flat_file)
#arc_data = pyfits.getdata(arc_file)
# NOTE(review): nx and ny are set but not used below; x duplicates nx and
# is later rebound to an index array for plotting.
nx = flat_data.shape[0]
ny = flat_data.shape[1]
x = flat_data.shape[0]
profilex = np.arange(x) - x // 2
# Now create a model of the slit profile: a sum of Gaussians, one per
# fiber, spaced by the arm's fiber separation.
mod_slit = np.zeros(x)
if ghost_format.mode == 'high':
    nfibers = 26
else:
    nfibers = ghost_format.nl
for i in range(-nfibers // 2, nfibers // 2):
    mod_slit += np.exp(-(profilex - i * ghost_format.fiber_separation)**2 /
                       2.0 / ghost_format.profile_sigma**2)
# Visual sanity check: overlay the (scaled) model slit profile on one
# column of the flat.  The 400 scale and the x-1 shift are eyeballed for
# display only — TODO confirm.
plt.plot(flat_data[:,1000])
x=np.arange(flat_data[:,0].shape[0])
plt.plot(x-1,mod_slit*400)
plt.show()
#Have a look at the default model and make small adjustments if needed.
flat_conv=ghost_format.slit_flat_convolve(flat_data)
ghost_format.adjust_model(flat_conv,convolve=False,percentage_variation=10)
#Re-fit
ghost_format.fit_x_to_image(flat_conv,decrease_dim=8,inspect=True)
#shutil.copyfile('xmod.txt', 'data/subaru/xmod.txt')
'''
#Now find the other lines, after first re-loading into the extractor.
ghost_extract = pymfe.Extractor(ghost_format, transpose_data=True)
ghost_extract.find_lines(arc_data.T, arcfile='data/subaru/neon.txt',flat_data=flat_data.T)
#cp arclines.txt data/subaru/
shutil.copyfile('data/subaru/arclines.txt','data/subaru/arclines.backup')
shutil.copyfile('arclines.txt', 'data/subaru/arclines.txt')
#Now finally do the wavelength fit!
ghost_format.read_lines_and_fit()
shutil.copyfile('wavemod.txt', 'data/subaru/wavemod.txt')
'''
| {
"repo_name": "mikeireland/pymfe",
"path": "ghost_fit.py",
"copies": "1",
"size": "2187",
"license": "mit",
"hash": -230040657147824100,
"line_mean": 28.16,
"line_max": 90,
"alpha_frac": 0.7279378144,
"autogenerated": false,
"ratio": 2.690036900369004,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39179747147690036,
"avg_score": null,
"num_lines": null
} |
"""A script to fit tramlines etc for RHEA@Subaru data.
Long wavelengths are down and right. 15 lines visible.
lines = np.loadtxt('argon.txt')
order = 1e7/31.6*2*np.sin(np.radians(64.0))/argon
plt.plot(1375 - (order - np.round(order))/order*1.8e5)
plt.plot(1375 - (order - np.round(order)+1)/order*1.8e5)
plt.plot(1375 - (order - np.round(order)-1)/order*1.8e5)
Super-bright Neon line may be 7032.
15000 counts in 20s
"""
from __future__ import division, print_function
import pymfe
try:
import pyfits
except:
import astropy.io.fits as pyfits
import numpy as np
import matplotlib.pyplot as plt
import glob
import opticstools as ot
import pdb
import scipy.optimize as op
import scipy.interpolate as interp
import time
from astropy.time import Time
from astropy.coordinates import SkyCoord
from astropy import units as u
import PyAstronomy.pyasl as pyasl
from astropy import constants as const
import matplotlib.cm as cm
import pickle
import astropy.modeling as amod
plt.ion()
# NOTE(review): each savefile/dir pair below overwrites the previous one,
# so only the LAST pair (1603_initial / dither_initial) is active.  Edit
# or reorder these lines to select a different dataset.
savefile="Focus00.pkl"
dir = "/Users/mireland/data/rhea_subaru/160221/Focus00/"
savefile="Focus30.pkl"
dir = "/Users/mireland/data/rhea_subaru/160221/Focus30/"
savefile="Focus60.pkl"
dir = "/Users/mireland/data/rhea_subaru/160221/Focus60/"
savefile="1603.pkl"
dir = "/Users/mireland/data/rhea_subaru/160317/dither_final/"
savefile="1603_initial.pkl"
dir = "/Users/mireland/data/rhea_subaru/160317/dither_initial/"
# (`dir` shadows the builtin of the same name.)
star_files = glob.glob(dir + "*.fits")
nstars = len(star_files)
lenslet_ims = np.empty( (nstars,3,3) )
xpos = np.empty( (nstars) )
ypos = np.empty( (nstars) )
rhea2_format = pymfe.rhea.Format(spect='subaru',mode='slit')
rhea2_extract = pymfe.Extractor(rhea2_format, transpose_data=True)
xx, wave, blaze = rhea2_format.spectral_format()
fluxes = []
for i in range(nstars):
    star_data = pyfits.getdata(star_files[i])
    # Subtract a background level estimated from the first 500 rows.
    star_data -= np.median(star_data[0:500,:])
    # Stage positions recorded in the FITS header.
    hh = pyfits.getheader(star_files[i])
    xpos[i] = hh['ZABERX']
    ypos[i] = hh['ZABERY']
    flux,var = rhea2_extract.one_d_extract(data=star_data.T, rnoise=20.0)
    fluxes.append(flux)
    # Median over orders 12:20 and over wavelength, dropping the first
    # element, reshaped as the 3x3 lenslet image.
    lenslet_ims[i,:,:] = np.median(np.median(flux[12:20,:,:],axis=0),axis=0)[1:].reshape(3,3)
    # Middle row is reversed — presumably serpentine lenslet/fiber
    # ordering; TODO confirm against the instrument layout.
    lenslet_ims[i,1,:] = lenslet_ims[i,1,::-1]
    plt.imshow(lenslet_ims[i,:,:],interpolation='nearest', cmap=cm.gray)
# NOTE(review): the pickle file handle is never closed explicitly.
pickle.dump((lenslet_ims,xpos,ypos), open(savefile, 'wb'))
plt.clf()
# Dither map: total lenslet flux at each stage position.
plt.scatter(xpos,ypos,s=100,c=np.sum(np.sum(lenslet_ims,2),1),cmap=cm.gist_heat)
| {
"repo_name": "mikeireland/pymfe",
"path": "rhea_subaru_superK.py",
"copies": "1",
"size": "2445",
"license": "mit",
"hash": -3280144521369125400,
"line_mean": 27.7647058824,
"line_max": 93,
"alpha_frac": 0.7141104294,
"autogenerated": false,
"ratio": 2.533678756476684,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3747789185876684,
"avg_score": null,
"num_lines": null
} |
"""A script to generate a cloudbuild yaml."""
import os
import yaml
import argparse
# Add directories for new tests here.
# Apps whose dependency-install time is benchmarked.
DEP_TESTS = ['small_app', 'medium_app', 'large_app']
# App-size benchmarks: the value is the number of files to generate,
# passed to the benchmark image as --gen_files.
APP_SIZE_TESTS = {
    'scratch_small': '5',
    'scratch_medium': '500',
    'scratch_large': '50000'
}
_DATA_DIR = '/workspace/ftl/python/benchmark/data/'
_PYTHON_BASE = 'gcr.io/google-appengine/python:latest'

# Command-line interface: iteration count plus opt-in flags per test
# family (main() enables both when neither flag is given).
parser = argparse.ArgumentParser(
    description='Generate cloudbuild yaml for FTL benchmarking.')
parser.add_argument(
    '--iterations',
    action='store',
    type=int,
    default=5,
    help='Number of times to build the image.')
parser.add_argument(
    '--dep-test',
    dest='dep_test',
    action='store_true',
    default=False,
    help='Flag to enable to dependency test for the benchmark.')
parser.add_argument(
    '--app-size-test',
    dest='app_size_test',
    action='store_true',
    default=False,
    help='Flag to enable the app size test for the benchmark.')
def main():
    """Parse flags and print a cloudbuild YAML that runs the FTL benchmarks."""
    args = parser.parse_args()
    # Default: when neither test is requested explicitly, run both.
    # The original read `args.app_size` (the argparse dest is
    # `app_size_test`, so that attribute does not exist and raised
    # AttributeError whenever --dep-test was given) and combined the flags
    # with `and`, which re-enabled both tests even when only one was asked for.
    if not (args.dep_test or args.app_size_test):
        args.dep_test = True
        args.app_size_test = True
    cloudbuild_yaml = {
        'steps': [
            # We need to chmod in some cases for permissions.
            {
                'name': 'ubuntu',
                'args': ['chmod', 'a+rx', '-R', '/workspace']
            },
            # Build the FTL image from source and load it into the daemon.
            {
                'name':
                'gcr.io/cloud-builders/bazel@sha256:7360c36bded15db68a35cfb1740a994f0a09ad5ce378a97f96d698bc223e442a',
                'args': [
                    'run', '//ftl/python/benchmark:python_benchmark_image',
                    '--', '--norun'
                ],
            },
            # Build the python builder par file
            {
                'name': 'gcr.io/cloud-builders/bazel@sha256:7360c36bded15db68a35cfb1740a994f0a09ad5ce378a97f96d698bc223e442a',
                'args': ['build', 'ftl:python_builder.par']
            },
        ]
    }
    # Generate a set of steps for each dependency test and add them.
    if args.dep_test:
        for app_dir in DEP_TESTS:
            cloudbuild_yaml['steps'] += dependency_test_step(
                app_dir, args.iterations)
    # Generate a set of steps for each app-size test and add them.
    if args.app_size_test:
        for app_dir in APP_SIZE_TESTS:
            cloudbuild_yaml['steps'] += app_size_test_step(
                app_dir, args.iterations, APP_SIZE_TESTS[app_dir])
    # Parenthesized call form works identically on Python 2 and 3.
    print(yaml.dump(cloudbuild_yaml))
def dependency_test_step(app_dir, iterations):
    """Return the cloudbuild step list that benchmarks dependency app ``app_dir``."""
    image = 'gcr.io/ftl-node-test/benchmark_%s:latest' % app_dir
    benchmark_args = [
        '--base', _PYTHON_BASE,
        '--name', image,
        '--directory', os.path.join(_DATA_DIR + app_dir),
        '--description', app_dir,
        '--iterations', str(iterations),
    ]
    # Single step: run the benchmark image built earlier in the pipeline.
    return [{
        'name': 'bazel/ftl/python/benchmark:python_benchmark_image',
        'args': benchmark_args,
    }]
def app_size_test_step(app_dir, iterations, gen_files):
    """Return the cloudbuild step list that benchmarks one app-size-test app.

    app_dir: name of the test app directory under _DATA_DIR.
    iterations: how many times the benchmark image rebuilds the app.
    gen_files: value from APP_SIZE_TESTS[app_dir], forwarded to --gen_files
        (presumably a file count or size; confirm against the benchmark image).
    """
    name = 'gcr.io/ftl-node-test/benchmark_%s:latest' % app_dir
    return [
        # First build the image
        {
            'name':
            'bazel/ftl/python/benchmark:python_benchmark_image',
            'args': [
                '--base', _PYTHON_BASE, '--name', name, '--directory',
                # Fixed: os.path.join(_DATA_DIR + app_dir) concatenated the
                # two strings with no separator; join the components instead.
                os.path.join(_DATA_DIR, app_dir), '--description', app_dir,
                '--iterations',
                str(iterations), '--gen_files', gen_files
            ]
        }
    ]
# Script entry point.
if __name__ == "__main__":
    main()
| {
"repo_name": "GoogleCloudPlatform/runtimes-common",
"path": "ftl/benchmark/ftl_python_benchmark_yaml.py",
"copies": "3",
"size": "3660",
"license": "apache-2.0",
"hash": -4874140158534433000,
"line_mean": 29,
"line_max": 126,
"alpha_frac": 0.550273224,
"autogenerated": false,
"ratio": 3.616600790513834,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5666874014513834,
"avg_score": null,
"num_lines": null
} |
"""A script to generate a cloudbuild yaml."""
import os
import yaml
import util
# Add directories for new tests here.
# Each name is a subdirectory of _TEST_DIR containing one PHP test app.
TEST_DIRS = [
    'destination_test', 'metadata_test', 'lock_test',
    'empty_descriptor_test', 'no_descriptor_test',
    'no_deps_test', 'additional_directory',
    'gcp_build_test'
]
# Pinned structure-test image (NOTE(review): appears unused in this script —
# possibly consumed by util.run_test_steps; confirm before removing).
_ST_IMAGE = ('gcr.io/gcp-runtimes/structure-test:'
             '6195641f5a5a14c63c7945262066270842150ddb')
# Workspace path holding the PHP test apps inside the cloudbuild container.
_TEST_DIR = '/workspace/ftl/php/testdata'
# Base PHP runtime image every test build starts from.
_PHP_BASE = 'gcr.io/gae-runtimes/php72_app_builder:argo_current'
def main():
cloudbuild_yaml = util.INITIAL_CLOUDBUILD_YAML
cloudbuild_yaml['steps'].append(
# Build the FTL image from source and load it into the daemon.
{
'name': 'gcr.io/cloud-builders/bazel@sha256:7360c36bded15db68a35cfb1740a994f0a09ad5ce378a97f96d698bc223e442a',
'args': ['run', '//ftl:php_builder_image', '--', '--norun'],
'id': 'build-builder',
}, )
# Generate a set of steps for each test and add them.
test_map = {}
for test in TEST_DIRS:
test_map[test] = [
'--base', _PHP_BASE, '--name',
'gcr.io/ftl-node-test/%s-image:latest' % test, '--directory',
os.path.join(_TEST_DIR, test), '--no-cache'
]
test_map['destination_test'].extend(['--destination', '/alternative-app'])
test_map['metadata_test'].extend(['--entrypoint', '/bin/echo'])
test_map['metadata_test'].extend(['--exposed-ports', '8090,8091'])
test_map['additional_directory'].extend([
'--additional-directory',
'/workspace/ftl/php/testdata/additional_directory'
])
for test, args in test_map.iteritems():
cloudbuild_yaml['steps'] += util.run_test_steps(
'php_builder_image', 'gcr.io/ftl-node-test/%s-image:latest' % test,
os.path.join(_TEST_DIR, test), args)
print yaml.dump(cloudbuild_yaml)
# Script entry point.
if __name__ == "__main__":
    main()
| {
"repo_name": "sharifelgamal/runtimes-common",
"path": "ftl/integration_tests/ftl_php_integration_tests_yaml.py",
"copies": "3",
"size": "1938",
"license": "apache-2.0",
"hash": -7273752538870537000,
"line_mean": 32.4137931034,
"line_max": 122,
"alpha_frac": 0.6150670795,
"autogenerated": false,
"ratio": 3.171849427168576,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00014017381553125876,
"num_lines": 58
} |
"""A script to generate a cloudbuild yaml."""
import os
import yaml
import util
# Add directories for new tests here.
# Each name is a subdirectory of _TEST_DIR containing one Node.js test app.
TEST_DIRS = [
    'gcp_build_test', 'packages_test', 'packages_lock_test',
    'destination_test', 'metadata_test', 'npmrc_test', 'file_test',
    'empty_descriptor_test', 'no_descriptor_test',
    'no_deps_test', 'additional_directory', 'function_to_app_test'
]
# Workspace path holding the Node test apps inside the cloudbuild container.
_TEST_DIR = '/workspace/ftl/node/testdata'
# Base Node.js runtime image every test build starts from.
_NODE_BASE = 'gcr.io/gae-runtimes/nodejs8_app_builder:argo_current'
def main():
cloudbuild_yaml = util.INITIAL_CLOUDBUILD_YAML
cloudbuild_yaml['steps'].append(
# Build the FTL image from source and load it into the daemon.
{
'name': 'gcr.io/cloud-builders/bazel',
'args': ['run', '//ftl:node_builder_image', '--', '--norun'],
'id': 'build-builder',
'waitFor': [cloudbuild_yaml['steps'][0]['id']],
}, )
# Generate a set of steps for each test and add them.
test_map = {}
for test in TEST_DIRS:
test_map[test] = [
'--base', _NODE_BASE, '--name',
'gcr.io/ftl-node-test/%s-image' % test, '--directory',
os.path.join(_TEST_DIR, test), '--no-cache'
]
test_map['destination_test'].extend(['--destination', '/alternative-app'])
test_map['metadata_test'].extend(['--entrypoint', '/bin/echo'])
test_map['metadata_test'].extend(['--exposed-ports', '8090,8091'])
test_map['additional_directory'].extend([
'--additional-directory',
'/workspace/ftl/node/testdata/additional_directory'
])
for test, args in test_map.iteritems():
cloudbuild_yaml['steps'] += util.run_test_steps(
'node_builder_image', 'gcr.io/ftl-node-test/%s-image' % test,
os.path.join(_TEST_DIR, test), args)
print yaml.dump(cloudbuild_yaml)
# Script entry point.
if __name__ == "__main__":
    main()
| {
"repo_name": "priyawadhwa/runtimes-common",
"path": "ftl/integration_tests/ftl_node_integration_tests_yaml.py",
"copies": "1",
"size": "1889",
"license": "apache-2.0",
"hash": -5215474930423090000,
"line_mean": 32.1403508772,
"line_max": 78,
"alpha_frac": 0.5955532028,
"autogenerated": false,
"ratio": 3.3257042253521125,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9421257428152112,
"avg_score": 0,
"num_lines": 57
} |
"""A script to generate a cloudbuild yaml."""
import os
import yaml
import util
# Add directories for new tests here.
# Each name is a subdirectory of _TEST_DIR containing one Python test app.
TEST_DIRS = [
    'packages_test', 'metadata_test',
    'python3_test', 'pipfile_test',
    'venv_dir_test']
# Pinned structure-test image (NOTE(review): appears unused in this script —
# possibly consumed by util.run_test_steps; confirm before removing).
_ST_IMAGE = ('gcr.io/gcp-runtimes/structure-test:'
             '6195641f5a5a14c63c7945262066270842150ddb')
# Workspace path holding the Python test apps inside the cloudbuild container.
_TEST_DIR = '/workspace/ftl/python/testdata'
# Base Python runtime image every test build starts from.
_PYTHON_BASE = 'gcr.io/google-appengine/python:latest'
def main():
cloudbuild_yaml = util.INITIAL_CLOUDBUILD_YAML
cloudbuild_yaml['steps'].append(
# Build the FTL image from source and load it into the daemon.
{
'name': 'gcr.io/cloud-builders/bazel',
'args': ['run', '//ftl:python_builder_image', '--', '--norun'],
'id': 'build-builder',
}, )
# Generate a set of steps for each test and add them.
test_map = {}
for test in TEST_DIRS:
test_map[test] = [
'--base', _PYTHON_BASE, '--name',
'gcr.io/ftl-node-test/%s-image:latest' % test, '--directory',
os.path.join(_TEST_DIR, test), '--no-cache'
]
test_map['metadata_test'].extend(['--entrypoint', '/bin/echo'])
test_map['metadata_test'].extend(['--exposed-ports', '8090,8091'])
test_map['python3_test'].extend(['--python-cmd', 'python3.6'])
test_map['python3_test'].extend(['--pip-cmd', 'python3.6 -m pip'])
test_map['venv_dir_test'].extend(['--virtualenv-dir', '/alternate-env'])
for test, args in test_map.iteritems():
cloudbuild_yaml['steps'] += util.run_test_steps(
'python_builder_image',
'gcr.io/ftl-node-test/%s-image:latest' % test,
os.path.join(_TEST_DIR, test), args)
print yaml.dump(cloudbuild_yaml)
# Script entry point.
if __name__ == "__main__":
    main()
| {
"repo_name": "nkubala/runtimes-common",
"path": "ftl/integration_tests/ftl_python_integration_tests_yaml.py",
"copies": "1",
"size": "1793",
"license": "apache-2.0",
"hash": 1595223677785717200,
"line_mean": 31.6,
"line_max": 76,
"alpha_frac": 0.5906302287,
"autogenerated": false,
"ratio": 3.2075134168157424,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4298143645515743,
"avg_score": null,
"num_lines": null
} |
"""A script to generate a list of products we are interested in found in job descriptions.
It takes a list of urls from infile (stdin by default) and returns a |-separated output
for each url. The script requires a file with techs we are searching for, the file is
specified by --techs-file option. The urls that led to exceptions are written to a separate
file, configured by --errors-file.
"""
import argparse
import logging
import multiprocessing as mp
import pathlib
import sys
import time
import threading
from urllib.parse import urlparse
from jobtechs.common import iter_good_lines
from jobtechs.fetcher import ThrottledFetcher
from jobtechs.parser import NETLOC_TO_PARSER_MAP, TermsExtractor, PageParser
G_LOG = logging.getLogger(__name__)
class TechsExtractionRunner:
    """Class containing the functionality of running the techs extraction process.

    Wires together per-netloc ThrottledFetcher subprocess pools, two writer
    threads (stdout results and an errors file), and multiprocessing queues
    connecting them.
    """
    # pylint: disable=no-self-use

    def __init__(self, terms_path='techs.txt', errors_path='failed_urls.txt', save_pages_to=None):
        """Set up queues, fetchers and writer threads.

        terms_path: file listing the techs to search for, one per line.
        errors_path: tab-separated output file for failed urls.
        save_pages_to: optional directory where fetched html copies are saved.
        """
        self.save_pages_to = save_pages_to
        self.terms_path = terms_path
        self.errors_path = errors_path
        self._q_out = self._q_err = None
        self._init_queues()
        self._fetchers = {}
        self._init_fetchers()
        self._writers = []
        self._init_writers()

    def make_terms_extractor(self, terms_path):
        """A factory method for instantiating a terms extractor."""
        return TermsExtractor(terms_path)

    def make_queue(self):
        """A factory method for the queue."""
        return mp.Queue()

    def _init_queues(self):
        # One queue for successful results, one for (url, error) tuples.
        self._q_out = self.make_queue()
        self._q_err = self.make_queue()

    def _init_fetchers(self):
        terms_extractor = self.make_terms_extractor(self.terms_path)
        # add specific parsers for job aggregators
        self._fetchers = {
            netloc:
            ThrottledFetcher(
                parser=parser_cls(save_pages_to=self.save_pages_to),
                terms_extractor=terms_extractor,
                q_out=self._q_out, q_err=self._q_err,
                max_workers=5)
            for netloc, parser_cls in NETLOC_TO_PARSER_MAP.items()
        }
        # add general parser for other pages
        generic_parser = PageParser(
            save_pages_to=self.save_pages_to,
            agg_parsers=[fetcher.parser for fetcher in self._fetchers.values()]
        )
        self._fetchers['default'] = \
            ThrottledFetcher(
                parser=generic_parser,
                terms_extractor=terms_extractor,
                q_out=self._q_out, q_err=self._q_err,
                max_workers=5, max_rps=0)
        for fetcher in self._fetchers.values():
            fetcher.start()

    def _write_results(self, q_out):
        # Runs in a thread; a falsy sentinel (None) terminates the loop.
        while True:
            result = q_out.get()
            if not result:
                break
            print(result)

    def _write_errors(self, q_err, errors_path):
        # Runs in a thread; a falsy sentinel (None) terminates the loop.
        with open(errors_path, 'w') as errors_file:
            while True:
                result = q_err.get()
                if not result:
                    break
                print(*result, sep='\t', file=errors_file)

    def _init_writers(self):
        self._writers = [
            threading.Thread(target=self._write_results, args=(self._q_out,)),
            threading.Thread(target=self._write_errors, args=(self._q_err, self.errors_path)),
        ]
        for writer in self._writers:
            writer.start()

    def run(self, infile):
        """Process urls from the infile.

        The method can be run several times (for several files)."""
        fetchers = self._fetchers
        default_fetcher = fetchers['default']
        for url in iter_good_lines(infile):
            urlp = urlparse(url)
            # Route to a netloc-specific fetcher when one exists.
            fetcher = fetchers.get(urlp.netloc, default_fetcher)
            fetcher.q_in.put(url)
        for fetcher in fetchers.values():
            fetcher.q_in.join()
        G_LOG.info('finished processing urls')

    def close(self):
        """Release resources by sending messages to subprocesses and threads
        that there is no more urls to process.
        """
        # signal to fetchers
        for fetcher in self._fetchers.values():
            fetcher.q_in.put(None)
            fetcher.q_in.join()
        # signal to writers
        self._q_out.put(None)
        self._q_err.put(None)
        G_LOG.info('poison pills sent to subprocesses and threads.')

    @classmethod
    def main2(cls):
        """Run the functionality of the script."""
        parser = argparse.ArgumentParser(description=sys.modules[__name__].__doc__)
        parser.add_argument(
            '--techs-file', type=pathlib.Path, default='techs.txt',
            help=('A file where the searched techs are listed: each tech on a separate line. '
                  'Defaults to techs.txt.'))
        parser.add_argument(
            'infile', nargs='*', type=argparse.FileType('r'), default=[sys.stdin],
            help=('A file or a list of files with a list of urls. Each url is supposed '
                  'to contain a job description. Defaults to stdin.'))
        parser.add_argument(
            '--errors-file', type=pathlib.Path, default='failed_urls.txt',
            help=('A tab-separated file to which we are going to dump urls requesting or parsing '
                  'of which resulted in an error. The error message is dumped after the url. '
                  'Defaults to failed_urls.txt.'))
        parser.add_argument(
            '--log-file', type=argparse.FileType('a'), default='extract_techs.log',
            help='A file where we write logs to. Defaults to extract_techs.log.')
        parser.add_argument(
            '--save-pages-to',
            help=('Save copies of the html into the specified directory. '
                  'By default html-files are not saved.'))
        args = parser.parse_args()
        if not args.techs_file.exists():
            parser.error(
                ('The file with techs {} does not exist. '
                 'Use --techs-file option').format(args.techs_file.as_posix()))
        try:
            with args.errors_file.open('w'):
                pass
        except IOError:
            # Fixed: the original referenced args.errors_path, which argparse
            # never defines (the option's dest is errors_file), so this error
            # branch itself raised AttributeError instead of a clean message.
            parser.error('Can not open {} for writing.'.format(args.errors_file.as_posix()))
        if args.save_pages_to:
            save_pages_to = pathlib.Path(args.save_pages_to)
            save_pages_to.mkdir(parents=True, exist_ok=True)
        logging.basicConfig(level=logging.INFO, stream=args.log_file)
        start = time.time()
        runner = TechsExtractionRunner(
            terms_path=args.techs_file.as_posix(),
            errors_path=args.errors_file.as_posix(),
            save_pages_to=args.save_pages_to)
        for file_ in args.infile:
            runner.run(file_)
        runner.close()
        end = time.time()
        G_LOG.info('The execution of the script took {:0.3f}.'.format(end-start))
# Script entry point.
if __name__ == '__main__':
    TechsExtractionRunner.main2()
| {
"repo_name": "newtover/process_jobs",
"path": "jobtechs/scripts/extract_techs.py",
"copies": "1",
"size": "7040",
"license": "mit",
"hash": 8928828892238553000,
"line_mean": 35.2886597938,
"line_max": 98,
"alpha_frac": 0.5909090909,
"autogenerated": false,
"ratio": 3.984153933220147,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5075063024120147,
"avg_score": null,
"num_lines": null
} |
"""A script to generate a metanetx database.
The purpose of the metanetx database is to provide mapping
from InChI keys to a number of database identifiers. This database
will then populate the website if there is an inchi match.
Running this script requires downloading the following from
https://www.metanetx.org/mnxdoc/mnxref.html
1. chem_xref.tsv
2. chem_prop.tsv
The data version used is based on the description in the header:
#Based on the following resources:
#
#RESOURCE: MetaNetX/MNXref
#VERSION: 4.1
#DATE: 2020/09/17
#URL: https://www.metanetx.org
"""
from pathlib import Path
import pandas as pd
from collections import defaultdict
import pymongo
# Resolve the metanetx data directory relative to this script's location.
pwd = Path(__file__)
pwd = pwd.parent
METANETX_PATH = (pwd / "../local_data/metanetx").resolve()
def get_cross_references(row):
    """Accumulate one cross-reference record for a chem_xref.tsv row.

    Appends a {source, source_id, description} dict to the module-level
    ``cross_ref_dict`` under the MNX compound id ``row.ID``.  Intended to be
    applied row-wise to the chem_xref dataframe.
    """
    current_reference = {}
    source = row["#source"]
    if ":" in source:
        # Split only at the FIRST colon: the original split(":")[1] silently
        # dropped everything after a second colon in the identifier part.
        prefix, _, identifier = source.partition(":")
        current_reference["source"] = prefix
        current_reference["source_id"] = identifier
    else:
        # No database prefix: reuse the raw value for both fields.
        current_reference["source"] = source
        current_reference["source_id"] = source
    current_reference["description"] = (
        row["description"] if not pd.isna(row["description"])
        else None
    )
    cross_ref_dict[row.ID].append(current_reference)
def get_db_entry(row):
    """Record the Mongo document for one chem_prop.tsv row in ``dict_for_db``,
    attaching any cross-references gathered earlier for the same MNX id."""
    mnx_id = row["#ID"]
    dict_for_db[mnx_id] = {
        "mnxm_id": mnx_id,
        "inchi_key": row.InChIKey,
        "primary_reference": row.reference,
        "cross_references": cross_ref_dict[mnx_id]
    }
if __name__ == "__main__":
    # First step: Generate panda dfs of the xref and props
    # skiprows skips the fixed-size comment header of the MetaNetX 4.1 dumps.
    skiprows = 347
    chem_prop_df = pd.read_csv(
        METANETX_PATH / "chem_prop.tsv",
        delimiter="\t",
        skiprows=skiprows
    )
    # Keep only compounds that have both an InChIKey and a formula.
    chem_prop_df = chem_prop_df[~chem_prop_df["InChIKey"].isna()]
    chem_prop_df = chem_prop_df[~chem_prop_df["formula"].isna()]
    chem_xref_df = pd.read_csv(
        METANETX_PATH / "chem_xref.tsv",
        delimiter="\t",
        skiprows=skiprows
    )
    # Map functions on pandas dataframes to populate dictionaries
    # (the two apply calls fill cross_ref_dict and dict_for_db as side effects).
    cross_ref_dict = defaultdict(list)
    dict_for_db = dict()
    chem_xref_df.apply(get_cross_references, axis=1)
    chem_prop_df.apply(get_db_entry, axis=1)
    print("Inserting into Mongo.")
    # mongo_uri.csv holds the connection string on its first line.
    mongo_uri = open(pwd / "../mongo_uri.csv").readline().strip("\n")
    client = pymongo.MongoClient(mongo_uri)
client.compound_references.data.insert_many(dict_for_db.values(), ordered=False) | {
"repo_name": "JamesJeffryes/MINE-Database",
"path": "Scripts/generate_metanetx_database.py",
"copies": "1",
"size": "2515",
"license": "mit",
"hash": 749110798309199100,
"line_mean": 28.9523809524,
"line_max": 84,
"alpha_frac": 0.6429423459,
"autogenerated": false,
"ratio": 3.249354005167959,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43922963510679586,
"avg_score": null,
"num_lines": null
} |
"""A script to generate fake perfect reads from a reference. """
import argparse
import sys
import random
def read_reference(args):
    """Parse the single-sequence FASTA file named by ``args.reference``.

    Returns a (reference_id, reference_seq) tuple.  Raises ValueError if a
    second header line is encountered — only one sequence is supported.
    """
    seq_chunks = []
    with open(args.reference, 'r') as ref_fh:
        for line_number, line in enumerate(ref_fh):
            if line.startswith('>'):
                if line_number != 0:
                    raise ValueError("Silly you, you expect sillymap to accept more than one reference sequence?")
                reference_id = line[1:].strip()
            else:
                seq_chunks.append(line.strip())
    return reference_id, "".join(seq_chunks)
def main(args):
    """Sample ``args.nr_reads`` perfect reads of length ``args.read_length``
    from the reference and write them as FASTQ (constant 'A' quality line)."""
    reference_id, reference_seq = read_reference(args)
    quality_line = "A" * args.read_length
    record_template = "@read{0}\n{1}\n+\n" + quality_line + "\n"
    max_start = len(reference_seq) - args.read_length
    # Draw all start positions first (same random-call sequence as before).
    start_positions = [random.randrange(0, max_start, 1)
                       for _ in range(args.nr_reads)]
    with open(args.read_output_file, 'w') as out_fh:
        for read_number, start in enumerate(start_positions, start=1):
            read_seq = reference_seq[start:start + args.read_length]
            out_fh.write(record_template.format(read_number, read_seq))
if __name__ == "__main__":
    # Command-line interface: reference fasta in, fastq of sampled reads out.
    parser = argparse.ArgumentParser(description = __doc__)
    parser.add_argument("reference", help="The input reference fasta file")
    parser.add_argument("read_output_file")
    parser.add_argument("nr_reads", type=int, help="The number of reads to generate")
    parser.add_argument("--read_length", type=int, default=25)
    args = parser.parse_args()
    main(args)
| {
"repo_name": "alneberg/sillymap",
"path": "scripts/generate_test_reads.py",
"copies": "1",
"size": "1422",
"license": "mit",
"hash": -7746032089655724000,
"line_mean": 34.55,
"line_max": 131,
"alpha_frac": 0.6279887482,
"autogenerated": false,
"ratio": 3.618320610687023,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4746309358887023,
"avg_score": null,
"num_lines": null
} |
# A script to generate helper files for dynamic linking to the Python dll
#
# Each non-comment line of `decls` is either "rettype, name, (argtypes)"
# for an exported function, or "type, name" for exported data.  The script
# emits import-tab.h (#define thunks through an `imports` table) and
# import-tab.c (the name table itself).  Python 2 only (`print >>` syntax).
decls = '''
void, Py_Initialize, (void)
int, PyRun_SimpleString, (char *)
void, Py_Finalize, (void)
char *, Py_GetPath, (void)
void, Py_SetPythonHome, (char *)
void, Py_SetProgramName, (char *)
PyObject *, PyMarshal_ReadObjectFromString, (char *, Py_ssize_t)
PyObject *, PyObject_CallFunction, (PyObject *, char *, ...)
int, PyString_AsStringAndSize, (PyObject *, char **, Py_ssize_t *)
char *, PyString_AsString, (PyObject *)
int, PyArg_ParseTuple, (PyObject *, char *, ...)
PyObject *, PyErr_Format, (PyObject *, const char *, ...)
PyObject *, PyImport_ImportModule, (char *)
PyObject *, PyInt_FromLong, (long)
long, PyInt_AsLong, (PyObject *)
PyObject *, PyLong_FromVoidPtr, (void *)
PyObject *, Py_InitModule4, (char *, PyMethodDef *, char *, PyObject *, int)
PyObject *, PyTuple_New, (Py_ssize_t)
int, PyTuple_SetItem, (PyObject*, Py_ssize_t, PyObject *)
int, Py_IsInitialized, (void)
int, PyObject_SetAttrString, (PyObject *, char *, PyObject *)
PyObject *, PyCFunction_NewEx, (PyMethodDef *, PyObject *, PyObject *)
PyObject *, PyObject_GetAttrString, (PyObject *, char *)
PyObject *, Py_BuildValue, (char *, ...)
PyObject *, PyObject_Call, (PyObject *, PyObject *, PyObject *)
void, PySys_WriteStderr, (const char *, ...)
PyObject *, PyErr_Occurred, (void)
void, PyErr_Clear, (void)
int, PyObject_IsInstance, (PyObject *, PyObject *)
PyObject, PyInt_Type
PyObject, _Py_NoneStruct
PyObject *, PyExc_ImportError
PyObject *, PyExc_Exception
char *, _Py_PackageContext
PyGILState_STATE, PyGILState_Ensure, (void)
void, PyGILState_Release, (PyGILState_STATE)
void, PySys_SetObject, (char *, PyObject *)
PyObject *, PySys_GetObject, (char *)
PyObject *, PyString_FromString, (char *)
int, Py_FdIsInteractive, (FILE *, char *)
int, PyRun_InteractiveLoop, (FILE *, char *)
void, PySys_SetArgv, (int, char **)
PyObject *, PyImport_AddModule, (char *)
PyObject *, PyModule_GetDict, (PyObject *)
Py_ssize_t, PySequence_Length, (PyObject *)
PyObject *, PySequence_GetItem, (PyObject *, Py_ssize_t)
//int, PyCode_Check, (PyObject *)
PyObject *, PyEval_EvalCode, (PyCodeObject *, PyObject *, PyObject *)
void, PyErr_Print, (void)
PyObject *, PyBool_FromLong, (long)
int, Py_VerboseFlag
int, Py_NoSiteFlag
int, Py_OptimizeFlag
int, Py_IgnoreEnvironmentFlag
PyObject *, PyObject_Str, (PyObject *)
PyObject *, PyList_New, (Py_ssize_t)
int, PyList_SetItem, (PyObject *, Py_ssize_t, PyObject *)
int, PyList_Append, (PyObject *, PyObject *)
PyObject *, PyThreadState_GetDict, (void)
int, PyObject_IsTrue, (PyObject *)
void, PyErr_SetString, (PyObject *, const char *)
void, PyEval_InitThreads, (void)
'''.strip().splitlines()

import string

hfile = open("import-tab.h", "w")
cfile = open("import-tab.c", "w")
# `index` is the slot of the current symbol in the C `imports` table.
index = 0
for decl in decls:
    # Blank lines and //-comments in the decl list are skipped entirely.
    if not decl or decl.startswith("//"):
        continue
    # Split into at most 3 fields so argument lists keep their own commas.
    items = decl.split(',', 2)
    if len(items) == 3:
        # exported function with argument list
        restype, name, argtypes = map(string.strip, items)
        print >> hfile, '#define %(name)s ((%(restype)s(*)%(argtypes)s)imports[%(index)d].proc)' % locals()
    elif len(items) == 2:
        # exported data
        typ, name = map(string.strip, items)
        print >> hfile, '#define %(name)s (*(%(typ)s(*))imports[%(index)s].proc)' % locals()
    else:
        raise ValueError, "could not parse %r" % decl
    if name == "Py_InitModule4":
        # Py_InitModule4's exported name varies by build flavor (debug /
        # 64-bit), so emit a preprocessor switch instead of one entry.
        print >> cfile, '#ifdef _DEBUG'
        print >> cfile, '\t{ "Py_InitModule4TraceRefs", NULL },' % locals()
        print >> cfile, '#else'
        print >> cfile, '# if defined (_WIN64)'
        print >> cfile, '\t{ "Py_InitModule4_64", NULL },' % locals()
        print >> cfile, '# else'
        print >> cfile, '\t{ "Py_InitModule4", NULL },' % locals()
        print >> cfile, '# endif'
        print >> cfile, '#endif'
    else:
        print >> cfile, '\t{ "%(name)s", NULL },' % locals()
    index += 1
hfile.close()
cfile.close()
| {
"repo_name": "sovaa/backdoorme",
"path": "backdoors/shell/pupy/client/sources/mktab.py",
"copies": "34",
"size": "3973",
"license": "mit",
"hash": -6375581162205862000,
"line_mean": 35.787037037,
"line_max": 107,
"alpha_frac": 0.6516486282,
"autogenerated": false,
"ratio": 3.211802748585287,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# A script to help doing the deliveries.
# The user is asked to provide a project ID, a run name, and an UPPMAX project
import sys, os, yaml, glob, shutil
from datetime import datetime
def fixProjName(pname):
    """Normalize a project name: uppercase the first character and any
    character that immediately follows a period; leave the rest untouched."""
    chars = [pname[0].upper()]
    follows_period = False
    for ch in pname[1:]:
        if ch == ".":
            chars.append(ch)
            follows_period = True
        else:
            chars.append(ch.upper() if follows_period else ch)
            follows_period = False
    return "".join(chars)
# --- CLI parsing (positional argv; no argparse in this Python 2 script) ---
if len(sys.argv) < 5:
    print "USAGE: python " + sys.argv[0] + " <project ID> <run name> <UPPMAX project> <Dry run, y/n>"
    sys.exit(0)
base_path = '/proj/a2010002/nobackup/illumina/'
base_yaml_path = '/proj/a2010002/archive/'
# base_yaml_path = '/bubo/home/h9/mikaelh/'
# Dry-run is the default; only an explicit "n" performs real work.
dry = True
projid = sys.argv[1].lower()
runname = sys.argv[2].strip("/")
# Abbreviated run name: "<date>_<flowcell-field>" from the underscore-split name.
abbr_runname = runname.split("_")[0] + "_" + runname.split("_")[3]
yamlfile = base_yaml_path + runname + "/run_info.yaml"
uppmaxproj = sys.argv[3]
# print "Project name: ", fixProjName(projid)
if sys.argv[4].lower() == "n": dry = False
projdata = yaml.load(open(yamlfile))
# Timestamped log file name; only opened when not a dry run.
dt = datetime.now()
time_str = str(dt.year) + "_" + str(dt.month) + "_" + str(dt.day) + "_" + str(dt.hour) + "_" + str(dt.minute) + "_" + str(dt.second)
if not dry:
    logfilename = "/bubo/home/h9/mikaelh/delivery_logs/" + time_str + ".log"
    logfile = open(logfilename, "w")
print "Project to move files for:", projid
if not dry: logfile.write("Project to move files for:" + "\n" + fixProjName(projid) + "\n")
# --- Find the lanes belonging to the requested project -------------------
# `matching` collects lane ids for the project; `available` collects every
# project id seen, for the error message below.
matching = set()
available = set()
for entry in projdata:
    available.add(entry['description'].split(',')[-1].strip())
    if entry['description'].split(',')[-1].strip().lower()==projid:
        matching.add(entry['lane'])
    elif entry.has_key('multiplex'):
        for sample in entry['multiplex']:
            if sample.has_key('sample_prj'):
                available.add(sample['sample_prj'])
                if sample['sample_prj'].split(',')[-1].strip().lower()==projid:
                    matching.add(entry['lane'])
if len(matching)==0:
    print "No matching project found. Possibilities:"
    for prid in sorted(available):
        print prid
    sys.exit(0)
elif dry:
    print "I will move files from lanes " + ",".join(matching)
if not dry: logfile.flush()
# Create directory in user's INBOX
temp = runname.split('_')
start_date = temp[0]
flow_cell = temp[3][0] # A or B
created_proj_dir_name = fixProjName(projid)
created_run_dir_name = "20" + start_date + flow_cell + "_hiseq2000"
# "Old school" style
#del_path = '/proj/' + uppmaxproj + "/INBOX/" + created_proj_dir_name + "/" + created_run_dir_name
# New
del_path_top = '/proj/' + uppmaxproj + "/INBOX/" + created_proj_dir_name
print "Will create a top-level project directory", del_path_top
if not dry:
    logfile.write("Creating top-level delivery directory:" + del_path_top + " (or leaving it in place if already present)\n")
    if os.path.exists(del_path_top):
        print "Directory", del_path_top, " already exists!"
    else:
        try:
            os.mkdir(del_path_top)
        except:
            print "Could not create delivery directory!"
            sys.exit(0)
del_path = del_path_top + "/" + abbr_runname
print "Will create a run directory", del_path
if not dry:
    logfile.write("Creating run-level delivery directory:" + del_path + " (or leaving it in place if already present)\n")
    if os.path.exists(del_path):
        print "Directory", del_path, " already exists!"
    else:
        try:
            os.mkdir(del_path)
        except:
            print "Could not create delivery directory!"
            sys.exit(0)
# Start looking for the files to transfer
temp = runname.split('_')
dirs_to_process = []
for m in sorted(matching):
    d = m + "_" + temp[0] + "_" + temp[3] + "_nophix"
    dirs_to_process.append(d)
os.chdir(base_path + runname )
for d in dirs_to_process:
    #dirpath = d + "_barcode/2_mismatch"
    dirpath = d + "_barcode"
    if not os.path.exists(dirpath):
        print "Could not find directory", dirpath
        print "Standing in ", os.getcwd()
        sys.exit(0)
    os.chdir(dirpath)
    # Barcode-metrics file name varies between pipeline versions.
    bcname = d + "_bc.metrics"
    if not os.path.exists(bcname):
        bcname = d + ".bc_metrics"
    lane = dirpath[0]
    print "LANE ", lane
    if not dry: logfile.write("LANE " + lane + "\n")
    # Print table of Illumina vs. bcbb barcodes
    sample_id_and_idx = {}
    lane_info = "none"
    # The 'main_proj_for_lane' stuff is outdated since March 2012
    # main_proj_for_lane = ''
    for entry in projdata:
        if entry['lane'] == lane:
            lane_info = entry
            # is_main_proj = True
            # main_proj_for_lane = entry['description'].split(',')[-1].strip().lower()
            # if main_proj_for_lane == projid:
            #     print projid, "is the main project for lane", lane
            # else:
            #     print "This project is not the main project for lane ", lane, ". The main project is ", main_proj_for_lane
            #     is_main_proj = False
    lane_sample = ''
    if lane_info.has_key('multiplex'):
        # Map bcbb barcode ids to customer sample names for this project.
        for bc in lane_info['multiplex']:
            if bc.has_key('sample_prj'):
                if bc['sample_prj'].split(',')[-1].strip().lower() == projid:
                    sample_id_and_idx[bc['barcode_id']] = bc['name']
        # elif is_main_proj:
        #     sample_id_and_idx[bc['barcode_id']] = bc['name']
        print "Pipeline index\tSampleName\t# matching sequences"
        if not dry: logfile.write("Pipeline index\tIllumina index/sample ID\tMatches\n")
        if os.path.exists(bcname):
            for line in open(bcname):
                [bcbb_bc, hits] = line.strip().split()
                try:
                    if sample_id_and_idx.has_key(int(bcbb_bc)):
                        print bcbb_bc + "\t" + sample_id_and_idx[int(bcbb_bc)] + "\t" + hits
                        if not dry: logfile.write(bcbb_bc + "\t" + sample_id_and_idx[int(bcbb_bc)] + "\t" + hits + "\n")
                except:
                    # int() fails for the literal "unmatched" row; anything
                    # else is treated as a parse error and aborts.
                    if bcbb_bc == "unmatched":
                        print bcbb_bc + "\t" + "N.A." + "\t" + hits
                        if not dry: logfile.write(bcbb_bc + "\t" + "N.A." + "\t" + hits + "\n")
                    else:
                        print "Encountered parsing error in barcode conversion: " + bcbb_bc
                        print sample_id_and_idx
                        sys.exit(0)
        else:
            print "BC metrics file", bcname, " not found"
            sys.exit(0)
    else:
        print "Non-multiplexed lane"
        print "Please type a sample name for this lane"
        lane_sample = raw_input()
        if not dry: logfile.write("Non-multiplexed lane\n")
    # print os.listdir(".")
    files_to_copy = []
    for fastq_file in (glob.glob("*fastq.txt") + glob.glob("*fastq.txt.gz")):
        # Skip if this is a non-compressed file and there exists a compressed version
        if os.path.exists(fastq_file + ".gz"):
            continue
        ext = ""
        if os.path.splitext(fastq_file)[1] == ".gz":
            ext = os.path.splitext(fastq_file)[1]
        if lane_info.has_key('multiplex'):
            new_file_name = ''
            if 'unmatched' in fastq_file: continue
            # Extract barcode
            [lane, date, run_id, nophix, bcbb_bc, pe_read, dummy] = fastq_file.split("_")
            #[lane, date, run_id, bcbb_bc, pe_read, dummy] = fastq_file.split("_")
            if sample_id_and_idx.has_key(int(bcbb_bc)):
                customer_sample_id = sample_id_and_idx[int(bcbb_bc)]
                new_file_name = lane + "_" + date + "_" + run_id + "_" + customer_sample_id.replace("/", "_") + "_" + pe_read + ".fastq" + ext
        else:
            [lane, date, run_id, nophix, name, pe_read,dummy] = fastq_file.split("_")
            #[lane, date, run_id, name, pe_read,dummy] = fastq_file.split("_")
            new_file_name = lane + "_" + date + "_" + run_id + "_" + lane_sample + "_" + pe_read + ".fastq" + ext
        # print "Preparing to copy file", fastq_file, "as ", new_file_name
        if new_file_name != '': files_to_copy.append([fastq_file, new_file_name])
    for pair in files_to_copy:
        source = os.getcwd() + "/" + pair[0]
        dest = del_path + "/" + pair[1]
        print "Will copy (rsync) ", source, "to ", dest
        if not dry:
            command_to_execute = 'rsync -ac ' + source + ' ' + dest
            logfile.write("Executing command: " + command_to_execute + "\n")
            logfile.flush()
            os.system(command_to_execute)
    os.chdir('..')
    #os.chdir('../..')
if not dry:
    os.chdir(del_path)
    logfile.close()
    # Grant the group read/write on the delivered tree.
    os.system("chmod -R g+rw " + del_path)
| {
"repo_name": "SciLifeLab/scilifelab",
"path": "scripts/assisted_delivery.py",
"copies": "4",
"size": "8979",
"license": "mit",
"hash": -7215780009638414000,
"line_mean": 36.2572614108,
"line_max": 144,
"alpha_frac": 0.5528455285,
"autogenerated": false,
"ratio": 3.312061969752859,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009966617319253626,
"num_lines": 241
} |
"""A script to import a dumped pickle file from the pipeline and plot the flux as a function of zaber position to determine if the alignment was correct.
"""
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
import glob
import pdb
import pickle
import matplotlib.cm as cm
import scipy.interpolate as interpolate
# Load the pipeline dump: per-frame spectra, flats/arcs, raw lenslet images
# and the zaber stage (x, y) position of every frame.
f=open('all.pkl','rb')
wave,fluxes,flat_flux,arc_flux,lenslet_ims,xpos,ypos=pickle.load(f)
#lenslet_flat = np.median(np.median(flat_flux,axis=0),axis=0)[1:].reshape(3,3)
#flatnorm=lenslet_flat/(np.median(lenslet_flat))
#lenslet_ims_f=lenslet_ims / flatnorm
# Total flux per frame, summed over both image axes of the lenslet stamp.
height=np.sum(np.sum(lenslet_ims,2),1)
#plt.scatter(xpos,ypos,c=np.sum(np.sum(lenslet_ims_f,2),1),s=40,cmap=cm.gist_heat)
#plt.title('flat')
#plt.figure()
#plt.scatter(xpos,ypos,c=np.sum(np.sum(lenslet_ims,2),1),s=40,cmap=cm.gist_heat)
#plt.show()
#Linearly interpolate over the values of the height over 500 positions to make the image clearer to the eye.
numIndexes = 500
xi = np.linspace(np.min(xpos), np.max(xpos),numIndexes)
yi = np.linspace(np.min(ypos), np.max(ypos),numIndexes)
XI, YI = np.meshgrid(xi, yi)
points = np.vstack((xpos,ypos)).T
values = np.asarray(height)
points = np.asarray(points)
#values = np.asarray(estimatedHeightList)
DEM = interpolate.griddata(points, values, (XI,YI), method='linear')
#Now plot all.
plt.imshow(DEM,cmap ='RdYlGn_r',origin='lower',extent=[np.min(xpos), np.max(xpos),np.min(ypos), np.max(ypos)] )
plt.colorbar()
plt.scatter(xpos,ypos,c=np.sum(np.sum(lenslet_ims,2),1),s=40,cmap=cm.gist_heat,alpha=0.25)
plt.title('Global flux using all fibers')
#Now do the same considering only the flux from single fibers
#This parameter has the median of the medians of the fluxes for fibers with flux on them for each frame.
# NOTE(review): the 16:21 slice presumably selects the illuminated fiber
# rows of the extracted-flux array — confirm against the extraction code.
ind_lenslet=np.zeros((fluxes.shape[0],fluxes.shape[3]))
for i in range(fluxes.shape[0]):
    ind_lenslet[i]=np.median(np.median(fluxes[i,16:21,:,:],axis=1),axis=0)
#New figure, using subplots for each fiber, hopefully the locations are roughly correct.
plt.figure()
for i in range(9):
    plt.subplot(3,3,i+1)
    # Same interpolated map as above, but colored by this single fiber's flux.
    height=ind_lenslet[:,i]
    numIndexes = 500
    xi = np.linspace(np.min(xpos), np.max(xpos),numIndexes)
    yi = np.linspace(np.min(ypos), np.max(ypos),numIndexes)
    XI, YI = np.meshgrid(xi, yi)
    points = np.vstack((xpos,ypos)).T
    values = np.asarray(height)
    points = np.asarray(points)
    #values = np.asarray(estimatedHeightList)
    DEM = interpolate.griddata(points, values, (XI,YI), method='linear')
    plt.imshow(DEM,cmap ='RdYlGn_r',origin='lower',extent=[np.min(xpos), np.max(xpos),np.min(ypos), np.max(ypos)] )
    plt.colorbar()
    plt.title('Using only fiber'+str(i+1))
plt.show()
| {
"repo_name": "mikeireland/pymfe",
"path": "determine_IFU_alignment.py",
"copies": "1",
"size": "2723",
"license": "mit",
"hash": 4539091607042373000,
"line_mean": 32.2073170732,
"line_max": 153,
"alpha_frac": 0.7139184723,
"autogenerated": false,
"ratio": 2.6514118792599803,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3865330351559981,
"avg_score": null,
"num_lines": null
} |
# A script to merge per-frame girder annotations from every image in a folder
# into combined annotations stored on that folder's ".stack" item.
import time
from datetime import date
import girder_client
import girder as g
import sys
import os
import csv
import pdb
import csv
from pprint import pprint
def process_stack(gc, stack_id):
    """Merge per-frame annotations from every image in the stack's folder into
    combined per-name annotations saved on the stack item itself.

    :param gc: authenticated girder client
    :param stack_id: girder item id of the '.stack' item
    """
    # Combined output annotations, keyed by annotation name.
    merged = {}
    folder_id = gc.get('item/%s'%stack_id)['folderId']
    items = gc.get('item?folderId=%s&limit=5000&offset=0&sort=lowerName&sortdir=1'%folder_id)
    frame_count = 0
    for item in items:
        # Only items carrying a large image are frames of the stack.
        if 'largeImage' not in item:
            continue
        frame_idx = frame_count
        frame_count += 1
        print("Processing frame %d"%frame_idx)
        # Walk every annotation attached to this frame.
        for annot_info in gc.get('annotation?itemId=%s&limit=50'%item['_id']):
            annotation = gc.get('annotation/%s'%annot_info['_id'])['annotation']
            elements = annotation['elements']
            # Record the frame index in the z coordinate of each rectangle.
            for element in elements:
                if element['type'] == 'rectangle':
                    element['center'][2] = frame_idx
            # Merge this frame's elements into the per-name accumulator.
            if annotation['name'] in merged:
                existing = merged[annotation['name']]
                existing['elements'] = existing['elements'] + elements
            else:
                merged[annotation['name']] = annotation
    # Persist each merged annotation on the stack item, updating in place when
    # one with the same name already exists there.
    for annot_name, annot in merged.items():
        resp = gc.get("annotation?itemId=%s&name=%s" % (stack_id, annot_name))
        if len(resp) > 0:
            resp = gc.put("annotation/%s"%resp[0]['_id'], parameters={"itemId":stack_id}, json=annot)
            print("Updating annotation: %s" % resp["_id"])
        else:
            resp = gc.post("annotation", parameters={"itemId":stack_id}, json=annot)
            print("New annotation: %s" % resp["_id"])
if __name__ == '__main__':
    # API keys are blank placeholders; fill in locally before running.
    # NOTE(review): avoid hard-coding real keys -- load from the environment
    # or a config file instead.
    keys = {'lemon':'', \
            'images': ''}
    urls = {'lemon':'http://lemon/api/v1', \
            'images': 'https://images.slide-atlas.org/api/v1'}
    server_name = 'images'
    gc = girder_client.GirderClient(apiUrl=urls[server_name])
    gc.authenticate('law12019', apiKey=keys[server_name])
    # Parent folder with one sub-folder per fish; each holds a '.stack' item.
    fish_folder_id = '5b68667670aaa94f2e5bd976'
    resp = gc.get('folder?parentType=folder&parentId=%s&limit=50&sort=lowerName&sortdir=1'%fish_folder_id)
    for folder in resp:
        # Get the stack id
        resp = gc.get('item?folderId=%s&name=.stack'%folder['_id'])
        stack_id = resp[0]['_id']
        name = folder['name']
        print('folder %s'%name)
        # Folders already processed are skipped by name.
        if name == 'ehu':
            continue
        """
        if name == 'Gindai':
            continue
        if name == 'grouper1':
            continue
        if name == 'grouper2':
            continue
        if name == 'grouper3':
            continue
        if name == 'kalekale1':
            continue
        if name == 'kalekale2':
            continue
        if name == 'lehi':
            continue
        if name == 'lehi1':
            continue
        if name == 'lehi2':
            continue
        if name == 'onaga1':
            continue
        if name == 'onaga2':
            continue
        if name == 'onaga3':
            continue
        if name == 'paka':
            continue
        """
        #stack_id = "5b68afba70aaa94f2e5c5a07"
        process_stack(gc, stack_id)
| {
"repo_name": "law12019/deep_learning",
"path": "scripts/copyAnnotationsToStack.py",
"copies": "1",
"size": "3966",
"license": "apache-2.0",
"hash": -3948363280870622000,
"line_mean": 32.3277310924,
"line_max": 106,
"alpha_frac": 0.5350479072,
"autogenerated": false,
"ratio": 3.8844270323212537,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9889486513153221,
"avg_score": 0.005997685273606632,
"num_lines": 119
} |
# A script to migrate old keen analytics to a new collection, generate in-between points for choppy
# data, or a little of both
import os
import csv
import copy
import pytz
import logging
import argparse
import datetime
from dateutil.parser import parse
from keen.client import KeenClient
from website.settings import KEEN as keen_settings
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
VERY_LONG_TIMEFRAME = 'this_20_years'
def parse_args():
    """Parse the command line flags selecting which analytics operation to run.

    :return: validated argparse.Namespace
    """
    cli = argparse.ArgumentParser(
        description='Enter a start date and end date to gather, smooth, and send back analytics for keen'
    )
    cli.add_argument('-s', '--start', dest='start_date')
    cli.add_argument('-e', '--end', dest='end_date')
    cli.add_argument('-t', '--transfer', dest='transfer_collection', action='store_true')
    cli.add_argument('-sc', '--source', dest='source_collection')
    cli.add_argument('-dc', '--destination', dest='destination_collection')
    cli.add_argument('-sm', '--smooth', dest='smooth_events', action='store_true')
    cli.add_argument('-o', '--old', dest='old_analytics', action='store_true')
    cli.add_argument('-d', '--dry', dest='dry', action='store_true')
    cli.add_argument('-r', '--reverse', dest='reverse', action='store_true')
    cli.add_argument('-re', '--removeevent', dest="remove_event")
    namespace = cli.parse_args()
    # Reject inconsistent flag combinations before doing any work.
    validate_args(namespace)
    return namespace
def validate_args(args):
    """ Go through supplied command line args and determine if you have enough to continue

    :param args: argparse args object, to sift through and figure out if you need more info
    :return: None, just raise errors if it finds something wrong
    """
    if args.dry:
        logger.info('Running analytics on DRY RUN mode! No data will actually be sent to Keen.')
    # The three main operations are mutually exclusive.
    potential_operations = [args.smooth_events, args.transfer_collection, args.old_analytics]
    if len([arg for arg in potential_operations if arg]) > 1:
        raise ValueError('You may only choose one analytic type to run: transfer, smooth, or import old analytics.')
    if args.smooth_events and not (args.start_date and args.end_date):
        raise ValueError('To smooth data, please enter both a start date and end date.')
    if args.start_date and args.end_date:
        if parse(args.start_date) > parse(args.end_date):
            raise ValueError('Please enter an end date that is after the start date.')
    if args.smooth_events and not args.source_collection:
        raise ValueError('Please specify a source collection to smooth data from.')
    if args.transfer_collection and not (args.source_collection and args.destination_collection):
        raise ValueError('To transfer between keen collections, enter both a source and a destination collection.')
    if any([args.start_date, args.end_date]) and not all([args.start_date, args.end_date]):
        raise ValueError('You must provide both a start and an end date if you provide either.')
    if args.remove_event and not args.source_collection:
        # Fixed copy-pasted message ("both a source collection"): only one
        # thing is required here, the source collection.
        raise ValueError('You must provide a source collection to remove an event from.')
def fill_in_event_gaps(collection_name, events):
    """ A method to help fill in gaps between events that might be far apart,
    so that one event happens per day.

    :param collection_name: keen collection events are from
    :param events: events to fill in gaps between
    :return: list of "generated and estimated" events to send that will fill in gaps.
    """
    # Only days backed by real (non-generated) events anchor the gap filling.
    given_days = sorted(parse(event['keen']['timestamp']).date() for event in events if not event.get('generated'))
    if not given_days:
        logger.info('Could not retrieve events for the date range you provided.')
        return []
    # Consecutive day pairs; a pair more than one day apart marks a gap.
    date_chunks = [given_days[x - 1:x + 1] for x in range(1, len(given_days))]
    # These collections hold one event per provider/institution per day, so
    # their gaps must be filled per group; all other collections fill globally.
    if collection_name == 'addon_snapshot':
        group_of = lambda event: event['provider']['name']
    elif collection_name == 'institution_summary':
        group_of = lambda event: event['institution']['name']
    else:
        group_of = None
    events_to_add = []
    if group_of is None:
        events_to_add += _fill_gaps(events, date_chunks, lambda event: True)
    else:
        for group in set(group_of(event) for event in events):
            events_to_add += _fill_gaps(
                events, date_chunks,
                lambda event, group=group: group_of(event) == group
            )
    logger.info('Generated {} events to add to the {} collection.'.format(len(events_to_add), collection_name))
    return events_to_add

def _fill_gaps(events, date_chunks, matches):
    """Generate events for every gapped day pair, seeding each gap with the
    first real event that satisfies ``matches`` on the gap's starting day."""
    generated = []
    for date_pair in date_chunks:
        if date_pair[1] - date_pair[0] > datetime.timedelta(1) and date_pair[0] != date_pair[1]:
            first_event = [
                event for event in events
                if date_from_event_ts(event) == date_pair[0] and matches(event) and not event.get('generated')
            ]
            if first_event:
                generated += generate_events_between_events(date_pair, first_event[0])
    return generated
def date_from_event_ts(event):
    """Return the calendar date portion of an event's keen timestamp."""
    return parse(event['keen']['timestamp']).date()
def generate_events_between_events(given_days, first_event):
    """Clone ``first_event`` once per day strictly between the first and last
    entries of ``given_days`` that has no event yet, and return the clones.

    NOTE: mutates ``first_event`` in place -- its keen 'created_at'/'id' keys
    are removed and it is tagged 'generated' so later passes can distinguish
    real events from generated ones.

    :param given_days: pair of dates delimiting the gap
    :param first_event: real event whose payload seeds the generated copies
    :return: list of generated events
    """
    start = given_days[0]
    end = given_days[-1]
    one_day = datetime.timedelta(1)
    # Strip keen's server-assigned metadata so the clones can be re-posted.
    first_event['keen'].pop('created_at')
    first_event['keen'].pop('id')
    first_event['generated'] = True
    generated_events = []
    day = start + one_day
    while day < end:
        if day not in given_days:
            clone = copy.deepcopy(first_event)
            clone['keen']['timestamp'] = datetime.datetime(day.year, day.month, day.day).replace(tzinfo=pytz.UTC).isoformat()
            generated_events.append(clone)
        day += one_day
    if generated_events:
        logger.info('Generated {} events for the interval {} to {}'.format(
            len(generated_events),
            given_days[0].isoformat(),
            given_days[1].isoformat()
        ))
    return generated_events
def get_keen_client():
    """Build a KeenClient from the private keys in website settings.

    :return: an instantiated ``KeenClient``
    :raises ValueError: when any of the four required keys is missing
    """
    keen_project = keen_settings['private'].get('project_id')
    read_key = keen_settings['private'].get('read_key')
    master_key = keen_settings['private'].get('master_key')
    write_key = keen_settings['private'].get('write_key')
    # write_key is passed to the client and needed to add events, but was
    # previously not validated -- a missing write key only surfaced later as
    # an obscure failure when posting. Validate all four up front.
    if keen_project and read_key and master_key and write_key:
        client = KeenClient(
            project_id=keen_project,
            read_key=read_key,
            master_key=master_key,
            write_key=write_key
        )
    else:
        raise ValueError('Cannot connect to Keen clients - all keys not provided.')
    return client
def extract_events_from_keen(client, event_collection, start_date=None, end_date=None):
    """Pull raw events for a collection out of keen.

    :param client: keen client to use for connection
    :param event_collection: str, name of the event collection to gather from
    :param start_date: datetime, optional lower bound of the extraction window
    :param end_date: datetime, optional upper bound of the extraction window
    :return: a list of keen events to use in other methods
    """
    # Without both bounds, fall back to an effectively unbounded window.
    if start_date and end_date:
        logger.info('Gathering events from the {} collection between {} and {}'.format(event_collection, start_date, end_date))
        timeframe = {"start": start_date.isoformat(), "end": end_date.isoformat()}
    else:
        logger.info('Gathering events from the {} collection using timeframe {}'.format(event_collection, VERY_LONG_TIMEFRAME))
        timeframe = VERY_LONG_TIMEFRAME
    return client.extraction(event_collection, timeframe=timeframe)
def make_sure_keen_schemas_match(source_collection, destination_collection, keen_client):
    """Check whether two keen collections share an identical schema, a
    precondition for transferring events between them.

    :param source_collection: str, collection that events are stored in now
    :param destination_collection: str, collection to transfer to
    :param keen_client: KeenClient, instantiated for the connection
    :return: bool, True when the two schemas are identical
    """
    schemas = [keen_client.get_collection(name) for name in (source_collection, destination_collection)]
    return schemas[0] == schemas[1]
def transfer_events_to_another_collection(client, source_collection, destination_collection, dry, reverse=False):
    """Copy (or, with ``reverse``, retract) every event from one keen
    collection into another. Both collections must share an identical schema.

    :param client: KeenClient, client to use to make connection to keen
    :param source_collection: str, keen collection to transfer from
    :param destination_collection: str, keen collection to transfer to
    :param dry: bool, when True nothing is actually sent to keen
    :param reverse: bool, remove the events from the destination instead
    :return: None
    :raises ValueError: when the two schemas differ
    """
    if not make_sure_keen_schemas_match(source_collection, destination_collection, client):
        raise ValueError('The two provided schemas in keen do not match, you will need to do a bit more work.')
    events_from_source = extract_events_from_keen(client, source_collection)
    # Strip keen's server-assigned metadata so the events can be re-posted.
    for event in events_from_source:
        event['keen'].pop('created_at')
        event['keen'].pop('id')
    action = remove_events_from_keen if reverse else add_events_to_keen
    action(client, destination_collection, events_from_source, dry)
    logger.info(
        'Transferred {} events from the {} collection to the {} collection'.format(
            len(events_from_source),
            source_collection,
            destination_collection
        )
    )
def add_events_to_keen(client, collection, events, dry):
    """Post ``events`` to ``collection``; on a dry run only log the intent."""
    logger.info('Adding {} events to the {} collection...'.format(len(events), collection))
    if dry:
        return
    client.add_events({collection: events})
def smooth_events_in_keen(client, source_collection, start_date, end_date, dry, reverse):
    """Fill per-day gaps in a keen collection between two dates; with
    ``reverse``, delete the events a previous smoothing run generated."""
    existing = extract_events_from_keen(client, source_collection, start_date, end_date)
    gap_events = fill_in_event_gaps(source_collection, existing)
    action = remove_events_from_keen if reverse else add_events_to_keen
    action(client, source_collection, gap_events, dry)
def remove_events_from_keen(client, source_collection, events, dry):
    """Delete the given (typically generated) events from a keen collection.

    Each event is matched in keen by its exact timestamp; the match is
    verified by comparison before anything is deleted, and mismatches are
    only logged, never deleted.

    NOTE(review): each passed-in event's timestamp is truncated to its date
    (``[:10]``) in place, so callers' event dicts are mutated.

    :param client: KeenClient used for extraction and deletion
    :param source_collection: str, collection to delete from
    :param events: list of keen events to remove
    :param dry: bool, when True log what would be deleted but delete nothing
    """
    for event in events:
        filters = [{'property_name': 'keen.timestamp', 'operator': 'eq', 'property_value': event['keen']['timestamp']}]
        # test to see if you get back the correct events from keen
        filtered_event = client.extraction(source_collection, filters=filters)
        if filtered_event:
            filtered_event = filtered_event[0]
            # Drop keen's server-side bookkeeping before comparing payloads.
            filtered_event['keen'].pop('id')
            filtered_event['keen'].pop('created_at')
            filtered_event['keen']['timestamp'] = filtered_event['keen']['timestamp'][:10] # ends of timestamps differ
            event['keen']['timestamp'] = event['keen']['timestamp'][:10]
            if event != filtered_event:
                logger.error('Filtered event not equal to the event you have gathered, not removing...')
            else:
                logger.info('About to delete a generated event from the {} collection from the date {}'.format(
                    source_collection, event['keen']['timestamp']
                ))
                if not dry:
                    client.delete_events(source_collection, filters=filters)
        else:
            logger.info('No filtered event found.')
def import_old_events_from_spreadsheet():
    """Read the legacy daily-counts CSV from the user's home directory and
    convert its rows into keen events, bucketed by target collection.

    :return: dict mapping collection name to its list of formatted events
    """
    home = os.path.expanduser("~")
    spreadsheet_path = home + '/daily_user_counts.csv'
    # Spreadsheet column header -> event field name.
    key_map = {
        'active-users': 'active',
        'logs-gte-11-total': 'depth',
        'number_users': 'total_users', # really is active - number_users
        'number_projects': 'projects.total',
        'number_projects_public': 'projects.public',
        'number_projects_registered': 'registrations.total',
        'Date': 'timestamp',
        'dropbox-users-enabled': 'enabled',
        'dropbox-users-authorized': 'authorized',
        'dropbox-users-linked': 'linked',
        'profile-edits': 'profile_edited'
    }
    with open(spreadsheet_path) as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        col_names = reader.next()
    events = []
    # Re-open inside a context manager: the original code passed a bare
    # open() to DictReader and leaked the file handle.
    with open(spreadsheet_path, 'rb') as csvfile:
        dictReader = csv.DictReader(csvfile, fieldnames=col_names, delimiter=',')
        for row in dictReader:
            event = {}
            for key in row:
                equiv_key = key_map.get(key, None)
                if equiv_key:
                    event[equiv_key] = row[key]
            events.append(event)
    user_summary_cols = ['active', 'depth', 'total_users', 'timestamp', 'profile_edited']
    node_summary_cols = ['registrations.total', 'projects.total', 'projects.public', 'timestamp']
    addon_summary_cols = ['enabled', 'authorized', 'linked', 'timestamp']
    user_events = []
    node_events = []
    addon_events = []
    for event in events[3:]: # The first few rows have blank and/or bad data because they're extra headers
        node_event = {}
        user_event = {}
        addon_event = {}
        # Split each row's fields into the three collection-specific events.
        for key, value in event.iteritems():
            if key in node_summary_cols:
                node_event[key] = value
            if key in user_summary_cols:
                user_event[key] = value
            if key in addon_summary_cols:
                addon_event[key] = value
        formatted_user_event = format_event(user_event, analytics_type='user')
        formatted_node_event = format_event(node_event, analytics_type='node')
        formatted_addon_event = format_event(addon_event, analytics_type='addon')
        # format_event returns None for rows with no usable data.
        if formatted_node_event:
            node_events.append(formatted_node_event)
        if formatted_user_event:
            user_events.append(formatted_user_event)
        if formatted_addon_event:
            addon_events.append(formatted_addon_event)
    logger.info(
        'Gathered {} old user events, {} old node events and {} old dropbox addon events for keen'.format(
            len(user_events),
            len(node_events),
            len(addon_events)
        )
    )
    return {'user_summary': user_events, 'node_summary': node_events, 'addon_snapshot': addon_events}
def comma_int(value):
    """Parse a spreadsheet number like '1,234' into an int.

    Returns None for empty/None values and for the 'MISSING' placeholder.
    """
    if not value or value == 'MISSING':
        return None
    return int(value.replace(',', ''))
def format_event(event, analytics_type):
    """Shape one spreadsheet-derived row into a keen event for the given
    analytics type ('user', 'node' or 'addon').

    Returns the populated template, or None (implicitly) when the row carried
    no data beyond the auto-added keen timestamp and 'imported' flag.

    NOTE(review): the filtered ``formatted_event`` is only used for the
    length check; the unfiltered template (including empty sub-dicts) is what
    is returned -- presumably to keep the keen schema consistent. Confirm.
    NOTE(review): an unknown ``analytics_type`` leaves ``template_to_use`` as
    None and the timestamp assignment below would raise TypeError.
    """
    user_event_template = {
        "status": {},
        "keen": {}
    }
    node_event_template = {
        "projects": {},
        "registered_projects": {},
        "keen": {}
    }
    addon_event_template = {
        "keen": {},
        "users": {}
    }
    template_to_use = None
    if analytics_type == 'user':
        template_to_use = user_event_template
        if event['active'] and event['active'] != 'MISSING':
            template_to_use['status']['active'] = comma_int(event['active'])
        # NOTE(review): if active == 'MISSING', comma_int returns None and
        # this subtraction would raise TypeError -- confirm inputs.
        if event['total_users'] and event['active']:
            template_to_use['status']['unconfirmed'] = comma_int(event['total_users']) - comma_int(event['active'])
        if event['profile_edited']:
            template_to_use['status']['profile_edited'] = comma_int(event['profile_edited'])
    elif analytics_type == 'node':
        template_to_use = node_event_template
        if event['projects.total']:
            template_to_use['projects']['total'] = comma_int(event['projects.total'])
        if event['projects.public']:
            template_to_use['projects']['public'] = comma_int(event['projects.public'])
        if event['registrations.total']:
            template_to_use['registered_projects']['total'] = comma_int(event['registrations.total'])
        if event['projects.total'] and event['projects.public']:
            template_to_use['projects']['private'] = template_to_use['projects']['total'] - template_to_use['projects']['public']
    elif analytics_type == 'addon':
        template_to_use = addon_event_template
        if event['enabled']:
            template_to_use['users']['enabled'] = comma_int(event['enabled'])
        if event['authorized']:
            template_to_use['users']['authorized'] = comma_int(event['authorized'])
        if event['linked']:
            template_to_use['users']['linked'] = comma_int(event['linked'])
        # Only dropbox stats exist in the legacy spreadsheet.
        if event['authorized'] or event['enabled'] or event['linked']:
            template_to_use["provider"] = {"name": "dropbox"}
    # Pin every imported event to noon UTC of its spreadsheet date.
    template_to_use['keen']['timestamp'] = parse(event['timestamp']).replace(hour=12, tzinfo=pytz.UTC).isoformat()
    template_to_use['imported'] = True
    formatted_event = {key: value for key, value in template_to_use.items() if value}
    if len(formatted_event.items()) > 2: # if there's more than just the auto-added timestamp for keen
        return template_to_use
def remove_event_from_keen(client, source_collection, event_id):
    """Delete a single event, addressed by its keen id, from a collection."""
    id_filter = [{'property_name': 'keen.id', 'operator': 'eq', 'property_value': event_id}]
    client.delete_events(source_collection, filters=id_filter)
def parse_and_send_old_events_to_keen(client, dry, reverse):
    """Import events from the legacy spreadsheet and push (or, with
    ``reverse``, retract) them collection by collection."""
    action = remove_events_from_keen if reverse else add_events_to_keen
    for collection, events in import_old_events_from_spreadsheet().iteritems():
        action(client, collection, events, dry)
def main():
    """ Main function for moving around and adjusting analytics gotten from keen and sending them back to keen.
    Usage:
        * Transfer all events from the 'institution_analytics' to the 'institution_summary' collection:
            `python -m scripts.analytics.migrate_analytics -d -t -sc institution_analytics -dc institution_summary`
        * Fill in the gaps in analytics for the 'addon_snapshot' collection between 2016-11-01 and 2016-11-15:
            `python -m scripts.analytics.migrate_analytics -d -sm -sc addon_snapshot -s 2016-11-01 -e 2016-11-15`
        * Reverse the above action by adding -r:
            `python -m scripts.analytics.migrate_analytics -d -sm -sc addon_snapshot -s 2016-11-01 -e 2016-11-15 -r`
        * Parse old analytics from the old analytics CSV stored on your filesystem:
            `python -m scripts.analytics.migrate_analytics -o -d`
    """
    args = parse_args()
    client = get_keen_client()
    dry = args.dry
    reverse = args.reverse
    if args.remove_event:
        # NOTE(review): no early return here -- combining -re with -sm/-t/-o
        # would also run the operation below. Confirm that is intended.
        remove_event_from_keen(client, args.source_collection, args.remove_event)
    if args.smooth_events:
        smooth_events_in_keen(client, args.source_collection, parse(args.start_date), parse(args.end_date), dry, reverse)
    elif args.transfer_collection:
        transfer_events_to_another_collection(client, args.source_collection, args.destination_collection, dry, reverse)
    elif args.old_analytics:
        parse_and_send_old_events_to_keen(client, dry, reverse)
# Script entry point: only runs when executed directly, not on import.
if __name__ == '__main__':
    main()
| {
"repo_name": "icereval/osf.io",
"path": "scripts/analytics/migrate_analytics.py",
"copies": "9",
"size": "20115",
"license": "apache-2.0",
"hash": -8601566881216215000,
"line_mean": 41.6165254237,
"line_max": 177,
"alpha_frac": 0.641163311,
"autogenerated": false,
"ratio": 3.931014266171585,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0022062666138305104,
"num_lines": 472
} |
# A script to migrate old keen analytics to a new collection, generate in-between points for choppy
# data, or a little of both
import os
import csv
import copy
import time
import pytz
import logging
import argparse
import datetime
from dateutil.parser import parse
from keen.client import KeenClient
from website.settings import KEEN as keen_settings
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
VERY_LONG_TIMEFRAME = 'this_20_years'
def parse_args():
    """Collect the command line flags that select which operation to run.

    :return: argparse.Namespace of validated arguments
    """
    parser = argparse.ArgumentParser(
        description='Enter a start date and end date to gather, smooth, and send back analytics for keen'
    )
    parser.add_argument('-s', '--start', dest='start_date')
    parser.add_argument('-e', '--end', dest='end_date')
    parser.add_argument('-t', '--transfer', dest='transfer_collection', action='store_true')
    parser.add_argument('-sc', '--source', dest='source_collection')
    parser.add_argument('-dc', '--destination', dest='destination_collection')
    parser.add_argument('-sm', '--smooth', dest='smooth_events', action='store_true')
    parser.add_argument('-o', '--old', dest='old_analytics', action='store_true')
    parser.add_argument('-d', '--dry', dest='dry', action='store_true')
    parser.add_argument('-r', '--reverse', dest='reverse', action='store_true')
    parser.add_argument('-re', '--removeevent', dest="remove_event")
    parsed = parser.parse_args()
    # Fail fast on inconsistent flag combinations before touching Keen.
    validate_args(parsed)
    return parsed
def validate_args(args):
    """ Go through supplied command line args and determine if you have enough to continue

    :param args: argparse args object, to sift through and figure out if you need more info
    :return: None, just raise errors if it finds something wrong
    """
    if args.dry:
        logger.info('Running analytics on DRY RUN mode! No data will actually be sent to Keen.')
    # The three main operations are mutually exclusive.
    potential_operations = [args.smooth_events, args.transfer_collection, args.old_analytics]
    if len([arg for arg in potential_operations if arg]) > 1:
        raise ValueError('You may only choose one analytic type to run: transfer, smooth, or import old analytics.')
    if args.smooth_events and not (args.start_date and args.end_date):
        raise ValueError('To smooth data, please enter both a start date and end date.')
    if args.start_date and args.end_date:
        if parse(args.start_date) > parse(args.end_date):
            raise ValueError('Please enter an end date that is after the start date.')
    if args.smooth_events and not args.source_collection:
        raise ValueError('Please specify a source collection to smooth data from.')
    if args.transfer_collection and not (args.source_collection and args.destination_collection):
        raise ValueError('To transfer between keen collections, enter both a source and a destination collection.')
    if any([args.start_date, args.end_date]) and not all([args.start_date, args.end_date]):
        raise ValueError('You must provide both a start and an end date if you provide either.')
    if args.remove_event and not args.source_collection:
        # Fixed copy-pasted message ("both a source collection"): only one
        # thing is required here, the source collection.
        raise ValueError('You must provide a source collection to remove an event from.')
def fill_in_event_gaps(collection_name, events):
    """ A method to help fill in gaps between events that might be far apart,
    so that one event happens per day.

    :param collection_name: keen collection events are from
    :param events: events to fill in gaps between
    :return: list of "generated and estimated" events to send that will fill in gaps.
    """
    # Only days backed by real (non-generated) events anchor the gap filling.
    given_days = [parse(event['keen']['timestamp']).date() for event in events if not event.get('generated')]
    given_days.sort()
    # Consecutive day pairs; a pair more than one day apart marks a gap.
    date_chunks = [given_days[x-1:x+1] for x in range(1, len(given_days))]
    events_to_add = []
    if given_days:
        if collection_name == 'addon_snapshot':
            # One event per provider per day: fill each provider's gaps separately.
            all_providers = list(set([event['provider']['name'] for event in events]))
            for provider in all_providers:
                for date_pair in date_chunks:
                    if date_pair[1] - date_pair[0] > datetime.timedelta(1) and date_pair[0] != date_pair[1]:
                        # Seed the gap with the provider's real event on the gap's first day.
                        first_event = [
                            event for event in events if date_from_event_ts(event) == date_pair[0] and event['provider']['name'] == provider and not event.get('generated')
                        ]
                        if first_event:
                            events_to_add += generate_events_between_events(date_pair, first_event[0])
        elif collection_name == 'institution_summary':
            # Same idea, grouped per institution.
            all_instutitions = list(set([event['institution']['name'] for event in events]))
            for institution in all_instutitions:
                for date_pair in date_chunks:
                    if date_pair[1] - date_pair[0] > datetime.timedelta(1) and date_pair[0] != date_pair[1]:
                        first_event = [
                            event for event in events if date_from_event_ts(event) == date_pair[0] and event['institution']['name'] == institution and not event.get('generated')
                        ]
                        if first_event:
                            events_to_add += generate_events_between_events(date_pair, first_event[0])
        else:
            # Other collections hold one event per day globally.
            for date_pair in date_chunks:
                if date_pair[1] - date_pair[0] > datetime.timedelta(1) and date_pair[0] != date_pair[1]:
                    first_event = [event for event in events if date_from_event_ts(event) == date_pair[0] and not event.get('generated')]
                    if first_event:
                        events_to_add += generate_events_between_events(date_pair, first_event[0])
        logger.info('Generated {} events to add to the {} collection.'.format(len(events_to_add), collection_name))
    else:
        logger.info('Could not retrieve events for the date range you provided.')
    return events_to_add
def date_from_event_ts(event):
    """Return the calendar date portion of an event's keen timestamp."""
    return parse(event['keen']['timestamp']).date()
def generate_events_between_events(given_days, first_event):
    """Create one copy of ``first_event`` for every day strictly between the
    first and last entries of ``given_days`` that has no event yet.

    NOTE(review): ``first_event`` is mutated in place -- its keen
    'created_at'/'id' keys are removed and it is tagged 'generated' so later
    passes can distinguish real events from generated ones.

    :param given_days: pair of dates delimiting the gap
    :param first_event: real event whose payload seeds the generated copies
    :return: list of generated events
    """
    first_day = given_days[0]
    last_day = given_days[-1]
    next_day = first_day + datetime.timedelta(1)
    # Strip keen's server-assigned metadata so the clones can be re-posted.
    first_event['keen'].pop('created_at')
    first_event['keen'].pop('id')
    first_event['generated'] = True # Add value to tag generated data
    generated_events = []
    while next_day < last_day:
        new_event = copy.deepcopy(first_event)
        # Midnight UTC timestamp for the synthesized day.
        new_event['keen']['timestamp'] = datetime.datetime(next_day.year, next_day.month, next_day.day).replace(tzinfo=pytz.UTC).isoformat()
        if next_day not in given_days:
            generated_events.append(new_event)
        next_day += datetime.timedelta(1)
    if generated_events:
        logger.info('Generated {} events for the interval {} to {}'.format(
            len(generated_events),
            given_days[0].isoformat(),
            given_days[1].isoformat()
        )
        )
    return generated_events
def get_keen_client():
    """Build a KeenClient from the private keys in website settings.

    :return: an instantiated ``KeenClient``
    :raises ValueError: when any of the four required keys is missing
    """
    keen_project = keen_settings['private'].get('project_id')
    read_key = keen_settings['private'].get('read_key')
    master_key = keen_settings['private'].get('master_key')
    write_key = keen_settings['private'].get('write_key')
    # write_key is passed to the client and needed to add events, but was
    # previously not validated -- a missing write key only surfaced later as
    # an obscure failure when posting. Validate all four up front.
    if keen_project and read_key and master_key and write_key:
        client = KeenClient(
            project_id=keen_project,
            read_key=read_key,
            master_key=master_key,
            write_key=write_key
        )
    else:
        raise ValueError('Cannot connect to Keen clients - all keys not provided.')
    return client
def extract_events_from_keen(client, event_collection, start_date=None, end_date=None):
    """ Get analytics from keen to use as a starting point for smoothing or transferring

    :param client: keen client to use for connection
    :param start_date: datetime object, datetime to start gathering from keen
    :param end_date: datetime object, datetime to stop gathering from keen
    :param event_collection: str, name of the event collection to gather from
    :return: a list of keen events to use in other methods
    """
    # Without both bounds, fall back to an effectively unbounded window.
    timeframe = VERY_LONG_TIMEFRAME
    if start_date and end_date:
        logger.info('Gathering events from the {} collection between {} and {}'.format(event_collection, start_date, end_date))
        timeframe = {"start": start_date.isoformat(), "end": end_date.isoformat()}
    else:
        logger.info('Gathering events from the {} collection using timeframe {}'.format(event_collection, VERY_LONG_TIMEFRAME))
    return client.extraction(event_collection, timeframe=timeframe)
def make_sure_keen_schemas_match(source_collection, destination_collection, keen_client):
    """Check whether two keen collections share an identical schema, a
    precondition for transferring events between them.

    :param source_collection: str, collection that events are stored in now
    :param destination_collection: str, collection to transfer to
    :param keen_client: KeenClient, instantiated for the connection
    :return: bool, True when the two schemas are identical
    """
    schemas = [keen_client.get_collection(name) for name in (source_collection, destination_collection)]
    return schemas[0] == schemas[1]
def transfer_events_to_another_collection(client, source_collection, destination_collection, dry, reverse=False):
    """ Transfer analytics from source collection to the destination collection.
    Will only work if the source and destination have the same schemas attached, will error if they don't

    :param client: KeenClient, client to use to make connection to keen
    :param source_collection: str, keen collection to transfer from
    :param destination_collection: str, keen collection to transfer to
    :param dry: bool, whether or not to make a dry run, aka actually send events to keen
    :param reverse: bool, remove the events from the destination instead of adding them
    :return: None
    """
    schemas_match = make_sure_keen_schemas_match(source_collection, destination_collection, client)
    if not schemas_match:
        raise ValueError('The two provided schemas in keen do not match, you will need to do a bit more work.')
    events_from_source = extract_events_from_keen(client, source_collection)
    # Strip keen's server-assigned metadata so the events can be re-posted.
    for event in events_from_source:
        event['keen'].pop('created_at')
        event['keen'].pop('id')
    if reverse:
        remove_events_from_keen(client, destination_collection, events_from_source, dry)
    else:
        add_events_to_keen(client, destination_collection, events_from_source, dry)
    logger.info(
        'Transferred {} events from the {} collection to the {} collection'.format(
            len(events_from_source),
            source_collection,
            destination_collection
        )
    )
def add_events_to_keen(client, collection, events, dry):
    """Post ``events`` to ``collection`` unless this is a dry run."""
    logger.info('Adding {} events to the {} collection...'.format(len(events), collection))
    if not dry:
        client.add_events({collection: events})
def smooth_events_in_keen(client, source_collection, start_date, end_date, dry, reverse):
    """Fill per-day gaps in ``source_collection`` between the two dates; with
    ``reverse``, delete the events a previous smoothing run generated."""
    base_events = extract_events_from_keen(client, source_collection, start_date, end_date)
    events_to_fill_in = fill_in_event_gaps(source_collection, base_events)
    if reverse:
        remove_events_from_keen(client, source_collection, events_to_fill_in, dry)
    else:
        add_events_to_keen(client, source_collection, events_to_fill_in, dry)
def remove_events_from_keen(client, source_collection, events, dry):
    """Delete each of *events* from *source_collection*, verifying first that the
    stored keen copy matches the local copy before deleting it.

    :param client: KeenClient used for both the verification extraction and the delete
    :param source_collection: str, keen collection to delete events from
    :param events: list of event dicts to remove; NOTE: their keen.timestamp values
        are truncated in place to 10 characters (date only) by the comparison below
    :param dry: bool, when True log what would be deleted but do not delete
    :return: None
    """
    for event in events:
        filters = [{'property_name': 'keen.timestamp', 'operator': 'eq', 'property_value': event['keen']['timestamp']}]
        # test to see if you get back the correct events from keen
        filtered_event = client.extraction(source_collection, filters=filters)
        if filtered_event:
            # Only the first match is compared; the same filters are reused for the delete.
            filtered_event = filtered_event[0]
            # Drop keen's auto-generated metadata so the dicts can compare equal.
            filtered_event['keen'].pop('id')
            filtered_event['keen'].pop('created_at')
            filtered_event['keen']['timestamp'] = filtered_event['keen']['timestamp'][:10]  # ends of timestamps differ
            event['keen']['timestamp'] = event['keen']['timestamp'][:10]
            if event != filtered_event:
                # Mismatch means we might delete real data — skip rather than risk it.
                logger.error('Filtered event not equal to the event you have gathered, not removing...')
            else:
                logger.info('About to delete a generated event from the {} collection from the date {}'.format(
                    source_collection, event['keen']['timestamp']
                ))
                if not dry:
                    client.delete_events(source_collection, filters=filters)
        else:
            logger.info('No filtered event found.')
def import_old_events_from_spreadsheet():
    """Parse the legacy ~/daily_user_counts.csv spreadsheet into keen-ready events.

    Each spreadsheet row is split into up to three events (user, node and dropbox
    addon summaries) based on which mapped columns it contains.

    :return: dict mapping keen collection name -> list of formatted events
    """
    home = os.path.expanduser("~")
    spreadsheet_path = home + '/daily_user_counts.csv'
    # Maps spreadsheet column headers to the flat keys used internally by format_event.
    key_map = {
        'active-users': 'active',
        'logs-gte-11-total': 'depth',
        'number_users': 'total_users',  # really is active - number_users
        'number_projects': 'projects.total',
        'number_projects_public': 'projects.public',
        'number_projects_registered': 'registrations.total',
        'Date': 'timestamp',
        'dropbox-users-enabled': 'enabled',
        'dropbox-users-authorized': 'authorized',
        'dropbox-users-linked': 'linked',
        'profile-edits': 'profile_edited'
    }
    # Read the header row separately, then re-read the whole file with those
    # fieldnames. Because fieldnames are supplied explicitly, the header row
    # re-appears as the first data row; events[3:] below accounts for that.
    # (The original code leaked the second file handle by passing a bare
    # open() to DictReader; both opens are now context-managed.)
    with open(spreadsheet_path) as header_file:
        col_names = csv.reader(header_file, delimiter=',').next()
    events = []
    with open(spreadsheet_path, 'rb') as csvfile:
        dictReader = csv.DictReader(csvfile, fieldnames=col_names, delimiter=',')
        for row in dictReader:
            event = {}
            for key in row:
                equiv_key = key_map.get(key, None)
                if equiv_key:
                    event[equiv_key] = row[key]
            events.append(event)
    user_summary_cols = ['active', 'depth', 'total_users', 'timestamp', 'profile_edited']
    node_summary_cols = ['registrations.total', 'projects.total', 'projects.public', 'timestamp']
    addon_summary_cols = ['enabled', 'authorized', 'linked', 'timestamp']
    user_events = []
    node_events = []
    addon_events = []
    for event in events[3:]:  # The first few rows have blank and/or bad data because they're extra headers
        node_event = {}
        user_event = {}
        addon_event = {}
        # Route each flat key into the summary bucket(s) it belongs to.
        for key, value in event.iteritems():
            if key in node_summary_cols:
                node_event[key] = value
            if key in user_summary_cols:
                user_event[key] = value
            if key in addon_summary_cols:
                addon_event[key] = value
        formatted_user_event = format_event(user_event, analytics_type='user')
        formatted_node_event = format_event(node_event, analytics_type='node')
        formatted_addon_event = format_event(addon_event, analytics_type='addon')
        # format_event returns None for rows with no real data; skip those.
        if formatted_node_event:
            node_events.append(formatted_node_event)
        if formatted_user_event:
            user_events.append(formatted_user_event)
        if formatted_addon_event:
            addon_events.append(formatted_addon_event)
    logger.info(
        'Gathered {} old user events, {} old node events and {} old dropbox addon events for keen'.format(
            len(user_events),
            len(node_events),
            len(addon_events)
        )
    )
    return {'user_summary': user_events, 'node_summary': node_events, 'addon_snapshot': addon_events}
def comma_int(value):
    """Convert a comma-grouped numeric string such as '1,234' to an int.

    Returns None (implicitly meaning "no data") for falsy values and for the
    spreadsheet's literal 'MISSING' placeholder.
    """
    if not value or value == 'MISSING':
        return None
    return int(value.replace(',', ''))
def format_event(event, analytics_type):
    """Convert a flat spreadsheet-derived dict into a nested keen event.

    :param event: dict of flat keys ('active', 'projects.total', ...) whose values
        are raw spreadsheet strings (possibly '' or 'MISSING')
    :param analytics_type: one of 'user', 'node' or 'addon'
    :return: nested event dict ready for keen, or None when the row carried no
        data beyond the timestamp
    """
    user_event_template = {
        "status": {},
        "keen": {}
    }
    node_event_template = {
        "projects": {},
        "registered_projects": {},
        "keen": {}
    }
    addon_event_template = {
        "keen": {},
        "users": {}
    }
    if analytics_type == 'user':
        template_to_use = user_event_template
        # comma_int returns None for blank or 'MISSING' cells, so a single
        # None check replaces the separate truthiness/'MISSING' guards.
        active = comma_int(event['active'])
        total_users = comma_int(event['total_users'])
        profile_edited = comma_int(event['profile_edited'])
        if active is not None:
            template_to_use['status']['active'] = active
        # Guard both operands: a 'MISSING' cell previously passed the truthiness
        # check and crashed with TypeError on int - None.
        if total_users is not None and active is not None:
            template_to_use['status']['unconfirmed'] = total_users - active
        if profile_edited is not None:
            template_to_use['status']['profile_edited'] = profile_edited
    elif analytics_type == 'node':
        template_to_use = node_event_template
        projects_total = comma_int(event['projects.total'])
        projects_public = comma_int(event['projects.public'])
        registrations_total = comma_int(event['registrations.total'])
        if projects_total is not None:
            template_to_use['projects']['total'] = projects_total
        if projects_public is not None:
            template_to_use['projects']['public'] = projects_public
        if registrations_total is not None:
            template_to_use['registered_projects']['total'] = registrations_total
        # As above: 'MISSING' cells used to store None and then crash on subtraction.
        if projects_total is not None and projects_public is not None:
            template_to_use['projects']['private'] = projects_total - projects_public
    elif analytics_type == 'addon':
        template_to_use = addon_event_template
        enabled = comma_int(event['enabled'])
        authorized = comma_int(event['authorized'])
        linked = comma_int(event['linked'])
        if enabled is not None:
            template_to_use['users']['enabled'] = enabled
        if authorized is not None:
            template_to_use['users']['authorized'] = authorized
        if linked is not None:
            template_to_use['users']['linked'] = linked
        # Only tag the provider when at least one real value was present;
        # previously all-'MISSING' rows still got a provider (and thus an event).
        if enabled is not None or authorized is not None or linked is not None:
            template_to_use["provider"] = {"name": "dropbox"}
    else:
        # Unknown analytics_type: previously fell through with template_to_use=None
        # and crashed on the timestamp assignment below.
        return None
    # Pin imported events to noon UTC so days compare consistently.
    template_to_use['keen']['timestamp'] = parse(event['timestamp']).replace(hour=12, tzinfo=pytz.UTC).isoformat()
    template_to_use['imported'] = True
    # Drop empty sub-dicts and only keep events with more than the auto-added
    # keen timestamp and 'imported' flag.
    formatted_event = {key: value for key, value in template_to_use.items() if value}
    if len(formatted_event) > 2:
        # Return the cleaned dict; the original computed formatted_event for the
        # threshold check but returned the uncleaned template, leaking empty
        # sub-dicts into keen.
        return formatted_event
def remove_event_from_keen(client, source_collection, event_id):
    """Delete the single event whose keen id equals *event_id* from *source_collection*."""
    id_filter = {'property_name': 'keen.id', 'operator': 'eq', 'property_value': event_id}
    client.delete_events(source_collection, filters=[id_filter])
def parse_and_send_old_events_to_keen(client, dry, reverse):
    """Load the legacy spreadsheet analytics and push each collection's events
    into keen (or, with reverse=True, remove them)."""
    imported = import_old_events_from_spreadsheet()
    apply_change = remove_events_from_keen if reverse else add_events_to_keen
    for collection_name, collection_events in imported.iteritems():
        apply_change(client, collection_name, collection_events, dry)
def main():
    """ Entry point for moving around and adjusting analytics gotten from keen and sending them back to keen.
    Usage:
    * Transfer all events from the 'institution_analytics' to the 'institution_summary' collection:
    `python -m scripts.analytics.migrate_analytics -d -t -sc institution_analytics -dc institution_summary`
    * Fill in the gaps in analytics for the 'addon_snapshot' collection between 2016-11-01 and 2016-11-15:
    `python -m scripts.analytics.migrate_analytics -d -sm -sc addon_snapshot -s 2016-11-01 -e 2016-11-15`
    * Reverse the above action by adding -r:
    `python -m scripts.analytics.migrate_analytics -d -sm -sc addon_snapshot -s 2016-11-01 -e 2016-11-15 -r`
    * Parse old analytics from the old analytics CSV stored on your filesystem:
    `python -m scripts.analytics.migrate_analytics -o -d`
    """
    args = parse_args()
    keen_client = get_keen_client()
    dry_run = args.dry
    reverse_run = args.reverse
    # Single-event removal can be combined with any of the bulk actions below.
    if args.remove_event:
        remove_event_from_keen(keen_client, args.source_collection, args.remove_event)
    if args.smooth_events:
        smooth_events_in_keen(
            keen_client,
            args.source_collection,
            parse(args.start_date),
            parse(args.end_date),
            dry_run,
            reverse_run,
        )
    elif args.transfer_collection:
        transfer_events_to_another_collection(
            keen_client,
            args.source_collection,
            args.destination_collection,
            dry_run,
            reverse_run,
        )
    elif args.old_analytics:
        parse_and_send_old_events_to_keen(keen_client, dry_run, reverse_run)
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
| {
"repo_name": "hmoco/osf.io",
"path": "scripts/analytics/migrate_analytics.py",
"copies": "9",
"size": "20127",
"license": "apache-2.0",
"hash": -7665635534734873000,
"line_mean": 41.5517970402,
"line_max": 177,
"alpha_frac": 0.6412778854,
"autogenerated": false,
"ratio": 3.9310546875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.90723325729,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.