text string | size int64 | token_count int64 |
|---|---|---|
# Copyright 2014 Google Inc. All Rights Reserved.
"""Commands for reading and manipulating HTTP health checks."""
from googlecloudsdk.calliope import base
class HttpHealthChecks(base.Group):
  """Read and manipulate HTTP health checks for load balanced instances."""

  # Same mapping the module previously attached after the class statement;
  # declaring it as a class attribute is equivalent for attribute lookup.
  detailed_help = {
      'brief': ('Read and manipulate HTTP health checks for load balanced '
                'instances'),
  }
| 410 | 116 |
#!/usr/bin/python
#
# ============================================================================
#
# 06.02.18 <-- Date of Last Modification.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ----------------------------------------------------------------------------
#
# BASIC TASK WRAPPER
#
# Command-line: N/A
#
# Copyright (C) Eugene Krissinel, Andrey Lebedev 2017-2018
#
# ============================================================================
#
# Function list:
#
# TaskDriver::
# report_page_id ()
# setReportWidget ( widgetId )
# resetReportPage ()
# log_page_id ()
# err_page_id ()
# file_stdout_path ()
# file_stderr_path ()
# file_stdin_path ()
# reportDir ()
# outputDir ()
# inputDir ()
# reportDocumentName()
# refmac_section ()
# refmac_report ()
# importDir ()
# import_summary_id ()
# python native imports
import os
import sys
import shutil
import traceback
# ccp4-python imports
import pyrvapi
import pyrvapi_ext.parsers
# pycofe imports
from pycofe.dtypes import dtype_template, dtype_xyz, dtype_structure, databox
from pycofe.dtypes import dtype_ensemble, dtype_hkl, dtype_ligand
from pycofe.dtypes import dtype_sequence
from pycofe.proc import edmap, import_merged
from pycofe.varut import signal, jsonut, command
# ============================================================================
# driver class

class TaskDriver(object):
    """Base wrapper for running a jsCoFE task job: opens the job's log
    files, reads task metadata and input data, and builds the RVAPI HTML
    report; concrete task drivers derive from this class."""

    # ========================================================================
    # common definitions

    rvrow = 0 # current report row
    _rvrow_bak = 0 # saved report row (see setReportWidget/resetReportPage)
    _report_widget_id = "report_page"  # widget currently receiving report output
    _scriptNo = 0 # input script counter (used in stdin script file names)
def report_page_id (self): return self._report_widget_id  # current report widget id

def setReportWidget ( self,widgetId,row=0 ):
    # Redirect report output into widget 'widgetId', starting at 'row'.
    # The current report row is saved so that resetReportPage() can
    # restore it; the saved row is also returned to the caller.
    self._rvrow_bak = self.rvrow
    self.rvrow = row
    self._report_widget_id = widgetId
    return self._rvrow_bak
def resetReportPage ( self,row=-1 ):
    """Return report output to the main report page.

    A negative 'row' restores the row saved by setReportWidget();
    otherwise output continues at the given row. Returns the report
    row that was current before the reset.
    """
    previous_row = self.rvrow
    self.rvrow = self._rvrow_bak if row < 0 else row
    self._report_widget_id = "report_page"
    return previous_row
# --- fixed widget ids and reserved file/directory names ----------------

def log_page_id (self): return "log_page"   # log-file tab id
def err_page_id (self): return "err_page"   # errors tab id
def traceback_page_id (self): return "python_exception"   # traceback tab id
def file_stdout_path (self): return "_stdout.log" # reserved name, used in NC
def file_stderr_path (self): return "_stderr.log"  # job's stderr log
def file_stdin_path (self): return "_stdin." + str(self._scriptNo) + ".script"  # per-run stdin script
def reportDir (self): return "report" # in current directory ( job_dir )
def outputDir (self): return "output" # in current directory ( job_dir )
def inputDir (self): return "input" # in current directory ( job_dir )
def reportDocumentName(self): return "rvapi_document"   # stored RVAPI document file
def refmac_section (self): return "refmac_section"   # id stem for refmac sections
def refmac_report (self): return "refmac_report"   # id stem for refmac panels
def importDir (self): return "uploads" # in current directory ( job_dir )
def import_summary_id (self): return "import_summary_id" # summary table id
# ========================================================================
# class variables

exeType = None   # execution type from command line (e.g. "SGE")
job_dir = None   # job directory, made current in __init__()
job_id = None   # job id from command line (string)

# create output data list structure
# NOTE(review): created once at class-definition time, so this DataBox
# appears to be shared by all TaskDriver instances unless rebound —
# confirm this is intended.
outputDataBox = databox.DataBox()

# standard output file handlers (opened in __init__ / open_stdin)
file_stdout = None
file_stderr = None
file_stdin = None

# task and input data
task = None   # jsonut object read from job.meta
input_data = None   # databox read from inputDir()
outputFName = ""   # base name for output files

# report parsers
log_parser = None
generic_parser_summary = {}

# data register counter
dataSerialNo = 0

summary_row = 0 # current row in import summary table
summary_row_0 = 0 # reference row in import summary table
widget_no = 0 # widget Id unificator
navTreeId = "" # navigation tree Id

# ========================================================================
# cofe config
# This needs to be obtained from the jscofe config-file.
# maintainerEmail = None
maintainerEmail = "my.name@gmail.com"
# ========================================================================
# initialisation
def __init__ ( self,title_str,module_name,options={}, args=None ):
    """Initialise the task driver.

    Parses command-line arguments, makes the job directory current,
    opens the job's stdout/stderr logs, reads task metadata (job.meta)
    and input data, and initialises the RVAPI report document with
    either a tab or a navigation-tree layout.

    Arguments:
      title_str    report title; if empty, self.task.title is used
      module_name  task module name, used in error messages only
      options      optional dictionary:
                     { report_page : { show : True, name : "Report"   },
                       log_page    : { show : True, name : "Log file" },
                       err_page    : { show : True, name : "Errors"   },
                       nav_tree    : { id : treeId, name : "Workflow" }
                       // will do nav tree instead of tabs if given
                     }
      args         optional replacement for sys.argv[1:] to allow this
                   class to be called from within other Python programs
                   (such as tests)

    Raises signal.TaskReadFailure if job.meta cannot be read.
    """

    def getOption(name1,name2,default):
        # safe two-level lookup in the options dictionary
        try:
            return options[name1][name2]
        except:
            return default

    # clear signal file; this is mostly for command-line debugging, the signal
    # should be cleared in JS layer before this script is invoked
    signal.CofeSignal.clear()

    # get command line arguments and change to job directory; keep all file
    # names relative to job directory, this is a must
    if args is None:
        args = sys.argv[1:]
    self.exeType = args[0]
    self.job_dir = args[1]
    self.job_id  = args[2]

    # set scratch area if necessary
    if self.exeType=="SGE" and "TMP" in os.environ:
        os.environ["CCP4_SCR"] = os.environ["TMP"]
    if "CCP4_SCR" in os.environ:
        os.environ["TMPDIR"] = os.environ["CCP4_SCR"]

    # always make job directory current
    os.chdir ( self.job_dir )

    # initialise execution logs
    self.file_stdout = open ( self.file_stdout_path(),'w' )
    self.file_stderr = open ( self.file_stderr_path(),'w' )

    # fetch task data
    self.task = jsonut.readjObject ( "job.meta" )
    if self.task is None:
        self.file_stdout.write ( "\n\n *** task read failed in '" + module_name + "'\n\n" )
        self.file_stderr.write ( "\n\n *** task read failed in '" + module_name + "'\n\n" )
        # bug fix: parenthesised print is valid in both Python 2 and 3;
        # the original bare print statement breaks under Python 3
        print ( " task read failed in '" + module_name + "'" )
        raise signal.TaskReadFailure()

    self.input_data = databox.readDataBox ( self.inputDir() )

    if self.task.uoname:
        self.outputFName = self.task.uoname
    else:
        self.outputFName = self.task.oname

    # print title in standard logs
    if title_str:
        tstr = title_str
    else:
        tstr = self.task.title
    self.file_stdout.write ( "[" + self.job_id.zfill(4) + "] " + tstr.upper() + "\n\n" )
    self.file_stderr.write ( " " )

    # initialise HTML report document; note that we use absolute path for
    # the report directory, which is necessary for passing RVAPI document
    # to applications via creation of the rvapi_document file with
    # pyrvapi.rvapi_store_document2(..)

    # make a tree or tab layout
    if "nav_tree" in options:
        pyrvapi.rvapi_init_document (
                    "jscofe_report",   # document Id
                    os.path.join(os.getcwd(),self.reportDir()), # report directory (reserved)
                    "Title",           # title (immaterial)
                    1,                 # HTML report to be produced
                    0,                 # no tabs: navigation tree is used
                    "jsrview",         # where to look for js support (reserved)
                    None,None,
                    "task.tsk",
                    "i2.xml" )
        self.navTreeId = options["nav_tree"]["id"]
        pyrvapi.rvapi_add_tree_widget (
                    self.navTreeId,              # tree widget reference (Id)
                    options["nav_tree"]["name"], # tree widget title
                    "body",                      # reference to widget holder (grid)
                    0,                           # holder row
                    0,                           # holder column
                    1,                           # row span
                    1                            # column span
                )
        pyrvapi.rvapi_set_tab_proxy ( self.navTreeId,"" )
    else:
        pyrvapi.rvapi_init_document (
                    "jscofe_report",   # document Id
                    os.path.join(os.getcwd(),self.reportDir()), # report directory (reserved)
                    "Title",           # title (immaterial)
                    1,                 # HTML report to be produced
                    4,                 # report will have tabs
                    "jsrview",         # where to look for js support (reserved)
                    None,None,
                    "task.tsk",
                    "i2.xml" )

    self.rvrow = 0
    focus = True
    if getOption("report_page","show",True):
        pyrvapi.rvapi_add_tab ( self.report_page_id(),
                                getOption("report_page","name","Report"),focus )
        self.putTitle ( tstr )
        focus = False

    if getOption("log_page","show",True):
        if self.navTreeId:
            pyrvapi.rvapi_set_tab_proxy ( self.navTreeId,self.report_page_id() )
        pyrvapi.rvapi_add_tab ( self.log_page_id(),
                                getOption("log_page","name","Log file"),focus )
        pyrvapi.rvapi_append_content ( os.path.join("..",self.file_stdout_path()+'?capsize'),
                                       True,self.log_page_id() )
        focus = False

    if getOption("err_page","show",True):
        if self.navTreeId:
            pyrvapi.rvapi_set_tab_proxy ( self.navTreeId,self.report_page_id() )
        pyrvapi.rvapi_add_tab ( self.err_page_id(),
                                getOption("err_page","name","Errors"),focus )
        pyrvapi.rvapi_append_content ( os.path.join("..",self.file_stderr_path()),
                                       True,self.err_page_id() )

    if self.navTreeId:
        pyrvapi.rvapi_set_tab_proxy ( self.navTreeId,"" )

    pyrvapi.rvapi_flush()
    return
# ============================================================================
def getOFName ( self,extention,modifier=-1 ):
    """Return the output file name with the given extension; a
    non-negative modifier is inserted as a zero-padded 3-digit
    serial number before the extension."""
    if modifier < 0:
        return self.outputFName + extention
    return self.outputFName + "." + str(modifier).zfill(3) + extention
# output file names for the standard file types, all based on getOFName()

def getCIFOFName ( self,modifier=-1 ):
    return self.getOFName ( ".cif",modifier )

def getXYZOFName ( self,modifier=-1 ):
    return self.getOFName ( ".pdb",modifier )

def getMTZOFName ( self,modifier=-1 ):
    return self.getOFName ( ".mtz",modifier )

def getMapOFName ( self,modifier=-1 ):
    return self.getOFName ( ".map",modifier )

def getDMapOFName ( self,modifier=-1 ):
    return self.getOFName ( ".diff.map",modifier )
# ============================================================================

def getWidgetId ( self,wid ):
    """Return a unique widget id made of 'wid' plus a serial suffix,
    and advance the widget counter."""
    unique_id = "%s_%d" % ( wid,self.widget_no )
    self.widget_no += 1
    return unique_id
# ============================================================================
def checkPDB(self):
    # Verify that the PDB archive location is configured (PDB_DIR in the
    # environment).  If not, an error message is written into the report
    # and the job is terminated via self.fail() (defined elsewhere in
    # this class).
    if "PDB_DIR" not in os.environ:
        pyrvapi.rvapi_set_text (
            "<b>Error: jsCoFE is not configured to work with PDB archive.</b><p>" + \
            "Please look for support.",
            self.report_page_id(),self.rvrow,0,1,1 )
        self.fail ( " *** Error: jsCofe is not configured to work with PDB archive \n" + \
                    "     Please look for support\n","No PDB configured" )
    return
# ============================================================================
def insertReportTab ( self,title_str,focus=True ):
    """Insert a new report tab immediately before the log tab, reset the
    report row counter and put the title into the new tab."""
    pyrvapi.rvapi_insert_tab ( self.report_page_id(),title_str,
                               self.log_page_id(),focus )
    self.rvrow = 0
    self.putTitle ( title_str )
    pyrvapi.rvapi_flush()
    return
def putMessage ( self,message_str ):
    """Write message_str into the next row of the current report widget."""
    pyrvapi.rvapi_set_text ( message_str,self.report_page_id(),
                             self.rvrow,0,1,1 )
    self.rvrow += 1
    return
def putMessage1 ( self,pageId,message_str,row,colSpan=1 ):
    """Write message_str at (row,0) of page 'pageId', spanning
    colSpan columns."""
    pyrvapi.rvapi_set_text ( message_str,pageId,row,0,1,colSpan )
    return
def putMessageLF ( self,message_str ):
    """Write message_str in enlarged (120%) font at the next report row."""
    html = "<font style='font-size:120%;'>" + message_str + "</font>"
    pyrvapi.rvapi_set_text ( html,self.report_page_id(),self.rvrow,0,1,1 )
    self.rvrow += 1
    return
def putWaitMessageLF ( self,message_str ):
    """Display message_str (enlarged font) with an animated activity bar
    at the current report row; the row counter is intentionally not
    advanced so the message can be overwritten later."""
    grid_id = self.getWidgetId ( "wait_message" )
    pyrvapi.rvapi_add_grid ( grid_id,False,self.report_page_id(),self.rvrow,0,1,1 )
    pyrvapi.rvapi_set_text ( "<font style='font-size:120%;'>" + message_str +
                             "</font>",grid_id,0,0,1,1 )
    pyrvapi.rvapi_set_text ( "<div class='activity_bar'/>",grid_id,0,1,1,1 )
    return
def putTitle ( self,title_str ):
    """Put a job-numbered title into the report, preceded by a spacer
    row when the report already has content."""
    if self.rvrow > 0:
        pyrvapi.rvapi_set_text ( " ",self.report_page_id(),self.rvrow,0,1,1 )
        self.rvrow += 1
    self.putTitle1 ( self.report_page_id(),title_str,self.rvrow,1 )
    self.rvrow += 1
    return
def insertTab ( self,tabId,tabName,content,focus=False ):
    """Insert a tab before the log tab; when 'content' is non-empty it
    is loaded into the new tab."""
    pyrvapi.rvapi_insert_tab ( tabId,tabName,self.log_page_id(),focus )
    if content:
        pyrvapi.rvapi_append_content ( content,True,tabId )
    return
def flush(self):
    # flush pending report updates (regenerates the HTML report files)
    pyrvapi.rvapi_flush()
    return
def putTitle1 ( self,pageId,title_str,row,colSpan=1 ):
    """Put a job-numbered <h2> title into page 'pageId' at 'row'."""
    title_html = "<h2>[" + self.job_id.zfill(4) + "] " + title_str + "</h2>"
    pyrvapi.rvapi_set_text ( title_html,pageId,row,0,1,colSpan )
    return
# ============================================================================
def putPanel ( self,panel_id ):
    # add an empty panel at the next report row
    pyrvapi.rvapi_add_panel ( panel_id,self.report_page_id(),self.rvrow,0,1,1 )
    self.rvrow += 1
    return

def putFieldset ( self,fset_id,title ):
    # add a titled fieldset (framed area) at the next report row
    pyrvapi.rvapi_add_fieldset ( fset_id,title,self.report_page_id(),self.rvrow,0,1,1 )
    self.rvrow += 1
    return

def putSection ( self,sec_id,sectionName,openState_bool=False ):
    # add a collapsible section at the next report row; openState_bool
    # selects whether it is initially expanded
    pyrvapi.rvapi_add_section ( sec_id,sectionName,self.report_page_id(),
                                self.rvrow,0,1,1,openState_bool )
    self.rvrow += 1
    return
# ============================================================================
# define basic HTML report functions
def putSummaryLine ( self,line0,line1,line2 ):
    """Start a new entry (three cells) in the import summary table and
    remember its row as the anchor for addSummaryLine() continuations."""
    if self.import_summary_id():
        for col,cell_text in enumerate ( [line0,line1,line2] ):
            pyrvapi.rvapi_put_table_string ( self.import_summary_id(),
                                             cell_text,self.summary_row,col )
        self.summary_row_0 = self.summary_row
        self.summary_row += 1
    return
def addSummaryLine ( self,line1,line2 ):
    # Append a continuation line (two cells) to the summary entry started
    # by putSummaryLine(), then merge the first column of the entry
    # vertically over all of its rows.
    if self.import_summary_id():
        pyrvapi.rvapi_put_table_string ( self.import_summary_id(),line1,self.summary_row,0 )
        pyrvapi.rvapi_put_table_string ( self.import_summary_id(),line2,self.summary_row,1 )
        self.summary_row += 1
        pyrvapi.rvapi_shape_table_cell ( self.import_summary_id(),self.summary_row_0,0,"","","",
                                         self.summary_row-self.summary_row_0,1 );
    return
def putSummaryLine_red ( self,line0,line1,line2 ):
    """Like putSummaryLine(), but the three cells are rendered in maroon
    (used for error/warning entries in the import summary table)."""
    if self.import_summary_id():
        table_id = self.import_summary_id()
        for col,cell_text in enumerate ( [line0,line1,line2] ):
            pyrvapi.rvapi_put_table_string ( table_id,cell_text,
                                             self.summary_row,col )
        for col in range(3):
            pyrvapi.rvapi_shape_table_cell ( table_id,self.summary_row,col,"",
                                             "text-align:left;color:maroon;","",1,1 )
        self.summary_row += 1
    return
def putTable ( self,tableId,title_str,holderId,row,mode=100 ):
    """Create a left-aligned, blue-styled table in widget 'holderId'."""
    pyrvapi.rvapi_add_table ( tableId,title_str,holderId,row,0,1,1,mode )
    pyrvapi.rvapi_set_table_style ( tableId,"table-blue","text-align:left;" )
    return
def setTableHorzHeaders ( self,tableId,header_list,tooltip_list ):
    """Set the horizontal headers (with matching tooltips) of table
    'tableId', one per column."""
    for col,header in enumerate(header_list):
        pyrvapi.rvapi_put_horz_theader ( tableId,header,
                                         tooltip_list[col],col )
    return
def putTableLine ( self,tableId,header,tooltip,line,row ):
    # Add one row to a keyed table: a vertical header with a tooltip and,
    # when 'line' is non-empty, a left-aligned courier-styled value cell.
    # Returns the next free row number.
    pyrvapi.rvapi_put_vert_theader ( tableId,header,tooltip,row )
    if line:
        pyrvapi.rvapi_put_table_string ( tableId,line,row,0 )
        pyrvapi.rvapi_shape_table_cell ( tableId,row,0,"",
                        "text-align:left;width:100%;white-space:nowrap;" + \
                        "font-family:\"Courier\";text-decoration:none;" + \
                        "font-weight:normal;font-style:normal;width:auto;",
                        "",1,1 );
    return row+1
# ============================================================================
# stdin script handling: open_stdin() + write_stdin() compose a keyword
# script, which runApp() then feeds to the application's standard input

def open_stdin ( self ):
    # open a fresh stdin script ( "_stdin.<N>.script" )
    self.file_stdin = open ( self.file_stdin_path(),"w" )
    return

def write_stdin ( self,S ):
    # append string S to the current stdin script
    self.file_stdin.write ( S )
    return

def close_stdin ( self ):
    # close the stdin script before running the application
    self.file_stdin.close()
    return
# ============================================================================
def writeKWParameter ( self,item ):
    """Write a visible task parameter as a 'KEYWORD value' line into the
    open stdin script.  Optional numeric types (integer_/real_) are
    skipped when empty; checkbox values are mapped through
    item.translate[0/1]."""
    if not item.visible:
        return
    ptype = item.type
    if ptype in ("integer","real"):
        self.file_stdin.write ( item.keyword + " " + str(item.value) + "\n" )
    elif ptype in ("integer_","real_") and item.value != "":
        self.file_stdin.write ( item.keyword + " " + str(item.value) + "\n" )
    elif ptype == "combobox":
        self.file_stdin.write ( item.keyword + " " + item.value + "\n" )
    elif ptype == "checkbox":
        translated = item.translate[1] if item.value else item.translate[0]
        self.file_stdin.write ( item.keyword + " " + translated + "\n" )
    return
def putKWParameter ( self,item ):
    """Return a 'KEYWORD [value]' script line for a visible parameter;
    an empty string for invisible items and unticked checkboxes."""
    if not item.visible:
        return ""
    if item.type == "checkbox":
        return ( item.keyword + "\n" ) if item.value else ""
    return item.keyword + " " + str(item.value) + "\n"
def getKWParameter ( self,keyword,item ):
    """Return a command-line fragment ' keyword[ value]' for a visible
    parameter, or an empty string.  Keywords ending in '=' or '::' are
    joined to the value without a separating space."""
    if not item.visible:
        return ""
    if item.type == "checkbox":
        return ( " " + keyword ) if item.value else ""
    v = str(item.value)
    if not v:
        return ""
    if keyword.endswith(("=","::")):
        return " " + keyword + v
    return " " + keyword + " " + v
def getKWItem ( self,item ):
    """Return a command-line fragment ' keyword[value]' for item, or an
    empty string.  Checkboxes with an item.translate pair emit the
    translated value; a value of '_blank_' is treated as empty."""
    if not item.visible:
        return ""
    if item.type == "checkbox":
        if hasattr(item,'translate'):
            translated = item.translate[1] if item.value else item.translate[0]
            return " " + item.keyword + str(translated)
        return ( " " + item.keyword ) if item.value else ""
    v = str(item.value)
    if v and v != "_blank_":
        if item.keyword.endswith(("=","::")):
            return " " + item.keyword + v
        return " " + item.keyword + " " + v
    return ""
def getParameter ( self,item,checkVisible=True ):
    """Return the parameter value as a string; returns '' for invisible
    items unless checkVisible is False."""
    # the original carried a dead triple-quoted block branching on
    # item.type -- every branch returned str(item.value), so it is removed
    if item.visible or not checkVisible:
        return str(item.value)
    return ""
# ============================================================================
def makeClass ( self,dict ):
    # Convert a dictionary/jsonut object into its data class via databox.
    # NOTE(review): the parameter name shadows the builtin 'dict'; kept
    # unchanged for interface compatibility with keyword callers.
    return databox.make_class ( dict )
# ============================================================================
def unsetLogParser ( self ):
    """Detach the current log parser, flushing stdout and the report."""
    self.file_stdout.flush()
    self.log_parser = None
    pyrvapi.rvapi_flush()
    return
def setGenericLogParser ( self,panel_id,split_sections_bool,graphTables=False ):
    """Create a report panel and attach a generic pyrvapi log parser to
    it; parser summaries accumulate in self.generic_parser_summary."""
    self.putPanel ( panel_id )
    self.log_parser = pyrvapi_ext.parsers.generic_parser (
                                panel_id,split_sections_bool,
                                summary = self.generic_parser_summary,
                                graph_tables = graphTables )
    pyrvapi.rvapi_flush()
    return
def setMolrepLogParser ( self,panel_id ):
    """Create a report panel and attach the molrep-specific log parser."""
    self.putPanel ( panel_id )
    self.log_parser = pyrvapi_ext.parsers.molrep_parser ( panel_id )
    pyrvapi.rvapi_flush()
    return
# ============================================================================
def stampFileName ( self,serNo,fileName ):
    # prefix fileName with job id and serial number (see dtype_template)
    return dtype_template.makeFileName ( self.job_id,serNo,fileName )

def makeDataId ( self,serNo ):
    # compose a data id from job id and serial number (see dtype_template)
    return dtype_template.makeDataId ( self.job_id,serNo )
# ============================================================================
def storeReportDocument(self,meta_str):
    # Store the RVAPI document (with optional metadata string) so that
    # external applications can continue writing into this report.
    if meta_str:
        pyrvapi.rvapi_put_meta ( meta_str )
    pyrvapi.rvapi_store_document2 ( self.reportDocumentName() )
    return

def restoreReportDocument(self):
    # Restore the RVAPI document stored by storeReportDocument();
    # returns the metadata string put there earlier.
    pyrvapi.rvapi_restore_document2 ( self.reportDocumentName() )
    return pyrvapi.rvapi_get_meta()
# ============================================================================
def makeFullASUSequenceFile ( self,seq_list,title,fileName ):
    """Write a single sequence file with every sequence in seq_list
    repeated 'ncopies' times (full asymmetric unit contents)."""
    combseq = ""
    for s in seq_list:
        seqstring = self.makeClass(s).getSequence ( self.inputDir() )
        combseq += seqstring * s.ncopies
    dtype_sequence.writeSeqFile ( fileName,title,combseq )
    return
# ============================================================================
def runApp ( self,appName,cmd,quitOnError=True ):
    # Run application 'appName' with argument list 'cmd' in the job
    # directory.  If a script was prepared via open_stdin()/write_stdin(),
    # it is fed to the application's standard input and the script counter
    # is advanced.  Output goes to the job logs and, when set, through
    # self.log_parser.  Raises signal.JobFailure when the application
    # reports an error and quitOnError is True; returns the return-code
    # object from command.call().
    input_script = None
    if self.file_stdin:
        input_script = self.file_stdin_path()
        self._scriptNo += 1
    rc = command.call ( appName,cmd,"./",input_script,
                        self.file_stdout,self.file_stderr,self.log_parser )
    self.file_stdin = None   # next run needs a fresh open_stdin()
    if rc.msg and quitOnError:
        raise signal.JobFailure ( rc.msg )
    return rc
# ============================================================================
def calcEDMap ( self,xyzPath,hklData,libPath,filePrefix ):
    # Calculate electron density maps for model 'xyzPath' against
    # reflection data 'hklData' (via edmap/refmac); returns the list of
    # produced file paths [ pdb, mtz, map, diff-map ].
    edmap.calcEDMap ( xyzPath,os.path.join(self.inputDir(),hklData.files[0]),
                      libPath,hklData.dataset,filePrefix,self.job_dir,
                      self.file_stdout,self.file_stderr,self.log_parser )
    return [ filePrefix + edmap.file_pdb (),
             filePrefix + edmap.file_mtz (),
             filePrefix + edmap.file_map (),
             filePrefix + edmap.file_dmap() ]

def calcAnomEDMap ( self,xyzPath,hklData,anom_form,filePrefix ):
    # Same as calcEDMap() but for anomalous difference maps.
    edmap.calcAnomEDMap ( xyzPath,os.path.join(self.inputDir(),hklData.files[0]),
                          hklData.dataset,anom_form,filePrefix,self.job_dir,
                          self.file_stdout,self.file_stderr,self.log_parser )
    return [ filePrefix + edmap.file_pdb(),
             filePrefix + edmap.file_mtz(),
             filePrefix + edmap.file_map(),
             filePrefix + edmap.file_dmap() ]

def calcCCP4Maps ( self,mtzPath,filePrefix,source_key="refmac" ):
    # Convert map coefficients in 'mtzPath' into CCP4 map files;
    # returns the produced [ map, diff-map ] file paths.
    edmap.calcCCP4Maps ( mtzPath,filePrefix,self.job_dir,
                         self.file_stdout,self.file_stderr,
                         source_key,self.log_parser )
    return [ filePrefix + edmap.file_map(),
             filePrefix + edmap.file_dmap() ]
def finaliseStructure ( self,xyzPath,name_pattern,hkl,libPath,associated_data_list,
                             subtype,openState_bool=False,
                             title="Output Structure" ):
    # Calculate electron density with Refmac for model 'xyzPath',
    # register the resulting structure in the output databox and display
    # it in the report.
    #
    # subtype = 0: copy subtype from associated data
    #         = 1: set MR subtype
    #         = 2: set EP subtype
    #
    # Returns the registered structure, or None when 'xyzPath' does not
    # exist or registration failed.
    structure = None
    if os.path.isfile(xyzPath):
        sec_id = self.refmac_section() + "_" + str(self.widget_no)
        self.putSection ( sec_id,"Electron Density Calculations with Refmac",
                          openState_bool )
        panel_id = self.refmac_report() + "_" + str(self.widget_no)
        pyrvapi.rvapi_add_panel ( panel_id,sec_id,0,0,1,1 )
        # parse refmac output into the report panel
        self.log_parser = pyrvapi_ext.parsers.generic_parser (
                                    panel_id,False,
                                    summary=self.generic_parser_summary,
                                    graph_tables=False )
        fnames = self.calcEDMap ( xyzPath,hkl,libPath,name_pattern )
        # Register output data. This moves needful files into output directory
        # and puts the corresponding metadata into output databox
        structure = self.registerStructure (
                            fnames[0],fnames[1],fnames[2],fnames[3],libPath )
        if structure:
            structure.addDataAssociation ( hkl.dataId )
            structure.setRefmacLabels ( hkl )
            for i in range(len(associated_data_list)):
                if associated_data_list[i]:
                    structure.addDataAssociation ( associated_data_list[i].dataId )
            if subtype==0:
                for i in range(len(associated_data_list)):
                    if associated_data_list[i]:
                        structure.copySubtype ( associated_data_list[i] )
            elif subtype==1:
                structure.addMRSubtype()
            else:
                structure.addEPSubtype()
            structure.addXYZSubtype()
            if title!="":
                self.putTitle ( title )
            self.putMessage ( " " )
            self.putStructureWidget ( "structure_btn_",
                                      "Structure and electron density",
                                      structure )
    else:
        # NOTE(review): branch placement reconstructed from stripped
        # indentation -- shown when the refmac input file is missing
        self.putTitle ( "No Solution Found" )
    self.widget_no += 1
    return structure
def finaliseAnomSubstructure ( self,xyzPath,name_pattern,hkl,
                                    associated_data_list,
                                    anom_form,openState_bool=False,
                                    title="" ):
    # Convenience wrapper over finaliseAnomSubstructure1() which writes
    # into the main report page at the current position and advances
    # self.rvrow over the rows the widget occupied.
    anom_structure = self.finaliseAnomSubstructure1 ( xyzPath,name_pattern,
                                hkl,associated_data_list,anom_form,
                                self.report_page_id(),self.rvrow,
                                openState_bool,title )
    self.rvrow += 2
    if anom_structure:
        self.rvrow += 1   # spacer message row
        if title:
            self.rvrow += 1   # title row
    return anom_structure
def finaliseAnomSubstructure1 ( self,xyzPath,name_pattern,hkl,
                                     associated_data_list,anom_form,pageId,
                                     row,openState_bool=False,title="" ):
    """Calculate anomalous electron density with Refmac for substructure
    'xyzPath', register the result and display it on page 'pageId'
    starting at 'row'.  Returns the registered substructure, or None
    when registration failed."""
    sec_id = self.refmac_section() + "_" + str(self.widget_no)
    row1 = row
    pyrvapi.rvapi_add_section ( sec_id,
                    "Anomalous Electron Density Calculations with Refmac",
                    pageId,row1,0,1,1,openState_bool )
    row1 += 1
    panel_id = self.refmac_report() + "_" + str(self.widget_no)
    pyrvapi.rvapi_add_panel ( panel_id,sec_id,0,0,1,1 )
    # parse refmac output into the report panel
    self.log_parser = pyrvapi_ext.parsers.generic_parser (
                                panel_id,False,
                                summary=self.generic_parser_summary,
                                graph_tables=False )
    fnames = self.calcAnomEDMap ( xyzPath,hkl,anom_form,name_pattern )
    anom_structure = self.registerStructure (
                                fnames[0],fnames[1],fnames[2],fnames[3],None )
    if anom_structure:
        anom_structure.addDataAssociation ( hkl.dataId )
        anom_structure.setRefmacLabels ( hkl )
        for i in range(len(associated_data_list)):
            if associated_data_list[i]:
                # bug fix: was 'structure.addDataAssociation' -- an
                # undefined name here, raising NameError at run time
                anom_structure.addDataAssociation ( associated_data_list[i].dataId )
        anom_structure.setAnomSubstrSubtype() # anomalous maps
        self.putMessage1 ( pageId," ",row1,1 )
        row1 += 1
        if title!="":
            self.putTitle1 ( pageId,title,row1,1 )
            row1 += 1
        openState = 1 if openState_bool else -1
        self.putStructureWidget1 ( pageId,"anom_structure_btn_",
                                   "Anomalous substructure and electron density",
                                   anom_structure,openState,row1,1 )
        return anom_structure
    else:
        self.putTitle1 ( pageId,"No Anomalous Structure Found",row1,1 )
        return None
def finaliseLigand ( self,code,xyzPath,cifPath,openState_bool=False,
                          title="Ligand Structure" ):
    """Register a generated ligand (coordinates + dictionary) and display
    it in the report; returns the ligand object, or None when 'xyzPath'
    does not exist or registration failed."""
    ligand = None
    if os.path.isfile(xyzPath):
        # registration moves the files into the output directory and puts
        # the corresponding metadata into the output databox
        ligand = self.registerLigand ( xyzPath,cifPath )
        if ligand:
            if title != "":
                self.putTitle ( title )
            ligand.code = code
            self.putLigandWidget ( "ligand_btn_","Ligand structure",ligand )
    else:
        self.putTitle ( "No Ligand Formed" )
    self.widget_no += 1
    return ligand
# ============================================================================
def putInspectButton ( self,dataObject,title,gridId,row,col ):
    """Add a button that opens the data-inspection dialog for
    'dataObject' in the parent jsCoFE window."""
    button_id = self.getWidgetId ( "inspect_data" )
    pyrvapi.rvapi_add_button ( button_id,title,"{function}",
        "window.parent.rvapi_inspectData(" + self.job_id +
        ",'" + dataObject._type + "','" + dataObject.dataId + "')",
        False,gridId,row,col,1,1 )
    return
# ============================================================================
def putRevisionWidget ( self,gridId,row,message,revision ):
    """Put an 'Inspect' button, 'message' and the revision name into row
    'row' of grid 'gridId'."""
    button_id = self.getWidgetId ( "inspect" )
    pyrvapi.rvapi_add_button ( button_id,"Inspect","{function}",
        "window.parent.rvapi_inspectData(" + self.job_id +
        ",'DataRevision','" + revision.dataId + "')",
        False,gridId,row,0,1,1 )
    pyrvapi.rvapi_set_text ( message,gridId,row,1,1,1 )
    pyrvapi.rvapi_set_text ( "<font style='font-size:120%;'>\"" + revision.dname +
                             "\"</font>",gridId,row,2,1,1 )
    return
def registerRevision ( self,revision,serialNo=1,title="Structure Revision",
                            message="<b><i>New structure revision name:</i></b>",
                            gridId = "" ):
    # Name and register a structure revision in the output databox, then
    # display its widget; returns the id of the grid holding the widget.
    revision.makeRevDName ( self.job_id,serialNo,self.outputFName )
    revision.register ( self.outputDataBox )
    if title:
        self.putTitle ( title )
    grid_id = gridId
    if len(gridId)<=0:
        # no grid given: create one at the current report position
        grid_id = "revision_" + str(self.widget_no)
        self.widget_no += 1
        pyrvapi.rvapi_add_grid ( grid_id,False,self.report_page_id(),self.rvrow,0,1,1 )
    self.putRevisionWidget ( grid_id,0,message,revision )
    self.rvrow += 1
    return grid_id
#def registerRevision1 ( self,revision,serialNo,pageId,row,title="Structure Revision" ):
# revision.makeRevDName ( self.job_id,serialNo,self.outputFName )
# revision.register ( self.outputDataBox )
# self.putTitle1 ( pageId,title,row,1 )
# self.putMessage1 ( pageId,"<b><i>New structure revision name:</i></b> " +\
# "<font size='+1'>\"" + revision.dname + "\"</font>",
# row+1,1 )
# return
def registerStructure ( self,xyzPath,mtzPath,mapPath,dmapPath,
                             libPath=None,copy=False ):
    """Register a structure (coordinates, map coefficients, maps and
    optional dictionary) in the output databox; returns the structure
    object, or None on failure (logged to stderr)."""
    self.dataSerialNo += 1
    structure = dtype_structure.register (
                        xyzPath,mtzPath,mapPath,dmapPath,libPath,
                        self.dataSerialNo,self.job_id,
                        self.outputDataBox,self.outputDir(),
                        copy=copy )
    if structure:
        structure.putXYZMeta ( self.outputDir(),self.file_stdout,
                               self.file_stderr,None )
    else:
        self.file_stderr.write ( " NONE STRUCTURE" )
        self.file_stderr.flush()
    return structure
def _move_file_to_output_dir ( self,fpath,fname_dest ):
    """Move file 'fpath' into the output directory as 'fname_dest'.

    Returns True when 'fpath' exists (the move is skipped if the
    destination is already present), False otherwise."""
    if not os.path.isfile(fpath):
        return False
    fpath_dest = os.path.join ( self.outputDir(),fname_dest )
    if not os.path.isfile(fpath_dest):
        os.rename ( fpath,fpath_dest )
    return True
def registerStructure1 ( self,xyzPath,mtzPath,mapPath,dmapPath,libPath,regName ):
    # Register a structure under explicit registration name 'regName' and
    # move its files into the output directory (cf. registerStructure(),
    # where the file moving is done inside dtype_structure.register()).
    # Returns the structure object, or None on failure (logged to stderr).
    self.dataSerialNo += 1
    structure = dtype_structure.register1 (
                        xyzPath,mtzPath,mapPath,dmapPath,libPath,
                        regName,self.dataSerialNo,self.job_id,
                        self.outputDataBox )
    if not structure:
        self.file_stderr.write ( " NONE STRUCTURE\n" )
        self.file_stderr.flush()
    else:
        # structure.files[] holds the registered names; maps and
        # dictionary entries are optional
        self._move_file_to_output_dir ( xyzPath ,structure.files[0] )
        self._move_file_to_output_dir ( mtzPath ,structure.files[1] )
        if (len(structure.files)>2) and structure.files[2]:
            self._move_file_to_output_dir ( mapPath ,structure.files[2] )
        if len(structure.files)>3 and structure.files[3]:
            self._move_file_to_output_dir ( dmapPath,structure.files[3] )
        if len(structure.files)>4 and structure.files[4]:
            self._move_file_to_output_dir ( libPath,structure.files[4] )
        structure.putXYZMeta ( self.outputDir(),self.file_stdout,
                               self.file_stderr,None )
    return structure
def registerLigand ( self,xyzPath,cifPath,copy=False ):
    """Register a ligand (coordinates + dictionary) in the output
    databox; returns the ligand object, or None on failure."""
    self.dataSerialNo += 1
    ligand = dtype_ligand.register ( xyzPath,cifPath,
                                     self.dataSerialNo,self.job_id,
                                     self.outputDataBox,self.outputDir(),
                                     copy=copy )
    if not ligand:
        self.file_stderr.write ( " NONE LIGAND" )
        self.file_stderr.flush()
    return ligand
# ----------------------------------------------------------------------------
def putHKLWidget ( self,widgetId,title_str,hkl,openState=-1 ):
    """Display an HKL dataset widget at the current report position."""
    unique_id = widgetId + str(self.widget_no)
    self.putHKLWidget1 ( self.report_page_id(),unique_id,
                         title_str,hkl,openState,self.rvrow,1 )
    self.rvrow += 2
    self.widget_no += 1
    return
def putHKLWidget1 ( self,pageId,widgetId,title_str,hkl,openState,row,colSpan ):
    """Display an HKL dataset widget on page 'pageId' at 'row';
    returns the next free row."""
    # bug fix: putMessage1() takes (pageId,message_str,row[,colSpan]) --
    # the original call passed an extra positional argument ('0' before
    # colSpan), which raises TypeError on every call
    self.putMessage1 ( pageId,"<b>Assigned name:</b> " + hkl.dname,
                       row,colSpan )
    pyrvapi.rvapi_add_data ( widgetId + str(self.widget_no),title_str,
                   # always relative to job_dir from job_dir/html
                   os.path.join("..",self.outputDir(),hkl.files[0]),
                   "hkl:hkl",pageId,row+1,0,1,colSpan,openState )
    self.widget_no += 1
    return row + 2
def putStructureWidget ( self,widgetId,title_str,structure,openState=-1 ):
    """Display a structure widget at the current report position."""
    unique_id = widgetId + str(self.widget_no)
    self.putStructureWidget1 ( self.report_page_id(),unique_id,title_str,
                               structure,openState,self.rvrow,1 )
    self.rvrow += 2
    self.widget_no += 1
    return
def putStructureWidget1 ( self,pageId,widgetId,title_str,structure,openState,row,colSpan ):
    """Display a structure widget (coordinates, maps and optional
    dictionary) on page 'pageId' at 'row'; returns the next free row."""
    self.putMessage1 ( pageId,"<b>Assigned name:</b> " +
                              structure.dname +
                              "<font size='+2'><sub> </sub></font>", row,1 )
    wId = widgetId + str(self.widget_no)
    self.widget_no += 1
    # data types corresponding to structure.files[0..4]
    # (renamed from 'type', which shadowed the builtin)
    file_types = ["xyz","hkl:map","hkl:ccp4_map","hkl:ccp4_dmap","LIB"]
    created = False
    for i in range(len(structure.files)):
        if structure.files[i]:
            if not created:
                pyrvapi.rvapi_add_data ( wId,title_str,
                        # always relative to job_dir from job_dir/html
                        os.path.join("..",self.outputDir(),structure.files[i]),
                        file_types[i],pageId,row+1,0,1,colSpan,openState )
                created = True
            else:
                pyrvapi.rvapi_append_to_data ( wId,
                        # always relative to job_dir from job_dir/html
                        os.path.join("..",self.outputDir(),structure.files[i]),
                        file_types[i] )
    return row+2
# ============================================================================
def putLigandWidget ( self,widgetId,title_str,ligand,openState=-1 ):
    """Display a ligand widget at the current report position."""
    unique_id = widgetId + str(self.widget_no)
    self.putLigandWidget1 ( self.report_page_id(),unique_id,title_str,
                            ligand,openState,self.rvrow,1 )
    self.rvrow += 2
    self.widget_no += 1
    return
def putLigandWidget1 ( self,pageId,widgetId,title_str,ligand,openState,row,colSpan ):
    """Display a ligand widget (coordinates + optional dictionary) on
    page 'pageId' at 'row'; returns the next free row."""
    data_id = widgetId + str(self.widget_no)
    self.putMessage1 ( pageId,"<b>Assigned name:</b> " + ligand.dname +
                              "<font size='+2'><sub> </sub></font>", row,1 )
    # paths are always relative to job_dir from job_dir/html
    pyrvapi.rvapi_add_data ( data_id,title_str,
                             os.path.join("..",self.outputDir(),ligand.files[0]),
                             "xyz",pageId,row+1,0,1,colSpan,openState )
    if len(ligand.files) > 1:
        pyrvapi.rvapi_append_to_data ( data_id,
                             os.path.join("..",self.outputDir(),ligand.files[1]),
                             "LIB" )
    self.widget_no += 1
    return row+2
# ============================================================================
def registerXYZ ( self,xyzPath,checkout=True ):
    """Register an XYZ dataset; when checkout is False the data is not
    placed into the output databox. Returns the xyz object or None."""
    self.dataSerialNo += 1
    databox_dest = self.outputDataBox if checkout else None
    xyz = dtype_xyz.register ( xyzPath,self.dataSerialNo,self.job_id,
                               databox_dest,self.outputDir() )
    if not xyz:
        self.file_stderr.write ( " NONE XYZ DATA\n" )
        self.file_stderr.flush()
    return xyz
"""
def registerHKL ( self,mtzPath ):
self.dataSerialNo += 1
hkl = dtype_hkl.register ( mtzPath,self.dataSerialNo,self.job_id,
self.outputDataBox,self.outputDir() )
if not hkl:
self.file_stderr.write ( " NONE HKL DATA\n" )
self.file_stderr.flush()
return hkl
"""
# ----------------------------------------------------------------------------
def putXYZWidget ( self,widgetId,title_str,xyz,openState=-1 ):
    """Display an XYZ coordinate widget in the report.

    Bug fix: the original body referenced undefined names 'secId' and
    'secrow' (NameError on any call) and ignored 'openState'; the widget
    is now placed at the current report position.
    """
    pyrvapi.rvapi_add_data ( widgetId,title_str,
                   # always relative to job_dir from job_dir/html
                   os.path.join("..",self.outputDir(),xyz.files[0]),
                   "xyz",self.report_page_id(),self.rvrow,0,1,1,openState )
    self.rvrow += 1
    return
# ============================================================================
def registerEnsemble ( self,sequence,ensemblePath,checkout=True ):
self.dataSerialNo += 1
if checkout:
ensemble = dtype_ensemble.register ( sequence,ensemblePath,
self.dataSerialNo,self.job_id,
self.outputDataBox,
self.outputDir() )
else:
ensemble = dtype_ensemble.register ( sequence,ensemblePath,
self.dataSerialNo,self.job_id,
None,self.outputDir() )
if not ensemble:
self.file_stderr.write ( " NONE ENSEMBLE DATA\n" )
self.file_stderr.flush()
else:
ensemble.putXYZMeta ( self.outputDir(),self.file_stdout,
self.file_stderr,None )
return ensemble
# ----------------------------------------------------------------------------
def putEnsembleWidget ( self,widgetId,title_str,ensemble,openState=-1 ):
self.putEnsembleWidget1 ( self.report_page_id(),widgetId,title_str,
ensemble,openState,self.rvrow,1 )
self.rvrow += 2
return
def putEnsembleWidget1 ( self,pageId,widgetId,title_str,ensemble,openState,row,colSpan ):
self.putMessage1 ( pageId,"<b>Assigned name:</b> " +\
ensemble.dname + "<br> ", row,1 )
pyrvapi.rvapi_add_data ( widgetId,title_str,
# always relative to job_dir from job_dir/html
os.path.join("..",self.outputDir(),ensemble.files[0]),
"xyz",pageId,row+1,0,1,colSpan,openState )
return row+2
# ============================================================================
    def checkSpaceGroupChanged ( self,sol_spg,hkl,mtzfilepath ):
        """Check whether a solution changed the space group and, if so,
        import the re-indexed reflections as a new HKL dataset.

        Parameters:
          sol_spg      space group string from the solution's XYZ file
          hkl          HKL class of reflection data used to produce the XYZ
          mtzfilepath  path to solution's MTZ file (with possibly changed SpG)

        Returns:
          None                   if the space group has not changed (or the
                                 new dataset failed to register)
          (newMTZPath,newHKL)    path to new HKL file and HKL class if
                                 the space group changed
        """
        # the following will provide for import of generated HKL dataset(s)
        #   def importDir        (self): return "./"   # import from working directory
        #   def import_summary_id(self): return None   # don't make summary table
        solSpg = sol_spg.replace(" ", "")
        # compare space group symbols with all blanks removed
        if solSpg and (solSpg!=hkl.getSpaceGroup().replace(" ", "")):
            self.putMessage ( "<font style='font-size:120%;'><b>Space Group changed to " +\
                              sol_spg + "</b></font>" )
            rvrow0 = self.rvrow   # remember this row for the message below
            self.rvrow += 1
            #if not self.generic_parser_summary:
            #    self.generic_parser_summary = {}
            self.generic_parser_summary["z01"] = {'SpaceGroup':sol_spg}
            # rename the MTZ so the new space group appears in the file name
            newHKLFPath = self.getOFName ( "_" + solSpg + "_" + hkl.files[0],-1 )
            os.rename ( mtzfilepath,newHKLFPath )
            self.files_all = [ newHKLFPath ]
            import_merged.run ( self,"New reflection dataset details" )
            if dtype_hkl.dtype() in self.outputDataBox.data:
                sol_hkl = self.outputDataBox.data[dtype_hkl.dtype()][0]
                pyrvapi.rvapi_set_text ( "<b>New reflection dataset created:</b> " +\
                                         sol_hkl.dname,self.report_page_id(),rvrow0,0,1,1 )
                self.putMessage (
                    "<p><i>Consider re-merging your original dataset using " +\
                    "this new one as a reference</i>" )
                # Copy new reflection file to input directory in order to serve
                # Refmac job(s) (e.g. as part of self.finaliseStructure()). The
                # job needs reflection data for calculating Rfree, other stats
                # and density maps.
                shutil.copy2 ( os.path.join(self.outputDir(),sol_hkl.files[0]),
                               self.inputDir() )
                return (newHKLFPath,sol_hkl)
            else:
                # import failed: nothing was registered; falls through to the
                # implicit None return
                self.putMessage (
                    "Data registration error -- report to developers." )
        else:
            return None
    def checkSpaceGroupChanged1 ( self,sol_spg,hkl_list ):
        """Re-index an array of HKL datasets into space group *sol_spg*.

        Returns the list of newly registered HKL data objects, or None if
        the space group did not actually change.
        """
        solSpg = sol_spg.replace(" ", "")
        if solSpg and (solSpg!=hkl_list[0].getSpaceGroup().replace(" ", "")):
            self.putMessage ( "<font style='font-size:120%;'><b>Space Group changed to " +\
                              sol_spg + "</b></font>" )
            #rvrow0 = self.rvrow
            self.rvrow += 1
            self.generic_parser_summary["z01"] = {'SpaceGroup':sol_spg}
            # prepare script for reindexing
            self.open_stdin  ()
            self.write_stdin ( "SYMM \"" + sol_spg + "\"\n" )
            self.close_stdin ()
            f_stdin = self.file_stdin   # keep the handle: the same script is fed to every run
            self.unsetLogParser()
            # make list of files to import
            self.files_all = []
            for i in range(len(hkl_list)):
                # make new hkl file name
                newHKLFPath = self.getOFName ( "_" + solSpg + "_" + hkl_list[i].files[0],-1 )
                # make command-line parameters for reindexing
                cmd = [ "hklin" ,hkl_list[i].getFilePath(self.inputDir()),
                        "hklout",newHKLFPath ]
                # run reindex
                self.file_stdin = f_stdin  # for repeat use of input script file
                self.runApp ( "reindex",cmd )
                if os.path.isfile(newHKLFPath):
                    self.files_all.append ( newHKLFPath )
                else:
                    self.putMessage ( "Error: cannot reindex " + hkl_list[i].dname )
            import_merged.run ( self,"New reflection datasets" )
            # return the list of freshly registered datasets of the same type
            return self.outputDataBox.data[hkl_list[0]._type]
        else:
            return None
# ============================================================================
def success(self):
if self.task and self.generic_parser_summary:
self.task.scores = self.generic_parser_summary
with open('job.meta','w') as file_:
file_.write ( self.task.to_JSON() )
self.rvrow += 1
self.putMessage ( "<p> " ) # just to make extra space after report
self.outputDataBox.save ( self.outputDir() )
pyrvapi.rvapi_flush ()
self.file_stdout.close()
self.file_stderr.close()
raise signal.Success()
def fail ( self,pageMessage,signalMessage ):
if self.task and self.generic_parser_summary:
self.task.scores = self.generic_parser_summary
with open('job.meta','w') as file_:
file_.write ( self.task.to_JSON() )
self.putMessage ( "<p> " ) # just to make extra space after report
pyrvapi.rvapi_set_text ( pageMessage,self.report_page_id(),self.rvrow,0,1,1 )
pyrvapi.rvapi_flush ()
msg = pageMessage.replace("<b>","").replace("</b>","").replace("<i>","") \
.replace("</i>","").replace("<br>","\n").replace("<p>","\n")
self.file_stdout.write ( msg + "\n" )
self.file_stderr.write ( msg + "\n" )
self.file_stdout.close ()
self.file_stderr.close ()
raise signal.JobFailure ( signalMessage )
    def python_fail_tab ( self ):
        """Create an 'Error Trace' report tab containing the current
        Python exception traceback; used when the driver itself crashes."""
        trace = ''.join( traceback.format_exception( *sys.exc_info() ) )
        msg = '<h2><i>Job Driver Failure</i></h2>'
        msg += '<p>Catched error:<pre>' + trace + '</pre>'
        msg += """
        <p>This is an internal error, which may be caused by different
        sort of hardware and network malfunction, but most probably due
        to a bug or not anticipated properties of input data.
        """
        if self.maintainerEmail:
            msg += """
        <p>You may contribute to the improvement of jsCoFE by sending this
        message <b>together with</b> input data <b>and task description</b> to
        """
            msg += self.maintainerEmail
        # the trace goes into its own tab so it does not clutter the report
        page_id = self.traceback_page_id()
        pyrvapi.rvapi_add_tab ( page_id, "Error Trace", True )
        pyrvapi.rvapi_set_text ( msg, page_id, 0, 0, 1, 1 )
def start(self):
try:
self.run()
except signal.Success, s:
signal_obj = s
except signal.CofeSignal, s:
self.python_fail_tab()
signal_obj = s
except:
self.python_fail_tab()
signal_obj = signal.JobFailure()
else:
signal_obj = signal.Success()
finally:
pass
signal_obj.quitApp()
| 52,014 | 15,237 |
from .wideresnet import *
from .wideresnet_lk import *
# registry mapping model names (as used by callers to select an
# architecture) to their classes; the classes come from the star
# imports above
WRN_MODELS = {
    'WideResNet':WideResNet,
    'WideResNet_Lk': WideResNet_Lk,
}
| 154 | 66 |
# This script is based on moltres/python/extractSerpent2GCs.py
import os
import numpy as np
import argparse
import subprocess
import serpentTools as sT
def _formatPropData(data):
    """Flatten a numpy array into a space-separated string; any other
    value passes through unchanged."""
    if isinstance(data, np.ndarray):
        return ' '.join([str(dat) for dat in data])
    return data


def _appendPropLine(outdir, filebase, mat, suffix, temp, data):
    """Append one '<temp> <data>' line to the property file
    <outdir>/<filebase>_<mat>_<suffix>.txt (created on first use)."""
    fname = outdir + '/' + filebase + '_' + mat + '_' + suffix + '.txt'
    with open(fname, 'a') as fh:
        fh.write(str(temp) + ' ' + _formatPropData(data))
        fh.write('\n')


def makePropertiesDir(
        outdir,
        filebase,
        mapFile,
        unimapFile,
        serp1=False,
        fromMain=False):
    """ Takes in a mapping from branch names to material temperatures,
    then makes a properties directory.
    Serp1 means that the group transfer matrix is transposed.

    NOTE(review): serp1 and fromMain are currently unused in this body;
    kept for interface compatibility — confirm against callers.

    This revision closes both passes over mapFile via 'with' (the second
    handle was previously leaked) and factors the repeated append-line
    code into _appendPropLine.
    """
    # the constants moltres looks for:
    goodStuff = ['Tot', 'Sp0', 'Sp2', 'Fiss', 'Nsf', 'Kappa', 'Sp1', 'Sp3',
                 'Invv', 'Chit', 'Chip', 'Chid', 'BETA_EFF', 'lambda']
    goodMap = dict([(thing, 'inf' + thing) for thing in goodStuff])
    goodMap['BETA_EFF'] = 'betaEff'
    goodMap['lambda'] = 'lambda'
    # map material names to universe names from serpent
    with open(unimapFile) as fh:
        uniMap = []
        for line in fh:
            uniMap.append(tuple(line.split()))
    # this now maps material names to serpent universes
    uniMap = dict(uniMap)
    # list of material names
    inmats = list(uniMap.keys())
    print("Making properties for materials:")
    print(inmats)
    coeList = dict([(mat, sT.read(mat + '.coe')) for mat in inmats])
    # Check if calculation uses 6 neutron precursor groups.
    # This prevents writing of excess zeros. Check if any
    # entries in the 7th and 8th group precursor positions
    # are nonzero, if so, use 8 groups.
    use8Groups = False
    with open(mapFile) as branch2TempMapping:
        for line in branch2TempMapping:
            item, temp = tuple(line.split())
            for mat in inmats:
                if mat in item:
                    currentMat = mat
                    break
            betas = coeList[currentMat].branches[item].universes[
                uniMap[currentMat], 0, 0, None].gc[goodMap['BETA_EFF']]
            betas = betas[1:9]
            if np.any(betas[-2:] != 0.0):
                use8Groups = True
    # Now loop through a second time, writing the property files
    with open(mapFile) as branch2TempMapping:
        for line in branch2TempMapping:
            item, temp = tuple(line.split())
            for mat in inmats:
                if mat in item:
                    currentMat = mat
                    break
            else:
                print('Considered materials: {}'.format(inmats))
                raise Exception(
                    'Couldnt find a material corresponding to branch {}'.format(
                        item))
            try:
                univ = coeList[currentMat].branches[item].universes[
                    uniMap[currentMat], 0, 0, None]
                totxsdata = univ.infExp[goodMap['Tot']]
                sp0xsdata = univ.infExp[goodMap['Sp0']]
                sp1xsdata = univ.infExp[goodMap['Sp1']]
                sp2xsdata = univ.infExp[goodMap['Sp2']]
                sp3xsdata = univ.infExp[goodMap['Sp3']]
                G = len(totxsdata)
                # removal cross sections for scattering moments 0..3
                remxs0g = totxsdata - sp0xsdata.reshape((G, G)).diagonal()
                remxs1g = totxsdata - sp1xsdata.reshape((G, G)).diagonal()
                remxs2g = totxsdata - sp2xsdata.reshape((G, G)).diagonal()
                remxs3g = totxsdata - sp3xsdata.reshape((G, G)).diagonal()
                # derived SP3 diffusion/removal/coupling coefficients
                _appendPropLine(outdir, filebase, currentMat, 'DIFFCOEFA',
                                temp, 1. / 3. / remxs1g)
                _appendPropLine(outdir, filebase, currentMat, 'DIFFCOEFB',
                                temp, 9. / 35. / remxs3g)
                _appendPropLine(outdir, filebase, currentMat, 'REMXSA',
                                temp, remxs0g)
                _appendPropLine(outdir, filebase, currentMat, 'REMXSB',
                                temp, remxs2g + 4. / 5 * remxs0g)
                _appendPropLine(outdir, filebase, currentMat, 'COUPLEXSA',
                                temp, 2 * remxs0g)
                _appendPropLine(outdir, filebase, currentMat, 'COUPLEXSB',
                                temp, 2. / 5 * remxs0g)
                for coefficient in ['Chit', 'Chip', 'Chid', 'Fiss', 'Nsf',
                                    'Sp0', 'Kappa', 'Invv', 'BETA_EFF',
                                    'lambda']:
                    if coefficient == 'lambda' or coefficient == 'BETA_EFF':
                        strData = univ.gc[goodMap[coefficient]]
                        # some additional formatting is needed here
                        strData = strData[1:9]
                        # Cut off group 7 and 8 precursor params in 6
                        # group calcs
                        if not use8Groups:
                            strData = strData[0:6]
                    else:
                        strData = univ.infExp[goodMap[coefficient]]
                    _appendPropLine(outdir, filebase, currentMat,
                                    coefficient.upper(), temp, strData)
            except KeyError:
                raise Exception('Check your mapping and secondary branch files.')
if __name__ == '__main__':
    # make it act like a nice little terminal program
    parser = argparse.ArgumentParser(
        description='Extracts Serpent 2 group constants, \
        and puts them in a directory suitable for moltres.')
    # positional arguments (all are nargs=1, hence the [0] unpacking below)
    parser.add_argument('outDir', metavar='o', type=str, nargs=1,
                        help='name of directory to write properties to.')
    parser.add_argument('fileBase', metavar='f', type=str,
                        nargs=1, help='File base name to give moltres')
    parser.add_argument(
        'mapFile',
        metavar='b',
        type=str,
        nargs=1,
        help='File that maps branches to temperatures')
    parser.add_argument(
        'universeMap',
        metavar='u',
        type=str,
        nargs=1,
        help='File that maps material names to serpent universe')
    parser.add_argument(
        '--serp1',
        dest='serp1',
        action='store_true',
        help='use this flag for serpent 1 group transfer matrices')
    parser.set_defaults(serp1=False)
    args = parser.parse_args()
    # these are unpacked, so it fails if they werent passed to the script
    outdir = args.outDir[0]
    fileBase = args.fileBase[0]
    mapFile = args.mapFile[0]
    unimapFile = args.universeMap[0]
    makePropertiesDir(outdir, fileBase, mapFile, unimapFile, serp1=args.serp1,
                      fromMain=True)
    print("Successfully made property files in directory {}.".format(outdir))
| 8,954 | 2,703 |
import os
from . import utils
import numpy as np
from scipy.stats import scoreatpercentile
from scipy.optimize import curve_fit
from scipy import exp
import operator
from copy import copy, deepcopy
from collections import defaultdict, Counter
import re
from pyteomics import parser, mass, fasta, auxiliary as aux, achrom
try:
from pyteomics import cmass
except ImportError:
cmass = mass
import subprocess
from sklearn import linear_model
import tempfile
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from multiprocessing import Queue, Process, cpu_count
from itertools import chain
try:
import seaborn
seaborn.set(rc={'axes.facecolor':'#ffffff'})
seaborn.set_style('whitegrid')
except:
pass
from .utils import calc_sf_all, recalc_spc
import lightgbm as lgb
import pandas as pd
from sklearn.model_selection import train_test_split
from scipy.stats import zscore, spearmanr
import pandas as pd
from pyteomics import pepxml, achrom, auxiliary as aux, mass, fasta, mzid, parser
from pyteomics import electrochem
import numpy as np
import random
SEED = 42
from sklearn.model_selection import train_test_split
from os import path, mkdir
from collections import Counter, defaultdict
import warnings
import pylab as plt
warnings.formatwarning = lambda msg, *args, **kw: str(msg) + '\n'
import pandas as pd
from sklearn.model_selection import train_test_split, KFold
import os
from collections import Counter, defaultdict
from scipy.stats import scoreatpercentile
from sklearn.isotonic import IsotonicRegression
import warnings
import numpy as np
import matplotlib
import numpy
import pandas
import random
import sklearn
import matplotlib.pyplot as plt
from sklearn import (
feature_extraction, feature_selection, decomposition, linear_model,
model_selection, metrics, svm
)
import scipy
from scipy.stats import rankdata
from copy import deepcopy
import csv
from scipy.stats import rankdata
import lightgbm as lgb
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from itertools import chain
import time as timemodule
import ast
from sklearn import metrics
SEED = 50
def worker_RT(qin, qout, shift, step, RC=False, elude_path=False, ns=False, nr=False, win_sys=False):
    """Predict retention times for every *step*-th peptide of *qin*
    starting at index *shift*, using ELUDE when *elude_path* is given or
    an additive (achrom) model *RC* otherwise.

    On Windows (win_sys=True) the peptide->RT dict is returned directly;
    otherwise it is pushed to *qout* followed by a None sentinel.
    """
    pepdict = dict()
    # this worker handles qin indices shift, shift+step, shift+2*step, ...
    indices = range(shift, len(qin), step)
    if elude_path:
        tmpdir = tempfile.gettempdir()
        outtrain_name = os.path.join(tmpdir, os.urandom(24).hex())
        outres_name = os.path.join(tmpdir, os.urandom(24).hex())
        outtest_name = os.path.join(tmpdir, os.urandom(24).hex())
        # training set: known (sequence, RT) pairs
        with open(outtrain_name, 'w') as outtrain:
            for seq, RT in zip(ns, nr):
                outtrain.write(seq + '\t' + str(RT) + '\n')
        # test set: this worker's slice of peptides
        with open(outtest_name, 'w') as outtest:
            for idx in indices:
                outtest.write(qin[idx] + '\n')
        subprocess.call([elude_path, '-t', outtrain_name, '-e', outtest_name, '-a', '-o', outres_name])
        # ELUDE output: three header lines, then 'sequence<TAB>RT'
        for x in open(outres_name).readlines()[3:]:
            seq, RT = x.strip().split('\t')
            pepdict[seq] = float(RT)
    else:
        for idx in indices:
            item = qin[idx]
            pepdict[item] = achrom.calculate_RT(item, RC)
    if win_sys:
        return pepdict
    else:
        qout.put(pepdict)
        qout.put(None)
def final_iteration(resdict, mass_diff, rt_diff, pept_prot, protsN, base_out_name, prefix, isdecoy, isdecoy_key, escore, fdr, nproc, fname=False):
    """Final protein inference and scoring stage.

    Iteratively selects the best-scoring protein, removes its peptide
    evidence from the pool and rescores the rest, until the protein-level
    FDR estimate exceeds 12.5*fdr.  Scores are then refined by the
    module-level worker() over ten mass/RT shift combinations (serially
    or with multiprocessing), target-decoy filtered with pyteomics'
    aux.filter, and the *_proteins*.tsv reports are written (plus QC
    plots when *fname* is given).
    """
    n = nproc
    prots_spc_basic = dict()
    p1 = set(resdict['seqs'])
    # peptide <-> match-id cross maps and per-peptide match counts
    pep_pid = defaultdict(set)
    pid_pep = defaultdict(set)
    banned_dict = dict()
    for pep, pid in zip(resdict['seqs'], resdict['ids']):
        pep_pid[pep].add(pid)
        pid_pep[pid].add(pep)
        if pep in banned_dict:
            banned_dict[pep] += 1
        else:
            banned_dict[pep] = 1
    if len(p1):
        prots_spc_final = dict()
        prots_spc_copy = False
        prots_spc2 = False
        unstable_prots = set()
        p0 = False
        names_arr = False
        tmp_spc_new = False
        decoy_set = False
        while 1:
            # one-time initialisation of protein -> peptide-set maps and
            # decoy statistics; skipped on subsequent iterations
            if not prots_spc2:
                best_match_dict = dict()
                n_map_dict = defaultdict(list)
                for k, v in protsN.items():
                    n_map_dict[v].append(k)
                decoy_set = set()
                for k in protsN:
                    if isdecoy_key(k):
                        decoy_set.add(k)
                decoy_set = list(decoy_set)
                prots_spc2 = defaultdict(set)
                for pep, proteins in pept_prot.items():
                    if pep in p1:
                        for protein in proteins:
                            prots_spc2[protein].add(pep)
                for k in protsN:
                    if k not in prots_spc2:
                        prots_spc2[k] = set([])
                prots_spc2 = dict(prots_spc2)
                unstable_prots = set(prots_spc2.keys())
                top100decoy_N = sum([val for key, val in protsN.items() if isdecoy_key(key)])
                names_arr = np.array(list(prots_spc2.keys()))
                n_arr = np.array([protsN[k] for k in names_arr])
                tmp_spc_new = dict((k, len(v)) for k, v in prots_spc2.items())
                top100decoy_score_tmp = [tmp_spc_new.get(dprot, 0) for dprot in decoy_set]
                top100decoy_score_tmp_sum = float(sum(top100decoy_score_tmp))
            tmp_spc = tmp_spc_new
            prots_spc = tmp_spc_new
            if not prots_spc_copy:
                prots_spc_copy = deepcopy(prots_spc)
            # refresh decoy score sum only for proteins whose counts changed
            for idx, v in enumerate(decoy_set):
                if v in unstable_prots:
                    top100decoy_score_tmp_sum -= top100decoy_score_tmp[idx]
                    top100decoy_score_tmp[idx] = prots_spc.get(v, 0)
                    top100decoy_score_tmp_sum += top100decoy_score_tmp[idx]
            p = float(sum(top100decoy_score_tmp)) / top100decoy_N
            # NOTE(review): the line above recomputes the full sum and is
            # immediately overwritten by the incremental estimate below
            p = top100decoy_score_tmp_sum / top100decoy_N
            # rescore only the theoretical-peptide-count groups affected
            # by the last exclusion round
            n_change = set(protsN[k] for k in unstable_prots)
            for n_val in n_change:
                for k in n_map_dict[n_val]:
                    v = prots_spc[k]
                    if n_val not in best_match_dict or v > prots_spc[best_match_dict[n_val]]:
                        best_match_dict[n_val] = k
            n_arr_small = []
            names_arr_small = []
            v_arr_small = []
            for k, v in best_match_dict.items():
                n_arr_small.append(k)
                names_arr_small.append(v)
                v_arr_small.append(prots_spc[v])
            prots_spc_basic = dict()
            all_pvals = calc_sf_all(np.array(v_arr_small), n_arr_small, p)
            for idx, k in enumerate(names_arr_small):
                prots_spc_basic[k] = all_pvals[idx]
            if not p0:
                # first pass: dump the full, no-exclusion protein table
                p0 = float(p)
                prots_spc_tmp = dict()
                v_arr = np.array([prots_spc[k] for k in names_arr])
                all_pvals = calc_sf_all(v_arr, n_arr, p)
                for idx, k in enumerate(names_arr):
                    prots_spc_tmp[k] = all_pvals[idx]
                sortedlist_spc = sorted(prots_spc_tmp.items(), key=operator.itemgetter(1))[::-1]
                with open(base_out_name + '_proteins_full_noexclusion.tsv', 'w') as output:
                    output.write('dbname\tscore\tmatched peptides\ttheoretical peptides\n')
                    for x in sortedlist_spc:
                        output.write('\t'.join((x[0], str(x[1]), str(prots_spc_copy[x[0]]), str(protsN[x[0]]))) + '\n')
            # pick the current best protein and exclude its peptide evidence
            best_prot = utils.keywithmaxval(prots_spc_basic)
            best_score = prots_spc_basic[best_prot]
            unstable_prots = set()
            if best_prot not in prots_spc_final:
                prots_spc_final[best_prot] = best_score
                banned_pids = set()
                for pep in prots_spc2[best_prot]:
                    for pid in pep_pid[pep]:
                        banned_pids.add(pid)
                for pid in banned_pids:
                    for pep in pid_pep[pid]:
                        banned_dict[pep] -= 1
                        if banned_dict[pep] == 0:
                            for bprot in pept_prot[pep]:
                                tmp_spc_new[bprot] -= 1
                                unstable_prots.add(bprot)
            else:
                # best protein already selected: finalise remaining scores
                v_arr = np.array([prots_spc[k] for k in names_arr])
                all_pvals = calc_sf_all(v_arr, n_arr, p)
                for idx, k in enumerate(names_arr):
                    prots_spc_basic[k] = all_pvals[idx]
                for k, v in prots_spc_basic.items():
                    if k not in prots_spc_final:
                        prots_spc_final[k] = v
                break
            prot_fdr = aux.fdr(prots_spc_final.items(), is_decoy=isdecoy)
            if prot_fdr >= 12.5 * fdr:
                # FDR grew too high: finalise remaining scores and stop
                v_arr = np.array([prots_spc[k] for k in names_arr])
                all_pvals = calc_sf_all(v_arr, n_arr, p)
                for idx, k in enumerate(names_arr):
                    prots_spc_basic[k] = all_pvals[idx]
                for k, v in prots_spc_basic.items():
                    if k not in prots_spc_final:
                        prots_spc_final[k] = v
                break
    # refinement: average worker() scores over 10 mass/RT shift combinations
    prots_spc_basic2 = copy(prots_spc_final)
    prots_spc_final = dict()
    prots_spc_final2 = dict()
    if n == 0:
        try:
            n = cpu_count()
        except NotImplementedError:
            n = 1
    if n == 1 or os.name == 'nt':
        # serial path (also used on Windows, where fork is unavailable)
        qin = []
        qout = []
        for mass_koef in range(10):
            rtt_koef = mass_koef
            qin.append((mass_koef, rtt_koef))
        qout = worker(qin, qout, mass_diff, rt_diff, resdict, protsN, pept_prot, isdecoy_key, isdecoy, fdr, prots_spc_basic2, True)
        for item, item2 in qout:
            if item2:
                prots_spc_copy = item2
            for k in protsN:
                if k not in prots_spc_final:
                    prots_spc_final[k] = [item.get(k, 0.0), ]
                else:
                    prots_spc_final[k].append(item.get(k, 0.0))
    else:
        qin = Queue()
        qout = Queue()
        for mass_koef in range(10):
            rtt_koef = mass_koef
            qin.put((mass_koef, rtt_koef))
        for _ in range(n):
            qin.put(None)   # one stop sentinel per worker process
        procs = []
        for proc_num in range(n):
            p = Process(target=worker, args=(qin, qout, mass_diff, rt_diff, resdict, protsN, pept_prot, isdecoy_key, isdecoy, fdr, prots_spc_basic2))
            p.start()
            procs.append(p)
        for _ in range(n):
            for item, item2 in iter(qout.get, None):
                if item2:
                    prots_spc_copy = item2
                for k in protsN:
                    if k not in prots_spc_final:
                        prots_spc_final[k] = [item.get(k, 0.0), ]
                    else:
                        prots_spc_final[k].append(item.get(k, 0.0))
        for p in procs:
            p.join()
    for k in prots_spc_final.keys():
        prots_spc_final[k] = np.mean(prots_spc_final[k])
    prots_spc = deepcopy(prots_spc_final)
    sortedlist_spc = sorted(prots_spc.items(), key=operator.itemgetter(1))[::-1]
    with open(base_out_name + '_proteins_full.tsv', 'w') as output:
        output.write('dbname\tscore\tmatched peptides\ttheoretical peptides\n')
        for x in sortedlist_spc:
            output.write('\t'.join((x[0], str(x[1]), str(prots_spc_copy[x[0]]), str(protsN[x[0]]))) + '\n')
    # target-decoy competition: keep the better of each target/decoy pair
    checked = set()
    for k, v in list(prots_spc.items()):
        if k not in checked:
            if isdecoy_key(k):
                if prots_spc.get(k.replace(prefix, ''), -1e6) > v:
                    del prots_spc[k]
                    checked.add(k.replace(prefix, ''))
            else:
                if prots_spc.get(prefix + k, -1e6) > v:
                    del prots_spc[k]
                    checked.add(prefix + k)
    filtered_prots = aux.filter(prots_spc.items(), fdr=fdr, key=escore, is_decoy=isdecoy, remove_decoy=True, formula=1, full_output=True, correction=1)
    if len(filtered_prots) < 1:
        # correction=1 can be over-conservative on small sets; retry without
        filtered_prots = aux.filter(prots_spc.items(), fdr=fdr, key=escore, is_decoy=isdecoy, remove_decoy=True, formula=1, full_output=True, correction=0)
    identified_proteins = 0
    for x in filtered_prots:
        identified_proteins += 1
    print('TOP 5 identified proteins:')
    print('dbname\tscore\tnum matched peptides\tnum theoretical peptides')
    for x in filtered_prots[:5]:
        print('\t'.join((str(x[0]), str(x[1]), str(int(prots_spc_copy[x[0]])), str(protsN[x[0]]))))
    print('results:%s;number of identified proteins = %d' % (base_out_name, identified_proteins, ))
    # print('R=', r)
    with open(base_out_name + '_proteins.tsv', 'w') as output:
        output.write('dbname\tscore\tmatched peptides\ttheoretical peptides\n')
        for x in filtered_prots:
            output.write('\t'.join((x[0], str(x[1]), str(prots_spc_copy[x[0]]), str(protsN[x[0]]))) + '\n')
    if fname:
        # QC plots of feature RT / mass / intensity distributions
        fig = plt.figure(figsize=(16, 12))
        DPI = fig.get_dpi()
        fig.set_size_inches(2000.0/float(DPI), 2000.0/float(DPI))
        df0 = pd.read_table(os.path.splitext(fname)[0].replace('.features', '') + '.features' + '.tsv')
        # Features RT distribution
        # TODO add matched features and matched to 1% FDR proteins features
        ax = fig.add_subplot(3, 1, 1)
        bns = np.arange(0, df0['rtApex'].max() + 1, 1)
        ax.hist(df0['rtApex'], bins = bns)
        ax.set_xlabel('RT, min', size=16)
        ax.set_ylabel('# features', size=16)
        # Features mass distribution
        # TODO add matched features and matched to 1% FDR proteins features
        ax = fig.add_subplot(3, 1, 2)
        bns = np.arange(0, df0['massCalib'].max() + 6, 5)
        ax.hist(df0['massCalib'], bins = bns)
        ax.set_xlabel('neutral mass, Da', size=16)
        ax.set_ylabel('# features', size=16)
        # Features intensity distribution
        # TODO add matched features and matched to 1% FDR proteins features
        ax = fig.add_subplot(3, 1, 3)
        bns = np.arange(np.log10(df0['intensityApex'].min()) - 0.5, np.log10(df0['intensityApex'].max()) + 0.5, 0.5)
        ax.hist(np.log10(df0['intensityApex']), bins = bns)
        ax.set_xlabel('log10(Intensity)', size=16)
        ax.set_ylabel('# features', size=16)
        plt.savefig(base_out_name + '.png')
def noisygaus(x, a, x0, sigma, b):
    """Gaussian of amplitude *a*, centre *x0* and width *sigma* on a
    constant background *b*.

    Uses np.exp: the module previously relied on 'from scipy import exp',
    which is deprecated and removed in modern SciPy releases.
    """
    return a * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2)) + b
def calibrate_mass(bwidth, mass_left, mass_right, true_md):
    """Fit a Gaussian-plus-background to the histogram of mass errors.

    Returns (mass_shift, mass_sigma, variance of the fitted amplitude).
    """
    bin_edges = np.arange(-mass_left, mass_right, bwidth)
    hist, edges = np.histogram(true_md, bins=bin_edges)
    # shift the edges by one bin width and drop the last entry to obtain
    # the x positions used for the fit (as in the original implementation)
    xpos = (edges + bwidth)[:-1]
    popt, pcov = curve_fit(noisygaus, xpos, hist, p0=[1, np.median(true_md), 1, 1])
    return popt[1], abs(popt[2]), pcov[0][0]
def calibrate_RT_gaus(bwidth, mass_left, mass_right, true_md):
    """Fit a Gaussian-plus-background to the histogram of RT errors.

    Same procedure as calibrate_mass but with a wider initial sigma
    guess (5 bin widths). Returns (shift, sigma, amplitude variance).
    """
    bin_edges = np.arange(-mass_left, mass_right, bwidth)
    hist, edges = np.histogram(true_md, bins=bin_edges)
    # shifted edges (minus the last) serve as x positions for the fit
    xpos = (edges + bwidth)[:-1]
    popt, pcov = curve_fit(noisygaus, xpos, hist, p0=[1, np.median(true_md), bwidth * 5, 1])
    return popt[1], abs(popt[2]), pcov[0][0]
def process_file(args):
    """Reset the per-run target/decoy caches, generate the decoy database
    and run the peptide search on the prepared arguments."""
    utils.seen_target.clear()
    utils.seen_decoy.clear()
    return process_peptides(utils.prepare_decoy_db(args))
def peptide_processor(peptide, **kwargs):
    """Match one peptide sequence against the module-global sorted
    feature mass array ``nmasses``.

    Returns a list of (sequence, ppm mass difference, mods=0, feature
    index) tuples for every feature within the ppm tolerance window
    given by kwargs['acc_l'] / kwargs['acc_r'].

    The original loop also read ids[i] and Is[i] into locals that were
    never used; that dead work is removed here.
    """
    aa_mass = kwargs['aa_mass']
    # theoretical neutral mass including optional terminal modifications
    m = (cmass.fast_mass(peptide, aa_mass=aa_mass)
         + aa_mass.get('Nterm', 0) + aa_mass.get('Cterm', 0))
    acc_l = kwargs['acc_l']
    acc_r = kwargs['acc_r']
    # asymmetric ppm window around the theoretical mass
    dm_l = acc_l * m / 1.0e6
    dm_r = dm_l if acc_r == acc_l else acc_r * m / 1.0e6
    start = nmasses.searchsorted(m - dm_l)
    end = nmasses.searchsorted(m + dm_r)
    return [(peptide, (m - nmasses[i]) / m * 1e6, 0, i)
            for i in range(start, end)]
def prepare_peptide_processor(fname, args):
    """Read the feature file, publish mass-sorted feature arrays as
    module globals for the peptide matchers, and build the kwargs
    (amino-acid masses with fixed mods, ppm tolerances) consumed by
    peptide_processor.

    Returns (kwargs_dict, feature DataFrame sorted by neutral mass).
    """
    # the matcher functions index these arrays by position, hence globals
    global nmasses
    global rts
    global charges
    global ids
    global Is
    global Scans
    global Isotopes
    global mzraw
    global avraw
    global imraw
    min_ch = args['cmin']
    max_ch = args['cmax']
    min_isotopes = args['i']
    min_scans = args['sc']
    print('Reading spectra ...')
    df_features = utils.iterate_spectra(fname, min_ch, max_ch, min_isotopes, min_scans)
    # Sort by neutral mass
    df_features = df_features.sort_values(by='massCalib')
    nmasses = df_features['massCalib'].values
    rts = df_features['rtApex'].values
    charges = df_features['charge'].values
    ids = df_features['id'].values
    Is = df_features['intensityApex'].values
    Scans = df_features['nScans'].values
    Isotopes = df_features['nIsotopes'].values
    mzraw = df_features['mz'].values
    avraw = np.zeros(len(df_features))
    # prefer FAIMS values when they actually vary, otherwise ion mobility
    if len(set(df_features['FAIMS'])) > 1:
        imraw = df_features['FAIMS'].values
    else:
        imraw = df_features['ion_mobility'].values
    print('Number of peptide isotopic clusters: %d' % (len(nmasses), ))
    fmods = args['fmods']
    aa_mass = mass.std_aa_mass
    if fmods:
        # fixed modifications come as 'mass@aa'; '[' = N-term, ']' = C-term
        for mod in fmods.split(','):
            m, aa = mod.split('@')
            if aa == '[':
                aa_mass['Nterm'] = float(m)
            elif aa == ']':
                aa_mass['Cterm'] = float(m)
            else:
                aa_mass[aa] += float(m)
    # symmetric precursor tolerance in ppm
    acc_l = args['ptol']
    acc_r = args['ptol']
    return {'aa_mass': aa_mass, 'acc_l': acc_l, 'acc_r': acc_r, 'args': args}, df_features
def peptide_processor_iter_isoforms(peptide, **kwargs):
    """Wrap peptide_processor results in a single-element list (one
    entry per isoform; only the unmodified isoform is produced here)."""
    return [peptide_processor(peptide, **kwargs)]
def get_results(ms1results):
    """Transpose the flat list of (seq, massdiff, mods, iorig) match
    tuples into a dict of numpy arrays keyed by column name."""
    labels = [
        'seqs',
        'md',
        'mods',
        'iorig',
        # unused columns kept for reference:
        # 'rt', 'ids', 'Is', 'Scans', 'Isotopes', 'mzraw', 'av', 'ch', 'im',
    ]
    columns = zip(*ms1results)
    return dict((label, np.array(col)) for label, col in zip(labels, columns))
def filter_results(resultdict, idx):
    """Apply the same boolean/index mask *idx* to every column array of
    *resultdict*, returning a new dict of the filtered arrays."""
    return dict((label, arr[idx]) for label, arr in resultdict.items())
def process_peptides(args):
"""Main MS1 search driver: match peptides against features, calibrate mass
and retention time, optionally rescore PFMs with LightGBM, and run the
final protein inference via final_iteration().
NOTE(review): indentation of this chunk was stripped during extraction; the
code below is kept byte-identical to the extracted text, comments only added.
Relies on module/outer-scope names not visible here (Isotopes, Is, rts,
mzraw, ids, Scans, charges, avraw, imraw, utils, aux, achrom, ...) -- TODO
confirm they are set by prepare_peptide_processor in the full file."""
# --- input paths and basic parameters ---
fname = args['file']
fdr = args['fdr'] / 100
min_isotopes_calibration = args['ci']
try:
outpath = args['outpath']
except:
# bare except: treats a missing 'outpath' key as "no output dir"
outpath = False
if outpath:
base_out_name = os.path.splitext(os.path.join(outpath, os.path.basename(fname)))[0]
else:
base_out_name = os.path.splitext(fname)[0]
# NOTE(review): the log file is opened, closed, and immediately reopened;
# opening with 'w' already truncates, so the first open/close looks redundant.
out_log = open(base_out_name + '_log.txt', 'w')
out_log.close()
out_log = open(base_out_name + '_log.txt', 'w')
# external RT predictor executables (empty string disables each of them)
elude_path = args['elude']
elude_path = elude_path.strip()
deeplc_path = args['deeplc']
deeplc_path = deeplc_path.strip()
calib_path = args['pl']
calib_path = calib_path.strip()
if calib_path and args['ts']:
# an external calibration peptide list replaces two-stage RT prediction
args['ts'] = 0
print('Two-stage RT prediction does not work with list of MS/MS identified peptides...')
args['enzyme'] = utils.get_enzyme(args['e'])
# --- peptide-feature matching ---
ms1results = []
peps = utils.peptide_gen(args)
kwargs, df_features = prepare_peptide_processor(fname, args)
func = peptide_processor_iter_isoforms
print('Running the search ...')
for y in utils.multimap(1, func, peps, **kwargs):
for result in y:
if len(result):
ms1results.extend(result)
prefix = args['prefix']
protsN, pept_prot = utils.get_prot_pept_map(args)
resdict = get_results(ms1results)
del ms1results
# missed-cleavage count per matched sequence
resdict['mc'] = np.array([parser.num_sites(z, args['enzyme']) for z in resdict['seqs']])
isdecoy = lambda x: x[0].startswith(prefix)
isdecoy_key = lambda x: x.startswith(prefix)
escore = lambda x: -x[1]
# keep only matches with enough isotopes and no missed cleavages for the
# preliminary (pre-calibration) protein scoring
e_ind = np.array([Isotopes[iorig] for iorig in resdict['iorig']]) >= min_isotopes_calibration
# e_ind = resdict['Isotopes'] >= min_isotopes_calibration
# e_ind = resdict['Isotopes'] >= 1
resdict2 = filter_results(resdict, e_ind)
e_ind = resdict2['mc'] == 0
resdict2 = filter_results(resdict2, e_ind)
p1 = set(resdict2['seqs'])
if len(p1):
# --- preliminary protein scoring (binomial survival function) ---
prots_spc2 = defaultdict(set)
for pep, proteins in pept_prot.items():
if pep in p1:
for protein in proteins:
prots_spc2[protein].add(pep)
for k in protsN:
if k not in prots_spc2:
prots_spc2[k] = set([])
prots_spc = dict((k, len(v)) for k, v in prots_spc2.items())
names_arr = np.array(list(prots_spc.keys()))
v_arr = np.array(list(prots_spc.values()))
n_arr = np.array([protsN[k] for k in prots_spc])
# decoy-based estimate of the per-peptide match probability p
top100decoy_score = [prots_spc.get(dprot, 0) for dprot in protsN if isdecoy_key(dprot)]
top100decoy_N = [val for key, val in protsN.items() if isdecoy_key(key)]
p = np.mean(top100decoy_score) / np.mean(top100decoy_N)
print('p=%s' % (np.mean(top100decoy_score) / np.mean(top100decoy_N)))
prots_spc = dict()
all_pvals = calc_sf_all(v_arr, n_arr, p)
for idx, k in enumerate(names_arr):
prots_spc[k] = all_pvals[idx]
# keep only the better-scoring member of each target/decoy pair
checked = set()
for k, v in list(prots_spc.items()):
if k not in checked:
if isdecoy_key(k):
if prots_spc.get(k.replace(prefix, ''), -1e6) > v:
del prots_spc[k]
checked.add(k.replace(prefix, ''))
else:
if prots_spc.get(prefix + k, -1e6) > v:
del prots_spc[k]
checked.add(prefix + k)
filtered_prots = aux.filter(prots_spc.items(), fdr=0.05, key=escore, is_decoy=isdecoy, remove_decoy=True, formula=1,
full_output=True)
identified_proteins = 0
for x in filtered_prots:
identified_proteins += 1
print('results for default search: number of identified proteins = %d' % (identified_proteins, ))
# --- mass recalibration using peptides of confidently identified proteins ---
print('Running mass recalibration...')
e_ind = resdict['mc'] == 0
resdict2 = filter_results(resdict, e_ind)
true_md = []
true_isotopes = []
true_seqs = []
true_prots = set(x[0] for x in filtered_prots)
for pep, proteins in pept_prot.items():
if any(protein in true_prots for protein in proteins):
true_seqs.append(pep)
e_ind = np.in1d(resdict2['seqs'], true_seqs)
true_seqs = resdict2['seqs'][e_ind]
true_md.extend(resdict2['md'][e_ind])
true_md = np.array(true_md)
# true_isotopes.extend(resdict2['Isotopes'][e_ind])
true_isotopes.extend(np.array([Isotopes[iorig] for iorig in resdict2['iorig']])[e_ind])
true_isotopes = np.array(true_isotopes)
true_intensities = np.array([Is[iorig] for iorig in resdict2['iorig']])[e_ind]
# true_intensities = np.array(resdict2['Is'][e_ind])
# true_rt = np.array(resdict2['rt'][e_ind])
# true_mz = np.array(resdict2['mzraw'][e_ind])
true_rt = np.array([rts[iorig] for iorig in resdict2['iorig']])[e_ind]
true_mz = np.array([mzraw[iorig] for iorig in resdict2['iorig']])[e_ind]
df1 = pd.DataFrame()
df1['mass diff'] = true_md
df1['mz'] = true_mz
df1['RT'] = true_rt
df1['Intensity'] = true_intensities
df1['seqs'] = true_seqs
df1['orig_md'] = true_md
mass_left = args['ptol']
mass_right = args['ptol']
# fit a Gaussian to the mass-error distribution; retry with a coarser
# starting width if the first fit fails
try:
mass_shift, mass_sigma, covvalue = calibrate_mass(0.001, mass_left, mass_right, true_md)
except:
mass_shift, mass_sigma, covvalue = calibrate_mass(0.01, mass_left, mass_right, true_md)
print('Calibrated mass shift: ', mass_shift)
print('Calibrated mass sigma in ppm: ', mass_sigma)
out_log.write('Calibrated mass shift: %s\n' % (mass_shift, ))
out_log.write('Calibrated mass sigma in ppm: %s\n' % (mass_sigma, ))
# drop matches more than 3 sigma away from the calibrated mass shift
e_all = abs(resdict['md'] - mass_shift) / (mass_sigma)
r = 3.0
e_ind = e_all <= r
resdict = filter_results(resdict, e_ind)
zs_all = e_all[e_ind] ** 2
# --- re-run preliminary protein scoring on the calibrated matches ---
e_ind = np.array([Isotopes[iorig] for iorig in resdict['iorig']]) >= min_isotopes_calibration
resdict2 = filter_results(resdict, e_ind)
e_ind = resdict2['mc'] == 0
resdict2 = filter_results(resdict2, e_ind)
p1 = set(resdict2['seqs'])
prots_spc2 = defaultdict(set)
for pep, proteins in pept_prot.items():
if pep in p1:
for protein in proteins:
prots_spc2[protein].add(pep)
for k in protsN:
if k not in prots_spc2:
prots_spc2[k] = set([])
prots_spc = dict((k, len(v)) for k, v in prots_spc2.items())
names_arr = np.array(list(prots_spc.keys()))
v_arr = np.array(list(prots_spc.values()))
n_arr = np.array([protsN[k] for k in prots_spc])
top100decoy_score = [prots_spc.get(dprot, 0) for dprot in protsN if isdecoy_key(dprot)]
top100decoy_N = [val for key, val in protsN.items() if isdecoy_key(key)]
p = np.mean(top100decoy_score) / np.mean(top100decoy_N)
print('p=%s' % (np.mean(top100decoy_score) / np.mean(top100decoy_N)))
prots_spc = dict()
all_pvals = calc_sf_all(v_arr, n_arr, p)
for idx, k in enumerate(names_arr):
prots_spc[k] = all_pvals[idx]
checked = set()
for k, v in list(prots_spc.items()):
if k not in checked:
if isdecoy_key(k):
if prots_spc.get(k.replace(prefix, ''), -1e6) > v:
del prots_spc[k]
checked.add(k.replace(prefix, ''))
else:
if prots_spc.get(prefix + k, -1e6) > v:
del prots_spc[k]
checked.add(prefix + k)
filtered_prots = aux.filter(prots_spc.items(), fdr=0.05, key=escore, is_decoy=isdecoy, remove_decoy=True, formula=1,
full_output=True)
identified_proteins = 0
for x in filtered_prots:
identified_proteins += 1
print('results for default search after mass calibration: number of identified proteins = %d' % (identified_proteins, ))
# --- RT prediction: collect calibration peptides from confident proteins ---
print('Running RT prediction...')
e_ind = np.array([Isotopes[iorig] for iorig in resdict['iorig']]) >= min_isotopes_calibration
# e_ind = resdict['Isotopes'] >= min_isotopes_calibration
# e_ind = resdict['Isotopes'] >= 1
resdict2 = filter_results(resdict, e_ind)
e_ind = resdict2['mc'] == 0
resdict2 = filter_results(resdict2, e_ind)
true_seqs = []
true_rt = []
true_isotopes = []
true_prots = set(x[0] for x in filtered_prots)#[:5])
for pep, proteins in pept_prot.items():
if any(protein in true_prots for protein in proteins):
true_seqs.append(pep)
e_ind = np.in1d(resdict2['seqs'], true_seqs)
true_seqs = resdict2['seqs'][e_ind]
true_rt.extend(np.array([rts[iorig] for iorig in resdict2['iorig']])[e_ind])
# true_rt.extend(resdict2['rt'][e_ind])
true_rt = np.array(true_rt)
true_isotopes.extend(np.array([Isotopes[iorig] for iorig in resdict2['iorig']])[e_ind])
# true_isotopes.extend(resdict2['Isotopes'][e_ind])
true_isotopes = np.array(true_isotopes)
e_all = abs(resdict2['md'][e_ind] - mass_shift) / (mass_sigma)
zs_all_tmp = e_all ** 2
e_ind = true_isotopes >= min_isotopes_calibration
true_seqs = true_seqs[e_ind]
true_rt = true_rt[e_ind]
true_isotopes = true_isotopes[e_ind]
zs_all_tmp = zs_all_tmp[e_ind]
# keep the 2500 matches with the smallest mass error for RT training
e_ind = np.argsort(zs_all_tmp)
true_seqs = true_seqs[e_ind]
true_rt = true_rt[e_ind]
true_isotopes = true_isotopes[e_ind]
true_seqs = true_seqs[:2500]
true_rt = true_rt[:2500]
true_isotopes = true_isotopes[:2500]
# collapse duplicate sequences to their median RT
best_seq = defaultdict(list)
newseqs = []
newRTs = []
for seq, RT in zip(true_seqs, true_rt):
best_seq[seq].append(RT)
for k, v in best_seq.items():
newseqs.append(k)
newRTs.append(np.median(v))
true_seqs = np.array(newseqs)
true_rt = np.array(newRTs)
if calib_path:
# external list of MS/MS-identified peptides overrides the training set
df1 = pd.read_csv(calib_path, sep='\t')
true_seqs2 = df1['peptide'].values
true_rt2 = df1['RT exp'].values
else:
true_seqs2 = true_seqs
true_rt2 = true_rt
# --- stage-1 RT model: DeepLC, ELUDE, or additive (achrom) model ---
if args['ts'] != 2 and deeplc_path:
outtrain_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
outtrain = open(outtrain_name, 'w')
outcalib_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
outcalib = open(outcalib_name, 'w')
outres_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
ns = true_seqs
nr = true_rt
print('Peptides used for RT prediction: %d' % (len(ns), ))
ns2 = true_seqs2
nr2 = true_rt2
# DeepLC input: seq,modifications,tr with fixed Carbamidomethyl on C
outtrain.write('seq,modifications,tr\n')
for seq, RT in zip(ns2, nr2):
mods_tmp = '|'.join([str(idx+1)+'|Carbamidomethyl' for idx, aa in enumerate(seq) if aa == 'C'])
outtrain.write(seq + ',' + str(mods_tmp) + ',' + str(RT) + '\n')
outtrain.close()
outcalib.write('seq,modifications,tr\n')
for seq, RT in zip(ns, nr):
mods_tmp = '|'.join([str(idx+1)+'|Carbamidomethyl' for idx, aa in enumerate(seq) if aa == 'C'])
outcalib.write(seq + ',' + str(mods_tmp) + ',' + str(RT) + '\n')
outcalib.close()
subprocess.call([deeplc_path, '--file_pred', outcalib_name, '--file_cal', outtrain_name, '--file_pred_out', outres_name])
pepdict = dict()
train_RT = []
train_seq = []
for x in open(outres_name).readlines()[1:]:
_, seq, _, RTexp, RT = x.strip().split(',')
pepdict[seq] = float(RT)
train_seq.append(seq)
train_RT.append(float(RTexp))
train_RT = np.array(train_RT)
RT_pred = np.array([pepdict[s] for s in train_seq])
# fit Gaussian to prediction error; widen/reset start width on failure
rt_diff_tmp = RT_pred - train_RT
RT_left = -min(rt_diff_tmp)
RT_right = max(rt_diff_tmp)
try:
start_width = (scoreatpercentile(rt_diff_tmp, 95) - scoreatpercentile(rt_diff_tmp, 5)) / 100
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(start_width, RT_left, RT_right, rt_diff_tmp)
except:
start_width = (scoreatpercentile(rt_diff_tmp, 95) - scoreatpercentile(rt_diff_tmp, 5)) / 50
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(start_width, RT_left, RT_right, rt_diff_tmp)
if np.isinf(covvalue):
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(0.1, RT_left, RT_right, rt_diff_tmp)
if np.isinf(covvalue):
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(1.0, RT_left, RT_right, rt_diff_tmp)
print('Calibrated RT shift: ', XRT_shift)
print('Calibrated RT sigma: ', XRT_sigma)
aa, bb, RR, ss = aux.linear_regression(RT_pred, train_RT)
else:
if args['ts'] != 2 and elude_path:
outtrain_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
outtrain = open(outtrain_name, 'w')
outcalib_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
outcalib = open(outcalib_name, 'w')
outres_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
ns = true_seqs
nr = true_rt
print('Peptides used for RT prediction: %d' % (len(ns), ))
ns2 = true_seqs2
nr2 = true_rt2
# NOTE(review): both files are written from (ns, nr); ns2/nr2 are
# assigned above but unused in this branch -- TODO confirm intended.
for seq, RT in zip(ns, nr):
outtrain.write(seq + '\t' + str(RT) + '\n')
outtrain.close()
for seq, RT in zip(ns, nr):
outcalib.write(seq + '\t' + str(RT) + '\n')
outcalib.close()
subprocess.call([elude_path, '-t', outtrain_name, '-e', outcalib_name, '-a', '-g', '-o', outres_name])
pepdict = dict()
train_RT = []
train_seq = []
for x in open(outres_name).readlines()[3:]:
seq, RT, RTexp = x.strip().split('\t')
pepdict[seq] = float(RT)
train_seq.append(seq)
train_RT.append(float(RTexp))
train_RT = np.array(train_RT)
# NOTE(review): unlike the DeepLC branch (and the second-stage ELUDE
# branch below), RT_pred is NOT rebuilt from pepdict here, so the
# following line uses a stale/undefined RT_pred -- likely a bug;
# expected: RT_pred = np.array([pepdict[s] for s in train_seq])
rt_diff_tmp = RT_pred - train_RT
RT_left = -min(rt_diff_tmp)
RT_right = max(rt_diff_tmp)
start_width = (scoreatpercentile(rt_diff_tmp, 95) - scoreatpercentile(rt_diff_tmp, 5)) / 50
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(start_width, RT_left, RT_right, rt_diff_tmp)
if np.isinf(covvalue):
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(0.1, RT_left, RT_right, rt_diff_tmp)
if np.isinf(covvalue):
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(1.0, RT_left, RT_right, rt_diff_tmp)
print('Calibrated RT shift: ', XRT_shift)
print('Calibrated RT sigma: ', XRT_sigma)
aa, bb, RR, ss = aux.linear_regression(RT_pred, train_RT)
else:
# fallback: additive retention-coefficient model from pyteomics.achrom
ns = true_seqs
nr = true_rt
ns2 = true_seqs2
nr2 = true_rt2
RC = achrom.get_RCs_vary_lcp(ns2, nr2)
RT_pred = np.array([achrom.calculate_RT(s, RC) for s in ns])
train_RT = nr
aa, bb, RR, ss = aux.linear_regression(RT_pred, nr)
rt_diff_tmp = RT_pred - nr
RT_left = -min(rt_diff_tmp)
RT_right = max(rt_diff_tmp)
start_width = (scoreatpercentile(rt_diff_tmp, 95) - scoreatpercentile(rt_diff_tmp, 5)) / 50
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(start_width, RT_left, RT_right, rt_diff_tmp)
if np.isinf(covvalue):
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(0.1, RT_left, RT_right, rt_diff_tmp)
if np.isinf(covvalue):
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(1.0, RT_left, RT_right, rt_diff_tmp)
print('Calibrated RT shift: ', XRT_shift)
print('Calibrated RT sigma: ', XRT_sigma)
print(aa, bb, RR, ss)
best_sigma = XRT_sigma
RT_sigma = XRT_sigma
else:
# no peptides survived the preliminary filters
print('No matches found')
# --- optional stage-2 RT prediction on outlier-trimmed training set ---
if args['ts']:
print('Running second stage RT prediction...')
ns = np.array(ns)
nr = np.array(nr)
# retrain only on peptides within 3 sigma of the stage-1 fit
idx = np.abs((rt_diff_tmp) - XRT_shift) <= 3 * XRT_sigma
ns = ns[idx]
nr = nr[idx]
if deeplc_path:
outtrain_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
outtrain = open(outtrain_name, 'w')
outres_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
print('Peptides used for RT prediction: %d' % (len(ns), ))
ll = len(ns)
ns = ns[:ll]
nr = nr[:ll]
outtrain.write('seq,modifications,tr\n')
for seq, RT in zip(ns, nr):
mods_tmp = '|'.join([str(idx+1)+'|Carbamidomethyl' for idx, aa in enumerate(seq) if aa == 'C'])
outtrain.write(seq + ',' + str(mods_tmp) + ',' + str(RT) + '\n')
outtrain.close()
subprocess.call([deeplc_path, '--file_pred', outtrain_name, '--file_cal', outtrain_name, '--file_pred_out', outres_name])
pepdict = dict()
train_RT = []
train_seq = []
for x in open(outres_name).readlines()[1:]:
_, seq, _, RTexp, RT = x.strip().split(',')
pepdict[seq] = float(RT)
train_seq.append(seq)
train_RT.append(float(RTexp))
train_RT = np.array(train_RT)
RT_pred = np.array([pepdict[s] for s in train_seq])
rt_diff_tmp = RT_pred - train_RT
RT_left = -min(rt_diff_tmp)
RT_right = max(rt_diff_tmp)
try:
start_width = (scoreatpercentile(rt_diff_tmp, 95) - scoreatpercentile(rt_diff_tmp, 5)) / 100
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(start_width, RT_left, RT_right, rt_diff_tmp)
except:
start_width = (scoreatpercentile(rt_diff_tmp, 95) - scoreatpercentile(rt_diff_tmp, 5)) / 50
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(start_width, RT_left, RT_right, rt_diff_tmp)
if np.isinf(covvalue):
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(0.1, RT_left, RT_right, rt_diff_tmp)
if np.isinf(covvalue):
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(1.0, RT_left, RT_right, rt_diff_tmp)
print('Calibrated RT shift: ', XRT_shift)
print('Calibrated RT sigma: ', XRT_sigma)
aa, bb, RR, ss = aux.linear_regression(RT_pred, train_RT)
else:
if elude_path:
outtrain_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
outtrain = open(outtrain_name, 'w')
outres_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
print(len(ns))
ll = len(ns)
ns = ns[:ll]
nr = nr[:ll]
for seq, RT in zip(ns, nr):
outtrain.write(seq + '\t' + str(RT) + '\n')
outtrain.close()
subprocess.call([elude_path, '-t', outtrain_name, '-e', outtrain_name, '-a', '-g', '-o', outres_name])
pepdict = dict()
train_RT = []
train_seq = []
for x in open(outres_name).readlines()[3:]:
seq, RT, RTexp = x.strip().split('\t')
pepdict[seq] = float(RT)
train_seq.append(seq)
train_RT.append(float(RTexp))
train_RT = np.array(train_RT)
RT_pred = np.array([pepdict[s] for s in train_seq])
rt_diff_tmp = RT_pred - train_RT
RT_left = -min(rt_diff_tmp)
RT_right = max(rt_diff_tmp)
start_width = (scoreatpercentile(rt_diff_tmp, 95) - scoreatpercentile(rt_diff_tmp, 5)) / 50
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(start_width, RT_left, RT_right, rt_diff_tmp)
if np.isinf(covvalue):
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(0.1, RT_left, RT_right, rt_diff_tmp)
if np.isinf(covvalue):
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(1.0, RT_left, RT_right, rt_diff_tmp)
print('Calibrated RT shift: ', XRT_shift)
print('Calibrated RT sigma: ', XRT_sigma)
aa, bb, RR, ss = aux.linear_regression(RT_pred, train_RT)
else:
RC = achrom.get_RCs_vary_lcp(ns, nr)
RT_pred = np.array([achrom.calculate_RT(s, RC) for s in ns])
aa, bb, RR, ss = aux.linear_regression(RT_pred, nr)
rt_diff_tmp = RT_pred - nr
RT_left = -min(rt_diff_tmp)
RT_right = max(rt_diff_tmp)
start_width = (scoreatpercentile(rt_diff_tmp, 95) - scoreatpercentile(rt_diff_tmp, 5)) / 50
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(start_width, RT_left, RT_right, rt_diff_tmp)
if np.isinf(covvalue):
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(0.1, RT_left, RT_right, rt_diff_tmp)
if np.isinf(covvalue):
XRT_shift, XRT_sigma, covvalue = calibrate_RT_gaus(1.0, RT_left, RT_right, rt_diff_tmp)
print('Calibrated RT shift: ', XRT_shift)
print('Calibrated RT sigma: ', XRT_sigma)
print(aa, bb, RR, ss)
best_sigma = XRT_sigma
RT_sigma = XRT_sigma
out_log.write('Calibrated RT shift: %s\n' % (XRT_shift, ))
out_log.write('Calibrated RT sigma: %s\n' % (XRT_sigma, ))
out_log.close()
# --- predict RT for every matched sequence (DeepLC, ELUDE or achrom) ---
p1 = set(resdict['seqs'])
n = args['nproc']
if deeplc_path:
pepdict = dict()
outtrain_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
outtrain = open(outtrain_name, 'w')
outres_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
outtrain.write('seq,modifications,tr\n')
for seq, RT in zip(ns, nr):
mods_tmp = '|'.join([str(idx+1)+'|Carbamidomethyl' for idx, aa in enumerate(seq) if aa == 'C'])
outtrain.write(seq + ',' + str(mods_tmp) + ',' + str(RT) + '\n')
outtrain.close()
outtest_name = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())
outtest = open(outtest_name, 'w')
outtest.write('seq,modifications\n')
for seq in p1:
mods_tmp = '|'.join([str(idx+1)+'|Carbamidomethyl' for idx, aa in enumerate(seq) if aa == 'C'])
outtest.write(seq + ',' + str(mods_tmp) + '\n')
outtest.close()
if args['deeplc_library']:
print('Using deeplc library...')
subprocess.call([deeplc_path, '--file_pred', outtest_name, '--file_cal', outtrain_name, '--file_pred_out', outres_name, '--use_library', args['deeplc_library'], '--write_library'])
else:
subprocess.call([deeplc_path, '--file_pred', outtest_name, '--file_cal', outtrain_name, '--file_pred_out', outres_name])
for x in open(outres_name).readlines()[1:]:
_, seq, _, RT = x.strip().split(',')
pepdict[seq] = float(RT)
else:
# ELUDE/achrom predictions, parallelized with worker_RT unless on
# Windows or nproc == 1 (single process, direct call)
if n == 1 or os.name == 'nt':
qin = list(p1)
qout = []
if elude_path:
pepdict = worker_RT(qin, qout, 0, 1, False, elude_path, ns, nr, True)
else:
pepdict = worker_RT(qin, qout, 0, 1, RC, False, False, False, True)
else:
qin = list(p1)
qout = Queue()
procs = []
for i in range(n):
if elude_path:
p = Process(target=worker_RT, args=(qin, qout, i, n, False, elude_path, ns, nr))
else:
p = Process(target=worker_RT, args=(qin, qout, i, n, RC, False, False, False))
p.start()
procs.append(p)
pepdict = dict()
for _ in range(n):
for item in iter(qout.get, None):
for k, v in item.items():
pepdict[k] = v
for p in procs:
p.join()
# --- filter matches by RT error (3 sigma, i.e. squared error <= 9) ---
rt_pred = np.array([pepdict[s] for s in resdict['seqs']])
rt_diff = np.array([rts[iorig] for iorig in resdict['iorig']]) - rt_pred
# rt_diff = resdict['rt'] - rt_pred
e_all = (rt_diff) ** 2 / (RT_sigma ** 2)
r = 9.0
e_ind = e_all <= r
resdict = filter_results(resdict, e_ind)
rt_diff = rt_diff[e_ind]
rt_pred = rt_pred[e_ind]
# --- write per-protein theoretical peptide counts and PFM table ---
with open(base_out_name + '_protsN.tsv', 'w') as output:
output.write('dbname\ttheor peptides\n')
for k, v in protsN.items():
output.write('\t'.join((k, str(v))) + '\n')
with open(base_out_name + '_PFMs.tsv', 'w') as output:
output.write('sequence\tmass diff\tRT diff\tpeak_id\tIntensity\tnScans\tnIsotopes\tproteins\tm/z\tRT\taveragineCorr\tcharge\tion_mobility\n')
# for seq, md, rtd, peak_id, I, nScans, nIsotopes, mzr, rtr, av, ch, im in zip(resdict['seqs'], resdict['md'], rt_diff, resdict['ids'], resdict['Is'], resdict['Scans'], resdict['Isotopes'], resdict['mzraw'], resdict['rt'], resdict['av'], resdict['ch'], resdict['im']):
for seq, md, rtd, iorig in zip(resdict['seqs'], resdict['md'], rt_diff, resdict['iorig']):
peak_id = ids[iorig]
I = Is[iorig]
nScans = Scans[iorig]
nIsotopes = Isotopes[iorig]
mzr = mzraw[iorig]
rtr = rts[iorig]
av = avraw[iorig]
ch = charges[iorig]
im = imraw[iorig]
output.write('\t'.join((seq, str(md), str(rtd), str(peak_id), str(I), str(nScans), str(nIsotopes), ';'.join(pept_prot[seq]), str(mzr), str(rtr), str(av), str(ch), str(im))) + '\n')
# --- normalize errors to z-scores for the ML / final scoring stage ---
e_ind = resdict['mc'] == 0
resdict = filter_results(resdict, e_ind)
rt_diff = rt_diff[e_ind]
rt_pred = rt_pred[e_ind]
mass_diff = (resdict['md'] - mass_shift) / (mass_sigma)
rt_diff = (np.array([rts[iorig] for iorig in resdict['iorig']]) - rt_pred) / RT_sigma
# rt_diff = (resdict['rt'] - rt_pred) / RT_sigma
# NOTE(review): prefix is re-hardcoded to 'DECOY_' here, overriding
# args['prefix'] used above -- TODO confirm intended.
prefix = 'DECOY_'
isdecoy = lambda x: x[0].startswith(prefix)
isdecoy_key = lambda x: x.startswith(prefix)
escore = lambda x: -x[1]
SEED = 42
# Hyperparameter grid
param_grid = {
'boosting_type': ['gbdt', ],
'num_leaves': list(range(10, 1000)),
'learning_rate': list(np.logspace(np.log10(0.001), np.log10(0.05), base = 10, num = 1000)),
'metric': ['rmse', ],
'verbose': [-1, ],
'num_threads': [args['nproc'], ],
}
# --- nested helpers for LightGBM-based PFM rescoring ---
def get_X_array(df, feature_columns):
# feature matrix for LightGBM
return df.loc[:, feature_columns].values
def get_Y_array_pfms(df):
# binary target: is the PFM a decoy
return df.loc[:, 'decoy'].values
def get_features_pfms(dataframe):
# all columns except identifiers/targets/leaky features
feature_columns = dataframe.columns
columns_to_remove = []
banned_features = {
'iorig',
'ids',
'seqs',
'decoy',
'preds',
'av',
'Scans',
'proteins',
'peptide',
'md',
}
for feature in feature_columns:
if feature in banned_features:
columns_to_remove.append(feature)
feature_columns = feature_columns.drop(columns_to_remove)
return feature_columns
def objective_pfms(df, hyperparameters, iteration, threshold=0):
"""Objective function for grid and random search. Returns
the cross validation score from a set of hyperparameters."""
# 3-fold CV grouped by peptide so one peptide never spans folds
all_res = []
groups = df['peptide']
ix = df.index.values
unique = np.unique(groups)
np.random.RandomState(SEED).shuffle(unique)
result = []
for split in np.array_split(unique, 3):
mask = groups.isin(split)
train, test = ix[~mask], ix[mask]
train_df = df.iloc[train]
test_df = df.iloc[test]
feature_columns = get_features_pfms(df)
model = get_cat_model_final_pfms(train_df, hyperparameters, feature_columns)
df.loc[mask, 'preds'] = model.predict(get_X_array(df.loc[mask, :], feature_columns))
train_df = df.iloc[train]
test_df = df.iloc[test]
fpr, tpr, thresholds = metrics.roc_curve(get_Y_array_pfms(test_df), test_df['preds'])
shr_v = metrics.auc(fpr, tpr)
# shr_v = len(aux.filter(test_df, fdr=0.25, key='preds', is_decoy='decoy'))
all_res.append(shr_v)
# print(shr_v)
# early abandon if a fold scores below the running threshold
if shr_v < threshold:
all_res = [0, ]
break
shr_v = np.mean(all_res)
# print(shr_v)
# print('\n')
return [shr_v, hyperparameters, iteration, all_res]
def random_search_pfms(df, param_grid, out_file, max_evals):
"""Random search for hyperparameter optimization.
Writes result of search to csv file every search iteration."""
threshold = 0
# Dataframe for results
results = pd.DataFrame(columns = ['sharpe', 'params', 'iteration', 'all_res'],
index = list(range(max_evals)))
for i in range(max_evals):
print('%d/%d' % (i+1, max_evals))
# Choose random hyperparameters
random_params = {k: random.sample(v, 1)[0] for k, v in param_grid.items()}
# Evaluate randomly selected hyperparameters
eval_results = objective_pfms(df, random_params, i, threshold)
results.loc[i, :] = eval_results
threshold = max(threshold, np.mean(eval_results[3]) - 3 * np.std(eval_results[3]))
# open connection (append option) and write results
of_connection = open(out_file, 'a')
writer = csv.writer(of_connection)
writer.writerow(eval_results)
# make sure to close connection
of_connection.close()
# Sort with best score on top
results.sort_values('sharpe', ascending = False, inplace = True)
results.reset_index(inplace = True)
return results
def get_cat_model_pfms(df, hyperparameters, feature_columns, train, test):
# LightGBM with early stopping on a validation split
feature_columns = list(feature_columns)
dtrain = lgb.Dataset(get_X_array(train, feature_columns), get_Y_array_pfms(train), feature_name=feature_columns, free_raw_data=False)
dvalid = lgb.Dataset(get_X_array(test, feature_columns), get_Y_array_pfms(test), feature_name=feature_columns, free_raw_data=False)
np.random.seed(SEED)
evals_result = {}
model = lgb.train(hyperparameters, dtrain, num_boost_round=5000, valid_sets=(dvalid,), valid_names=('valid',), verbose_eval=False,
early_stopping_rounds=20, evals_result=evals_result)
return model
def get_cat_model_final_pfms(df, hyperparameters, feature_columns):
# LightGBM trained on the full given frame, fixed 100 rounds
feature_columns = list(feature_columns)
train = df
dtrain = lgb.Dataset(get_X_array(train, feature_columns), get_Y_array_pfms(train), feature_name=feature_columns, free_raw_data=False)
np.random.seed(SEED)
model = lgb.train(hyperparameters, dtrain, num_boost_round=100)
return model
# --- build the PFM feature table ---
df1 = pd.DataFrame()
for k in resdict.keys():
df1[k] = resdict[k]
df1['ids'] = df1['iorig'].apply(lambda x: ids[x])
df1['Is'] = df1['iorig'].apply(lambda x: Is[x])
df1['Scans'] = df1['iorig'].apply(lambda x: Scans[x])
df1['Isotopes'] = df1['iorig'].apply(lambda x: Isotopes[x])
df1['mzraw'] = df1['iorig'].apply(lambda x: mzraw[x])
df1['rt'] = df1['iorig'].apply(lambda x: rts[x])
df1['av'] = df1['iorig'].apply(lambda x: avraw[x])
df1['ch'] = df1['iorig'].apply(lambda x: charges[x])
df1['im'] = df1['iorig'].apply(lambda x: imraw[x])
df1['mass_diff'] = mass_diff
df1['rt_diff'] = rt_diff
# a PFM is a decoy only if ALL of its parent proteins are decoys
df1['decoy'] = df1['seqs'].apply(lambda x: all(z.startswith(prefix) for z in pept_prot[x]))
df1['peptide'] = df1['seqs']
# physico-chemical features per unique peptide (0 on parse failure)
mass_dict = {}
pI_dict = {}
charge_dict = {}
for pep in set(df1['peptide']):
try:
mass_dict[pep] = mass.fast_mass2(pep)
pI_dict[pep] = electrochem.pI(pep)
charge_dict[pep] = electrochem.charge(pep, pH=7.0)
except:
mass_dict[pep] = 0
pI_dict[pep] = 0
charge_dict[pep] = 0
df1['plen'] = df1['peptide'].apply(lambda z: len(z))
df1['mass'] = df1['peptide'].apply(lambda x: mass_dict[x])
df1['pI'] = df1['peptide'].apply(lambda x: pI_dict[x])
df1['charge_theor'] = df1['peptide'].apply(lambda x: charge_dict[x])
df1['rt_diff_abs'] = df1['rt_diff'].abs()
df1['rt_diff_abs_pdiff'] = df1['rt_diff_abs'] - df1.groupby('ids')['rt_diff_abs'].transform('median')
df1['rt_diff_abs_pnorm'] = df1['rt_diff_abs'] / (df1.groupby('ids')['rt_diff_abs'].transform('sum') + 1e-2)
df1['id_count'] = df1.groupby('ids')['mass_diff'].transform('count')
df1['seq_count'] = df1.groupby('peptide')['mass_diff'].transform('count')
df1t5 = df1.sort_values(by='Is', ascending=False).copy()
df1t5 = df1t5.drop_duplicates(subset='peptide', keep='first')
# --- optional ML rescoring; otherwise simple (mass_z^2 + rt_z^2) score ---
if args['ml']:
print('Start Machine Learning on PFMs...')
print('Features used for MachineLearning: ', get_features_pfms(df1))
MAX_EVALS = 25
out_file = 'test_randomCV_PFMs_2.tsv'
of_connection = open(out_file, 'w')
writer = csv.writer(of_connection)
# Write column names
headers = ['auc', 'params', 'iteration', 'all_res']
writer.writerow(headers)
of_connection.close()
random_results = random_search_pfms(df1, param_grid, out_file, MAX_EVALS)
# re-read results from disk and pick the best AUC hyperparameters
random_results = pd.read_csv(out_file)
random_results = random_results[random_results['auc'] != 'auc']
random_results['params'] = random_results['params'].apply(lambda x: ast.literal_eval(x))
convert_dict = {'auc': float,
}
random_results = random_results.astype(convert_dict)
bestparams = random_results.sort_values(by='auc',ascending=False)['params'].values[0]
bestparams['num_threads'] = args['nproc']
print(random_results.sort_values(by='auc',ascending=False)['auc'].values[0])
# out-of-fold predictions with the best hyperparameters
groups = df1['peptide']
ix = df1.index.values
unique = np.unique(groups)
np.random.RandomState(SEED).shuffle(unique)
result = []
for split in np.array_split(unique, 3):
mask = groups.isin(split)
train, test = ix[~mask], ix[mask]
train_df = df1.iloc[train]
test_df = df1.iloc[test]
feature_columns = list(get_features_pfms(train_df))
model = get_cat_model_final_pfms(train_df, bestparams, feature_columns)
df1.loc[test, 'preds'] = model.predict(get_X_array(test_df, feature_columns))
else:
df1['preds'] = np.power(df1['mass_diff'], 2) + np.power(df1['rt_diff'], 2)
# decile-bin the score; final_iteration consumes the bin index
df1['qpreds'] = pd.qcut(df1['preds'], 10, labels=range(10))
df1['proteins'] = df1['seqs'].apply(lambda x: ';'.join(pept_prot[x]))
df1.to_csv(base_out_name + '_PFMs_ML.tsv', sep='\t', index=False)
resdict['qpreds'] = df1['qpreds'].values
resdict['ids'] = df1['ids'].values
mass_diff = resdict['qpreds']
rt_diff = resdict['qpreds']
# --- final protein scoring and FDR-controlled output ---
p1 = set(resdict['seqs'])
prots_spc2 = defaultdict(set)
for pep, proteins in pept_prot.items():
if pep in p1:
for protein in proteins:
prots_spc2[protein].add(pep)
for k in protsN:
if k not in prots_spc2:
prots_spc2[k] = set([])
prots_spc = dict((k, len(v)) for k, v in prots_spc2.items())
names_arr = np.array(list(prots_spc.keys()))
v_arr = np.array(list(prots_spc.values()))
n_arr = np.array([protsN[k] for k in prots_spc])
top100decoy_score = [prots_spc.get(dprot, 0) for dprot in protsN if isdecoy_key(dprot)]
top100decoy_N = [val for key, val in protsN.items() if isdecoy_key(key)]
p = np.mean(top100decoy_score) / np.mean(top100decoy_N)
print('p=%s' % (np.mean(top100decoy_score) / np.mean(top100decoy_N)))
prots_spc = dict()
all_pvals = calc_sf_all(v_arr, n_arr, p)
for idx, k in enumerate(names_arr):
prots_spc[k] = all_pvals[idx]
final_iteration(resdict, mass_diff, rt_diff, pept_prot, protsN, base_out_name, prefix, isdecoy, isdecoy_key, escore, fdr, args['nproc'], fname)
def worker(qin, qout, mass_diff, rt_diff, resdict, protsN, pept_prot, isdecoy_key, isdecoy, fdr, prots_spc_basic2, win_sys=False):
"""Greedy iterative protein-inference worker.
Consumes (mass_koef, rtt_koef) settings from *qin*; for each setting it
filters the matches by score bin (mass_diff <= mass_koef), then repeatedly
picks the best-scoring protein, removes its peptides from the pool, and
rescores, until the running decoy FDR exceeds 12.5 * fdr. Results are put
on *qout* (a Queue, or a list when win_sys is truthy).
NOTE(review): rtt_koef is unpacked but never used in this body; indentation
of this chunk was stripped during extraction -- code kept byte-identical."""
for item in (iter(qin.get, None) if not win_sys else qin):
mass_koef, rtt_koef = item
# mass_diff holds the qpreds decile bin here, so this keeps bins <= mass_koef
e_ind = mass_diff <= mass_koef
resdict2 = filter_results(resdict, e_ind)
# per-peptide best parent protein according to the preliminary scores
features_dict = dict()
for pep in set(resdict2['seqs']):
for bprot in pept_prot[pep]:
prot_score = prots_spc_basic2[bprot]
if prot_score > features_dict.get(pep, [-1, ])[-1]:
features_dict[pep] = (bprot, prot_score)
prots_spc_basic = dict()
p1 = set(resdict2['seqs'])
# peptide <-> feature-id maps; banned_dict counts live occurrences of a peptide
pep_pid = defaultdict(set)
pid_pep = defaultdict(set)
banned_dict = dict()
for pep, pid in zip(resdict2['seqs'], resdict2['ids']):
# for pep, pid in zip(resdict2['seqs'], [ids[iorig] for iorig in resdict2['iorig']]):
pep_pid[pep].add(pid)
pid_pep[pid].add(pep)
if pep in banned_dict:
banned_dict[pep] += 1
else:
banned_dict[pep] = 1
if len(p1):
prots_spc_final = dict()
prots_spc_copy = False
prots_spc2 = False
unstable_prots = set()
p0 = False
names_arr = False
tmp_spc_new = False
decoy_set = False
while 1:
# one-time initialization of the scoring state on first pass
if not prots_spc2:
best_match_dict = dict()
n_map_dict = defaultdict(list)
for k, v in protsN.items():
n_map_dict[v].append(k)
decoy_set = set()
for k in protsN:
if isdecoy_key(k):
decoy_set.add(k)
decoy_set = list(decoy_set)
# each peptide counts only for its single best parent protein
prots_spc2 = defaultdict(set)
for pep, proteins in pept_prot.items():
if pep in p1:
for protein in proteins:
if protein == features_dict[pep][0]:
prots_spc2[protein].add(pep)
for k in protsN:
if k not in prots_spc2:
prots_spc2[k] = set([])
prots_spc2 = dict(prots_spc2)
unstable_prots = set(prots_spc2.keys())
top100decoy_N = sum([val for key, val in protsN.items() if isdecoy_key(key)])
names_arr = np.array(list(prots_spc2.keys()))
n_arr = np.array([protsN[k] for k in names_arr])
tmp_spc_new = dict((k, len(v)) for k, v in prots_spc2.items())
top100decoy_score_tmp = [tmp_spc_new.get(dprot, 0) for dprot in decoy_set]
top100decoy_score_tmp_sum = float(sum(top100decoy_score_tmp))
tmp_spc = tmp_spc_new
prots_spc = tmp_spc_new
if not prots_spc_copy:
prots_spc_copy = deepcopy(prots_spc)
# refresh decoy score sum only for proteins touched last iteration
for idx, v in enumerate(decoy_set):
if v in unstable_prots:
top100decoy_score_tmp_sum -= top100decoy_score_tmp[idx]
top100decoy_score_tmp[idx] = prots_spc.get(v, 0)
top100decoy_score_tmp_sum += top100decoy_score_tmp[idx]
# NOTE(review): p is assigned twice; the first (full-sum) value is
# immediately overwritten by the incrementally-maintained sum.
p = float(sum(top100decoy_score_tmp)) / top100decoy_N
p = top100decoy_score_tmp_sum / top100decoy_N
if not p0:
p0 = float(p)
# rescore only the best protein per theoretical-peptide-count bucket
n_change = set(protsN[k] for k in unstable_prots)
for n_val in n_change:
for k in n_map_dict[n_val]:
v = prots_spc[k]
if n_val not in best_match_dict or v > prots_spc[best_match_dict[n_val]]:
best_match_dict[n_val] = k
n_arr_small = []
names_arr_small = []
v_arr_small = []
for k, v in best_match_dict.items():
n_arr_small.append(k)
names_arr_small.append(v)
v_arr_small.append(prots_spc[v])
prots_spc_basic = dict()
all_pvals = calc_sf_all(np.array(v_arr_small), n_arr_small, p)
for idx, k in enumerate(names_arr_small):
prots_spc_basic[k] = all_pvals[idx]
best_prot = utils.keywithmaxval(prots_spc_basic)
best_score = prots_spc_basic[best_prot]
unstable_prots = set()
if best_prot not in prots_spc_final:
# accept the winner, then release its peptides from the pool
prots_spc_final[best_prot] = best_score
banned_pids = set()
for pep in prots_spc2[best_prot]:
for pid in pep_pid[pep]:
banned_pids.add(pid)
for pid in banned_pids:
for pep in pid_pep[pid]:
banned_dict[pep] -= 1
if banned_dict[pep] == 0:
best_prot_val = features_dict[pep][0]
for bprot in pept_prot[pep]:
if bprot == best_prot_val:
tmp_spc_new[bprot] -= 1
unstable_prots.add(bprot)
else:
# winner already accepted: flush remaining scores and stop
v_arr = np.array([prots_spc[k] for k in names_arr])
all_pvals = calc_sf_all(v_arr, n_arr, p)
for idx, k in enumerate(names_arr):
prots_spc_basic[k] = all_pvals[idx]
for k, v in prots_spc_basic.items():
if k not in prots_spc_final:
prots_spc_final[k] = v
break
# stop once the running protein FDR gets too high
try:
prot_fdr = aux.fdr(prots_spc_final.items(), is_decoy=isdecoy)
except ZeroDivisionError:
prot_fdr = 100.0
if prot_fdr >= 12.5 * fdr:
v_arr = np.array([prots_spc[k] for k in names_arr])
all_pvals = calc_sf_all(v_arr, n_arr, p)
for idx, k in enumerate(names_arr):
prots_spc_basic[k] = all_pvals[idx]
for k, v in prots_spc_basic.items():
if k not in prots_spc_final:
prots_spc_final[k] = v
break
# ship the untouched score copy only once (for the mass_koef == 9 setting)
if mass_koef == 9:
item2 = prots_spc_copy
else:
item2 = False
if not win_sys:
qout.put((prots_spc_final, item2))
else:
qout.append((prots_spc_final, item2))
# sentinel so the consumer knows this worker is done (Queue mode only)
if not win_sys:
qout.put(None)
else:
return qout
| 63,581 | 22,968 |
from django.conf.urls import url
from django.views.generic import TemplateView
from . import views
# URL routes for the waitinglist app: signup endpoints, the survey flow,
# and cohort management.
urlpatterns = [
    # Waiting-list signup: standard form POST and the AJAX variant.
    url(r"^list_signup/$", views.list_signup, name="waitinglist_list_signup"),
    url(r"^ajax_list_signup/$", views.ajax_list_signup, name="waitinglist_ajax_list_signup"),
    # Survey flow: thanks page, the per-code survey, and a generic success page.
    # NOTE: "thanks" must precede the catch-all (?P<code>.*) pattern to be reachable.
    url(r"^survey/thanks/$", TemplateView.as_view(template_name="waitinglist/thanks.html"), name="waitinglist_thanks"),
    url(r"^survey/(?P<code>.*)/$", views.survey, name="waitinglist_survey"),
    url(r"^success/$", TemplateView.as_view(template_name="waitinglist/success.html"), name="waitinglist_success"),
    # Cohort management; the positional (\d+) group is presumably the cohort id — verify against views.
    url(r"^cohorts/$", views.cohort_list, name="waitinglist_cohort_list"),
    url(r"^cohorts/create/$", views.cohort_create, name="waitinglist_cohort_create"),
    url(r"^cohorts/cohort/(\d+)/$", views.cohort_detail, name="waitinglist_cohort_detail"),
    url(r"^cohorts/cohort/(\d+)/add_member/$", views.cohort_member_add, name="waitinglist_cohort_member_add"),
    url(r"^cohorts/cohort/(\d+)/send_invitations/$", views.cohort_send_invitations, name="waitinglist_cohort_send_invitations"),
]
| 1,099 | 428 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from pathlib import Path
from nokogiri.which_env import which_env
from pickle import Unpickler
class TQDMBytesReader(object):
    """File-like wrapper that reports read progress through a tqdm bar.

    Forwards ``read``/``readline`` to the wrapped binary file object *fd*
    and advances the progress bar by the number of bytes actually returned.

    Parameters
    ----------
    fd : binary file object to wrap
    tqdm : tqdm-like class; instantiated as ``tqdm(total=total)``
    total : expected total number of bytes (e.g. the file size)
    desc : text shown next to the progress bar
    """
    def __init__(self, fd, tqdm, total, desc=''):
        self.fd = fd
        self.tqdm = tqdm(total=total)
        self.tqdm.set_description(desc)
    def read(self, size=-1):
        # Renamed the local from `bytes` — the original shadowed the builtin.
        chunk = self.fd.read(size)
        self.tqdm.update(len(chunk))
        return chunk
    def readline(self):
        chunk = self.fd.readline()
        self.tqdm.update(len(chunk))
        return chunk
    def __enter__(self):
        # Enter the bar's own context so it is properly closed on exit.
        self.tqdm.__enter__()
        return self
    def __exit__(self, *args, **kwargs):
        return self.tqdm.__exit__(*args, **kwargs)
def tqdm_load(fname, tqdm=None, desc=''):
    """Unpickle *fname* while showing a byte-level progress bar.

    Parameters
    ----------
    fname : path to a pickle file
    tqdm : optional tqdm class; when None an appropriate one is chosen
        (notebook widget inside Jupyter, console bar otherwise)
    desc : text shown next to the progress bar

    Returns
    -------
    The unpickled object.
    """
    if tqdm is None:  # identity test — `== None` is unidiomatic
        if which_env() == which_env.JUPYTER:
            from tqdm.notebook import tqdm
        else:
            from tqdm import tqdm
    with open(fname, "rb") as fd:
        total = Path(fname).stat().st_size
        with TQDMBytesReader(fd, tqdm=tqdm, total=total, desc=desc) as pbfd:
            up = Unpickler(pbfd)
            obj = up.load()
    return obj
# tests.test_contrib.test_prepredict
# Test the prepredict estimator.
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Mon Jul 12 07:07:33 2021 -0400
#
# ID: test_prepredict.py [] benjamin@bengfort.com $
"""
Test the prepredict estimator.
"""
##########################################################################
## Imports
##########################################################################
import pytest
from io import BytesIO
from tests.fixtures import Dataset, Split
from tests.base import IS_WINDOWS_OR_CONDA, VisualTestCase
from sklearn.naive_bayes import GaussianNB
from sklearn.cluster import MiniBatchKMeans
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split as tts
from sklearn.datasets import make_classification, make_regression, make_blobs
from yellowbrick.contrib.prepredict import *
from yellowbrick.regressor import PredictionError
from yellowbrick.classifier import ClassificationReport
import numpy as np
# Set random state.
# NOTE(review): seed() with no argument seeds from OS entropy, so each run
# gets a DIFFERENT random stream — confirm nondeterminism is intended here.
np.random.seed()
##########################################################################
## Fixtures
##########################################################################
@pytest.fixture(scope="class")
def multiclass(request):
    """
    Creates a random multiclass classification dataset fixture
    """
    # Fixed random_state values keep the dataset identical across runs.
    features, labels = make_classification(
        n_samples=500,
        n_features=20,
        n_informative=8,
        n_redundant=2,
        n_classes=6,
        n_clusters_per_class=3,
        random_state=87,
    )
    X_train, X_test, y_train, y_test = tts(features, labels, test_size=0.2, random_state=93)
    # Attach the dataset to the requesting test class.
    request.cls.multiclass = Dataset(Split(X_train, X_test), Split(y_train, y_test))
@pytest.fixture(scope="class")
def continuous(request):
    """
    Creates a random continuous regression dataset fixture
    """
    features, target = make_regression(
        n_samples=500,
        n_features=22,
        n_informative=8,
        random_state=42,
        noise=0.2,
        bias=0.2,
    )
    X_train, X_test, y_train, y_test = tts(features, target, test_size=0.2, random_state=11)
    # Expose the regression dataset as a class attribute on the test class.
    request.cls.continuous = Dataset(Split(X_train, X_test), Split(y_train, y_test))
@pytest.fixture(scope="class")
def blobs(request):
    """
    Create a random blobs clustering dataset fixture
    """
    features, labels = make_blobs(
        n_samples=1000, n_features=12, centers=6, shuffle=True, random_state=42
    )
    # Expose the clustering dataset as a class attribute on the test class.
    request.cls.blobs = Dataset(features, labels)
##########################################################################
## Tests
##########################################################################
@pytest.mark.usefixtures("multiclass")
@pytest.mark.usefixtures("continuous")
@pytest.mark.usefixtures("blobs")
class TestPrePrePredictEstimator(VisualTestCase):
    """
    Pre-predict contrib tests.
    """
    @pytest.mark.xfail(
        IS_WINDOWS_OR_CONDA,
        reason="image comparison failure on Conda 3.8 and 3.9 with RMS 19.307",
    )
    def test_prepredict_classifier(self):
        """
        Test the prepredict estimator with classification report
        """
        # Make prepredictions
        X, y = self.multiclass.X, self.multiclass.y
        y_pred = GaussianNB().fit(X.train, y.train).predict(X.test)
        # Create prepredict estimator with prior predictions
        estimator = PrePredict(y_pred, CLASSIFIER)
        # fit() is fluent and predict() must hand back the stored prediction
        # object itself (identity, not a copy).
        assert estimator.fit(X.train, y.train) is estimator
        assert estimator.predict(X.train) is y_pred
        assert estimator.score(X.test, y.test) == pytest.approx(0.41, rel=1e-3)
        # Test that a visualizer works with the pre-predictions.
        # X may be None because the estimator never looks at the features.
        viz = ClassificationReport(estimator)
        viz.fit(None, y.train)
        viz.score(None, y.test)
        viz.finalize()
        self.assert_images_similar(viz)
    def test_prepredict_regressor(self):
        """
        Test the prepredict estimator with a prediction error plot
        """
        # Make prepredictions
        X, y = self.continuous.X, self.continuous.y
        y_pred = LinearRegression().fit(X.train, y.train).predict(X.test)
        # Create prepredict estimator with prior predictions
        estimator = PrePredict(y_pred, REGRESSOR)
        assert estimator.fit(X.train, y.train) is estimator
        assert estimator.predict(X.train) is y_pred
        assert estimator.score(X.test, y.test) == pytest.approx(0.9999983124154966, rel=1e-2)
        # Test that a visualizer works with the pre-predictions.
        viz = PredictionError(estimator)
        viz.fit(X.train, y.train)
        viz.score(X.test, y.test)
        viz.finalize()
        self.assert_images_similar(viz, tol=10.0)
    def test_prepredict_clusterer(self):
        """
        Test the prepredict estimator with a silhouette visualizer
        """
        X = self.blobs.X
        y_pred = MiniBatchKMeans(random_state=831).fit(X).predict(X)
        # Create prepredict estimator with prior predictions
        estimator = PrePredict(y_pred, CLUSTERER)
        assert estimator.fit(X) is estimator
        assert estimator.predict(X) is y_pred
        assert estimator.score(X) == pytest.approx(0.5477478541994333, rel=1e-2)
        # NOTE: there is currently no cluster visualizer that can take advantage of
        # the prepredict utility since they all require learned attributes.
    def test_load(self):
        """
        Test the various ways that prepredict loads data
        """
        # Test callable
        ppe = PrePredict(lambda: self.multiclass.y.test)
        assert ppe._load() is self.multiclass.y.test
        # Test file-like object, assume that str and pathlib.Path work similarly
        f = BytesIO()
        np.save(f, self.continuous.y.test)
        f.seek(0)
        ppe = PrePredict(f)
        assert np.array_equal(ppe._load(), self.continuous.y.test)
        # Test direct array-like completed in other tests.
| 5,991 | 1,919 |
# Prompt for a name on its own line, then greet the user.
name = input("What's your name?\n")
print("hello, " + name)
import os, sys
import numpy as np
import matplotlib.pyplot as plt
# Export library path
# Locate the project root by trimming this file's absolute path at the
# repository directory name, then make project imports resolvable.
rootname = "mesoscopic-functional-connectivity"
thispath = os.path.dirname(os.path.abspath(__file__))
rootpath = os.path.join(thispath[:thispath.index(rootname)], rootname)
print("Appending project path", rootpath)
sys.path.append(rootpath)
from codes.lib.info_metrics.info_metrics_generic import parallel_metric_2d
from codes.lib.models.test_lib import dynsys
from codes.lib.sweep_lib import DataSweep
############################
# Parameters
############################
# DynSys parameters
dynsysParam = {
    'nNode' : 4, # Number of variables
    'nData' : 4000, # Number of timesteps
    'nTrial' : 20, # Number of trials
    'dt' : 50, # ms, timestep
    'tau' : 500, # ms, timescale of each mesoscopic area
    'inpT' : 100, # Period of input oscillation
    'inpMag' : 0.0, # Magnitude of the periodic input
    'std' : 0.2, # STD of neuron noise
}
# IDTxl parameters
idtxlParam = {
    'dim_order' : 'rps',
    'cmi_estimator' : 'JidtGaussianCMI',
    'max_lag_sources' : 5,
    'min_lag_sources' : 1,
    'window' : 50
}
############################
# Data
############################
nSweep = 10
# Simulate the dynamical system defined by dynsysParam.
data = dynsys(dynsysParam) #[trial x channel x time]
print("Generated data of shape", data.shape)
methods = ['BivariateTE', 'MultivariateTE']
dataSweep1 = DataSweep(data, idtxlParam, nSweepMax=nSweep)
timeIdxs = dataSweep1.get_target_time_idxs()
# print(timeIdxs)
#
# from codes.lib.sweep_lib import Sweep2D
#
# sweeper = Sweep2D(dataSweep1.iterator(), methods, idtxlParam["dim_order"], parTarget=True)
#
# for i, (method, data, iTrg) in enumerate(sweeper.iterator()):
#     print(i, method, data.shape, iTrg)
# Compute both TE metrics over every sweep window (parallelized internally).
results = parallel_metric_2d(dataSweep1.iterator(), "idtxl", methods, idtxlParam, nCore=None)
# One row of panels per sweep window, one column per TE method.
fig, ax = plt.subplots(nrows=nSweep, ncols=2)
fig.suptitle("TE computation for several windows of the data")
for iMethod, method in enumerate(methods):
    ax[0][iMethod].set_title(method)
    print(results[method].shape)
    for iSweep in range(nSweep):
        ax[iSweep][0].set_ylabel("time="+str(timeIdxs[iSweep]))
        ax[iSweep][iMethod].imshow(results[method][iSweep][0])
plt.show()
'''
A Gameplay Mechanic [TS4]
By CozyGnomes (https://cozygnomes.tumblr.com/)
This is based on an existing document that lists several things you can do in your TS4 gameplay.
This program automatically generates a related suggestion, so you do not have to look up
each numbered entry by hand (as the original document instructs).
Project by Blenda C
'''
import random
FILE_NAME = 'gpmechanic.txt'
def get_mechanics(file_name=None):
    """Read the gameplay-mechanic suggestions, one per line.

    Parameters
    ----------
    file_name: optional path to the mechanics file; defaults to FILE_NAME.
        (Generalized from the hard-coded constant; calling with no argument
        behaves exactly as before.)

    Returns
    -------
    list of str: the suggestions with leading/trailing whitespace stripped.
    """
    path = FILE_NAME if file_name is None else file_name
    with open(path, encoding='utf-8') as file:
        # strip removes whitespace (including the newline) at each end
        return [line.strip() for line in file]
def introduction():
    """Print the program banner and usage instructions."""
    banner = (
        "",
        "A Gameplay Mechanic [TS4] by CozyGnomes",
        "Every time you press enter, a new suggestion for your gameplay will be generated. (to exit enter 0)",
    )
    for line in banner:
        print(line)
def main():
    """Run the interactive suggestion loop until the user enters 0."""
    introduction()
    mechanics = get_mechanics()
    print("You should...")
    print('')
    print(random.choice(mechanics))
    # Empty input -> offer another suggestion; '0' -> quit; anything else is ignored.
    while True:
        answer = input("")
        if answer == '0':
            break
        if answer == '':
            print("or...")
            print(random.choice(mechanics))
    # Final message once the user is done.
    print('')
    print("Now it's time to do it! Good luck!")
if __name__ == '__main__':
    main()
def display(a, b):
    """Print ``a=<value>``; ``b`` is accepted but unused (keyword-argument demo)."""
    print('a=' + format(a))
    return None
# Positional, keyword, and reordered-keyword calls all print a=3.
display(3,4)
display(a=3,b=4)
display(b=4,a=3)
# %%
from collections import OrderedDict
import dataloaders.base
from dataloaders.datasetGen import SplitGen, PermutedGen
train_dataset, val_dataset = dataloaders.base.__dict__["CIFAR10"]('data', False)
# %%
print(train_dataset)
# %%
train_dataset_splits, val_dataset_splits, task_output_space = SplitGen(train_dataset, val_dataset,
first_split_sz=2,
other_split_sz=2,
rand_split=False,
remap_class=not False)
# %%
for X, y , cl in train_dataset_splits["4"]:
print(X.shape)
print("cl")
print(y)
break
# %%
# %%
| 830 | 219 |
import sys
import pyfits
import numpy as np
from PySpectrograph import Spectrum
from PySpectrograph.Utilities.fit import interfit
import pylab as pl
def ncor(x, y):
    """Calculate the normalized correlation of two arrays"""
    # Normalization: product of the two zero-lag autocorrelations.
    norm = np.correlate(x, x) * np.correlate(y, y)
    if norm <= 0:
        return 0
    return np.correlate(x, y) / norm ** 0.5
def xcor_redshift(spectra, template, sub=False, z1=0, z2=1, zstep=0.001):
    """Measure the redshift of a spectrum by cross-correlating it
    with a template.

    Returns the trial redshifts and the corresponding normalized
    correlation values.
    """
    # NOTE(review): `sub` is accepted but never used — kept for interface compatibility.
    z_values = np.arange(z1, z2, zstep)
    cc = np.zeros(len(z_values))
    spec_flux = continuum_subtract(spectra)
    temp_flux = continuum_subtract(template)
    for idx in range(len(z_values)):
        # Shift the template to the trial redshift and resample it onto
        # the observed wavelength grid.
        shifted = np.interp(spectra.wavelength, template.wavelength * (1 + z_values[idx]), temp_flux)
        cc[idx] = ncor(spec_flux, shifted)
    return z_values, cc
def continuum_subtract(spec, function='polynomial', order=7):
    """Fit a function to a spectra and subtract the continuum"""
    fit = interfit(spec.wavelength, spec.flux, function=function, order=order)
    fit.interfit()
    # Residual flux after removing the fitted continuum model.
    return spec.flux - fit(spec.wavelength)
def loadtext(infile):
    """Load a two-column (wavelength, flux) text file into a Spectrum."""
    wave, flux = np.loadtxt(infile, usecols=(0, 1), unpack=True)
    return Spectrum.Spectrum(wave, flux, stype='continuum')
def loadiraf(hdu):
    """Build a Spectrum from an IRAF-style FITS HDU using its linear WCS keywords."""
    flux = hdu[0].data
    pix = np.arange(len(flux))
    wave = hdu[0].header['CRVAL1'] + hdu[0].header['CDELT1'] * (pix + hdu[0].header['CRPIX1'])
    # Keep only pixels with flux above 10 (presumably rejects empty/low-signal bins).
    keep = flux > 10
    return Spectrum.Spectrum(wave[keep], flux[keep], stype='continuum')
def loadsdss(hdu):
    """Build a Spectrum from an SDSS FITS HDU (log-linear wavelength solution)."""
    farr = hdu[0].data[0]
    # Parenthesised so this debug output also runs under Python 3
    # (it was a Python-2-only print statement; output is unchanged on py2).
    print(len(farr))
    xarr = np.arange(len(farr))
    # SDSS stores log10(wavelength): CRVAL1 + CD1_1 * pixel.
    warr = 10**(hdu[0].header['CRVAL1']+hdu[0].header['CD1_1']*(xarr+1))
    spec = Spectrum.Spectrum(warr, farr, stype='continuum')
    return spec
def readlinelist(infile):
    """Read a two-column line-list file.

    Each line holds a wavelength followed by a line name; both are kept
    as strings, in file order.

    Returns
    -------
    (line_wave, line_name): pair of lists of str
    """
    line_wave = []
    line_name = []
    # `with` guarantees the handle is closed (the original leaked it).
    with open(infile) as fh:
        for entry in fh:
            fields = entry.split()
            line_wave.append(fields[0])
            line_name.append(fields[1])
    return line_wave, line_name
# NOTE(review): this driver uses Python 2 print statements throughout.
# Usage: script <spectrum(.fits|txt)> <template.fits> <z_guess> <label> [cc_x cc_y]
if __name__=='__main__':
    # Load the observed spectrum (FITS via IRAF keywords, else plain text).
    if sys.argv[1].count('fits'):
        hdu=pyfits.open(sys.argv[1])
        spec=loadiraf(hdu)
    else:
        spec=loadtext(sys.argv[1])
    thdu=pyfits.open(sys.argv[2])
    zc=float(sys.argv[3])
    template=loadsdss(thdu)
    # Search +/-0.15 around the guess redshift (clamped at zero).
    z1=max(0,zc-0.15)
    z2=max(0,zc+0.15)
    z_arr, cc_arr=xcor_redshift(spec, template, z1=z1, z2=z2, zstep=0.0001)
    z=z_arr[cc_arr.argmax()]
    print z
    #z_arr, cc_arr=xcor_redshift(spec, template, z1=z-0.05, z2=z+0.05, zstep=0.0001)
    #z=z_arr[cc_arr.argmax()]
    #print z
    pl.figure()
    sp=pl.axes([0.15,0.15,0.8,0.8])
    # Boxcar-smooth the observed flux for display.
    cflux=np.convolve(spec.flux, np.ones(10), mode='same')
    #cflux*=1000e16
    sp.plot(spec.wavelength, cflux, color='#000000')
    coef=np.polyfit(spec.wavelength, cflux, 3)
    #sp.plot(spec.wavelength, np.polyval(coef, spec.wavelength))
    # Redshift the template onto the observed grid and scale it to match.
    nflux=np.interp(spec.wavelength, (1+z)*template.wavelength, template.flux)
    tcoef=np.polyfit(spec.wavelength, nflux*cflux.mean()/nflux.mean(), 2)
    #ratio=cflux.mean()/nflux.mean()#*np.polyval(coef, spec.wavelength)/np.polyval(tcoef, spec.wavelength)
    ratio=cflux.mean()/nflux.mean()*np.polyval(coef, spec.wavelength)/np.polyval(tcoef, spec.wavelength)
    #sp.plot(spec.wavelength, nflux*ratio*0.5-0.4e-16, color='#FF0000')
    sp.plot(spec.wavelength, nflux*ratio, color='#FF0000')
    #sp.plot(spec.wavelength, np.polyval(tcoef, spec.wavelength))
    #pl.plot((1+z)*template.wavelength, template.flux*spec.flux.mean()/template.flux.mean())
    spname=sys.argv[1].split('_')[0]
    #sp.set_ylim([0,2000])
    x1,x2=sp.get_xlim()
    y1,y2=sp.get_ylim()
    print y1,y2, x1,x2
    # Mark catalogue emission/absorption lines shifted to the measured z.
    line_wave, line_name=readlinelist('redshift/sdss.linelist')
    dx=10
    for w,n in zip(line_wave, line_name):
        w=float(w)*(1+z)
        if w>x1 and w< x2:
            sp.plot([w,w],[y1,y2],ls='--', color='#AAAAAA')
            sp.text(w, y2-dx, '$%s$' % n.replace('_', '\\'), color='#AAAAAA', fontsize=8)
            #if dx<300:
            #   dx+=100
            #else:
            #   dx=100
    spname=sys.argv[4]
    sp.text(4500,0.8*y2,spname, fontsize=24)
    sp.text(4500,0.70*y2,'z=%5.4f' % zc, fontsize=24)
    sp.set_ylabel('Counts')
    sp.set_xlabel('$\lambda \ (\AA)$')
    # Optional inset position for the cross-correlation function panel.
    if len(sys.argv)>5:
        sy1=float(sys.argv[5])
        sy2=float(sys.argv[6])
    else:
        sy1=0.7
        sy2=0.7
    if False:
        cc=pl.axes([sy1, sy2,0.2,0.2])
        cc.plot(z_arr, cc_arr, color='#777777')
        xticks=np.arange(100*z1,100*z2+1,10, dtype=int)/100.0
        print xticks
        cc.set_xticks(xticks)
        cc.set_yticklabels([])
        cc.set_xlabel('z')
        cc.set_title('X-corr Function')
    pl.savefig(spname+'.png')
    pl.show()
################################################################################
# This is the main file for preprocessing smartphone sensor data #
# #
# Contributors: Anna Hakala & Ana Triana #
################################################################################
import niimpy
import numpy as np
import pandas as pd
from pandas import Series
import matplotlib.pyplot as plt
import seaborn as sns
import time
import datetime
import pytz
import niimpy.aalto
# backwards compatibility aliases
from .screen import screen_off, screen_duration
def date_range(df, begin, end):
    """Extract out a certain date range from a DataFrame.

    Extract out a certain data range from a dataframe. The index must be the
    dates, and the index must be sorted.

    Parameters
    ----------
    df: pandas.DataFrame indexed by (sorted) timestamps
    begin, end: pandas.Timestamp or None
        Inclusive bounds; None means "from the start" / "to the end".

    Returns
    -------
    The rows of df between begin and end (inclusive).
    """
    # TODO: is this needed? Do normal pandas operation, timestamp
    # checking is not really needed (and limits the formats that can
    # be used, pandas can take more than pd.Timestamp)
    if begin is not None:  # `is not None` — identity, not `!= None`
        assert isinstance(begin, pd.Timestamp), "begin not given in timestamp format"
    else:
        begin = df.index[0]
    if end is not None:
        assert isinstance(end, pd.Timestamp), "end not given in timestamp format"
    else:
        end = df.index[-1]
    return df.loc[begin:end]
# Above this point is function that should *stay* in preprocess.py
# Below this is functions that may or may not be moved.
def get_subjects(database):
    """ Returns a list of the subjects in the database
    Parameters
    ----------
    database: database
    """
    # TODO: deprecate, user should do ['user'].unique() on dataframe themselves
    assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
    responses = database.raw(table='AwareHyksConverter', user=niimpy.ALL)
    return list(responses.user.unique())
def get_phq9(database, subject):
    """ Returns the phq9 scores from the databases per subject
    Parameters
    ----------
    database: database
    user: string
    Returns
    -------
    phq9: Dataframe with the phq9 score
    """
    # TODO: Most of this logic can be moved to sum_survey_cores
    assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
    assert isinstance(subject, str),"user not given in string format"
    scores = niimpy.aalto.phq9_raw(database)
    scores = scores[scores['user'] == subject].drop(['user', 'source'], axis=1).sort_index()
    # De-duplicate repeated (timestamp, question-id) answers, keeping the first.
    scores = (scores.reset_index()
                    .drop_duplicates(subset=['index', 'id'], keep='first')
                    .set_index('index'))
    # Sum the per-question answers at each timestamp into one PHQ-9 total.
    return scores.groupby(scores.index)['answer'].sum().to_frame()
#surveys
def daily_affect_variability(questions, subject=None):
    """ Returns two DataFrames corresponding to the daily affect variability and
    mean daily affect, both measures defined in the OLO paper available in
    10.1371/journal.pone.0110907. In brief, the mean daily affect computes the
    mean of each of the 7 questions (e.g. sad, cheerful, tired) asked in a
    likert scale from 0 to 7. Conversely, the daily affect variability computes
    the standard deviation of each of the 7 questions.
    NOTE: This function aggregates data by day.
    Parameters
    ----------
    questions: DataFrame with subject data (or database for backwards compatibility)
    subject: string, optional (backwards compatibility only, in the future do filtering before).
    Returns
    -------
    DLA_std: standard deviation of the daily affect
    DLA_mean: mean of the daily affect
    """
    # TODO: The daily summary (mean/std) seems useful, can we generalize?
    # Backwards compatibility if a database was passed
    if isinstance(questions, niimpy.database.Data1):
        questions = questions.raw(table='AwareHyksConverter', user=subject)
    # Maintain backwards compatibility in the case subject was passed and
    # questions was *not* a dataframe.
    # BUG FIX: was `isinstance(subject, string)` — `string` is undefined and
    # raised NameError whenever this branch was reached.
    elif isinstance(subject, str):
        questions = questions[questions['user'] == subject]
    questions=questions[(questions['id']=='olo_1_1') | (questions['id']=='olo_1_2') | (questions['id']=='olo_1_3') | (questions['id']=='olo_1_4') | (questions['id']=='olo_1_5') | (questions['id']=='olo_1_6') | (questions['id']=='olo_1_7') | (questions['id']=='olo_1_8')]
    questions['answer']=pd.to_numeric(questions['answer'])
    questions = questions.drop(['device', 'time', 'user'], axis=1)
    # Localize naive timestamps to Helsinki time if not already localized.
    if (pd.Timestamp.tzname(questions.index[0]) != 'EET'):
        if pd.Timestamp.tzname(questions.index[0]) != 'EEST':
            questions.index = pd.to_datetime(questions.index).tz_localize('Europe/Helsinki')
    questions=questions.drop_duplicates(subset=['datetime','id'],keep='first')
    questions=questions.pivot_table(index='datetime', columns='id', values='answer')
    questions=questions.rename(columns={'olo_1_1': 'cheerful', 'olo_1_2': 'tired','olo_1_3': 'content', 'olo_1_4': 'nervous','olo_1_5': 'tranquil', 'olo_1_6': 'sad', 'olo_1_7': 'excited', 'olo_1_8': 'active'})
    questions = questions.reset_index()
    DLA = questions.copy()
    # Truncate each timestamp to its calendar day for daily aggregation.
    questions['date_minus_time'] = questions['datetime'].apply( lambda questions : datetime.datetime(year=questions.year, month=questions.month, day=questions.day))
    questions.set_index(questions["date_minus_time"],inplace=True)
    DLA_std = questions.resample('D').std()#), how='std')
    DLA_std=DLA_std.rename(columns={'date_minus_time': 'datetime'})
    DLA_std.index = pd.to_datetime(DLA_std.index).tz_localize('Europe/Helsinki')
    DLA_mean = questions.resample('D').mean()
    DLA_mean=DLA_mean.rename(columns={'date_minus_time': 'datetime'})
    DLA_mean.index = pd.to_datetime(DLA_mean.index).tz_localize('Europe/Helsinki')
    return DLA_std, DLA_mean
#Ambient Noise
def ambient_noise(noise, subject, begin=None, end=None):
    """ Returns a Dataframe with 5 possible computations regarding the noise
    ambient plug-in: average decibels, average frequency, number of times when
    there was noise in the day, number of times when there was a loud noise in
    the day (>70dB), and number of times when the noise matched the speech noise
    level and frequency (65Hz < freq < 255Hz and dB>50 )
    NOTE: This function aggregates data by day.
    Parameters
    ----------
    noise: DataFrame with subject data (or database for backwards compatibility)
    subject: string, optional (backwards compatibility only, in the future do filtering before).
    begin: datetime, optional
    end: datetime, optional
    Returns
    -------
    avg_noise: Dataframe
    """
    # TODO: move to niimpy.noise
    # TODO: add arguments for frequency/decibels/silence columns
    # Backwards compatibilty if a database was passed
    if isinstance(noise, niimpy.database.Data1):
        noise = noise.raw(table='AwareAmbientNoise', user=subject)
    # Maintain backwards compatibility in the case subject was passed and
    # questions was *not* a dataframe.
    # BUG FIX: was `isinstance(subject, string)` — `string` is undefined and
    # raised NameError whenever this branch was reached.
    elif isinstance(subject, str):
        noise = noise[noise['user'] == subject]
    # Shrink the dataframe down to only what we need
    noise = noise[['double_frequency', 'is_silent', 'double_decibels', 'datetime']]
    # Extract the data range (In the future should be done before this function
    # is called.)
    if begin is not None or end is not None:
        noise = date_range(noise, begin, end)
    noise['is_silent']=pd.to_numeric(noise['is_silent'])
    loud = noise[noise.double_decibels>70] #check if environment was noisy
    speech = noise[noise['double_frequency'].between(65, 255)]
    speech = speech[speech.is_silent==0] #check if there was a conversation
    silent=noise[noise.is_silent==0] #This is more what moments there are noise in the environment.
    avg_noise=noise.resample('D', on='datetime').mean() #average noise
    avg_noise=avg_noise.drop(['is_silent'],axis=1)
    # Each per-day count is merged in only when rows of that kind exist.
    if not silent.empty:
        silent=silent.resample('D', on='datetime').count()
        silent = silent.drop(['double_decibels','double_frequency','datetime'],axis=1)
        silent=silent.rename(columns={'is_silent':'noise'})
        avg_noise = avg_noise.merge(silent, how='outer', left_index=True, right_index=True)
    if not loud.empty:
        loud=loud.resample('D', on='datetime').count()
        loud = loud.drop(['double_decibels','double_frequency','datetime'],axis=1)
        loud=loud.rename(columns={'is_silent':'loud'})
        avg_noise = avg_noise.merge(loud, how='outer', left_index=True, right_index=True)
    if not speech.empty:
        speech=speech.resample('D', on='datetime').count()
        speech = speech.drop(['double_decibels','double_frequency','datetime'],axis=1)
        speech=speech.rename(columns={'is_silent':'speech'})
        avg_noise = avg_noise.merge(speech, how='outer', left_index=True, right_index=True)
    return avg_noise
#Application
def shutdown_info(database,subject,begin=None,end=None):
    """ Returns a DataFrame with the timestamps of when the phone has shutdown.
    NOTE: This is a helper function created originally to preprocess the application
    info data
    Parameters
    ----------
    database: Niimpy database
    user: string
    begin: datetime, optional
    end: datetime, optional
    Returns
    -------
    shutdown: Dataframe
    """
    bat = niimpy.read._get_dataframe(database, table='AwareBattery', user=subject)
    bat = niimpy.filter_dataframe(bat, begin=begin, end=end)
    # TODO: move to niimpy.battery
    if 'datetime' in bat.columns:
        bat = bat[['battery_status', 'datetime']]
    else:
        bat = bat[['battery_status']]
    bat=bat.loc[begin:end]
    bat['battery_status']=pd.to_numeric(bat['battery_status'])
    # Keep statuses strictly between -3 and 0 (presumably the negative codes
    # mark shutdown/reboot events — confirm against the AWARE schema).
    # Written as explicit comparisons because Series.between(..., inclusive=False)
    # with a bool was deprecated in pandas 1.3 and removed in 2.0.
    shutdown = bat[(bat['battery_status'] > -3) & (bat['battery_status'] < 0)]
    return shutdown
def get_seconds(time_delta):
    """ Converts the timedelta to seconds
    NOTE: This is a helper function; returns the Series' `.dt.seconds`
    component (seconds within the day, not total seconds).
    Parameters
    ----------
    time_delta: Timedelta
    """
    seconds = time_delta.dt.seconds
    return seconds
def app_duration(database,subject,begin=None,end=None,app_list_path=None):
    """ Returns two DataFrames contanining the duration and number of events per
    group of apps, e.g. number of times a person used communication apps like
    WhatsApp, Telegram, Messenger, sms, etc. and for how long these apps were
    used in a day (in seconds).
    Parameters
    ----------
    database: Niimpy database
    user: string
    begin: datetime, optional
    end: datetime, optional
    app_list_path: path to the csv file where the apps are classified into groups
    Returns
    -------
    duration: Dataframe
    count: Dataframe
    """
    assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
    assert isinstance(subject, str),"user not given in string format"
    app = database.raw(table='AwareApplicationNotifications', user=subject)
    # Default begin/end to the first/last notification timestamps.
    if(begin!=None):
        assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
    else:
        begin = app.iloc[0]['datetime']
    if(end!= None):
        assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
    else:
        end = app.iloc[len(app)-1]['datetime']
    if(app_list_path==None):
        app_list_path = '/m/cs/scratch/networks-nima/ana/niima-code/Datastreams/Phone/apps_group.csv'
    app = app.drop(columns=['device','user','time','defaults','sound','vibrate'])
    app=app.loc[begin:end]
    #Classify the apps into groups
    # Group 10 = "other" (app name not found in the classification CSV);
    # otherwise the group is the CSV column index where the name appears.
    app_list=pd.read_csv(app_list_path)
    app['group']=np.nan
    for index, row in app.iterrows():
        group=app_list.isin([row['application_name']]).any()
        group=group.reset_index()
        if (not any(group[0])):
            app.loc[index,'group']=10
        else:
            app.loc[index,'group']=group.index[group[0] == True].tolist()[0]
    #Insert missing data due to phone being shut down
    # Shutdown rows are injected as pseudo-events (group 11, name 'off').
    shutdown = shutdown_info(database,subject,begin,end)
    if not shutdown.empty:
        shutdown['group']=11
        shutdown['battery_status'] = 'off'
        app = app.merge(shutdown, how='outer', left_index=True, right_index=True)
        app['application_name'] = app['application_name'].replace(np.nan, 'off', regex=True)
        app['group_x'] = app['group_x'].replace(np.nan, 11, regex=True)
        app = app.drop(['battery_status','group_y'], axis=1)
        dates=app.datetime_x.combine_first(app.datetime_y)
        app['datetime']=dates
        app = app.drop(['datetime_x','datetime_y'], axis=1)
        app=app.rename(columns={'group_x':'group'})
    #Insert missing data due to the screen being off
    screen=screen_off(database,subject,begin,end)
    if not screen.empty:
        app = app.merge(screen, how='outer', left_index=True, right_index=True)
        app['application_name'] = app['application_name'].replace(np.nan, 'off', regex=True)
        app['group'] = app['group'].replace(np.nan, 11, regex=True)
        del app['screen_status']
        dates=app.datetime_x.combine_first(app.datetime_y)
        app['datetime']=dates
        app = app.drop(['datetime_x','datetime_y'], axis=1)
    #Insert missing data caught by sms but unknown cause
    # Outgoing SMS events are folded in as group 2 (communication).
    sms = database.raw(table='AwareMessages', user=subject)
    sms = sms.drop(columns=['device','user','time','trace'])
    sms = sms.drop_duplicates(subset=['datetime','message_type'],keep='first')
    sms = sms[sms.message_type=='outgoing']
    sms = sms.loc[begin:end]
    if not sms.empty:
        app = app.merge(sms, how='outer', left_index=True, right_index=True)
        app['application_name'] = app['application_name'].replace(np.nan, 'sms', regex=True)
        app['group'] = app['group'].replace(np.nan, 2, regex=True)
        del app['message_type']
        dates=app.datetime_x.combine_first(app.datetime_y)
        app['datetime']=dates
        app = app.drop(['datetime_x','datetime_y'], axis=1)
    #Insert missing data caught by calls but unknown cause
    # Calls contribute two timestamps: the start and the computed end
    # (start + call_duration), both folded in as group 2.
    call = database.raw(table='AwareCalls', user=subject)
    if not call.empty:
        call = call.drop(columns=['device','user','time','trace'])
        call = call.drop_duplicates(subset=['datetime','call_type'],keep='first')
        call['call_duration'] = pd.to_timedelta(call.call_duration.astype(int), unit='s')
        call = call.loc[begin:end]
        dummy = call.datetime+call.call_duration
        dummy = pd.Series.to_frame(dummy)
        dummy['finish'] = dummy[0]
        dummy = dummy.set_index(0)
        call = call.merge(dummy, how='outer', left_index=True, right_index=True)
        dates=call.datetime.combine_first(call.finish)
        call['datetime']=dates
        call = call.drop(columns=['call_duration','finish'])
        app = app.merge(call, how='outer', left_index=True, right_index=True)
        app.group = app.group.fillna(2)
        app.application_name = app.application_name.fillna('call')
        dates=app.datetime_x.combine_first(app.datetime_y)
        app['datetime']=dates
        app = app.drop(columns=['datetime_x','datetime_y','call_type'])
    #Calculate the app duration per group
    # Each event's duration is the gap until the NEXT event (diff shifted -1);
    # events are then bucketed per calendar day and per app group.
    app['duration']=np.nan
    app['duration']=app['datetime'].diff()
    app['duration'] = app['duration'].shift(-1)
    app['datetime'] = app['datetime'].dt.floor('d')
    duration=pd.pivot_table(app,values='duration',index='datetime', columns='group', aggfunc=np.sum)
    count=pd.pivot_table(app,values='duration',index='datetime', columns='group', aggfunc='count')
    duration.columns = duration.columns.map({0.0: 'sports', 1.0: 'games', 2.0: 'communication', 3.0: 'social_media', 4.0: 'news', 5.0: 'travel', 6.0: 'shop', 7.0: 'entretainment', 8.0: 'work_study', 9.0: 'transportation', 10.0: 'other', 11.0: 'off'})
    count.columns = count.columns.map({0.0: 'sports', 1.0: 'games', 2.0: 'communication', 3.0: 'social_media', 4.0: 'news', 5.0: 'travel', 6.0: 'shop', 7.0: 'entretainment', 8.0: 'work_study', 9.0: 'transportation', 10.0: 'other', 11.0: 'off'})
    duration = duration.apply(get_seconds,axis=1)
    return duration, count
#Communication
def call_info(database,subject,begin=None,end=None):
    """ Returns a DataFrame contanining the duration and number of events per
    type of calls (outgoing, incoming, missed). The Dataframe summarizes the
    duration of the incoming/outgoing calls in seconds, number of those events,
    and how long (in seconds) the person has spoken to the top 5 contacts (most
    frequent)
    Parameters
    ----------
    database: Niimpy database
    user: string
    begin: datetime, optional
    end: datetime, optional
    Returns
    -------
    duration: Dataframe
    """
    assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
    assert isinstance(subject, str),"user not given in string format"
    call = database.raw(table='AwareCalls', user=subject)
    # Default begin/end to the first/last call timestamps.
    if(begin!=None):
        assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
    else:
        begin = call.iloc[0]['datetime']
    if(end!= None):
        assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
    else:
        end = call.iloc[len(call)-1]['datetime']
    call = call.drop(columns=['device','user','time'])
    call = call.loc[begin:end]
    # Truncate timestamps to the day for daily aggregation.
    call['datetime'] = call['datetime'].dt.floor('d')
    call['call_duration']=pd.to_numeric(call['call_duration'])
    # Daily totals plus per-type daily event counts.
    duration = call.groupby(['datetime']).sum()
    missed_calls = call.loc[(call['call_type'] == 'missed')].groupby(['datetime']).count()
    outgoing_calls = call.loc[(call['call_type'] == 'outgoing')].groupby(['datetime']).count()
    incoming_calls = call.loc[(call['call_type'] == 'incoming')].groupby(['datetime']).count()
    duration['call_missed'] = missed_calls['call_type']
    duration['call_outgoing'] = outgoing_calls['call_type']
    duration['call_incoming'] = incoming_calls['call_type']
    # Per-type daily duration; missed calls carry no meaningful duration.
    duration2 = call.pivot_table(index='datetime', columns='call_type', values='call_duration',aggfunc='sum')
    if ('incoming' in duration2.columns):
        duration2 = duration2.rename(columns={'incoming': 'call_incoming_duration'})
    if ('outgoing' in duration2.columns):
        duration2 = duration2.rename(columns={'outgoing': 'call_outgoing_duration'})
    if ('missed' in duration2.columns):
        duration2 = duration2.drop(columns=['missed'])
    duration = duration.merge(duration2, how='outer', left_index=True, right_index=True)
    duration = duration.fillna(0)
    if ('missed_y' in duration.columns):
        duration = duration.drop(columns=['missed_y'])
    #duration.columns = ['total_call_duration', 'call_missed', 'call_outgoing', 'call_incoming', 'call_incoming_duration', 'call_outgoing_duration']
    #Now let's calculate something more sophisticated... Let's see
    # Identify the five most frequent contacts (by call count on 'trace')
    # and flag each call as frequent (1) or not (0).
    trace = call.groupby(['trace']).count()
    trace = trace.sort_values(by=['call_type'], ascending=False)
    top5 = trace.index.values.tolist()[:5]
    call['frequent']=0
    call = call.reset_index()
    call = call.rename(columns={'index': 'date'})
    for index, row in call.iterrows():
        if (call.loc[index,'trace'] in top5):
            call.loc[index,'frequent']=1
    call['frequent'] = call['frequent'].astype(str)
    # Daily duration split by (call_type, frequent-contact flag); only the
    # frequent (suffix _1) incoming/outgoing durations are kept.
    duration2 = call.pivot_table(index='date', columns=['call_type','frequent'], values='call_duration',aggfunc='sum')
    duration2.columns = ['_'.join(col) for col in duration2.columns]
    duration2 = duration2.reset_index()
    #duration2.columns = ['datetime','incoming_0','incoming_1','missed_0','missed_1','outgoing_0','outgoing_1']
    duration2['datetime'] = duration2['date'].dt.floor('d')
    duration2 = duration2.groupby(['datetime']).sum()
    if ('incoming_0' in duration2.columns):
        duration2 = duration2.drop(columns=['incoming_0'])
    if ('missed_0' in duration2.columns):
        duration2 = duration2.drop(columns=['missed_0'])
    if ('missed_1' in duration2.columns):
        duration2 = duration2.drop(columns=['missed_1'])
    if ('outgoing_0' in duration2.columns):
        duration2 = duration2.drop(columns=['outgoing_0'])
    duration = duration.merge(duration2, how='outer', left_index=True, right_index=True)
    duration = duration.rename(columns={'incoming_1': 'incoming_duration_top5', 'outgoing_1': 'outgoing_duration_top5'})
    return duration
def sms_info(database,subject,begin=None,end=None):
    """ Returns a DataFrame containing the number of events per type of
    message (SMS outgoing, incoming). The DataFrame summarizes the number of
    incoming/outgoing sms per day and how many of those correspond to the top
    5 contacts (most frequent contacts with whom the subject exchanges texts,
    ranked by total message count over the whole period).
    Parameters
    ----------
    database: Niimpy database
    user: string
    begin: datetime, optional
    end: datetime, optional
    Returns
    -------
    sms_stats: Dataframe indexed by day; empty DataFrame when there is no
        SMS data for the subject (or none inside the requested window)
    """
    assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
    assert isinstance(subject, str),"user not given in string format"
    sms = database.raw(table='AwareMessages', user=subject)
    # Bail out early: with no rows, the begin/end defaults below would raise
    # IndexError on sms.iloc[0] / sms.iloc[len(sms)-1].
    if (len(sms)==0):
        return pd.DataFrame()
    if begin is not None:
        assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
    else:
        begin = sms.iloc[0]['datetime']
    if end is not None:
        assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
    else:
        end = sms.iloc[len(sms)-1]['datetime']
    sms = sms.drop(columns=['device','user','time'])
    sms['datetime'] = sms['datetime'].dt.floor('d')
    sms = sms.loc[begin:end]
    # All rows may have fallen outside the requested window.
    if (len(sms)==0):
        return pd.DataFrame()
    # Daily counts per message type (incoming/outgoing).
    sms_stats = sms.copy()
    sms_stats['dummy'] = 1
    sms_stats = sms_stats.pivot_table(index='datetime', columns='message_type', values='dummy',aggfunc='sum')
    # Rank contacts ('trace') by total traffic and keep the top 5.
    trace = sms.groupby(['trace']).count()
    trace = trace.sort_values(by=['message_type'], ascending=False)
    top5 = trace.index.values.tolist()[:5]
    sms['frequent'] = 0
    sms = sms.reset_index()
    sms = sms.rename(columns={'index': 'date'})
    # Flag messages exchanged with a top-5 contact (vectorized; the original
    # iterrows loop did the same row by row).
    sms.loc[sms['trace'].isin(top5), 'frequent'] = 1
    sms['frequent'] = sms['frequent'].astype(str)
    sms['dummy'] = 1
    dummy = sms.pivot_table(index='date', columns=['message_type','frequent'], values='dummy',aggfunc='sum')
    dummy.columns = ['_'.join(col) for col in dummy.columns]
    dummy = dummy.reset_index()
    dummy['datetime'] = dummy['date'].dt.floor('d')
    dummy = dummy.groupby(['datetime']).sum()
    # Only the '_1' (top-5 contact) columns are of interest.
    if ('incoming_0' in dummy.columns):
        dummy = dummy.drop(columns=['incoming_0'])
    if ('outgoing_0' in dummy.columns):
        dummy = dummy.drop(columns=['outgoing_0'])
    sms_stats = sms_stats.merge(dummy, how='outer', left_index=True, right_index=True)
    sms_stats = sms_stats.rename(columns={'incoming_1': 'sms_incoming_top5', 'outgoing_1': 'sms_outgoing_top5'})
    sms_stats = sms_stats.fillna(0)
    if ('incoming' in sms_stats.columns):
        sms_stats = sms_stats.rename(columns={'incoming': 'sms_incoming'})
    if ('outgoing' in sms_stats.columns):
        sms_stats = sms_stats.rename(columns={'outgoing': 'sms_outgoing'})
    return sms_stats
def sms_duration(database,subject,begin,end):
    """ Returns a DataFrame contanining the duration per type of messages SMS
    (outgoing, incoming). The Dataframe summarizes the calculated duration of
    the incoming/outgoing sms and the lags (i.e. the period between receiving a
    message and reading/writing a reply).
    NOTE: The foundation of this function is still weak and needs discussion
    Parameters
    ----------
    database: Niimpy database
    user: string
    begin: datetime, optional
    end: datetime, optional
    Returns
    -------
    reading: Dataframe
    writing: Dataframe
    """
    assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
    assert isinstance(subject, str),"user not given in string format"
    app = database.raw(table='AwareApplicationNotifications', user=subject)
    if(begin!=None):
        assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
    else:
        begin = app.iloc[0]['datetime']
    if(end!= None):
        assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
    else:
        end = app.iloc[len(app)-1]['datetime']
    app = app.drop(columns=['device','user','time','defaults','sound','vibrate'])
    #Insert missing data due to phone being shut down
    # shutdown_info is a sibling helper in this module; its rows are tagged
    # 'off' so shutdown periods terminate app sessions below.
    shutdown = shutdown_info(database,subject,begin,end)
    shutdown=shutdown.rename(columns={'battery_status':'application_name'})
    shutdown['application_name'] = 'off'
    # Outer merge on timestamps creates *_x / *_y columns that are folded
    # back together with combine_first below.
    app = app.merge(shutdown, how='outer', left_index=True, right_index=True)
    app['application_name_x'] = app['application_name_x'].replace(np.nan, 'off', regex=True)
    del app['application_name_y']
    dates=app.datetime_x.combine_first(app.datetime_y)
    app['datetime']=dates
    app = app.drop(['datetime_x','datetime_y'],axis=1)
    app=app.rename(columns={'application_name_x':'application_name'})
    #Insert missing data due to the screen being off
    screen=screen_off(database,subject,begin,end)
    app = app.merge(screen, how='outer', left_index=True, right_index=True)
    app['application_name'] = app['application_name'].replace(np.nan, 'off', regex=True)
    del app['screen_status']
    dates=app.datetime_x.combine_first(app.datetime_y)
    app['datetime']=dates
    app = app.drop(['datetime_x','datetime_y'],axis=1)
    app = app.drop_duplicates(subset=['datetime','application_name'],keep='first')
    #Insert missing data caught by sms but unknown cause
    sms = database.raw(table='AwareMessages', user=subject)
    sms = sms.drop(columns=['device','user','time','trace'])
    sms = sms.drop_duplicates(subset=['datetime','message_type'],keep='first')
    #sms = sms[sms.message_type=='outgoing']
    app = app.merge(sms, how='outer', left_index=True, right_index=True)
    # Rows that only exist in the sms table get their message_type
    # ('incoming'/'outgoing') as the application name.
    app.loc[app['application_name'].isnull(),'application_name'] = app['message_type']
    del app['message_type']
    dates=app.datetime_x.combine_first(app.datetime_y)
    app['datetime']=dates
    app = app.drop(['datetime_x','datetime_y'],axis=1)
    #Calculate the app duration
    # Duration of each event = time until the next event (diff shifted up).
    app['duration']=np.nan
    app['duration']=app['datetime'].diff()
    app['duration'] = app['duration'].shift(-1)
    #Select the text applications only
    # NOTE(review): 'MensajerÃa' looks like a mojibake duplicate of
    # 'Mensajería'; presumably kept on purpose to match mis-encoded app
    # names in the raw data — confirm before "fixing".
    sms_app_name = ['Messages','Mensajería','MensajerÃa','Viestit','incoming','outgoing']
    app = app[app['application_name'].isin(sms_app_name)]
    sms_app_name = ['Messages','Mensajería','MensajerÃa','Viestit']
    app['application_name'].loc[(app['application_name'].isin(sms_app_name))] = 'messages'
    # group 1 = incoming sms followed by opening the messages app (reading);
    # group 2 = messages app followed by an outgoing sms (writing).
    # NOTE(review): app.application_name[i] / app.group[i+1] index by
    # positional integer labels and use chained assignment — this assumes a
    # default integer index and may raise/warn on newer pandas; verify.
    app['group']=np.nan
    for i in range(len(app)-1):
        if (app.application_name[i]=='incoming' and app.application_name[i+1]=='messages'):
            app.group[i+1]=1
        elif (app.application_name[i]=='messages' and app.application_name[i+1]=='outgoing'):
            app.group[i+1]=2
        else:
            app.group[i+1]=0
    app['lags'] = app['datetime'].diff()
    app['datetime'] = app['datetime'].dt.floor('d')
    app=app.loc[begin:end]
    reading = app.loc[(app['group']==1)]
    if (len(reading)>0):
        reading = pd.pivot_table(reading,values=['duration','lags'],index='datetime', columns='application_name', aggfunc=np.sum)
        reading.columns = ['reading_duration','reading_lags']
        # get_seconds is a sibling helper converting timedeltas to seconds.
        reading = reading.apply(get_seconds,axis=1)
    writing = app.loc[(app['group']==2)]
    if (len(writing)>0):
        # Lags outside a plausible typing window (15–120 s) are replaced by
        # a flat 5-second estimate.
        for i in range(len(writing)-1):
            if (writing.lags[i].seconds<15 or writing.lags[i].seconds>120):
                writing.lags[i] = datetime.datetime.strptime('00:05', "%M:%S") - datetime.datetime.strptime("00:00", "%M:%S")
        del writing['duration']
        writing = writing.rename(columns={'lags':'writing_duration'})
        writing = pd.pivot_table(writing,values='writing_duration',index='datetime', columns='application_name', aggfunc=np.sum)
        writing = writing.apply(get_seconds,axis=1)
    # NOTE(review): if a group is empty, the unpivoted (raw) frame is
    # returned for that element instead of the pivoted one.
    return reading, writing
def communication_info(database,subject,begin=None,end=None):
    """ Returns a DataFrame contanining all the information extracted from
    communication's events (calls, sms, and communication apps like WhatsApp,
    Telegram, Messenger, etc.). Regarding calls, this function contains the
    duration of the incoming/outgoing calls in seconds, number of those events,
    and how long (in seconds) the person has spoken to the top 5 contacts (most
    frequent). Regarding the SMSs, this function contains the number of incoming
    /outgoing events, and the top 5 contacts (most frequent). Aditionally, we
    also include the calculated duration of the incoming/outgoing sms and the
    lags (i.e. the period between receiving a message and reading/writing a
    reply). Regarding the app, the duration of communication events is summarized.
    This function also sums all the different durations (calls, SMSs, apps) and
    provides the duration (in seconds) that a person spent communicating during
    the day.
    Parameters
    ----------
    database: Niimpy database
    user: string
    begin: datetime, optional
    end: datetime, optional
    Returns
    -------
    call_summary: Dataframe
    """
    assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
    assert isinstance(subject, str),"user not given in string format"
    # The app-notification table only provides the default begin/end window.
    app = database.raw(table='AwareApplicationNotifications', user=subject)
    if(begin!=None):
        assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
    else:
        begin = app.iloc[0]['datetime']
    if(end!= None):
        assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
    else:
        end = app.iloc[len(app)-1]['datetime']
    # Sibling helpers in this module do the per-stream summarization.
    duration_app, count_app = app_duration(database,subject,begin,end)
    call_summary = call_info(database,subject,begin,end)
    sms_summary = sms_info(database,subject,begin,end)
    #reading, writing = sms_duration(database,subject,begin,end)
    if (not sms_summary.empty):
        call_summary = call_summary.merge(sms_summary, how='outer', left_index=True, right_index=True)
        call_summary = call_summary.fillna(0)
    #Now let's see if there is any info from the apps worth bringin back
    # comm_app/social_app are only bound when the corresponding column
    # exists; the try/except NameError blocks below probe for that.
    if ('communication' in duration_app.columns): #2 is the number for communication apps
        comm_app = duration_app['communication']#.dt.seconds
        comm_app = comm_app.fillna(0)
        comm_app = comm_app.to_frame()
    if ('social_media' in duration_app.columns): #2 is the number for communication apps
        social_app = duration_app['social_media']#.dt.seconds
        social_app = social_app.fillna(0)
        social_app = social_app.to_frame()
    # Existence check via NameError: a bare name expression raises when the
    # variable was never assigned above.
    try:
        social_app
        try:
            comm_app
            comm_app = comm_app.merge(social_app, how='outer', left_index=True, right_index=True)
        except NameError:
            comm_app = social_app
    except NameError:
        pass
    try:
        comm_app
        call_summary = call_summary.merge(comm_app, how='outer', left_index=True, right_index=True)
    except NameError:
        pass
    call_summary = call_summary.fillna(0)
    # Total communication time = calls + app usage (the second assignment
    # overwrites the first when both app columns are present).
    if ('communication' in call_summary.columns):
        call_summary['total_comm_duration'] = call_summary['call_duration']+call_summary['communication']
    if (('social_media' in call_summary.columns) and ('communication' in call_summary.columns)):
        call_summary['total_comm_duration'] = call_summary['call_duration']+call_summary['social_media']+call_summary['communication']
    if ('communication' in call_summary.columns):
        call_summary=call_summary.rename(columns={'communication':'comm_apps_duration'})
    if ('social_media' in call_summary.columns):
        call_summary=call_summary.rename(columns={'social_media':'social_apps_duration'})
    #Now let's see if there is any info from the sms duration
    # The block below is intentionally disabled (string literal, not code);
    # it would fold the sms reading/writing durations into the total.
    '''if (len(reading)>0):
        reading['reading_duration'] = reading['reading_duration']#.dt.seconds
        reading['reading_lags'] = reading['reading_lags']#.dt.seconds
        call_summary = call_summary.merge(reading, how='outer', left_index=True, right_index=True)
        call_summary = call_summary.fillna(0)
        call_summary['total_comm_duration'] = call_summary['total_comm_duration']+call_summary['reading_duration']
    if (len(writing)>0):
        writing=writing.rename(columns={'outgoing':'writing_duration'})
        writing['writing_duration'] = writing['writing_duration']#.dt.seconds
        call_summary = call_summary.merge(writing, how='outer', left_index=True, right_index=True)
        call_summary = call_summary.fillna(0)
        call_summary['total_comm_duration'] = call_summary['total_comm_duration']+call_summary['writing_duration']'''
    return call_summary
#Occurrences
def occurrence_call_sms(database,subject,begin=None,end=None):
    """ Returns a DataFrame contanining the number of events that occur in a
    day for call and sms. The events are binned in 12-minutes, i.e. if there is
    an event at 11:05 and another one at 11:45, 2 occurences happened in one
    hour. Then, the sum of these occurences yield the number per day.
    Parameters
    ----------
    database: Niimpy database
    user: string
    begin: datetime, optional
    end: datetime, optional
    Returns
    -------
    event: Dataframe (empty DataFrame when there is neither call nor sms data)
    """
    assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
    assert isinstance(subject, str),"user not given in string format"
    call = database.raw(table='AwareCalls', user=subject)
    if not call.empty:
        if begin is not None:
            assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
        else:
            begin = call.iloc[0]['datetime']
        if end is not None:
            assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
        else:
            end = call.iloc[len(call)-1]['datetime']
        call = call.drop(columns=['device','user','time'])
        call = call.loc[begin:end]
    sms = database.raw(table='AwareMessages', user=subject)
    if not sms.empty:
        # BUGFIX: defaults now come from the sms table itself; the original
        # read them from `call`, which raised IndexError when only sms data
        # existed.
        if begin is not None:
            assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
        else:
            begin = sms.iloc[0]['datetime']
        if end is not None:
            assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
        else:
            end = sms.iloc[len(sms)-1]['datetime']
        sms = sms.drop(columns=['device','user','time'])
        sms = sms.loc[begin:end]
    # Combine whatever data is available into a single timestamp series.
    if not call.empty and not sms.empty:
        call_sms = call.merge(sms, how='outer', left_index=True, right_index=True)
        times = pd.DatetimeIndex.to_series(call_sms.index,keep_tz=True)
    elif not call.empty:
        times = pd.DatetimeIndex.to_series(call.index,keep_tz=True)
    elif not sms.empty:
        times = pd.DatetimeIndex.to_series(sms.index,keep_tz=True)
    else:
        # BUGFIX: the original fell through with `times` unbound (NameError)
        # when both tables were empty.
        return pd.DataFrame()
    # Bin events into 12-minute slots, then sum the occupied slots per day.
    event = niimpy.util.occurrence(times)
    event = event.groupby(['day']).sum()
    event = event.drop(columns=['hour'])
    return event
def occurrence_call_sms_apps(database,subject,begin=None,end=None,app_list_path=None,comm_app_list_path=None):
    """ Returns a DataFrame contanining the number of events that occur in a
    day for calls, sms, and communication apps. The events are binned in
    12-minutes, i.e. if there is an event at 11:05 and another one at 11:45, 2
    occurences happened in one hour. Then, the sum of these occurences yield the
    number per day.
    Parameters
    ----------
    database: Niimpy database
    user: string
    begin: datetime, optional
    end: datetime, optional
    app_list_path: path to the file where the apps are classified into groups
    comm_app_list_path:path to the file where the communication apps are listed
    Returns
    -------
    event: Dataframe
    """
    assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
    assert isinstance(subject, str),"user not given in string format"
    call = database.raw(table='AwareCalls', user=subject)
    if not call.empty:
        if(begin!=None):
            assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
        else:
            begin = call.iloc[0]['datetime']
        if(end!= None):
            assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
        else:
            end = call.iloc[len(call)-1]['datetime']
        call = call.drop(columns=['device','user','time'])
        call = call.loc[begin:end]
    sms = database.raw(table='AwareMessages', user=subject)
    if not sms.empty:
        # begin/end may already have been set from the call table above; in
        # that case they are reused here.
        if(begin!=None):
            assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
        else:
            begin = sms.iloc[0]['datetime']
        if(end!= None):
            assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
        else:
            end = sms.iloc[len(sms)-1]['datetime']
        sms = sms.drop(columns=['device','user','time'])
        sms = sms.loc[begin:end]
    app = database.raw(table='AwareApplicationNotifications', user=subject)
    # NOTE(review): cluster-specific default paths; callers outside that
    # environment must pass both paths explicitly.
    if (app_list_path==None):
        app_list_path='/m/cs/scratch/networks-nima/ana/niima-code/Datastreams/Phone/apps_group.csv'
    if (comm_app_list_path==None):
        comm_app_list_path='/m/cs/scratch/networks-nima/ana/niima-code/Datastreams/Phone/comm_apps.csv'
    if not app.empty:
        if(begin!=None):
            assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
        else:
            begin = app.iloc[0]['datetime']
        if(end!= None):
            assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
        else:
            end = app.iloc[len(app)-1]['datetime']
        app = app.drop(columns=['device','user','time','defaults','sound','vibrate'])
        app = app.loc[begin:end]
        # Classify each notification's app into a numeric group: the group id
        # is the column position in apps_group.csv that contains the app name,
        # or 10 when the app is unknown.
        app_list=pd.read_csv(app_list_path)
        app['group']=np.nan
        for index, row in app.iterrows():
            group=app_list.isin([row['application_name']]).any()
            group=group.reset_index()
            if (not any(group[0])):
                app.loc[index,'group']=10
            else:
                app.loc[index,'group']=group.index[group[0] == True].tolist()[0]
        # Keep communication apps only (group 2), minus the apps explicitly
        # excluded in comm_apps.csv.
        app = app.loc[app['group'] == 2]
        comm_app_list = pd.read_csv(comm_app_list_path)
        comm_app_list = comm_app_list['Communication'].tolist()
        app = app[~app.application_name.isin(comm_app_list)]
    # Combine whichever streams have data into one event frame.
    if not call.empty:
        if not sms.empty:
            event = call.merge(sms, how='outer', left_index=True, right_index=True)
        else:
            event = call
    else:
        if not sms.empty:
            event = sms
        else:
            event= pd.DataFrame()
    if not app.empty:
        if not event.empty:
            event = event.merge(app, how='outer', left_index=True, right_index=True)
        else:
            event=app
    if not event.empty:
        # Bin events into 12-minute slots, then sum occupied slots per day.
        times = pd.DatetimeIndex.to_series(event.index,keep_tz=True)
        event=niimpy.util.occurrence(times)
        event = event.groupby(['day']).sum()
        event = event.drop(columns=['hour'])
    return event
def occurrence_call_sms_social(database,subject,begin=None,end=None,app_list_path=None,comm_app_list_path=None):
    """ Returns a DataFrame contanining the number of events that occur in a
    day for calls, sms, and social and communication apps. The events are binned
    in 12-minutes, i.e. if there is an event at 11:05 and another one at 11:45,
    2 occurences happened in one hour. Then, the sum of these occurences yield
    the number per day.
    Parameters
    ----------
    database: Niimpy database
    user: string
    begin: datetime, optional
    end: datetime, optional
    app_list_path: path to the file where the apps are classified into groups
    comm_app_list_path:path to the file where the communication apps are listed
    Returns
    -------
    event: Dataframe
    """
    assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
    assert isinstance(subject, str),"user not given in string format"
    call = database.raw(table='AwareCalls', user=subject)
    if not call.empty:
        if(begin!=None):
            assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
        else:
            begin = call.iloc[0]['datetime']
        if(end!= None):
            assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
        else:
            end = call.iloc[len(call)-1]['datetime']
        call = call.drop(columns=['device','user','time'])
        call = call.loc[begin:end]
    sms = database.raw(table='AwareMessages', user=subject)
    if not sms.empty:
        # begin/end may already have been set from the call table above.
        if(begin!=None):
            assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
        else:
            begin = sms.iloc[0]['datetime']
        if(end!= None):
            assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
        else:
            end = sms.iloc[len(sms)-1]['datetime']
        sms = sms.drop(columns=['device','user','time'])
        sms = sms.loc[begin:end]
    app = database.raw(table='AwareApplicationNotifications', user=subject)
    # NOTE(review): cluster-specific default paths; callers outside that
    # environment must pass both paths explicitly.
    if(app_list_path==None):
        app_list_path='/m/cs/scratch/networks-nima/ana/niima-code/Datastreams/Phone/apps_group.csv'
    if (comm_app_list_path==None):
        comm_app_list_path='/m/cs/scratch/networks-nima/ana/niima-code/Datastreams/Phone/comm_apps.csv'
    if not app.empty:
        if(begin!=None):
            assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
        else:
            begin = app.iloc[0]['datetime']
        if(end!= None):
            assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
        else:
            end = app.iloc[len(app)-1]['datetime']
        app = app.drop(columns=['device','user','time','defaults','sound','vibrate'])
        app = app.loc[begin:end]
        # Classify each notification's app into a numeric group (column
        # position in apps_group.csv; 10 = unknown app).
        app_list=pd.read_csv(app_list_path)
        app['group']=np.nan
        for index, row in app.iterrows():
            group=app_list.isin([row['application_name']]).any()
            group=group.reset_index()
            if (not any(group[0])):
                app.loc[index,'group']=10
            else:
                app.loc[index,'group']=group.index[group[0] == True].tolist()[0]
        # Unlike occurrence_call_sms_apps, keep BOTH communication (2) and
        # social-media (3) apps, minus the excluded comm_apps.csv entries.
        app = app.loc[(app['group'] == 2) | (app['group'] == 3)]
        comm_app_list = pd.read_csv(comm_app_list_path)
        comm_app_list = comm_app_list['Communication'].tolist()
        app = app[~app.application_name.isin(comm_app_list)]
    # Combine whichever streams have data into one event frame.
    if not call.empty:
        if not sms.empty:
            event = call.merge(sms, how='outer', left_index=True, right_index=True)
        else:
            event = call
    else:
        if not sms.empty:
            event = sms
        else:
            event= pd.DataFrame()
    if not app.empty:
        if not event.empty:
            event = event.merge(app, how='outer', left_index=True, right_index=True)
        else:
            event=app
    if not event.empty:
        # Bin events into 12-minute slots, then sum occupied slots per day.
        times = pd.DatetimeIndex.to_series(event.index,keep_tz=True)
        event=niimpy.util.occurrence(times)
        event = event.groupby(['day']).sum()
        event = event.drop(columns=['hour'])
        # NOTE(review): this variant localizes the day index to
        # Europe/Helsinki; occurrence_call_sms_apps does not — confirm
        # whether that asymmetry is intentional.
        event.index = pd.to_datetime(event.index).tz_localize('Europe/Helsinki')
    return event
#Location
def location_data(database,subject,begin=None,end=None):
    """ Reads the readily, preprocessed location data from the right database.
    The data already contains the aggregation of the GPS data (more info here:
    https://github.com/digitraceslab/koota-server/blob/master/kdata/converter.py).
    Parameters
    ----------
    database: Niimpy database
    user: string
    begin: datetime, optional
    end: datetime, optional
    Returns
    -------
    location: Dataframe
    """
    assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
    assert isinstance(subject, str),"user not given in string format"
    # Fetch the daily aggregates and drop bookkeeping columns / duplicate days.
    location = database.raw(table='AwareLocationDay', user=subject)
    location = location.drop(['device','user'],axis=1)
    location = location.drop_duplicates(subset=['day'],keep='first')
    # Index by day, localized to the study timezone.
    location['day'] = pd.to_datetime(location['day'], format='%Y-%m-%d')
    location = location.set_index('day')
    location.index = pd.to_datetime(location.index).tz_localize('Europe/Helsinki')
    # Default window: the full extent of the available data.
    if begin is None:
        begin = location.index[0]
    else:
        assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
    if end is None:
        end = location.index[-1]
    else:
        assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
    return location.loc[begin:end]
#Battery
def get_battery_data(battery, user=None, start = None, end = None):
    """ Returns a DataFrame with battery data for a user.

    Filters the raw battery frame to one user (optional) and a time window,
    drops duplicate (datetime, user, device) samples, removes the
    bookkeeping columns and coerces 'battery_level' to numeric.
    Parameters
    ----------
    battery: DataFrame with battery data; must contain 'user', 'device',
        'time', 'datetime' and 'battery_level' columns
    user: string, optional
    start: datetime, optional; defaults to the first row's datetime
    end: datetime, optional; defaults to the last row's datetime
    """
    assert isinstance(battery, pd.core.frame.DataFrame), "data is not a pandas DataFrame"
    if user is not None:
        assert isinstance(user, str),"user not given in string format"
        battery_data = battery[(battery['user']==user)]
    else:
        battery_data = battery
    if start is not None:
        start = pd.to_datetime(start)
    else:
        start = battery_data.iloc[0]['datetime']
    if end is not None:
        end = pd.to_datetime(end)
    else:
        end = battery_data.iloc[len(battery_data)-1]['datetime']
    # BUGFIX: work on an explicit copy — the original assigned the numeric
    # conversion onto a boolean-mask slice of the caller's frame, which
    # triggers SettingWithCopyWarning and can mutate the input.
    battery_data = battery_data[(battery_data['datetime']>=start) & (battery_data['datetime']<=end)].copy()
    battery_data['battery_level'] = pd.to_numeric(battery_data['battery_level'])
    battery_data = battery_data.drop_duplicates(subset=['datetime','user','device'],keep='last')
    battery_data = battery_data.drop(['user','device','time','datetime'],axis=1)
    return battery_data
def battery_occurrences(battery_data, user=None, start=None, end=None, battery_status = False, days= 0, hours=6, minutes=0, seconds=0,milli=0, micro=0, nano=0):
    """ Returns a dataframe showing the amount of battery data points found between a given interval and steps.
    The default interval is 6 hours.

    Splits [start, end] into consecutive windows of the requested width and
    counts the samples whose 'datetime' falls in each window (exclusive of
    the window start, inclusive of its end).
    Parameters
    ----------
    battery_data: Dataframe with a 'datetime' column
    user: string, optional; restrict to one user's rows
    start: datetime, optional; defaults to the first sample
    end: datetime, optional; defaults to the last sample
    battery_status: boolean, optional; when True and a 'battery_status'
        column exists, also count shutdown/reboot samples (status -1/-2/-3)
        per window in an extra column
    days/hours/.../nano: window width components (default 6 hours)
    """
    assert isinstance(battery_data, pd.core.frame.DataFrame), "data is not a pandas DataFrame"
    assert isinstance(user, (type(None), str)),"user not given in string format"
    if user is not None:
        # BUGFIX: this assignment previously went to a misspelled name
        # ('ocurrence_data'), so the user-filtered path raised NameError.
        occurrence_data = battery_data[(battery_data['user']==user)]
    else:
        occurrence_data = battery_data
    occurrence_data = occurrence_data.drop_duplicates(subset=['datetime','device'],keep='last')
    if start is None:
        start = occurrence_data.iloc[0]['datetime']
    start = pd.to_datetime(start)
    td = pd.Timedelta(days=days,hours=hours,minutes=minutes,seconds=seconds,milliseconds=milli,microseconds=micro,nanoseconds=nano)
    delta = start+td
    if end is None:
        end = occurrence_data.iloc[len(occurrence_data)-1]['datetime']
    end = pd.to_datetime(end)
    # Number of complete windows that fit between start and end.
    idx_range = np.floor((end-start)/td).astype(int)
    idx = pd.date_range(start, periods = idx_range, freq=td)
    if battery_status and ('battery_status' in occurrence_data.columns):
        occurrences = pd.DataFrame(np.nan, index = idx,columns=list(['start','end','occurrences','battery_status']))
        for i in range(idx_range):
            idx_dat = occurrence_data.loc[(occurrence_data['datetime']>start) & (occurrence_data['datetime']<=delta)]
            # Samples whose status marks the phone shutting down / rebooting.
            # (Renamed from 'battery_status' so the loop no longer shadows
            # the function parameter.)
            status_dat = occurrence_data.loc[(occurrence_data['datetime']>start) & (occurrence_data['datetime']<=delta) & ((occurrence_data['battery_status']=='-1')|(occurrence_data['battery_status']=='-2')|(occurrence_data['battery_status']=='-3'))]
            occurrences.iloc[i] = [start, delta,len(idx_dat), len(status_dat)]
            start = start + td
            delta = start + td
    else:
        occurrences = pd.DataFrame(np.nan, index = idx,columns=list(['start','end','occurrences']))
        for i in range(idx_range):
            idx_dat = occurrence_data.loc[(occurrence_data['datetime']>start) & (occurrence_data['datetime']<=delta)]
            occurrences.iloc[i] = [start, delta,len(idx_dat)]
            start = start + td
            delta = start + td
    return occurrences
def battery_gaps(data, min_duration_between = None):
    '''Returns a DataFrame including all battery data and showing the delta between
    consecutive battery timestamps. The minimum size of the considered deltas can be decided
    with the min_duration_between parameter.
    Parameters
    ----------
    data: dataframe with date index
    min_duration_between: Timedelta, for example, pd.Timedelta(hours=6)
    '''
    assert isinstance(data, pd.core.frame.DataFrame), "data is not a pandas DataFrame"
    assert isinstance(data.index, pd.core.indexes.datetimes.DatetimeIndex), "data index is not DatetimeIndex"
    annotated = data.copy()
    # Materialize the timestamp as a column so consecutive differences can
    # be computed with shift().
    annotated['tvalue'] = annotated.index
    previous = annotated['tvalue'].shift()
    # First row has no predecessor: report a zero-length gap.
    annotated['delta'] = (annotated['tvalue'] - previous).fillna(pd.Timedelta(seconds=0))
    if min_duration_between is not None:
        annotated = annotated[annotated['delta'] >= min_duration_between]
    return annotated
def battery_charge_discharge(data):
    '''Returns a DataFrame including all battery data and showing the charge/discharge between each timestamp.
    Parameters
    ----------
    data: dataframe with date index
    '''
    assert isinstance(data, pd.core.frame.DataFrame), "data is not a pandas DataFrame"
    assert isinstance(data.index, pd.core.indexes.datetimes.DatetimeIndex), "data index is not DatetimeIndex"
    rates = data.copy()
    rates['battery_level'] = pd.to_numeric(rates['battery_level'])
    rates['tvalue'] = rates.index
    # Per-sample deltas; the first row (no predecessor) gets zeros.
    rates['tdelta'] = (rates['tvalue'] - rates['tvalue'].shift()).fillna(pd.Timedelta(seconds=0))
    rates['bdelta'] = (rates['battery_level'] - rates['battery_level'].shift()).fillna(0)
    # Rate = battery-level change per elapsed second.
    seconds_elapsed = rates['tdelta'] / pd.Timedelta(seconds=1)
    rates['charge/discharge'] = rates['bdelta'] / seconds_elapsed
    return rates
def find_real_gaps(battery_data,other_data,start=None, end=None, days= 0, hours=6, minutes=0, seconds=0,milli=0, micro=0, nano=0):
    """ Returns a dataframe showing the gaps found both in the battery data and the other data.
    The default interval is 6 hours.

    A "real" gap is a time window where neither the battery stream nor the
    other sensor produced any samples.
    Parameters
    ----------
    battery_data: Dataframe with a DatetimeIndex
    other_data: Dataframe with a DatetimeIndex
        The data you want to compare with
    start: datetime, optional; defaults to the earliest sample of either frame
    end: datetime, optional; defaults to the latest sample of either frame
    """
    assert isinstance(battery_data, pd.core.frame.DataFrame), "battery_data is not a pandas DataFrame"
    assert isinstance(other_data, pd.core.frame.DataFrame), "other_data is not a pandas DataFrame"
    assert isinstance(battery_data.index, pd.core.indexes.datetimes.DatetimeIndex), "battery_data index is not DatetimeIndex"
    assert isinstance(other_data.index, pd.core.indexes.datetimes.DatetimeIndex), "other_data index is not DatetimeIndex"
    if start is not None:
        start = pd.to_datetime(start)
    else:
        start = battery_data.index[0] if (battery_data.index[0]<= other_data.index[0]) else other_data.index[0]
    if end is not None:
        end = pd.to_datetime(end)
    else:
        end = battery_data.index[-1] if (battery_data.index[-1]>= other_data.index[-1]) else other_data.index[-1]
    battery = battery_occurrences(battery_data, start=start,end=end,days=days,hours=hours,minutes=minutes,seconds=seconds,milli=milli,micro=micro,nano=nano)
    battery.rename({'occurrences': 'battery_occurrences'}, axis=1, inplace = True)
    # BUGFIX: pass end=end (as find_battery_gaps does) so both occurrence
    # frames are binned over the same [start, end] window; previously the
    # second frame ended at other_data's own last sample.
    other = battery_occurrences(other_data, start=start,end=end,days=days,hours=hours,minutes=minutes,seconds=seconds,milli=milli,micro=micro,nano=nano)
    # Keep only windows empty in BOTH streams.
    mask = (battery['battery_occurrences']==0)&(other['occurrences']==0)
    gaps = pd.concat([battery[mask],other[mask]['occurrences']],axis=1, sort=False)
    return gaps
def find_non_battery_gaps(battery_data,other_data,start=None, end=None, days= 0, hours=6, minutes=0, seconds=0,milli=0, micro=0, nano=0):
    """ Returns a dataframe showing the gaps found only in the other data.
    The default interval is 6 hours.

    Keeps windows where the battery stream is clearly alive (>10 samples)
    but the other sensor produced nothing — i.e. the gap is not explained
    by the phone being off.
    Parameters
    ----------
    battery_data: Dataframe with a DatetimeIndex
    other_data: Dataframe with a DatetimeIndex
        The data you want to compare with
    start: datetime, optional; defaults to the earliest sample of either frame
    end: datetime, optional; defaults to the latest sample of either frame
    """
    assert isinstance(battery_data, pd.core.frame.DataFrame), "battery_data is not a pandas DataFrame"
    assert isinstance(other_data, pd.core.frame.DataFrame), "other_data is not a pandas DataFrame"
    assert isinstance(battery_data.index, pd.core.indexes.datetimes.DatetimeIndex), "battery_data index is not DatetimeIndex"
    assert isinstance(other_data.index, pd.core.indexes.datetimes.DatetimeIndex), "other_data index is not DatetimeIndex"
    if start is not None:
        start = pd.to_datetime(start)
    else:
        start = battery_data.index[0] if (battery_data.index[0]<= other_data.index[0]) else other_data.index[0]
    if end is not None:
        end = pd.to_datetime(end)
    else:
        end = battery_data.index[-1] if (battery_data.index[-1]>= other_data.index[-1]) else other_data.index[-1]
    battery = battery_occurrences(battery_data, start=start,end=end,days=days,hours=hours,minutes=minutes,seconds=seconds,milli=milli,micro=micro,nano=nano)
    battery.rename({'occurrences': 'battery_occurrences'}, axis=1, inplace = True)
    # BUGFIX: pass end=end (as find_battery_gaps does) so both occurrence
    # frames are binned over the same [start, end] window.
    other = battery_occurrences(other_data, start=start,end=end,days=days,hours=hours,minutes=minutes,seconds=seconds,milli=milli,micro=micro,nano=nano)
    mask = (battery['battery_occurrences']>10)&(other['occurrences']==0)
    gaps = pd.concat([battery[mask],other[mask]['occurrences']],axis=1, sort=False)
    return gaps
def find_battery_gaps(battery_data,other_data,start=None, end=None, days= 0, hours=6, minutes=0, seconds=0,milli=0, micro=0, nano=0):
    """ Returns a dataframe showing the gaps found only in the battery data.
    The default interval is 6 hours.

    A window counts as a battery-only gap when the battery stream recorded
    nothing while the other stream recorded at least one sample.

    Parameters
    ----------
    battery_data: Dataframe
    other_data: Dataframe
        The data you want to compare with
    start: datetime, optional
    end: datetime, optional
    """
    assert isinstance(battery_data, pd.core.frame.DataFrame), "battery_data is not a pandas DataFrame"
    assert isinstance(other_data, pd.core.frame.DataFrame), "other_data is not a pandas DataFrame"
    assert isinstance(battery_data.index, pd.core.indexes.datetimes.DatetimeIndex), "battery_data index is not DatetimeIndex"
    assert isinstance(other_data.index, pd.core.indexes.datetimes.DatetimeIndex), "other_data index is not DatetimeIndex"
    # Resolve the comparison range: explicit bounds win, otherwise span the
    # union of both indexes.
    if start is not None:
        start = pd.to_datetime(start)
    else:
        start = min(battery_data.index[0], other_data.index[0])
    if end is not None:
        end = pd.to_datetime(end)
    else:
        end = max(battery_data.index[-1], other_data.index[-1])
    window = dict(days=days, hours=hours, minutes=minutes, seconds=seconds,
                  milli=milli, micro=micro, nano=nano)
    battery = battery_occurrences(battery_data, start=start, end=end, **window)
    battery = battery.rename({'occurrences': 'battery_occurrences'}, axis=1)
    other = battery_occurrences(other_data, start=start, end=end, **window)
    # Battery silent, other sensor active -> battery-only gap.
    battery_only = (battery['battery_occurrences'] == 0) & (other['occurrences'] > 0)
    return pd.concat([battery[battery_only], other[battery_only]['occurrences']],
                     axis=1, sort=False)
def missing_data_format(question,keep_values=False):
    """ Returns a series of timestamps in the right format to allow missing
    data visualization.

    Only the first row of each calendar day is kept; the result is a Series
    indexed by day (midnight datetimes).

    Parameters
    ----------
    question: Dataframe
        data indexed by a DatetimeIndex; must contain an 'answer' column
    keep_values: bool, optional
        if False (default) each kept row's 'answer' is replaced by 1 so the
        output simply marks days that have data; if True the original
        values are preserved
    """
    # Work on a copy so the caller's DataFrame is not mutated
    # (the previous version added a 'date' column to the input in place).
    question = question.copy()
    question['date'] = question.index
    # Truncate each timestamp to midnight so duplicates collapse per day.
    question['date'] = question['date'].apply( lambda question : datetime.datetime(year=question.year, month=question.month, day=question.day))
    question = question.drop_duplicates(subset=['date'],keep='first')
    question = question.set_index(['date'])
    if not keep_values:
        question['answer'] = 1
    question = question.T.squeeze()
    return question
def screen_missing_data(database,subject,begin=None,end=None):
    """ Returns a DataFrame containing the percentage (range [0,1]) of lost data
    per day, estimated from the transitions of the screen status. In general, if
    screen_status(t) == screen_status(t+1) we declare that at least one event
    is missing between the two samples.

    Parameters
    ----------
    database: Niimpy database
    subject: string
    begin: datetime, optional
    end: datetime, optional

    Returns
    -------
    count: Dataframe
        one row per day; column 'missing' holds the fraction of missing points
    """
    assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
    assert isinstance(subject, str),"usr not given in string format"
    screen = database.raw(table='AwareScreen', user=subject)
    if begin is not None:
        assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
    else:
        begin = screen.iloc[0]['datetime']
    if end is not None:
        assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
    else:
        end = screen.iloc[len(screen)-1]['datetime']
    screen = screen.drop_duplicates(subset=['datetime'],keep='first')
    screen = screen.drop(['device','user','time'],axis=1)
    screen = screen.loc[begin:end]
    screen['screen_status'] = pd.to_numeric(screen['screen_status'])
    # Include the missing points that are due to shutting down the phone:
    # shutdown events are injected as status 0 rows.
    shutdown = shutdown_info(database,subject,begin,end)
    shutdown = shutdown.rename(columns={'battery_status':'screen_status'})
    shutdown['screen_status'] = 0
    screen = screen.merge(shutdown, how='outer', left_index=True, right_index=True)
    # After the outer merge the status lives in two half-filled columns;
    # fold them back into one (the NaN side contributes 0).
    screen['screen_status'] = screen.fillna(0)['screen_status_x'] + screen.fillna(0)['screen_status_y']
    screen = screen.drop(['screen_status_x','screen_status_y'],axis=1)
    dates = screen.datetime_x.combine_first(screen.datetime_y)
    screen['datetime'] = dates
    screen = screen.drop(['datetime_x','datetime_y'],axis=1)
    # Detect missing data points: two consecutive identical statuses imply a
    # lost transition in between.
    screen['missing'] = 0
    screen['next'] = screen['screen_status'].shift(-1)
    screen['dummy'] = screen['screen_status']-screen['next']
    screen['missing'] = np.where(screen['dummy']==0, 1, 0)
    screen['missing'] = screen['missing'].shift(1)
    screen = screen.drop(['dummy','next'], axis=1)
    screen = screen.fillna(0)
    # Truncate timestamps to midnight so we can aggregate per day.
    screen['datetime'] = screen['datetime'].apply( lambda screen : datetime.datetime(year=screen.year, month=screen.month, day=screen.day))
    screen = screen.drop(['screen_status'], axis=1)
    # (Removed a dead pd.pivot_table assignment whose result was immediately
    # overwritten by the groupby below.)
    count = screen.groupby(['datetime','missing'])['missing'].count().unstack(fill_value=0)
    count['missing'] = count[1.0]/(count[0.0]+count[1.0])
    count = count.drop([0.0,1.0], axis=1)
    # Localize naive indexes to Helsinki time; EET/EEST means they already are.
    if (pd.Timestamp.tzname(count.index[0]) != 'EET'):
        if pd.Timestamp.tzname(count.index[0]) != 'EEST':
            count.index = pd.to_datetime(count.index).tz_localize('Europe/Helsinki')
    return count
def missing_noise(database,subject,begin=None,end=None):
    """ Returns a Dataframe with the estimated missing data from the ambient
    noise sensor.
    NOTE: This function aggregates data by day.
    Parameters
    ----------
    database: Niimpy database
    subject: string
    begin: datetime, optional
    end: datetime, optional
    Returns
    -------
    avg_noise: Dataframe
        one 'missing' count per retained sample
    """
    assert isinstance(database, niimpy.database.Data1),"database not given in Niimpy database format"
    assert isinstance(subject, str),"user not given in string format"
    noise = database.raw(table='AwareAmbientNoise', user=subject)
    if(begin!=None):
        assert isinstance(begin,pd.Timestamp),"begin not given in timestamp format"
    else:
        begin = noise.iloc[0]['datetime']
    if(end!= None):
        assert isinstance(end,pd.Timestamp),"end not given in timestamp format"
    else:
        end = noise.iloc[len(noise)-1]['datetime']
    # Keep only timing information; the acoustic measurements are not needed
    # to estimate missingness.
    noise = noise.drop(['device','user','time','double_silence_threshold','double_rms','blob_raw','is_silent','double_frequency'],axis=1)
    noise = noise.loc[begin:end]
    # Seconds between consecutive samples. Judging by the 1800/1860 thresholds
    # below, the sensor appears to fire roughly every 30 minutes -- TODO confirm.
    noise['duration'] = noise['datetime'].diff()
    noise['duration'] = get_seconds(noise['duration'])
    noise = noise.iloc[1:]  # drop first row: its diff is NaN
    # Merge in shutdown events so gaps caused by the phone being off can be
    # distinguished from genuine sensor loss.
    shutdown = shutdown_info(database,subject,begin,end)
    shutdown=shutdown.rename(columns={'battery_status':'duration'})
    noise = noise.merge(shutdown, how='outer', left_index=True, right_index=True)
    # Fold the two half-filled duration columns from the outer merge into one.
    noise['duration_x'] = noise.fillna(0)['duration_x'] + noise.fillna(0)['duration_y']
    noise=noise.rename(columns={'duration_x':'duration'})
    dates=noise.datetime_x.combine_first(noise.datetime_y)
    noise['datetime']=dates
    noise = noise.drop(['datetime_x','datetime_y'],axis=1)
    noise=noise.drop(['double_decibels', 'duration_y'],axis=1)
    noise['missing'] = np.where(noise['duration']>=1860, 1, 0) #detect the missing points
    noise['dummy'] = noise.missing.shift(-2) #assumes that everytime the cellphone shuts down, two timestamps are generated with -1 in the battery_health
    noise['dummy'] = noise.dummy*noise.duration
    noise['dummy'] = noise.dummy.shift(2)
    noise['missing'] = np.where(noise['missing']==1, np.round(noise['duration']/1800), 0) #calculate the number of datapoints missing
    noise = noise.drop(noise[noise.dummy==-1].index) #delete those missing datapoints due to the phone being shut down
    noise = noise.drop(['duration', 'datetime', 'dummy'],axis=1)
    return noise
| 62,936 | 19,889 |
# imports
import visa
import numpy as np
import os
import csv
import time
import datetime
import tkinter as tk
from tkinter.filedialog import askopenfilename, askdirectory
from tkinter.ttk import Frame, Button, Style,Treeview, Scrollbar, Checkbutton
from functools import partial
import serial
# This app uses an arduino to output two analog voltage channels from 0 to ~3.3V
# These output voltages are used to control flow rate on mass flow controllers
class VoltageController(Frame):
    """Tk GUI that drives two Arduino analog output channels (0 to ~3.3 V)
    used as set-points for mass flow controllers, optionally using a Keithley
    SMU (via VISA) to calibrate and monitor the output voltages.

    Arduino protocol: ASCII lines of the form "<channel><integer>\\n", where
    the integer is a DAC code in 0-4095.
    """

    def __init__(self,parent):
        """Open the Arduino serial port and build the GUI inside *parent*."""
        #### USER DEFINED
        self.arduinoAddress = 'COM5'
        self.window_title = "Mass Flow Control"
        self.channels = ["A","B"]
        self.V_calibration = {i:None for i in self.channels} # lazy-loaded per-channel calibration tables
        self.show_keithley = True
        self.smu_address_default = ""
        self.smu_address = ""
        self.complianceV=5
        self.max_V_out = 3.2467 # Measured maximum output voltage
        self.upper_reference_V = 4.097 # Measured reference output from LM4040
        # Bug fix: keithleyDiodeRead() reads this attribute but it was never
        # initialized anywhere; default to 10 averaged readings.
        self.keithleyReadingCount = 10
        #### End user defined parameters
        self.arduino = serial.Serial(self.arduinoAddress,9600)
        Frame.__init__(self, parent)
        self.parent = parent
        self.configure_gui()

    def configure_gui(self):
        """Lay out set-point entries, source on/off buttons and, when enabled,
        the Keithley measurement widgets."""
        # Master Window
        self.parent.title(self.window_title)
        self.style = Style()
        self.style.theme_use("default")
        # Test Mode Frame
        frame_setpoints=Frame(self)
        frame_setpoints.pack()
        self.s_setpoints = {}
        self.b_set_setpoints = {}
        self.l_actual_flow = {}
        self.l_integer = {}
        tk.Label(frame_setpoints,text="Setpoint").grid(row=0,column=1,sticky=tk.W,padx=1, pady=1)
        tk.Label(frame_setpoints,text="Actual").grid(row=0,column=2,sticky=tk.W,padx=1, pady=1)
        tk.Label(frame_setpoints,text="Integer").grid(row=0,column=3,sticky=tk.W,padx=1, pady=1)
        # One row of entry/labels/button per output channel.
        for i,ch in enumerate(self.channels):
            self.s_setpoints[ch] = tk.StringVar()
            tk.Label(frame_setpoints,text="Channel " + str(ch) + " (SCCM)"
                     ).grid(row=i+1,column=0,sticky=tk.W,padx=1, pady=1)
            tk.Entry(frame_setpoints,textvariable=self.s_setpoints[ch],width=10
                     ).grid(row=i+1,column=1,sticky=tk.W)
            self.l_actual_flow[ch] = tk.Label(frame_setpoints,text="00.00")
            self.l_actual_flow[ch].grid(row=i+1,column=2,sticky=tk.W,padx=1, pady=1)
            self.l_integer[ch] = tk.Label(frame_setpoints,text="0000")
            self.l_integer[ch].grid(row=i+1,column=3,sticky=tk.W,padx=1, pady=1)
            self.b_set_setpoints[ch] = Button(
                frame_setpoints,text="Set",
                command=partial(self.set_setpoint,ch))
            self.b_set_setpoints[ch].grid(row=i+1,column=4,sticky=tk.W,padx=1, pady=1)
        # Source control buttons
        frame_buttons=Frame(self)
        frame_buttons.pack()
        # Turn on all sources
        tk.Button(frame_buttons,text="Turn Sources On", bg="lime",
                  command=self.turn_on_sources).grid(row=0,column=0,sticky=tk.W,padx=1, pady=1)
        # Set all sources to zero
        tk.Button(frame_buttons,text="Set Sources to 0", bg="red",
                  command=self.turn_off_sources).grid(row=0,column=1,sticky=tk.W,padx=1, pady=1)
        # Functions for measuring with Keithley
        if self.show_keithley:
            self.rm = visa.ResourceManager()
            self.resources = self.rm.list_resources()
            self.configure_keithley_widgets()
        # Style Configuration
        Style().configure("defaultState.TButton", foreground='black', background='light grey')
        Style().configure("onState.TButton", foreground='black', background='red')
        Style().map("onState.TButton",
                    background=[('disabled', 'grey'),
                                ('pressed', 'red3'),
                                ('active', 'red2')])
        self.pack(fill=tk.BOTH, expand=1)

    def configure_keithley_widgets(self):
        """Add the SMU address picker, connect/calibrate buttons and the live
        voltage readout."""
        frame_keithley = Frame(self)
        frame_keithley.pack()
        self.l_smu_address = tk.Label(frame_keithley, text='Pick SMU address:')
        self.l_smu_address.grid(row=0, column=0, sticky=tk.W)
        self.s_smu_address = tk.StringVar()
        self.s_smu_address.set(self.smu_address)
        # NOTE(review): OptionMenu's command would call connect_to_smu with the
        # selected value (which it does not accept); in practice the menu
        # entries are immediately replaced by configure_resource_optionmenu
        # with their own commands, so this callback never fires -- confirm.
        self.o_smu_address = tk.OptionMenu(
            frame_keithley, self.s_smu_address,*self.resources,
            command=self.connect_to_smu)
        self.o_smu_address.grid(row=0,column=1, sticky=tk.W)
        self.configure_resource_optionmenu()
        ##### Connect buttons
        self.b_connect = tk.Button(frame_keithley, command=self.connect_to_smu)
        self.b_connect.configure(text="Connect", background= "yellow")
        self.b_connect.grid(row=0, column=2, sticky=tk.E, padx=5)
        self.b_calibrate_channelA = tk.Button(frame_keithley, command=partial(self.calibrate_channels,"A"))
        self.b_calibrate_channelA.configure(text="Calibrate A", background= "grey")
        self.b_calibrate_channelA.grid(row=0, column=3, sticky=tk.E, padx=5)
        self.b_calibrate_channelB = tk.Button(frame_keithley, command=partial(self.calibrate_channels,"B"))
        self.b_calibrate_channelB.configure(text="Calibrate B", background= "grey")
        self.b_calibrate_channelB.grid(row=0, column=4, sticky=tk.E, padx=5)
        tk.Label(frame_keithley, text='Voltage Reading:').grid(row=1,column=0,padx=5,sticky=tk.E)
        self.l_voltage = tk.Label(frame_keithley, text='000.0 mV')
        self.l_voltage.grid(row=1,column=1,padx=5,sticky=tk.W)

    def configure_resource_optionmenu(self):
        """Rebuild the option menu, showing only recognizable Keithley/GPIB
        resources together with a human-readable hardware label."""
        # Only display keithley or GPIB addresses
        # Keithley addresses have form USB0::0x05E6::0x26##::7 digit SN::INSTR
        self.display_resources = []
        for resource in self.resources:
            if ('USB0::0x05E6::0x26' in resource) or ('GPIB0' in resource):
                # Add the resource address and vendor info to the option menu
                hardware_info = self.get_hardware_label(resource)
                if not hardware_info=='Unknown':
                    self.display_resources.append(resource + '--' + hardware_info)
        # https://stackoverflow.com/questions/28412496/updating-optionmenu-from-list
        menu = self.o_smu_address["menu"]
        menu.delete(0, "end")
        for string in self.display_resources:
            # Selecting an entry stores only the raw address (before '--').
            menu.add_command(label=string,
                             command=lambda value=string.split('--')[0]: self.s_smu_address.set(value))
        # reset address to default
        self.s_smu_address.set(self.smu_address_default)

    def get_hardware_label(self,resource):
        """Query *IDN? on *resource* and return a short descriptive label,
        or 'Unknown' if the instrument cannot be identified."""
        try:
            r = self.rm.open_resource(resource)
            hardware_info = r.query("*IDN?")
            if 'OK\r\n' in hardware_info:
                # The "OK\r\n" message is sent as a handshake from Obis lasers
                # Turn hand-shaking off and then ask for the info again
                r.write('system:communicate:handshaking OFF')
                hardware_info = r.query("*IDN?")
            # Check for known instruments
            if 'Keithley' in hardware_info:
                model_number = hardware_info.split(',')[1].split(' Model ')[1]
                serial_number = hardware_info.split(',')[2][1:]
                label = 'Keithley ' + model_number + ' SN: ' + serial_number
            elif 'Stanford' in hardware_info:
                label = 'Lock-in Amplifier ' + hardware_info.split(',')[1]
            elif 'HEWLETT' in hardware_info:
                label = 'Parameter Analyzer ' + hardware_info.split(',')[1]
            elif 'Coherent' in hardware_info:
                wavelength = r.query('system:information:wavelength?')
                if 'OK' in wavelength:
                    r.write('system:communicate:handshaking OFF')
                    wavelength = r.query('system:information:wavelength?')
                label = 'Coherent ' + wavelength.strip() + 'nm laser'
            else:
                label = 'Unknown: ' + hardware_info.strip()
            r.close()
        except Exception as e:
            # Deliberate best-effort: any probe failure marks the resource
            # Unknown so it is simply not listed.
            label='Unknown'
        return label

    def connect_to_smu(self):
        """Open the selected VISA resource, initialize it and mark the button."""
        self.smu_address = self.s_smu_address.get()
        self.keithley = self.rm.open_resource(self.smu_address)
        self.initializeKeithley(self.keithley)
        print('keithley connected')
        self.b_connect.configure(background='green2')

    def calibrate_channels(self,ch):
        """Sweep every DAC code (0-4095) on channel *ch*, measure the output
        with the Keithley and save the (code, voltage) table to CSV."""
        print("calibrating channel " + ch)
        setpoints = np.arange(0,4096,1)
        voltages = np.zeros(setpoints.shape)
        self.keithley.write('smua.source.leveli=0')
        self.keithley.write('smua.source.output=1')
        self.keithley.write('smua.measure.autorangev=1')
        for i,setpt in enumerate(setpoints):
            self.arduino.write((str(ch)+str(setpt)+'\n').encode())
            self.l_actual_flow[ch].configure(text=str(setpt))
            time.sleep(0.2)  # let the DAC output settle before measuring
            voltages[i] = self.readVoltage()
            self.l_voltage.configure(text="{:.2f}".format(voltages[i]*1e3) + " mV")
            self.parent.update()
        np.savetxt('Channel'+ch+'_calibration.csv',np.vstack((setpoints,voltages)).T,
                   delimiter=',',header='Setpoints,Voltages')

    def prep_measure_stability(self,ch):
        """Start a long-term stability log for channel *ch*: apply the current
        set-point, write the CSV header and kick off the periodic sampling."""
        self.start_time = time.time()
        self.set_setpoint(ch) # Turn on desired set-point
        self.file = 'Channel'+ch+'_stability.csv'
        header="Elapsed Time (hr),Output Voltage (V)\n"
        with open(self.file, 'a') as f:
            f.write(header)
        self.measure_stability(ch)

    def measure_stability(self,ch):
        """Log the output voltage of channel *ch* and reschedule itself every
        20 seconds via the Tk event loop."""
        voltage = self.readVoltage() # read voltage
        # Bug fix: the attribute set by prep_measure_stability is start_time,
        # not startTime.
        self.onTime = time.time() - self.start_time # record onTime
        # Bug fix: display the voltage just read; 'voltages[i]' was an
        # undefined reference copied from calibrate_channels.
        self.l_voltage.configure(text="{:.2f}".format(voltage*1e3) + " mV")
        # Update file
        with open(self.file, 'a') as f:
            f.write(str(self.onTime/3600.0)+','+
                    str(voltage)+'\n')
        # Update once every 20 seconds
        self.parent.after(int(20 * 1000), partial(self.measure_stability,ch))

    def set_setpoint(self,ch):
        """Convert the entered SCCM value to a DAC code, update the labels and
        send the code to the Arduino."""
        integer,actual_flow = self.convert_sccm_to_int(float(self.s_setpoints[ch].get()),ch)
        self.l_actual_flow[ch].configure(text='{:.2f}'.format(float(actual_flow)))
        self.l_integer[ch].configure(text=str(integer))
        self.arduino.write((str(ch)+str(integer)+'\n').encode())

    def convert_sccm_to_int(self,sccm,ch):
        """Return (dac_code, achievable_flow_sccm) for the requested flow on
        channel *ch*, using the channel's measured calibration table."""
        # Get calibration if not yet loaded
        if self.V_calibration[ch] is None:
            self.V_calibration[ch] = np.genfromtxt("Channel"+ch+"_calibration.csv",
                                                   delimiter=',',skip_header=1)
        # Maximum SCCM output is 200
        # the upper reference output voltage is given by the LM4040
        sccm_per_volt = 200 / self.upper_reference_V
        V_out = sccm / sccm_per_volt # Needed output voltage
        if V_out > self.max_V_out:
            print('Maximum output voltage exceeded')
            V_out = self.max_V_out
        # Nearest measured voltage in the calibration table wins.
        idx_min = np.abs(V_out - self.V_calibration[ch][:,1]).argmin()
        integer = int(self.V_calibration[ch][idx_min,0])
        actual_flow=self.V_calibration[ch][idx_min,1]*sccm_per_volt
        return integer,actual_flow

    def turn_off_sources(self):
        """Send DAC code 0 to every channel (outputs to 0 V)."""
        for ch in self.channels:
            self.arduino.write((str(ch)+str(0)+'\n').encode())

    def turn_on_sources(self):
        """Re-apply the currently entered set-point on every channel."""
        for ch in self.channels:
            self.set_setpoint(ch)

    def initializeKeithley(self,keithley):
        """Reset the SMU; configure smua as a 0 A current source (voltage
        measurement) and smub as a 0 V voltage source (current measurement)."""
        keithley.write('reset()')
        keithley.timeout = 4000 # ms
        keithley.write('errorqueue.clear()')
        ch = 'a'
        keithley.write( 'smu'+ch+'.reset()')
        keithley.write( 'smu'+ch+'.measure.count=20')
        keithley.write( 'smu'+ch+'.measure.nplc=1')
        keithley.write( 'smu'+ch+'.nvbuffer1.appendmode=0')
        keithley.write( 'smu'+ch+'.nvbuffer1.clear()')
        keithley.write( 'smu'+ch+'.source.func=0') # 0 is output_DCAMPS, 1 is output_DCVOLTS
        keithley.write( 'smu'+ch+'.source.limitv='+str(self.complianceV))
        keithley.write( 'smu'+ch+'.source.leveli=0')
        keithley.write( 'smu'+ch+'.source.output=0')
        keithley.write( 'smu'+ch+'.measure.autorangev=1')
        ch = 'b'
        keithley.write( 'smu'+ch+'.reset()')
        keithley.write( 'smu'+ch+'.measure.count=10')
        keithley.write( 'smu'+ch+'.measure.nplc=1')
        keithley.write( 'smu'+ch+'.nvbuffer1.appendmode=0')
        keithley.write( 'smu'+ch+'.nvbuffer1.clear()')
        keithley.write( 'smu'+ch+'.source.func=1') # 0 is output_DCAMPS, 1 is output_DCVOLTS
        keithley.write( 'smu'+ch+'.source.levelv=0')
        keithley.write( 'smu'+ch+'.measure.autorangei=1')
        keithley.write( 'smu'+ch+'.source.output=1')
        print('keithley initialized')

    def turnCurrentOn(self,I):
        """Source current *I* (amps) on smua and enable the output."""
        print('current turned on')
        self.keithley.write( 'smua.source.leveli='+str(I))
        self.keithley.write( 'smua.source.output=1')

    def turnCurrentOff(self):
        """Disable the smua output."""
        print('current turned off')
        self.keithley.write( 'smua.source.output=0')

    def turnVoltageOn(self,V):
        """Switch smua to voltage sourcing, apply *V* volts, enable output."""
        self.keithley.write( 'smua.source.func=1') # 0 is output_DCAMPS, 1 is output_DCVOLTS
        self.keithley.write( 'smua.source.levelv='+str(V))
        self.keithley.write( 'smua.source.output=1')

    def turnVoltageOff(self):
        """Zero the smua voltage, switch back to current sourcing, disable output."""
        self.keithley.write( 'smua.source.levelv=0')
        self.keithley.write( 'smua.source.func=0') # 0 is output_DCAMPS, 1 is output_DCVOLTS
        self.keithley.write( 'smua.source.output=0')

    # reads the voltage from keithley of the specified device
    def readVoltage(self):
        """Return the mean of a buffered voltage measurement on smua (volts)."""
        self.keithley.write('smua.nvbuffer1.clear()')
        self.keithley.write('smua.measure.v(smua.nvbuffer1)')
        sig = self.keithley.query('printbuffer(1,smua.nvbuffer1.n,smua.nvbuffer1)')
        sig=[float(v) for v in sig.split(',')]
        return np.mean(sig)

    def readCurrent(self):
        """Return the mean of a buffered current measurement on smua (amps)."""
        self.keithley.write('smua.measure.autorangei=1')
        self.keithley.write('smua.nvbuffer1.clear()')
        self.keithley.write('smua.measure.i(smua.nvbuffer1)')
        sig = self.keithley.query('printbuffer(1,smua.nvbuffer1.n,smua.nvbuffer1)')
        sig=[float(v) for v in sig.split(',')]
        return np.mean(sig)

    # reads device photodiode signal measured by keithley from channel b
    def keithleyDiodeRead(self,keithley):
        """Return (mean, std) of keithleyReadingCount buffered current
        readings on smub."""
        holder = []
        for x in range(0,self.keithleyReadingCount):
            keithley.write('smub.nvbuffer1.clear()')
            keithley.write('smub.measure.i(smub.nvbuffer1)')
            sig = keithley.query('printbuffer(1,smub.nvbuffer1.n,smub.nvbuffer1)')
            sig=[float(v) for v in sig.split(',')]
            holder.append(sig)
        return np.mean(holder),np.std(holder)
def main():
    """Build the mass-flow-control window and block in the Tk event loop."""
    root_window = tk.Tk()
    controller = VoltageController(root_window)
    root_window.mainloop()
    # The window has been closed: release the Arduino's serial port.
    controller.arduino.close()


if __name__ == '__main__':
    main()
#-*- coding:utf-8 -*-
__author__ = "ChenJun"
import theano
import theano.tensor as T
import numpy as np
import cPickle as pickle
from theano_models.qa_cnn import CNNModule
from theano_models.layers import InteractLayer, MLP, MLPDropout, BatchNormLayer
from theano_models.optimizer import Optimizer
from data_process.load_data import data_loader
from collections import OrderedDict
from qa_score import qa_evaluate
from weighted_model import ensemble
import warnings
warnings.filterwarnings("ignore")
SEED = 3435  # fixed seed so parameter initialization is reproducible across runs
rng = np.random.RandomState(SEED)  # shared NumPy RNG handed to the model layers
def get_overlap(path,length):
train_overlap = pickle.load(open(path+"pkl/overlap01-train.pkl","r"))
valid_overlap = pickle.load(open(path+"pkl/overlap01-valid.pkl","r"))
test_overlap = pickle.load(open(path+"pkl/overlap01-test.pkl","r"))
train_overlap_q, train_overlap_a = train_overlap[:,0], train_overlap[:,1]
valid_overlap_q, valid_overlap_a = valid_overlap[:,0], valid_overlap[:,1]
test_overlap_q, test_overlap_a = test_overlap[:,0], test_overlap[:,1]
print "overlap01 feature shape: ", train_overlap_q.shape, valid_overlap_q.shape, test_overlap_a.shape
train_overlap_q = theano.shared(value=train_overlap_q.reshape((train_overlap.shape[0], 1, length,1)),borrow=True)
train_overlap_a = theano.shared(value=train_overlap_a.reshape((train_overlap.shape[0], 1, length, 1)), borrow=True)
valid_overlap_q = theano.shared(value=valid_overlap_q.reshape((valid_overlap.shape[0], 1, length, 1)), borrow=True)
valid_overlap_a = theano.shared(value=valid_overlap_a.reshape((valid_overlap.shape[0], 1, length, 1)), borrow=True)
test_overlap_q = theano.shared(value=test_overlap_q.reshape((test_overlap.shape[0], 1, length, 1)), borrow=True)
test_overlap_a = theano.shared(value=test_overlap_a.reshape((test_overlap.shape[0], 1, length, 1)), borrow=True)
return [(train_overlap_q,valid_overlap_q,test_overlap_q),(train_overlap_a,valid_overlap_a,test_overlap_a)]
def build_model(batch_size,img_h,img_w,filter_windows,filter_num,n_in,n_hidden,n_out,L1_reg,L2_reg,conv_non_linear,learning_rate,n_epochs,random=False,non_static=False):
    """
    build cnn model for QA.
    :param batch_size: batch_size
    :param img_h: sentence length
    :param img_w: word vector dimension [100]
    :param filter_windows: filter window sizes
    :param filter_num: the number of feature maps (per filter window)
    :param n_in: num of input units
    :param n_hidden: num of hidden units
    :param n_out: num of out units
    :param L1_reg: mlp L1 loss
    :param L2_reg: mlp L2 loss
    :param conv_non_linear: activation
    :param learning_rate: learning rate
        (NOTE(review): currently unused -- opt.RMSprop below is called
        without it; confirm whether it should be forwarded)
    :param n_epochs: num of epochs
    :param random: bool, use random embedding or trained embedding
    :param non_static: bool, use word embedding for param or not
    :return:
    """
    global rng
    ###############
    # LOAD DATA #
    ###############
    print "loading the data... "
    path = "/Users/chenjun/PycharmProjects/DBQA/"
    loader = data_loader(path+"pkl/data-train-nn.pkl",path+"pkl/data-valid-nn.pkl",path+"pkl/data-test-nn.pkl", path+"pkl/index2vec.pkl")
    valid_group_list = pickle.load(open(path+"pkl/valid_group.pkl"))
    test_group_list = [int(x.strip()) for x in open(path + "data/dbqa-data-test.txt.group")]
    datasets, emb_words = loader.get_input_by_model(model="theano",random=random)
    # datasets: [questions, answers, labels], each split train/valid/test.
    train_q_data, valid_q_data, test_q_data = datasets[0]
    train_a_data, valid_a_data, test_a_data = datasets[1]
    train_l_data, valid_l_data, test_l_data = datasets[2]
    # Pre-computed q/a word-overlap features, shaped to concatenate with the
    # embedding tensor as one extra column.
    features = get_overlap(path, length=img_h)
    train_overlap_q, valid_overlap_q, test_overlap_q = features[0]
    train_overlap_a, valid_overlap_a, test_overlap_a = features[1]
    # calculate the number of batches (floor division: leftover samples are
    # not batched; the evaluation code pads their scores with randoms below)
    n_train_batches = train_q_data.get_value(borrow=True).shape[0] // batch_size
    n_valid_batches = valid_q_data.get_value(borrow=True).shape[0] // batch_size
    n_test_batches = test_q_data.get_value(borrow=True).shape[0] // batch_size
    print "batch_size: %i, n_train_batches: %i, n_valid_batches: %i, n_test_batches: %i" % (batch_size, n_train_batches, n_valid_batches, n_test_batches)
    ###############
    # BUILD MODEL #
    ###############
    print "building the model... "
    # define the input variable
    index = T.lscalar(name="index")        # minibatch index
    drop_rate = T.fscalar(name="drop_rate")  # dropout probability (0.0 at eval)
    x1 = T.matrix(name='x1', dtype='int64')  # question word indices
    x2 = T.matrix(name='x2', dtype='int64')  # answer word indices
    y = T.lvector(name='y')                  # relevance labels
    x1_overlap = T.tensor4(name="x1_overlap", dtype='float32')
    x2_overlap = T.tensor4(name="x2_overlap", dtype='float32')
    # transfer input to vector with embedding; the overlap feature is appended
    # as the final column so each token is (img_w-1)-dim embedding + 1 flag.
    _x1 = emb_words[x1.flatten()].reshape((x1.shape[0], 1, img_h, img_w - 1))
    emb_x1 = T.concatenate([_x1, x1_overlap], axis=3)
    _x2 = emb_words[x2.flatten()].reshape((x2.shape[0], 1, img_h, img_w - 1))
    emb_x2 = T.concatenate([_x2, x2_overlap], axis=3)
    # conv_layer: one shared conv+pool module per filter window size; question
    # and answer go through the same weights.
    conv_layers = []
    q_input = []
    a_input = []
    for i, filter_h in enumerate(filter_windows):
        filter_w = img_w
        filter_shape = (filter_num, 1, filter_h, filter_w)
        pool_size = (img_h - filter_h + 1, img_w - filter_w + 1)
        conv_layer = CNNModule(rng, filter_shape=filter_shape, pool_size=pool_size, non_linear=conv_non_linear)
        q_conv_output, a_conv_output = conv_layer(emb_x1, emb_x2)
        q_conv_output = q_conv_output.flatten(2) # [batch_size * filter_num]
        a_conv_output = a_conv_output.flatten(2) # [batch_size * filter_num]
        q_input.append(q_conv_output)
        a_input.append(a_conv_output)
        conv_layers.append(conv_layer)
    q_input = T.concatenate(q_input, axis=1) # batch_size*(filter_num*len(filter_windows))
    a_input = T.concatenate(a_input, axis=1) # batch_size*(filter_num*len(filter_windows))
    num_filters = len(filter_windows) * filter_num
    # Bilinear interaction between question and answer representations,
    # followed by batch-norm and a dropout MLP classifier.
    interact_layer = InteractLayer(rng, num_filters, num_filters, dim=n_in)
    qa_vec = interact_layer(q_input, a_input)
    bn_layer = BatchNormLayer(n_in=n_in, inputs=qa_vec)
    # classifier = MLP(rng,input=bn_layer.out,n_in=n_in,n_hidden=n_hidden,n_out=n_out)
    classifier = MLPDropout(rng, input=bn_layer.out, n_in=n_in, n_hidden=n_hidden, n_out=n_out, dropout_rate=drop_rate)
    # model params (optionally including the embedding matrix itself)
    params = classifier.params + interact_layer.params + bn_layer.params
    for i in xrange(len(conv_layers)):
        params += conv_layers[i].params
    if non_static:
        print "---CNN-NON-STATIC---"
        params += [emb_words]
    else:
        print "---CNN-STATIC---"
    opt = Optimizer()
    # Cross-entropy plus L1/L2 regularization of the MLP weights.
    cost = (
        classifier.cross_entropy(y)
        + L1_reg * classifier.L1
        + L2_reg * classifier.L2_sqr
    )
    # updates = opt.sgd_updates_adadelta(params, cost, 0.95, 1e-6, 9)
    updates = opt.RMSprop(params, cost)
    # Compiled functions: train updates params; valid/test only return the
    # predicted relevance probability for each pair in the minibatch.
    train_model = theano.function(
        inputs=[index, drop_rate],
        updates=updates,
        outputs=cost,
        givens={
            x1: train_q_data[index * batch_size:(index + 1) * batch_size],
            x2: train_a_data[index * batch_size:(index + 1) * batch_size],
            y: train_l_data[index * batch_size:(index + 1) * batch_size],
            x1_overlap: train_overlap_q[index * batch_size: (index + 1) * batch_size],
            x2_overlap: train_overlap_a[index * batch_size: (index + 1) * batch_size]
        },
    )
    valid_model = theano.function(
        inputs=[index, drop_rate],
        outputs=classifier.pred_prob(),
        givens={
            x1: valid_q_data[index * batch_size:(index + 1) * batch_size],
            x2: valid_a_data[index * batch_size:(index + 1) * batch_size],
            x1_overlap: valid_overlap_q[index * batch_size: (index + 1) * batch_size],
            x2_overlap: valid_overlap_a[index * batch_size: (index + 1) * batch_size]
        },
    )
    test_model = theano.function(
        inputs=[index, drop_rate],
        outputs=classifier.pred_prob(),
        givens={
            x1: test_q_data[index * batch_size:(index + 1) * batch_size],
            x2: test_a_data[index * batch_size:(index + 1) * batch_size],
            x1_overlap: test_overlap_q[index * batch_size: (index + 1) * batch_size],
            x2_overlap: test_overlap_a[index * batch_size: (index + 1) * batch_size]
        },
    )
    ###############
    # TRAIN MODEL #
    ###############
    print('training the model...')
    epoch = 0
    valid_dic = OrderedDict()  # checkpoint name -> valid MRR
    eval_dic = OrderedDict()   # checkpoint name -> test MRR
    while epoch < n_epochs:
        epoch += 1
        batch_cost = 0.
        for batch_index1 in xrange(n_train_batches):
            batch_cost += train_model(batch_index1, 0.5) # drop
            if batch_index1 % 100 == 0:
                print ('epoch %i/%i, batch %i/%i, cost %f') % (
                    epoch, n_epochs, batch_index1, n_train_batches, batch_cost / n_train_batches)
                ###############
                # VALID MODEL #
                ###############
                valid_score_data = []
                for batch_index2 in xrange(n_valid_batches):
                    batch_pred = valid_model(batch_index2, 0.0) # drop
                    valid_score_data.append(batch_pred)
                valid_score_list = (np.concatenate(np.asarray(valid_score_data), axis=0)).tolist()
                valid_label_list = valid_l_data.get_value(borrow=True).tolist()
                # Samples dropped by the floor-division batching get random
                # scores so score/label lists stay the same length.
                for i in xrange(len(valid_score_list), len(valid_label_list)):
                    valid_score_list.append(np.random.random())
                _eval = qa_evaluate(valid_score_list, valid_label_list, valid_group_list, label=1, mod="mrr")
                print "---valid mrr: ", _eval
                valid_dic[str(epoch) + "-" + str(batch_index1)] = _eval
                ###############
                # TEST MODEL #
                ###############
                test_score_data = []
                for batch_index3 in xrange(n_test_batches):
                    batch_pred = test_model(batch_index3, 0.0) # drop
                    test_score_data.append(batch_pred)
                test_score_list = (np.concatenate(np.asarray(test_score_data), axis=0)).tolist()
                test_label_list = test_l_data.get_value(borrow=True).tolist()
                for i in xrange(len(test_score_list), len(test_label_list)):
                    test_score_list.append(np.random.random())
                _eval = qa_evaluate(test_score_list, test_label_list, test_group_list, label=1, mod="mrr")
                print "---test mrr: ", _eval
                eval_dic[str(epoch) + "-" + str(batch_index1)] = _eval
                # Persist this checkpoint's scores for the ensemble step below.
                pickle.dump(valid_score_list, open(path + "result/cnn-overlap-valid.pkl." + str(epoch) + "-" + str(batch_index1), "w"))
                pickle.dump(test_score_list, open(path + "result/cnn-overlap-test.pkl."+str(epoch)+"-"+str(batch_index1), "w"))
                pickle.dump(test_label_list, open(path + "result/test_label.pkl", "w"))
                pickle.dump(valid_label_list, open(path + "result/valid_label.pkl", "w"))
    # Keep the 10 checkpoints with the best validation MRR and ensemble their
    # scores (test checkpoints are also selected by validation performance).
    _valid_dic = sorted(valid_dic.items(), key=lambda x: x[1])[-10:]
    _eval_dic = sorted(eval_dic.items(), key=lambda x: x[1])[-10:]
    print "valid dic: ", _valid_dic
    print "eval dic: ", _eval_dic
    valid_score_file = [path+"result/cnn-overlap-valid.pkl."+x[0] for x in _valid_dic]
    test_score_file = [path + "result/cnn-overlap-test.pkl." + x[0] for x in _valid_dic] ###from valid
    valid_label_file = path + "result/valid_label.pkl"
    test_label_file = path + "result/test_label.pkl"
    test_ensemble_file = path + "result/test_ensemble_overlap.pkl"
    valid_ensemble_file = path + "result/valid_ensemble_overlap.pkl"
    valid_mrr = ensemble(valid_score_file, valid_label_file, valid_group_list, valid_ensemble_file)
    test_mrr = ensemble(test_score_file, test_label_file, test_group_list, test_ensemble_file)
    print "---ensemble valid mrr: ", valid_mrr
    print "---ensemble test mrr: ", test_mrr
if __name__ == "__main__":
    # Train the overlap-feature QA-CNN with the default hyper-parameters.
    build_model(batch_size=64,
                img_h=50,                  # padded sentence length
                img_w=51,                  # (img_w-1)-dim embedding + 1 overlap column
                filter_windows=[3, 1, 2],  # convolution window heights
                filter_num=128,            # feature maps per window size
                n_in=20,                   # interaction-layer output dimension
                n_hidden=128,
                n_out=2,                   # binary relevance classes
                L1_reg=0.00,
                L2_reg=0.0001,
                learning_rate=0.001,       # NOTE(review): not used inside build_model
                conv_non_linear="relu",
                n_epochs=3,
                random=False,              # use pre-trained embeddings
                non_static=False)          # keep embeddings frozen
# Rest Server
from flask import Flask, jsonify, abort, request
# Eureka client
from eureka.client import EurekaClient
# Background tasks
import threading
import atexit
import logging
import socket
import netifaces as ni
import sys
import os
import time
# Plotter libs
from io import BytesIO
import pymongo
from pymongo import MongoClient
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import base64
import datetime
# MongoDB locations used by the plotting endpoints.
DATABASE_NAME = 'marble'
POSTS_COLLECTION = 'posts'
PROCESSED_POSTS_COLLECTION = 'processed_posts'
pool_time = 5 # Seconds between background Eureka heartbeat attempts
# variables that are accessible from anywhere (shared with the heartbeat thread)
commonDataStruct = {}
# lock to control access to variable
dataLock = threading.Lock()
# thread handler (replaced by a threading.Timer once create_app runs)
yourThread = threading.Thread()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Global variables
app_name = "plotter-dous"
# Advertise the eth0 address to Eureka; fall back to localhost when the
# interface does not exist (e.g. when running outside the container).
try:
    ni.ifaddresses('eth0')
    app_ip = ni.ifaddresses('eth0')[2][0]['addr']
except Exception:
    app_ip = "localhost"
app_host = socket.getfqdn()
app_port = 8084
secure_app_port = 8443
eureka_url = "http://registry:1111/eureka/"
def create_app():
    """Build the Flask application and start a self-rescheduling background
    timer that keeps this service registered (and heartbeating) in Eureka.

    Returns the configured Flask instance; the timer is cancelled at process
    exit via atexit.
    """
    app = Flask(__name__)

    def interrupt():
        # Cancel the pending heartbeat timer so the process can exit cleanly.
        global yourThread
        yourThread.cancel()

    def doStuff():
        # One heartbeat cycle: try to heartbeat, (re-)register on failure,
        # then schedule the next cycle pool_time seconds from now.
        global commonDataStruct
        global yourThread
        with dataLock:
            # TODO: Handle what happens when eureka goes down
            try:
                commonDataStruct['ec'].heartbeat()
            except Exception:
                # Heartbeat failed -- assume we are not registered (yet).
                logger.info("Registering to Eureka...")
                try:
                    commonDataStruct['ec'].register(initial_status="UP")
                    logger.info("Registered to Eureka.")
                    commonDataStruct['ec'].heartbeat()
                except Exception as e:
                    # Registration failed too; log where it happened and let
                    # the next timer tick retry.
                    logger.warning(
                        "Caught exception while trying to register in Eureka: " + str(e) + ". Will retry again shortly.")
                    exc_type, exc_obj, exc_tb = sys.exc_info()
                    fname = os.path.split(
                        exc_tb.tb_frame.f_code.co_filename)[1]
                    print((exc_type, fname, exc_tb.tb_lineno))
        # Set the next thread to happen
        yourThread = threading.Timer(pool_time, doStuff, ())
        yourThread.start()

    def doStuffStart():
        # Do initialisation stuff here
        # no spaces or underscores, this needs to be url-friendly
        commonDataStruct['ec'] = EurekaClient(app_name,
                                              ip_address=app_ip,
                                              eureka_url=eureka_url,
                                              eureka_domain_name="",
                                              data_center="MyOwn",
                                              port=app_port,
                                              secure_port=None,
                                              use_dns=False,
                                              region="none",
                                              prefer_same_zone=False,
                                              context="",
                                              host_name=app_host,
                                              vip_address=app_name,
                                              secure_vip_address=app_name)
        global yourThread
        # Create your thread
        yourThread = threading.Timer(pool_time, doStuff, ())
        yourThread.start()

    # Initiate
    doStuffStart()
    # When you kill Flask (SIGTERM), clear the trigger for the next thread
    atexit.register(interrupt)
    return app
class ChartResponse(object):
    """Plain data holder mirroring the chart JSON returned by the service.

    :param name: chart display name
    :param description: human-readable chart description
    :param type: chart type label (default "Image")
    :param customType: optional custom type tag
    :param jobId: id of the job that produced the chart, if any
    :param options: chart options dict (fresh dict per instance)
    :param data: chart data dict (fresh dict per instance)
    :param images: chart images dict (fresh dict per instance)
    """

    def __init__(self, name, description="", type="Image", customType=None,
                 jobId=None, options=None, data=None, images=None):
        # The original used mutable defaults ({}), which are created once and
        # shared by every instance; use the None-sentinel idiom instead.
        self.id = None
        self.name = name
        self.description = description
        self.type = type
        self.customType = customType
        self.jobId = jobId
        self.options = {} if options is None else options
        self.data = {} if data is None else data
        self.images = {} if images is None else images
        self.createdAt = None
def plotTopic(topicName, options):
    """Build a base64-encoded PNG chart for a topic from MongoDB data.

    :param topicName: topic whose posts are plotted
    :param options: chart options; requires 'title', 'description' and
        'type' (only "scatter" is supported)
    :returns: response dict with a single base64 chart, or None when the
        chart type is unsupported or there is no data to plot
    """
    chartName = options['title']
    chartDescription = options['description']
    client = MongoClient('mongodb', 27017)
    db = client[DATABASE_NAME]
    posts_collection = db.get_collection(POSTS_COLLECTION)
    processed_posts_collection = db.get_collection(PROCESSED_POSTS_COLLECTION)
    invalid_plot = False
    if (options['type'] == "scatter"):
        logger.debug("Plotting scatter.")
        collection = options.get('collection', PROCESSED_POSTS_COLLECTION)
        point_size = options.get('point_size', 2)
        color = options.get('color', 'green')
        y_axis_field = options.get('y_axis_field', 'polarity')
        y_min = options.get('y_min', None)
        y_max = options.get('y_max', None)
        if (collection == POSTS_COLLECTION):
            posts = posts_collection.find(
                {'topicName': topicName}).sort('createdAt', pymongo.ASCENDING)
        else:
            posts = processed_posts_collection.find(
                {'topicName': topicName}).sort('createdAt', pymongo.ASCENDING)
        dates_axis = []
        y_axis = []
        for post in posts:
            if (y_axis_field in post):
                dates_axis.append(post['createdAt'])
                y_axis.append(post[y_axis_field])
        dates = [pd.to_datetime(d) for d in dates_axis]
        if not dates:
            # No post carries the requested field: nothing to plot.  The
            # original crashed on `dates[0]` here.
            invalid_plot = True
        else:
            fig = plt.figure(figsize=(11, 6))
            plt.title(chartName)
            plt.xlabel('createdAt')
            plt.ylabel(y_axis_field)
            # the scatter plot:
            axScatter = plt.subplot(111)
            axScatter.scatter(x=dates, y=y_axis, s=point_size, color=color)
            # set axes range
            plt.xlim(dates[0], dates[-1])
            # was `== None`: identity comparison is the correct idiom
            if y_min is None:
                y_min = min(y_axis)
            if y_max is None:
                y_max = max(y_axis)
            plt.ylim(y_min, y_max)
            imgdata = BytesIO()
            fig.savefig(imgdata, format='png')
            # Close the figure: the original reused pyplot figure 1 forever,
            # so successive requests piled plots onto the same canvas and
            # leaked memory in this long-running server.
            plt.close(fig)
            encoded_chart = base64.b64encode(imgdata.getvalue())
    else:
        invalid_plot = True
    client.close()
    if invalid_plot:
        return None
    singleChart = {
        "id": None,
        "name": chartName,
        "description": chartDescription,
        "type": "Figure List",
        "customType": "",
        "jobId": None,
        "options": {},
        "data": {},
        "figures": [encoded_chart.decode('ascii')],
        "createdAt": None
    }
    response = {
        "charts": [
            singleChart
        ]
    }
    return response
# Build the Flask app (this also starts the Eureka heartbeat timer).
app = create_app()
@app.route('/api/plot', methods=['POST'])
def process():
    """Handle POST /api/plot: validate the JSON body and return a chart.

    Expects a JSON body with a 'topicName' and an 'options' key; responds
    400 on a malformed body, 500 when no chart could be produced.

    BUG FIX: the original guard read ``not 'topicName' or ...`` -- a
    non-empty string literal is always truthy, so the 'topicName' presence
    check never ran.
    """
    print(request)
    body = request.json
    if not body or 'topicName' not in body or 'options' not in body:
        abort(400)
    response = plotTopic(body['topicName'], body.get('options', {}))
    if response is not None:  # was `!= None`
        return jsonify(response), 200
    else:
        return "", 500
if __name__ == '__main__':
    # Listen on all interfaces so the container port mapping works.
    app.run(host="0.0.0.0", port=app_port)
    # plotTopic("Apple Microsoft", {
    #     'title': 'Titlte', 'description': 'Dscription'})
    #input("Press Enter to continue...")
| 7,298 | 2,145 |
#Example for Jon Smirl on 28 Dec 1999, originally by Steve Muench, with improvements by Mike Brown and Jeremy Richman
from Xml.Xslt import test_harness
sheet_1 = """<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0">
<xsl:template match="/">
<html>
<head/>
<body>
<xsl:apply-templates/>
</body>
</html>
</xsl:template>
<xsl:template match="p">
<p><xsl:apply-templates/></p>
</xsl:template>
<xsl:template match="programlisting">
<span style="font-family:monospace">
<xsl:call-template name="br-replace">
<xsl:with-param name="word" select="."/>
</xsl:call-template>
</span>
</xsl:template>
<xsl:template name="br-replace">
<xsl:param name="word"/>
<!-- </xsl:text> on next line on purpose to get newline -->
<xsl:variable name="cr"><xsl:text>
</xsl:text></xsl:variable>
<xsl:choose>
<xsl:when test="contains($word,$cr)">
<xsl:value-of select="substring-before($word,$cr)"/>
<br/>
<xsl:call-template name="br-replace">
<xsl:with-param name="word" select="substring-after($word,$cr)"/>
</xsl:call-template>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="$word"/>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
</xsl:stylesheet>"""
sheet_2 = """<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0">
<xsl:template match="/">
<html>
<head/>
<body>
<xsl:apply-templates/>
</body>
</html>
</xsl:template>
<xsl:template match="p">
<p><xsl:apply-templates/></p>
</xsl:template>
<xsl:template match="programlisting">
<span style="font-family:monospace">
<xsl:apply-templates/>
</span>
</xsl:template>
<xsl:template match="programlisting/text()[contains(.,'
')]">
<xsl:call-template name="br-replace">
<xsl:with-param name="text" select="."/>
</xsl:call-template>
</xsl:template>
<xsl:template name="br-replace">
<xsl:param name="text"/>
<!-- </xsl:text> on next line on purpose to get newline -->
<xsl:choose>
<xsl:when test="contains($text, '
')">
<xsl:value-of select="substring-before($text, '
')"/>
<br/>
<xsl:call-template name="br-replace">
<xsl:with-param name="text" select="substring-after($text, '
')"/>
</xsl:call-template>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="$text"/>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
</xsl:stylesheet>"""
source_1="""<doc>
<p>This is some text.</p>
<programlisting><![CDATA[This is a paragraph
with some newlines
does it work?]]></programlisting>
</doc>"""
expected_1 = """<html>
<head>
<meta http-equiv='Content-Type' content='text/html; charset=iso-8859-1'>
</head>
<body>
<p>This is some text.</p>
<span style='font-family:monospace'>This is a paragraph<br> with some newlines<br> does it work?</span>
</body>
</html>"""
def Test(tester):
    """Run both stylesheet variants against the same source document.

    Both sheets must produce identical HTML output (expected_1).
    """
    for index, stylesheet in enumerate((sheet_1, sheet_2), start=1):
        source = test_harness.FileInfo(string=source_1)
        sheet = test_harness.FileInfo(string=stylesheet)
        test_harness.XsltTest(tester, source, [sheet], expected_1,
                              title='test %d' % index)
    return
| 3,274 | 1,210 |
import tldextract
from django.db.models import Q
from docutils import nodes
from docutils.transforms import Transform
from django_docutils.favicon.models import get_favicon_model
from ..nodes import icon
Favicon = get_favicon_model()
def resolve_favicon(url):
    """Look up the stored favicon URL for the website hosting *url*.

    The URL is reduced to its fully-qualified domain name (e.g.
    ``forums.bbc.co.uk``), which is the lookup key in the Favicon table.

    :param url: URL to any page on a website
    :type url: str
    :returns: Full Storage based favicon url path, or None
    :rtype: str|None
    """
    domain_key = tldextract.extract(url).fqdn
    try:
        # ValueError also covers a Favicon row whose file has no usable URL.
        return Favicon.objects.get(domain=domain_key).favicon.url
    except (ValueError, Favicon.DoesNotExist):
        return None
class FaviconTransform(Transform):
    """Docutils transform that prefixes external links with their favicon.

    First pass collects the FQDN of every plain reference into one OR query
    so all matching Favicon rows are fetched in a single database hit; the
    second pass decorates each reference with a styled ``icon`` node.
    """

    #: run after based.app.references.rst.transforms.xref
    default_priority = 20

    def apply(self):
        q = Q()
        # first run, iterate through references, extract FQDN's, add to query
        for node in self.document.traverse(plain_references):
            q.add(Q(domain__exact=tldextract.extract(node['refuri']).fqdn), Q.OR)
        # pull all fqdn's with a favicon
        # NOTE(review): with no references, `q` is empty and filter(Q())
        # returns *all* favicons -- harmless since the second loop is then
        # empty too, but worth confirming.
        favicons = Favicon.objects.filter(q)
        for node in self.document.traverse(plain_references):
            fqdn = tldextract.extract(node['refuri']).fqdn
            try:
                favicon_url = next(  # Find favicon matching fqdn
                    (f.favicon.url for f in favicons if f.domain == fqdn), None
                )
            except ValueError:  # no favicon exists for fqdn
                favicon_url = None
            if favicon_url:
                # Rebuild the reference with the icon prepended, then swap it
                # into the tree in place of the original node.
                nodecopy = node.deepcopy()
                ico = icon(
                    '',
                    '',
                    style=f'background-image: url({favicon_url})',
                    classes=['ico'],
                )
                nodecopy.insert(0, ico)
                node.replace_self(nodecopy)
def plain_references(node):
    """Docutils traversal condition: plain external URL references only.

    Skips references that already carry classes (those were decorated by the
    xref transform) and references without a ``refuri``; accepts only
    http(s) targets.

    :returns: True if it's a URL we want to lookup favicons for
    :rtype: bool
    """
    if not isinstance(node, nodes.reference):
        return False
    if 'classes' in node and node['classes']:
        # Already styled (an xref icon class): leave it alone.
        return False
    return 'refuri' in node and node['refuri'].startswith('http')
| 2,687 | 827 |
import dynet as dy
import os
from xnmt.serializer import Serializable
class ModelContext(Serializable):
  """Bag of global model settings shared across xnmt components.

  Only the attributes listed in ``serialize_params`` are persisted; the
  DyNet parameter collection is runtime-only state.
  """
  yaml_tag = u'!ModelContext'
  def __init__(self):
    self.dynet_param_collection = None
    self.default_layer_dim = 512
    self.weight_noise = 0.0
    self.dropout = 0.0
    self.serialize_params = ["dropout", "weight_noise", "default_layer_dim"]
  def update(self, other):
    """Copy every serializable setting from *other* onto this context."""
    for name in self.serialize_params:
      setattr(self, name, getattr(other, name))
class PersistentParamCollection(object):
  """DyNet parameter collection with rotating on-disk checkpoints.

  Checkpoint files are ``<model_file>.data``, ``.data.1``, ... with
  ``.data`` always holding the most recent save.
  """
  def __init__(self, model_file, save_num_checkpoints=1):
    # model_file: base path for checkpoints (may be None only when saving
    #   is disabled via save_num_checkpoints == 0)
    # save_num_checkpoints: how many rotated checkpoint files to keep
    self.model_file = model_file
    self.param_col = dy.Model()
    self.is_saved = False
    assert save_num_checkpoints >= 1 or (model_file is None and save_num_checkpoints==0)
    # NOTE(review): with save_num_checkpoints == 0, self.data_files is never
    # created, so save()/remove_existing_history() would raise AttributeError
    # -- presumably such instances are never saved; confirm.
    if save_num_checkpoints>0: self.data_files = [self.model_file + '.data']
    for i in range(1,save_num_checkpoints):
      self.data_files.append(self.model_file + '.data.' + str(i))
  def revert_to_best_model(self):
    # Reload parameters from the primary (most recent) checkpoint file.
    self.param_col.populate(self.model_file + '.data')
  def save(self, fname=None):
    # On the first save of a session, drop stale rotated checkpoints and
    # shift the surviving ones down one slot; every save (re)writes the
    # primary file.
    # NOTE(review): the assert message appends '.data' to fname while the
    # comparison does not -- the message is misleading on failure; confirm
    # which form callers pass.
    if fname: assert fname == self.data_files[0], "%s != %s" % (fname + '.data', self.data_files[0])
    if not self.is_saved:
      self.remove_existing_history()
      self.shift_safed_checkpoints()
    self.param_col.save(self.data_files[0])
    self.is_saved = True
  def remove_existing_history(self):
    # Delete all rotated checkpoints (everything except the primary file).
    for fname in self.data_files[1:]:
      if os.path.exists(fname):
        os.remove(fname)
  def shift_safed_checkpoints(self):
    # Rename .data -> .data.1 -> .data.2 ... starting from the oldest slot
    # so nothing is overwritten.  ("safed" is a typo for "saved"; the name
    # is kept because it is part of the public interface.)
    for i in range(len(self.data_files)-1)[::-1]:
      if os.path.exists(self.data_files[i]):
        os.rename(self.data_files[i], self.data_files[i+1])
  def load_from_data_file(self, datafile):
    # Populate parameters from an arbitrary checkpoint file.
    self.param_col.populate(datafile)
| 1,733 | 628 |
import csv
from django.http import HttpResponse, HttpResponseForbidden
from django.template.defaultfilters import slugify
from django.db.models.loading import get_model
def export(qs, fields=None):
    """Serialize a queryset to a CSV file download response.

    :param qs: queryset to export
    :param fields: explicit field names to export; defaults to every field
        on the queryset's model
    :returns: HttpResponse with a text/csv attachment named after the model
    """
    model = qs.model
    response = HttpResponse(mimetype='text/csv')
    response['Content-Disposition'] = 'attachment; filename=%s.csv' % slugify(model.__name__)
    writer = csv.writer(response)
    # Write headers to CSV file
    if fields:
        headers = fields
    else:
        headers = [field.name for field in model._meta.fields]
    writer.writerow(headers)
    # Write data to CSV file.  (The original re-tested `field in headers`
    # inside this loop -- tautologically true since we iterate headers.)
    for obj in qs:
        row = []
        for field in headers:
            val = getattr(obj, field)
            if callable(val):
                # Model methods listed as "fields" are invoked for their value.
                val = val()
            row.append(val)
        writer.writerow(row)
    # Return CSV file to browser as download
    return response
def admin_list_export(request, model_name, app_label, queryset=None, fields=None, list_display=True):
    r"""Export an admin changelist as CSV, honouring the GET filters.

    Put the following line in your urls.py BEFORE your admin include
    (r'^admin/(?P<app_label>[\d\w]+)/(?P<model_name>[\d\w]+)/csv/', 'util.csv_view.admin_list_export'),

    :param queryset: optional pre-filtered queryset; when None the model is
        looked up from app_label/model_name and all rows are exported
    :param fields: explicit columns; falls back to the admin's list_display
    """
    if not request.user.is_staff:
        return HttpResponseForbidden()
    # BUG FIX: `if not queryset` also fired for an *empty* queryset passed by
    # the caller, silently re-exporting the whole table instead of nothing.
    if queryset is None:
        model = get_model(app_label, model_name)
        queryset = model.objects.all()
    # Re-apply the changelist's GET filters (ignoring the ordering params).
    filters = dict()
    for key, value in request.GET.items():
        if key not in ('ot', 'o'):
            filters[str(key)] = str(value)
    if filters:
        queryset = queryset.filter(**filters)
    # Default the columns to the registered ModelAdmin's list_display.
    if not fields and list_display:
        from django.contrib import admin
        ld = admin.site._registry[queryset.model].list_display
        if ld and len(ld) > 0:
            fields = ld
    return export(queryset, fields)
"""
Create your own change_list.html for your admin view and put something like this in it:
{% block object-tools %}
<ul class="object-tools">
<li><a href="csv/{%if request.GET%}?{{request.GET.urlencode}}{%endif%}" class="addlink">Export to CSV</a></li>
{% if has_add_permission %}
<li><a href="add/{% if is_popup %}?_popup=1{% endif %}" class="addlink">{% blocktrans with cl.opts.verbose_name|escape as name %}Add {{ name }}{% endblocktrans %}</a></li>
{% endif %}
</ul>
{% endblock %}
"""
import datetime
from django.http import HttpResponseRedirect #, HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
def mail(request, app_label, model_name):
    """Render the mail template for the given admin app/model pair."""
    template_context = {
        'app_label': app_label,
        'model_name': model_name,
    }
    return render_to_response('mail.html', template_context,
                              context_instance=RequestContext(request))
| 3,100 | 925 |
# -*- coding: utf-8 -*-
# Author: Massimo Menichinelli
# Homepage: http://www.openp2pdesign.org
# License: MIT
#
import wx
import serial
# A new custom class that extends the wx.Frame
# A new custom class that extends the wx.Frame
class MyFrame(wx.Frame):
    """Frame that polls the serial port and draws the latest reading."""

    def __init__(self, parent, title):
        super(MyFrame, self).__init__(parent, title=title,
                                      size=(250, 150))
        # Attach the paint event to the frame
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        # Create a timer for redrawing the frame every 100 milliseconds
        self.Timer = wx.Timer(self)
        self.Timer.Start(100)
        self.Bind(wx.EVT_TIMER, self.OnPaint)
        # Show the frame
        self.Centre()
        self.Show()

    def OnPaint(self, event=None):
        # Create the paint surface
        dc = wx.PaintDC(self)
        # Refresh the display
        self.Refresh()
        # Get data from serial port.
        # BUG FIX: readline() returns a text line such as "42\n"; the
        # original used it directly in `50 + value`, which raises
        # TypeError (str + int).  Parse it, falling back to 0 on a
        # malformed or empty line.
        try:
            value = int(arduino.readline().strip())
        except (TypeError, ValueError):
            value = 0
        # Draw the serial data
        # Set up colors:
        thickness = 4
        border_color = "#990000"
        fill_color = "#FF944D"
        dc.SetPen(wx.Pen(border_color, thickness))
        dc.SetBrush(wx.Brush(fill_color))
        # Draw a line
        dc.DrawLine(50, 40, 50+value, 40)
        # Draw a rectangle
        dc.DrawRectangle(50,50,value,50)
# Main program
if __name__ == '__main__':
    # Connect to serial port first.  Catch only the connection-related
    # errors (the original bare `except:` swallowed everything, including
    # KeyboardInterrupt) and report why the connection failed.
    try:
        arduino = serial.Serial('/dev/tty.usbmodem1421', 9600)
    except (serial.SerialException, OSError) as exc:
        print("Failed to connect: %s" % exc)
        raise SystemExit(1)
    # Create and launch the wx interface
    app = wx.App()
    MyFrame(None, 'Serial data test')
    app.MainLoop()
    # Close the serial connection once the GUI loop exits
    arduino.close()
| 1,668 | 580 |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
from typing import Any, ContextManager
import pytest
from pants.backend.docker.subsystems.dockerfile_parser import rules as parser_rules
from pants.backend.docker.target_types import DockerImageTarget
from pants.backend.docker.util_rules.docker_build_args import docker_build_args
from pants.backend.docker.util_rules.docker_build_context import (
DockerBuildContext,
DockerBuildContextRequest,
DockerVersionContext,
)
from pants.backend.docker.util_rules.docker_build_context import rules as context_rules
from pants.backend.docker.util_rules.docker_build_env import docker_build_environment_vars
from pants.backend.docker.util_rules.dockerfile import rules as dockerfile_rules
from pants.backend.python import target_types_rules
from pants.backend.python.goals import package_pex_binary
from pants.backend.python.goals.package_pex_binary import PexBinaryFieldSet
from pants.backend.python.target_types import PexBinary
from pants.backend.python.util_rules import pex_from_targets
from pants.backend.shell.target_types import ShellSourcesGeneratorTarget, ShellSourceTarget
from pants.backend.shell.target_types import rules as shell_target_types_rules
from pants.core.goals.package import BuiltPackage
from pants.core.target_types import FilesGeneratorTarget
from pants.core.target_types import rules as core_target_types_rules
from pants.engine.addresses import Address
from pants.engine.fs import Snapshot
from pants.engine.internals.scheduler import ExecutionError
from pants.testutil.pytest_util import no_exception
from pants.testutil.rule_runner import QueryRule, RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
    """RuleRunner wired with every rule and target type these tests need."""
    rule_runner = RuleRunner(
        rules=[
            *context_rules(),
            *core_target_types_rules(),
            *dockerfile_rules(),
            *package_pex_binary.rules(),
            *parser_rules(),
            *pex_from_targets.rules(),
            *shell_target_types_rules(),
            *target_types_rules.rules(),
            docker_build_args,
            docker_build_environment_vars,
            QueryRule(BuiltPackage, [PexBinaryFieldSet]),
            QueryRule(DockerBuildContext, (DockerBuildContextRequest,)),
        ],
        target_types=[
            DockerImageTarget,
            FilesGeneratorTarget,
            PexBinary,
            ShellSourcesGeneratorTarget,
            ShellSourceTarget,
        ],
    )
    return rule_runner
def assert_build_context(
    rule_runner: RuleRunner,
    address: Address,
    *,
    expected_files: list[str],
    expected_version_context: dict[str, dict[str, str]] | None = None,
    pants_args: list[str] | None = None,
    runner_options: dict[str, Any] | None = None,
) -> None:
    """Request the docker build context for *address* and verify its contents."""
    if runner_options is None:
        runner_options = {}
    # The hermetic test env must inherit these vars so pex building works.
    runner_options.setdefault("env_inherit", set()).update({"PATH", "PYENV_ROOT", "HOME"})
    rule_runner.set_options(pants_args or [], **runner_options)
    request = DockerBuildContextRequest(
        address=address,
        build_upstream_images=False,
    )
    build_context = rule_runner.request(DockerBuildContext, [request])
    files_snapshot = rule_runner.request(Snapshot, [build_context.digest])
    assert sorted(expected_files) == sorted(files_snapshot.files)
    if expected_version_context is not None:
        expected = DockerVersionContext.from_dict(expected_version_context)
        assert build_context.version_context == expected
def test_file_dependencies(rule_runner: RuleRunner) -> None:
    """Only transitively-depended files end up in an image's build context."""
    rule_runner.write_files(
        {
            # img_A -> files_A
            # img_A -> img_B
            "src/a/BUILD": dedent(
                """\
                docker_image(name="img_A", dependencies=[":files_A", "src/b:img_B"])
                files(name="files_A", sources=["files/**"])
                """
            ),
            "src/a/Dockerfile": "FROM base",
            "src/a/files/a01": "",
            "src/a/files/a02": "",
            # img_B -> files_B
            "src/b/BUILD": dedent(
                """\
                docker_image(name="img_B", dependencies=[":files_B"])
                files(name="files_B", sources=["files/**"])
                """
            ),
            "src/b/Dockerfile": "FROM base",
            "src/b/files/b01": "",
            "src/b/files/b02": "",
            # Mixed
            "src/c/BUILD": dedent(
                """\
                docker_image(name="img_C", dependencies=["src/a:files_A", "src/b:files_B"])
                """
            ),
            "src/c/Dockerfile": "FROM base",
        }
    )
    # We want files_B in build context for img_B
    assert_build_context(
        rule_runner,
        Address("src/b", target_name="img_B"),
        expected_files=["src/b/Dockerfile", "src/b/files/b01", "src/b/files/b02"],
    )
    # We want files_A in build context for img_A, but not files_B
    # (the dependency on img_B does not pull in img_B's own files).
    assert_build_context(
        rule_runner,
        Address("src/a", target_name="img_A"),
        expected_files=["src/a/Dockerfile", "src/a/files/a01", "src/a/files/a02"],
    )
    # Mixed.
    assert_build_context(
        rule_runner,
        Address("src/c", target_name="img_C"),
        expected_files=[
            "src/c/Dockerfile",
            "src/a/files/a01",
            "src/a/files/a02",
            "src/b/files/b01",
            "src/b/files/b02",
        ],
    )
def test_files_out_of_tree(rule_runner: RuleRunner) -> None:
    """File dependencies outside the image's own directory are included."""
    # src/a:img_A -> res/static:files
    rule_runner.write_files(
        {
            "src/a/BUILD": dedent(
                """\
                docker_image(name="img_A", dependencies=["res/static:files"])
                """
            ),
            "res/static/BUILD": dedent(
                """\
                files(name="files", sources=["!BUILD", "**/*"])
                """
            ),
            "src/a/Dockerfile": "FROM base",
            "res/static/s01": "",
            "res/static/s02": "",
            "res/static/sub/s03": "",
        }
    )
    assert_build_context(
        rule_runner,
        Address("src/a", target_name="img_A"),
        expected_files=[
            "src/a/Dockerfile",
            "res/static/s01",
            "res/static/s02",
            "res/static/sub/s03",
        ],
    )
def test_packaged_pex_path(rule_runner: RuleRunner) -> None:
    # This test is here to ensure that we catch if there is any change in the generated path where
    # built pex binaries go, as we rely on that for dependency inference in the Dockerfile.
    rule_runner.write_files(
        {
            "src/docker/BUILD": """docker_image(dependencies=["src/python/proj/cli:bin"])""",
            "src/docker/Dockerfile": """FROM python""",
            "src/python/proj/cli/BUILD": """pex_binary(name="bin", entry_point="main.py")""",
            "src/python/proj/cli/main.py": """print("cli main")""",
        }
    )
    # The pex lands at <spec_path with dots>/<target_name>.pex.
    assert_build_context(
        rule_runner,
        Address("src/docker", target_name="docker"),
        expected_files=["src/docker/Dockerfile", "src.python.proj.cli/bin.pex"],
    )
def test_version_context_from_dockerfile(rule_runner: RuleRunner) -> None:
    """Each FROM line yields stageN/name entries with the base image tag."""
    rule_runner.write_files(
        {
            "src/docker/BUILD": "docker_image()",
            "src/docker/Dockerfile": dedent(
                """\
                FROM python:3.8
                FROM alpine as interim
                FROM interim
                FROM scratch:1-1 as output
                """
            ),
        }
    )
    assert_build_context(
        rule_runner,
        Address("src/docker"),
        expected_files=["src/docker/Dockerfile"],
        expected_version_context={
            # "baseimage" aliases the first stage; untagged bases get "latest".
            "baseimage": {"tag": "3.8"},
            "stage0": {"tag": "3.8"},
            "interim": {"tag": "latest"},
            "stage2": {"tag": "latest"},
            "output": {"tag": "1-1"},
        },
    )
def test_synthetic_dockerfile(rule_runner: RuleRunner) -> None:
    """`instructions=` generates a Dockerfile.docker with the same semantics."""
    rule_runner.write_files(
        {
            "src/docker/BUILD": dedent(
                """\
                docker_image(
                  instructions=[
                    "FROM python:3.8",
                    "FROM alpine as interim",
                    "FROM interim",
                    "FROM scratch:1-1 as output",
                  ]
                )
                """
            ),
        }
    )
    assert_build_context(
        rule_runner,
        Address("src/docker"),
        expected_files=["src/docker/Dockerfile.docker"],
        expected_version_context={
            "baseimage": {"tag": "3.8"},
            "stage0": {"tag": "3.8"},
            "interim": {"tag": "latest"},
            "stage2": {"tag": "latest"},
            "output": {"tag": "1-1"},
        },
    )
def test_shell_source_dependencies(rule_runner: RuleRunner) -> None:
    """Shell sources are included; non-matching files (random.file) are not."""
    rule_runner.write_files(
        {
            "src/docker/BUILD": dedent(
                """\
                docker_image(dependencies=[":entrypoint", ":shell"])
                shell_source(name="entrypoint", source="entrypoint.sh")
                shell_sources(name="shell", sources=["scripts/**/*.sh"])
                """
            ),
            "src/docker/Dockerfile": "FROM base",
            "src/docker/entrypoint.sh": "",
            "src/docker/scripts/s01.sh": "",
            "src/docker/scripts/s02.sh": "",
            "src/docker/scripts/random.file": "",
        }
    )
    assert_build_context(
        rule_runner,
        Address("src/docker"),
        expected_files=[
            "src/docker/Dockerfile",
            "src/docker/entrypoint.sh",
            "src/docker/scripts/s01.sh",
            "src/docker/scripts/s02.sh",
        ],
    )
def test_build_arg_defaults_from_dockerfile(rule_runner: RuleRunner) -> None:
    # Test that only explicitly defined build args in the BUILD file or pants configuration use the
    # environment for its values.
    rule_runner.write_files(
        {
            "src/docker/BUILD": dedent(
                """\
                docker_image(
                  extra_build_args=[
                    "base_version",
                  ]
                )
                """
            ),
            "src/docker/Dockerfile": dedent(
                """\
                ARG base_name=python
                ARG base_version=3.8
                FROM ${base_name}:${base_version}
                ARG NO_DEF
                ENV opt=${NO_DEF}
                """
            ),
        }
    )
    assert_build_context(
        rule_runner,
        Address("src/docker"),
        runner_options={
            "env": {
                # Only `base_version` is declared in extra_build_args, so only
                # it may pick up an environment value.
                "base_name": "no-effect",
                "base_version": "3.9",
            },
        },
        expected_files=["src/docker/Dockerfile"],
        expected_version_context={
            "baseimage": {"tag": "${base_version}"},
            "stage0": {"tag": "${base_version}"},
            "build_args": {
                # `base_name` is not listed here, as it was not an explicitly defined build arg.
                "base_version": "3.9",
            },
        },
    )
@pytest.mark.parametrize(
    "dockerfile_arg_value, extra_build_arg_value, expect",
    [
        # None => the ARG / extra_build_args entry is absent entirely;
        # ""   => present but without a default value.
        pytest.param(None, None, no_exception(), id="No args defined"),
        pytest.param(
            None,
            "",
            pytest.raises(ExecutionError, match=r"variable 'MY_ARG' is undefined"),
            id="No default value for build arg",
        ),
        pytest.param(None, "some default value", no_exception(), id="Default value for build arg"),
        pytest.param("", None, no_exception(), id="No build arg defined, and ARG without default"),
        pytest.param(
            "",
            "",
            pytest.raises(ExecutionError, match=r"variable 'MY_ARG' is undefined"),
            id="No default value from ARG",
        ),
        pytest.param(
            "", "some default value", no_exception(), id="Default value for build arg, ARG present"
        ),
        pytest.param(
            "some default value", None, no_exception(), id="No build arg defined, only ARG"
        ),
        pytest.param("some default value", "", no_exception(), id="Default value from ARG"),
        pytest.param(
            "some default value",
            "some other default",
            no_exception(),
            id="Default value for build arg, ARG default",
        ),
    ],
)
def test_undefined_env_var_behavior(
    rule_runner: RuleRunner,
    dockerfile_arg_value: str | None,
    extra_build_arg_value: str | None,
    expect: ContextManager,
) -> None:
    """A build arg without a default anywhere must fail with a clear error."""
    # Assemble the optional `ARG MY_ARG[=value]` Dockerfile line.
    dockerfile_arg = ""
    if dockerfile_arg_value is not None:
        dockerfile_arg = "ARG MY_ARG"
        if dockerfile_arg_value:
            dockerfile_arg += f"={dockerfile_arg_value}"
    # Assemble the optional `extra_build_args=["MY_ARG[=value]"],` BUILD field.
    extra_build_args = ""
    if extra_build_arg_value is not None:
        extra_build_args = 'extra_build_args=["MY_ARG'
        if extra_build_arg_value:
            extra_build_args += f"={extra_build_arg_value}"
        extra_build_args += '"],'
    rule_runner.write_files(
        {
            "src/docker/BUILD": dedent(
                f"""\
                docker_image(
                  {extra_build_args}
                )
                """
            ),
            "src/docker/Dockerfile": dedent(
                f"""\
                FROM python:3.8
                {dockerfile_arg}
                """
            ),
        }
    )
    with expect:
        assert_build_context(
            rule_runner,
            Address("src/docker"),
            expected_files=["src/docker/Dockerfile"],
        )
| 13,807 | 4,053 |
from .test_ai_model import TestAiModelExtension
| 48 | 15 |
#!/usr/bin/env python3
import logging
from pathlib import Path
import json
from datetime import datetime, timedelta
from typing import Set, Sequence, Any, Iterator
from dataclasses import dataclass
from .exporthelpers.dal_helper import PathIsh, Json, Res, datetime_naive
from .exporthelpers.logging_helper import LazyLogger
logger = LazyLogger(__package__)
# Type alias: durations in the export are plain integer seconds.
seconds = int
# Timestamp format used by the RescueTime export rows.
_DT_FMT = '%Y-%m-%dT%H:%M:%S'
@dataclass
class Entry:
    """One activity record from a RescueTime export."""
    dt: datetime_naive
    '''
    Ok, it definitely seems local, by the looks of the data.
    https://www.rescuetime.com/apidoc#analytic-api-reference
    "defined by the user’s selected time zone" -- not sure what it means, but another clue I suppose
    Note that the manual export has something like -08:00, but it's the time is same as local -- doesn't make any sense...
    '''
    duration_s: seconds
    activity: str

    @classmethod
    def from_row(cls, row: Json) -> 'Entry':
        """Build an Entry from one exported row (a header -> value mapping)."""
        # Column names are fixed by the export's "row_headers".
        return cls(
            dt=datetime.strptime(row['Date'], _DT_FMT),
            duration_s=row['Time Spent (seconds)'],
            activity=row['Activity'],
        )
class DAL:
    """Data access layer over a set of RescueTime JSON export files.

    Later files typically re-export earlier rows; ``raw_entries`` dedups
    across files while preserving chronological order.
    """
    def __init__(self, sources: Sequence[PathIsh]) -> None:
        # todo not sure if should sort -- probably best to rely on get_files?
        self.sources = [p if isinstance(p, Path) else Path(p) for p in sources]

    def raw_entries(self) -> Iterator[Res[Json]]:
        """Yield each unique row as a header->value dict (or an Exception).

        Rows already seen in earlier files are skipped; rows that would go
        backwards in time relative to the last emitted row are reported as
        errors instead of being emitted.
        """
        # todo rely on more_itertools for it?
        emitted: Set[Any] = set()   # frozen row tuples seen so far
        last = None                 # most recently emitted row (for ordering check)
        for src in self.sources:
            # todo parse in multiple processes??
            try:
                j = json.loads(src.read_text())
            except Exception as e:
                # Surface the failing file, but keep processing the rest.
                ex = RuntimeError(f'While processing {src}')
                ex.__cause__ = e
                yield ex
                continue
            headers = j['row_headers']
            rows = j['rows']
            total = len(rows)
            unique = 0
            for row in rows:
                frow = tuple(row) # freeze for hashing
                if frow in emitted:
                    continue
                drow = dict(zip(headers, row))
                if last is not None and drow['Date'] < last['Date']: # pylint: disable=unsubscriptable-object
                    yield RuntimeError(f'Expected\n{drow}\nto be later than\n{last}')
                    # TODO ugh, for couple of days it was pretty bad, lots of duplicated entries..
                    # for now, just ignore it
                else:
                    yield drow
                    emitted.add(frow)
                    unique += 1
                    last = drow
            logger.debug(f"{src}: filtered out {total - unique:<6} of {total:<6}. Grand total: {len(emitted)}")

    def entries(self) -> Iterator[Res[Entry]]:
        """Yield parsed Entry objects, passing row-level errors through."""
        for row in self.raw_entries():
            if isinstance(row, Exception):
                yield row
                continue
            cur = Entry.from_row(row)
            yield cur
from typing import Iterable
# todo quick test (dal helper aided: check that DAL can handle fake data)
def fake_data_generator(rows=100, seed=123) -> Json:
    """Generate a deterministic fake export payload with *rows* data rows.

    Mirrors the real export shape: ``row_headers`` plus a list of rows.
    Useful for checking that the DAL can handle synthetic data.
    """
    # todo ok, use faker/mimesis here??
    from random import Random
    r = Random(seed)
    def row_gen():
        base = datetime(year=2000, month=1, day=1)
        cur = base
        emitted = 0
        i = 0
        while emitted < rows:
            i += 1
            # pretend the user is asleep between 01:00 and 08:59
            sleeping = 1 <= cur.hour <= 8
            if sleeping:
                cur = cur + timedelta(hours=2)
                continue
            # do something during that period
            duration = r.randint(10, 500)
            if r.choice([True, False]):
                emitted += 1
                yield [
                    cur.strftime(_DT_FMT),
                    duration,
                    1,
                    f'Activity {i % 10}',
                    # BUG FIX: this string lacked the f-prefix, so every row
                    # literally contained "Category {i % 3}".
                    f'Category {i % 3}',
                    i % 2,
                ]
            cur += timedelta(seconds=duration)
    return {
        "notes": "data is an array of arrays (rows), column names for rows in row_headers",
        "row_headers": ["Date", "Time Spent (seconds)", "Number of People", "Activity", "Category", "Productivity"],
        "rows": list(row_gen())
    }
def main() -> None:
    """CLI: parse every *.json export under a directory and count entries."""
    # todo adapt for dal_helper?
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('path', type=Path)
    args = parser.parse_args()
    export_files = sorted(args.path.glob('*.json'))
    dal = DAL(export_files)
    processed = 0
    for entry in dal.entries():
        if isinstance(entry, Exception):
            logger.error(entry)
            continue
        processed += 1
        if processed % 10000 == 0:
            logger.info('Processed %d entries', processed)
if __name__ == '__main__':
    main()
| 5,100 | 1,523 |
import FWCore.ParameterSet.Config as cms
from MuonAnalysis.MuonAssociators.muonL1Match_cfi import *
# EDProducer that matches reconstructed muons to L1 trigger objects taken
# from the PAT trigger collection.
muonHLTL1Match = cms.EDProducer("HLTL1MuonMatcher",
    muonL1MatcherParameters,
    # Reconstructed muons
    src = cms.InputTag("muons"),
    # L1 Muon collection, and preselection on that collection
    matched = cms.InputTag("patTrigger"),
    # Requests to select the object
    matchedCuts = cms.string('coll("hltL1extraParticles")'),
    # 90% compatible with documentation at SWGuidePATTrigger#Module_Configuration_AN1
    # andOr = cms.bool( False ), # if False, do the 'AND' of the conditions below; otherwise, do the OR
    # filterIdsEnum = cms.vstring( '*' ),
    # filterIds     = cms.vint32( 0 ),
    # filterLabels  = cms.vstring( '*' ),
    # pathNames     = cms.vstring( '*' ),
    # collectionTags = cms.vstring( 'hltL1extraParticles' ),
    resolveAmbiguities    = cms.bool( True ),  # if True, no more than one reco object can be matched to the same L1 object; precedence is given to the reco ones coming first in the list
    # Fake filter labels for the object propagated to the second muon station
    setPropLabel = cms.string("propagatedToM2"),
    # Write extra ValueMaps
    writeExtraInfo = cms.bool(True),
)
| 1,287 | 453 |
# Read integers from the user until they answer 'N', then report the even
# and odd numbers in sorted order.
lista = list()
while True:
    lista.append(int(input("Digite um número inteiro:\t")))
    while True:
        # NOTE(review): [0] on the stripped answer raises IndexError when the
        # user just presses Enter -- confirm whether that is acceptable.
        p = str(input("Digitar mais números?\t").strip())[0].upper()
        if p in 'SN':
            break
        else:
            print("\033[31mDigite uma opção válida!\033[m")
    if p == 'N':
        break
# Partition into evens and odds.
par = list()
impar = list()
for n in lista:
    if n % 2 == 0:
        par.append(n)
    else:
        impar.append(n)
par.sort()
impar.sort()
# NOTE(review): removes only the *first* zero from the evens list; repeated
# zeros remain -- confirm this is the intended behaviour.
if 0 in par:
    par.remove(0)
print(f"Entre os números \033[32m{lista}\033[m, os números pares são: \033[33m{par}\033[m e os números ímpares são: \033[34m{impar}\033[m!")
| 632 | 264 |
#! /usr/bin/env python
# -*- coding: utf-8 -*
import collections
from census_data_downloader.core.tables import BaseTableConfig
from census_data_downloader.core.decorators import register
@register
class PerCapitaIncomeDownloader(BaseTableConfig):
    """Census table B19301: per-capita income (inflation adjusted)."""
    # inflation adjusted
    PROCESSED_TABLE_NAME = "percapitaincome"
    UNIVERSE = "total population"
    RAW_TABLE_NAME = 'B19301'
    # Raw column suffix -> processed column name.
    RAW_FIELD_CROSSWALK = collections.OrderedDict({
        '001': "per_capita_income"
    })
@register
class PerCapitaIncomeLatinoDownloader(PerCapitaIncomeDownloader):
    """Race/ethnicity iteration of B19301 (suffix I: Hispanic or Latino)."""
    # inflation adjusted
    PROCESSED_TABLE_NAME = "percapitaincomelatino"
    RAW_TABLE_NAME = 'B19301I'
@register
class PerCapitaIncomeWhiteDownloader(PerCapitaIncomeDownloader):
    """Race iteration of B19301 (suffix H: white alone, not Hispanic)."""
    # inflation adjusted
    PROCESSED_TABLE_NAME = "percapitaincomewhite"
    RAW_TABLE_NAME = 'B19301H'
@register
class PerCapitaIncomeBlackDownloader(PerCapitaIncomeDownloader):
    """Race iteration of B19301 (suffix B: Black or African American alone)."""
    # inflation adjusted
    PROCESSED_TABLE_NAME = "percapitaincomeblack"
    RAW_TABLE_NAME = 'B19301B'
@register
class PerCapitaIncomeAsianDownloader(PerCapitaIncomeDownloader):
    """Per-capita income for the Asian universe (table B19301D)."""
    # inflation adjusted
    PROCESSED_TABLE_NAME = "percapitaincomeasian"
    RAW_TABLE_NAME = 'B19301D'
| 1,212 | 449 |
# Copyright 2018 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Skeleton of a CLI
import click
import pyskel_bc
@click.command('pyskel_bc')
@click.argument('count', type=int, metavar='N')
def cli(count):
    """Echo a value `N` number of times"""
    # Emit the package's demo value once per requested repetition.
    for _ in range(count):
        click.echo(pyskel_bc.has_legs)
| 844 | 268 |
class Solution:
    def intersect(self, q1, q2):
        """Return the logical OR ("intersect" per the problem statement)
        of two quad-trees of equal extent.

        A leaf dominates its subtree: a True leaf forces the result to
        True, a False leaf defers to the other tree.  Otherwise the four
        quadrants are combined recursively, collapsing four identical
        leaf children into a single leaf.

        Fix: replaced the fragile ``cond and a or b`` idiom with explicit
        conditional expressions.
        """
        if q1.isLeaf:
            # A True leaf wins outright; a False leaf contributes nothing.
            return q1 if q1.val else q2
        if q2.isLeaf:
            return q2 if q2.val else q1
        tLeft = self.intersect(q1.topLeft, q2.topLeft)
        tRight = self.intersect(q1.topRight, q2.topRight)
        bLeft = self.intersect(q1.bottomLeft, q2.bottomLeft)
        bRight = self.intersect(q1.bottomRight, q2.bottomRight)
        children = (tLeft, tRight, bLeft, bRight)
        # Collapse four identical leaf children into one leaf node.
        if all(c.isLeaf for c in children) and \
                tLeft.val == tRight.val == bLeft.val == bRight.val:
            return Node(tLeft.val, True, None, None, None, None)
        return Node(False, False, tLeft, tRight, bLeft, bRight)
import Crypto.Random
from Crypto.Cipher import AES
import hashlib
import base64
SALT_SIZE = 16
AES_MULTIPLE = 16
NUMBER_OF_ITERATIONS = 20
def generate_key(password, salt):
    """Derive an AES key by repeatedly hashing password + salt.

    Note: each round hashes the *repr* of the previous digest (legacy
    behaviour, preserved for compatibility with existing ciphertexts).
    """
    digest = password.encode('utf-8') + salt
    for _ in range(NUMBER_OF_ITERATIONS):
        digest = hashlib.sha256(str(digest).encode('utf-8')).digest()
    return digest
def pad_text(text, multiple):
    """Right-pad *text* to a multiple of *multiple* characters.

    PKCS#7 style: the pad character encodes the pad length, and a full
    extra block is appended when the length is already aligned.
    """
    pad_len = multiple - len(text) % multiple
    return text + chr(pad_len) * pad_len
def unpad_text(padded_text):
    """Strip PKCS#7-style padding from *padded_text* (bytes).

    The last byte gives the pad length; that many bytes are removed.
    """
    pad_len = padded_text[-1]
    return padded_text[:-pad_len]
def encrypt(plaintext, password):
    """Encrypt *plaintext* (str) with a key derived from *password*.

    Returns base64(salt + ciphertext) as a str.

    Fixes: the plaintext is encoded to UTF-8 *before* padding, so
    multi-byte characters cannot break the block alignment, and bytes
    (not str) are handed to the cipher as the AES API requires.

    NOTE(review): AES.MODE_ECB leaks plaintext structure; consider an
    authenticated mode (e.g. GCM) with a random nonce.
    """
    salt = Crypto.Random.get_random_bytes(SALT_SIZE)
    key = generate_key(password, salt)
    cipher = AES.new(key, AES.MODE_ECB)
    data = plaintext.encode('utf-8')
    # PKCS#7-style padding on the encoded bytes (inverse of unpad_text).
    pad_len = AES_MULTIPLE - len(data) % AES_MULTIPLE
    padded_plaintext = data + bytes([pad_len]) * pad_len
    ciphertext = cipher.encrypt(padded_plaintext)
    ciphertext_with_salt = salt + ciphertext
    return str(base64.b64encode(ciphertext_with_salt), 'utf-8')
def decrypt(ciphertext, password):
    """Reverse encrypt(): base64-decode, split off the salt, decrypt,
    unpad, and decode the plaintext as UTF-8."""
    raw = base64.b64decode(ciphertext)
    salt = raw[0:SALT_SIZE]
    body = raw[SALT_SIZE:]
    cipher = AES.new(generate_key(password, salt), AES.MODE_ECB)
    padded = cipher.decrypt(body)
    return str(unpad_text(padded), 'utf-8')
| 1,442 | 545 |
from xml.etree import ElementTree
from xml.dom import minidom
def get_pretty_xml(tree):
    """Render an XML tree as an indented, human-readable string.

    :param tree: an ``ElementTree`` whose root element is serialized
    :type tree: object
    :returns: pretty-printed XML (one-space indent, with declaration)
    :rtype: str
    """
    raw = ElementTree.tostring(tree.getroot(), 'utf-8')
    return minidom.parseString(raw).toprettyxml(indent=" ")
| 462 | 152 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import time
import GetOldTweets3 as Got3
# Fail fast on Python 2: the tests below rely on Python-3-only behaviour.
if sys.version_info[0] < 3:
    raise Exception("Python 2.x is not supported. Please upgrade to 3.x")
# Make the package under test importable when running from the tests dir.
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
def test_username():
    """Fetching one tweet by handle should resolve the canonical casing."""
    criteria = Got3.manager.TweetCriteria()
    criteria = criteria.set_username('barackobama')
    criteria = criteria.set_max_tweets(1)
    first = Got3.manager.TweetManager.get_tweets(criteria)[0]
    assert first.username == 'BarackObama'
def test_query_search():
    """A date-bounded hashtag query should return a matching tweet."""
    criteria = Got3.manager.TweetCriteria()
    criteria = criteria.set_query_search('#europe #refugees')
    criteria = criteria.set_since("2015-05-01")
    criteria = criteria.set_until("2015-09-30")
    criteria = criteria.set_max_tweets(1)
    first = Got3.manager.TweetManager.get_tweets(criteria)[0]
    assert first.hashtags.lower() == '#europe #refugees'
def test_mass_fetch_concurrent():
    """Time a 500-tweet fetch using the concurrent manager (25 workers)."""
    started = time.time()
    criteria = Got3.manager.TweetCriteria()
    criteria = criteria.set_query_search('#europe #refugees')
    criteria = criteria.set_since("2015-05-01")
    criteria = criteria.set_until("2015-09-30")
    criteria = criteria.set_max_tweets(500)
    tweets = Got3.manager.ConcurrentTweetManager.get_tweets(criteria, worker_count=25)
    print("Time Needed Concurrent: {} Secs".format((time.time() - started)))
    # NOTE(review): 500 tweets are requested but the bound asserted is
    # 1000 — presumably a deliberately loose sanity check; confirm.
    assert len(tweets) <= 1000
def test_mass_fetch_non_concurrent():
    """Time the same 500-tweet fetch using the sequential manager."""
    started = time.time()
    criteria = Got3.manager.TweetCriteria()
    criteria = criteria.set_query_search('#europe #refugees')
    criteria = criteria.set_since("2015-05-01")
    criteria = criteria.set_until("2015-09-30")
    criteria = criteria.set_max_tweets(500)
    tweets = Got3.manager.TweetManager.get_tweets(criteria)
    print("Time Needed Non Concurrent: {} Secs".format((time.time() - started)))
    # NOTE(review): same loose 1000 bound as the concurrent variant.
    assert len(tweets) <= 1000
| 1,745 | 698 |
import extsum as ext
URL = "https://i.picsum.photos/id/42/1/1.jpg"
# Uncomment for random Picsum photo
# URL = "https://picsum.photos/1/1"
if __name__ == '__main__':
    # Init
    # Download the 1x1 Picsum photo and run it through the extsum parser.
    photo = ext.Load(URL)
    photo_parsed = ext.Parse(photo)
    # Print found ID (if any)
    id_found = photo_parsed.find_id()
    if id_found is None:
        print("Couldn't find any ID")
    else:
        print(f"Found ID {id_found}")
| 421 | 165 |
import subprocess
from .base import Base
class Kind(Base):
    """Denite kind that opens targets in the browser via the ``lab`` CLI."""

    def __init__(self, vim):
        super().__init__(vim)
        self.name = 'lab_browse'
        self.default_action = 'lab_browse'

    def action_lab_browse(self, context):
        """Run ``lab browse <id>`` for every selected target."""
        for target in context['targets']:
            cmd = ['lab', 'browse', target['word']]
            proc = subprocess.Popen(cmd,
                                    cwd=context['path'],
                                    universal_newlines=True,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            try:
                output, err_output = proc.communicate(timeout=15)
            except subprocess.TimeoutExpired:
                # Timed out: kill the process and drain its pipes.
                proc.kill()
                output, err_output = proc.communicate()
            if proc.returncode != 0:
                print('error')
from Module import AbstractModule
class Module(AbstractModule):
    """BETSY module that renders a PCA plot for an expression dataset
    by shelling out to the configured ``pcaplot`` executable."""

    def __init__(self):
        AbstractModule.__init__(self)

    def run(
        self, network, antecedents, out_attributes, user_options, num_cores,
        outfile):
        """Run ``pcaplot`` on the antecedent file and write *outfile*.

        Returns a metadata dict recording the shell command executed.
        """
        from genomicode import filelib
        from genomicode import parallel
        from Betsy import module_utils as mlib
        in_data = antecedents
        metadata = {}
        # (Earlier revisions colored points per class label and capped the
        # number of genes; both are currently disabled.)
        # Locate the configured pcaplot executable; must exist on disk.
        pcaplot = mlib.get_config("pcaplot", which_assert_file=True)
        prism_file = "prism.txt"
        row_pc_file = "row_components.txt"
        col_pc_file = "col_components.txt"
        quote = parallel.quote
        cmd = " ".join(map(str, [
            quote(pcaplot),
            "--label",
            "--prism_file", prism_file,
            "--row_pc_file", row_pc_file,
            "--col_pc_file", col_pc_file,
            quote(in_data.identifier),
            quote(outfile),
        ]))
        parallel.sshell(cmd)
        metadata["commands"] = [cmd]
        # The plot must exist and be non-empty before we report success.
        filelib.assert_exists_nz(outfile)
        return metadata

    def name_outfile(self, antecedents, user_options):
        return "pca.png"
## def plot_pca(filename, result_fig, opts='b', legend=None):
## import arrayio
## from genomicode import jmath, mplgraph
## from genomicode import filelib
## R = jmath.start_R()
## jmath.R_equals(filename, 'filename')
## M = arrayio.read(filename)
## labels = M._col_names['_SAMPLE_NAME']
## data = M.slice()
## jmath.R_equals(data, 'X')
## R('NUM.COMPONENTS <- 2')
## R('S <- svd(X)')
## R('U <- S$u[,1:NUM.COMPONENTS]')
## R('D <- S$d[1:NUM.COMPONENTS]')
## # Project the data onto the first 2 components.
## R('x <- t(X) %*% U %*% diag(D)')
## x1 = R['x'][0:M.ncol()]
## x2 = R['x'][M.ncol():]
## xlabel = 'Principal Component 1'
## ylabel = 'Principal Component 2'
## if len(opts) > 1:
## fig = mplgraph.scatter(
## x1, x2, xlabel=xlabel, ylabel=ylabel, color=opts,
## legend=legend)
## else:
## fig = mplgraph.scatter(
## x1, x2, xlabel=xlabel, ylabel=ylabel, color=opts,
## label=labels)
## fig.savefig(result_fig)
## assert filelib.exists_nz(result_fig), 'the plot_pca.py fails'
| 3,358 | 1,183 |
from plots import plot_experiment_APs, plot_experiment_no_diff, experiment_VI_plots
import os
# paths, names, title, out_dir, out_name
# Compare deep-learning segmentation results against the DoG baseline.
# Only the VI-subscore plot is currently generated; the AP and
# number-difference plots are kept commented out below.
data_dir = '/Users/amcg0011/Data/pia-tracking/dl-results/210512_150843_seed_z-1_y-1_x-1_m_centg'
suffix = 'seed_z-1_y-1_x-1_m_centg'
out_dir = os.path.join(data_dir, 'DL-vs-Dog')
# Average-precision CSVs: DL first, DoG second.
ap_paths = [os.path.join(data_dir, suffix + '_validation_AP.csv'),
            os.path.join(data_dir, 'DoG-segmentation_average_precision.csv')]
nd_paths = [os.path.join(data_dir, 'seed_z-1_y-1_x-1_m_centg_validation_metrics.csv'),
            os.path.join(data_dir, 'DoG-segmentation_metrics.csv')]
vi_paths = [
    os.path.join(data_dir, 'seed_z-1_y-1_x-1_m_centgvalidation_VI.csv'),
    os.path.join(data_dir, 'seed_z-1_y-1_x-1_m_centgvalidation_VI_DOG-seg.csv')
]
#plot_experiment_APs(ap_paths, ['DL', 'DoG'], 'Average precision: DL vs Dog', out_dir, 'AP_DL-vs-Dog')
#plot_experiment_no_diff(nd_paths, ['DL', 'DoG'], 'Number difference: DL vs Dog', out_dir, 'ND_DL-vs-Dog')
experiment_VI_plots(vi_paths, ['DL', 'DoG'], 'VI Subscores: DL vs DoG', 'VI_DL-vs-DoG', out_dir)
"""
Python Flight Mechanics Engine (PyFME).
Copyright (c) AeroPython Development Team.
Distributed under the terms of the MIT License.
Velocity
--------
The aircraft state has
"""
from abc import abstractmethod
import numpy as np
# TODO: think about generic changes from body to horizon that could be used for
# velocity, accelerations...
# If also changes from attitude of elements in the body (such as sensors) to
# body and horizon coordinates are implemented it would be useful!
from pyfme.utils.coordinates import body2hor, hor2body
class Velocity:
    """Velocity

    Abstract base holding an aircraft velocity in two frames; concrete
    subclasses decide which frame is primary and derive the other.

    Attributes
    ----------
    vel_body : ndarray, shape(3)
        (u [m/s], v [m/s], w [m/s])
    u
    v
    w
    vel_NED : ndarray, shape(3)
        (v_north [m/s], v_east [m/s], v_down [m/s])
    v_north
    v_east
    v_down
    """
    def __init__(self):
        # Body axis
        self._vel_body = np.zeros(3)  # m/s
        # Local horizon (NED)
        self._vel_NED = np.zeros(3)  # m/s
    @abstractmethod
    def update(self, coords, attitude):
        # Subclasses set one frame from `coords` and derive the other
        # using the supplied attitude (Euler angles).
        raise NotImplementedError
    @property
    def vel_body(self):
        return self._vel_body
    @property
    def u(self):
        # Body-axis x component [m/s].
        return self.vel_body[0]
    @property
    def v(self):
        # Body-axis y component [m/s].
        return self.vel_body[1]
    @property
    def w(self):
        # Body-axis z component [m/s].
        return self.vel_body[2]
    @property
    def vel_NED(self):
        return self._vel_NED
    @property
    def v_north(self):
        return self._vel_NED[0]
    @property
    def v_east(self):
        return self._vel_NED[1]
    @property
    def v_down(self):
        return self._vel_NED[2]
    @property
    def value(self):
        """Only for testing purposes"""
        return np.hstack((self.vel_body, self.vel_NED))
class BodyVelocity(Velocity):
    """Velocity defined by its body-axis components (u, v, w)."""

    def __init__(self, u, v, w, attitude):
        super().__init__()
        self.update(np.array([u, v, w]), attitude)

    def update(self, value, attitude):
        """Store body-axis velocity and derive the NED projection."""
        self._vel_body[:] = value
        # Rotate body -> local horizon using the current Euler angles.
        self._vel_NED = body2hor(value,
                                 attitude.theta,
                                 attitude.phi,
                                 attitude.psi)  # m/s

    def __repr__(self):
        return f"u: {self.u:.2f} m/s, v: {self.v:.2f} m/s, w: {self.w:.2f} m/s"
class NEDVelocity(Velocity):
    """Velocity defined by its local-horizon (NED) components."""

    def __init__(self, vn, ve, vd, attitude):
        super().__init__()
        self.update(np.array([vn, ve, vd]), attitude)

    def update(self, value, attitude):
        """Store NED velocity and derive the body-axis projection."""
        self._vel_NED[:] = value
        # Rotate local horizon -> body using the current Euler angles.
        self._vel_body = hor2body(value,
                                  attitude.theta,
                                  attitude.phi,
                                  attitude.psi)  # m/s

    def __repr__(self):
        return (f"V_north: {self.v_north:.2f} m/s,"
                f"V_east: {self.v_east:.2f} m/s, "
                f"V_down: {self.v_down:.2f} m/s")
| 2,925 | 1,003 |
# test while statements
# A while with a false condition must never execute its body.
while 0:
    1
# The else clause runs when the loop finishes without break — including
# when the body never executed at all.
while 0:
    2
else:
    3
from minds.sections.newsfeed import NewsfeedAPI
from minds.sections.channel import ChannelAPI
from minds.sections.notifications import NotificationsAPI
from minds.sections.posting import PostingAPI
from minds.sections.interact import InteractAPI
| 246 | 59 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
AUTHOR
- Antti Suni <antti.suni@helsinki.fi>
- Sébastien Le Maguer <lemagues@tcd.ie>
DESCRIPTION
usage: cwt_global_spectrum.py [-h] [-v] [-o OUTPUT]
[-P]
input_file
Tool for extracting global wavelet spectrum of speech envelope
introduced for second language fluency estimation in the following paper:
@inproceedings{suni2019characterizing,
title={Characterizing second language fluency with global wavelet spectrum},
author={Suni, Antti and Kallio, Heini and Benu{\v{s}}, {\v{S}}tefan and {\v{S}}imko, Juraj},
booktitle={International Congress of Phonetic Sciences},
pages={1947--1951},
year={2019},
organization={Australasian Speech Science and Technology Association Inc.}
}
positional arguments:
input_file Input signal or F0 file
optional arguments:
-h, --help show this help message and exit
-v, --verbosity increase output verbosity
-o OUTPUT, --output OUTPUT
output directory for analysis or filename for synthesis.
(Default: input_file directory [Analysis] or <input_file>.f0 [Synthesis])
-P, --plot Plot the results
You should be able to see peak around 4Hz, corresponding to syllable rate.
For longer speech files, lower frequency peaks related to phrasing should appear.
Synthetic test file with 8Hz, 4Hz and 1Hz components is included in sample directory.
LICENSE
See https://github.com/asuni/wavelet_prosody_toolkit/blob/master/LICENSE.txt
"""
# System/default
import sys
import os
# Arguments
import argparse
# Messaging/logging
import traceback
import time
import logging
# Math/plot
import numpy as np
import matplotlib.ticker
import matplotlib.pyplot as plt
# Libraries
from wavelet_prosody_toolkit.prosody_tools import cwt_utils as cwt_utils
from wavelet_prosody_toolkit.prosody_tools import misc as misc
from wavelet_prosody_toolkit.prosody_tools import energy_processing as energy_processing
###############################################################################
# global constants
###############################################################################
LEVEL = [logging.WARNING, logging.INFO, logging.DEBUG]
###############################################################################
# Functions
###############################################################################
def calc_global_spectrum(wav_file, period=5, n_scales=60, plot=False):
    """Compute the global wavelet spectrum of a speech file's envelope.

    Parameters
    ----------
    wav_file : str
        Path to the input wave file.
    period : int
        Morlet wavelet period; larger values sharpen the spectrum.
    n_scales : int
        Number of CWT scales to analyse.
    plot : bool
        If True, show the scalogram and the global spectrum.

    Returns
    -------
    (power_spec, freq) : tuple of ndarray
        Mean wavelet power per scale and the corresponding frequencies.
    """
    # Extract signal envelope, scale and normalize
    (fs, waveform) = misc.read_wav(wav_file)
    waveform = misc.resample(waveform, fs, 16000)
    energy = energy_processing.extract_energy(waveform, min_freq=30, method="hilbert")
    energy[energy<0] = 0
    # Cube root compresses the dynamic range before normalization.
    energy = np.cbrt(energy+0.1)
    params = misc.normalize_std(energy)
    # perform continous wavelet transform on envelope with morlet wavelet
    # increase _period to get sharper spectrum
    matrix, scales, freq = cwt_utils.cwt_analysis(params, first_freq = 16, num_scales = n_scales, scale_distance = 0.1,period=period, mother_name="Morlet",apply_coi=True)
    # power, arbitrary scaling to prevent underflow
    p_matrix = (abs(matrix)**2).astype('float32')*1000.0
    # Global spectrum = mean power across time (NaNs from the COI ignored).
    power_spec = np.nanmean(p_matrix,axis=1)
    if plot:
        # Scalogram on the left, global power spectrum on the right.
        f, wave_pics = plt.subplots(1, 2, gridspec_kw = {'width_ratios':[5, 1]}, sharey=True)
        f.subplots_adjust(hspace=10)
        f.subplots_adjust(wspace=0)
        wave_pics[0].set_ylim(0, n_scales)
        wave_pics[0].set_xlabel("Time(m:s)")
        wave_pics[0].set_ylabel("Frequency(Hz)")
        wave_pics[1].set_xlabel("power")
        wave_pics[1].tick_params(labelright=True)
        fname = os.path.basename(wav_file)
        title = "CWT Morlet(p="+str(period)+") global spectrum, "+ fname
        wave_pics[0].contourf(p_matrix, 100)
        wave_pics[0].set_title(title, loc="center")
        wave_pics[0].plot(params*3, color="white",alpha=0.5)
        # Label only the "round" frequencies so the axis stays readable.
        freq_labels = [round(x,3)
                       if (np.isclose(x, round(x)) or
                           (x < 2 and np.isclose(x*100., round(x*100))) or
                           (x < 0.5 and np.isclose(x*10000., round(x*10000))))
                       else ""
                       for x in list(freq)]
        wave_pics[0].set_yticks(np.linspace(0, len(freq_labels)-1, len(freq_labels)))
        wave_pics[0].set_yticklabels(freq_labels)
        # x-axis ticks as minutes:seconds (200 frames per second here —
        # TODO confirm this matches the energy extraction frame rate).
        formatter = matplotlib.ticker.FuncFormatter(lambda ms, x: time.strftime('%M:%S', time.gmtime(ms // 200)))
        wave_pics[0].xaxis.set_major_formatter(formatter)
        wave_pics[1].grid(axis="y")
        wave_pics[1].plot(power_spec,np.linspace(0,len(power_spec), len(power_spec)),"-")
        plt.show()
    return (power_spec, freq)
###############################################################################
# Main function
###############################################################################
def main():
    """Main entry function

    Computes the global wavelet spectrum of ``args.wav_file`` and writes
    the spectrum and its frequency axis as text files next to the input
    (or into ``args.output_dir`` when given).
    """
    global args
    period = 5
    n_scales = 60
    # Compute the global spectrum
    power_spec, freq = calc_global_spectrum(args.wav_file, period, n_scales, args.plot)
    # Resolve (and create) the output directory.
    out_dir = os.path.dirname(args.wav_file) if args.output_dir is None else args.output_dir
    os.makedirs(out_dir, exist_ok=True)
    stem = os.path.splitext(os.path.basename(args.wav_file))[0]
    base = os.path.join(out_dir, stem)
    # Persist spectrum and frequencies for further processing.
    np.savetxt(base + ".spec.txt", power_spec, fmt="%.5f", newline=" ")
    np.savetxt(base + ".freqs.txt", freq, fmt="%.5f", newline=" ")
###############################################################################
# Envelopping
###############################################################################
if __name__ == '__main__':
    try:
        parser = argparse.ArgumentParser(description="")
        # Add options
        parser.add_argument("-l", "--log_file", default=None,
                            help="Logger file")
        parser.add_argument("-o", "--output_dir", default=None, type=str,
                            help="The output directory (if not defined, use the same directory than the wave file)")
        parser.add_argument("-P", "--plot", default=False, action="store_true",
                            help="Plot the results")
        parser.add_argument("-v", "--verbosity", action="count", default=0,
                            help="increase output verbosity")
        # Add arguments
        parser.add_argument("wav_file", help="The input wave file")
        # Parsing arguments
        args = parser.parse_args()
        # create logger and formatter
        logger = logging.getLogger()
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        # Verbose level => logging level
        log_level = args.verbosity
        if (args.verbosity >= len(LEVEL)):
            log_level = len(LEVEL) - 1
            # NOTE(review): setLevel() is given the raw index (e.g. 2),
            # not LEVEL[log_level] — that sets a numeric level of 2, not
            # DEBUG; confirm whether LEVEL[log_level] was intended.
            logger.setLevel(log_level)
            logging.warning("verbosity level is too high, I'm gonna assume you're taking the highest (%d)" % log_level)
        else:
            logger.setLevel(LEVEL[log_level])
        # create console handler
        ch = logging.StreamHandler()
        ch.setFormatter(formatter)
        logger.addHandler(ch)
        # create file handler
        if args.log_file is not None:
            fh = logging.FileHandler(args.log_file)
            logger.addHandler(fh)
        # Debug time
        start_time = time.time()
        logger.info("start time = " + time.asctime())
        # Running main function <=> run application
        main()
        # Debug time
        logging.info("end time = " + time.asctime())
        logging.info('TOTAL TIME IN MINUTES: %02.2f' %
                     ((time.time() - start_time) / 60.0))
        # Exit program
        sys.exit(0)
    except KeyboardInterrupt as e:  # Ctrl-C
        raise e
    except SystemExit:  # sys.exit()
        pass
    except Exception as e:
        logging.error('ERROR, UNEXPECTED EXCEPTION')
        logging.error(str(e))
        traceback.print_exc(file=sys.stderr)
        sys.exit(-1)
    # NOTE(review): this try/else clause is unreachable — the try body
    # always ends in sys.exit(0), which raises SystemExit, so the
    # no-exception path never occurs; argparse already prints usage.
    else:
        print("usage: cwt_global_spectrum.py <audiofile>")
import sys
import math
import operator
import re
from os import listdir
from os.path import isfile, join
from PIL import Image
# Geometry of the SDLC descriptor grid.  NOTE: this file uses Python 2
# print statements; under Python 2 the divisions below are *integer*
# divisions (e.g. 355 / 36 == 9) — porting to Python 3 would silently
# turn them into floats.
IMAGE_WIDTH = 355
IMAGE_HEIGHT = 355
# Fraction of the image treated as the border margin.
BORDER_DISCOUNT = 0.11
BLOCK_SIZE = 36
BLOCKS_SIZE = BLOCK_SIZE * BLOCK_SIZE
# Pixel extent of each grid cell.
X_BLOCK_SIZE = IMAGE_WIDTH / BLOCK_SIZE;
Y_BLOCK_SIZE = IMAGE_HEIGHT / BLOCK_SIZE;
# Inner rectangle bounds (inside the border margin).
NORTH_POINT = IMAGE_HEIGHT * BORDER_DISCOUNT
WEST_POINT = IMAGE_WIDTH * BORDER_DISCOUNT
SOUTH_POINT = IMAGE_HEIGHT - WEST_POINT
EAST_POINT = IMAGE_WIDTH - WEST_POINT
HALF_WIDTH = IMAGE_WIDTH / 2
HALF_HEIGHT = IMAGE_HEIGHT / 2
def getImageRegion(x, y):
    """Map a block's (x, y) position to one of five layout regions.

    "E" is the central area inside the border margins; "A"/"B"/"D"/"C"
    are the top-left, top-right, bottom-left and bottom-right quadrants.
    """
    inside_x = WEST_POINT <= x <= EAST_POINT
    inside_y = NORTH_POINT <= y <= SOUTH_POINT
    if inside_x and inside_y:
        return "E"
    if x <= HALF_WIDTH:
        return "A" if y < HALF_HEIGHT else "D"
    return "B" if y <= HALF_HEIGHT else "C"
def convertHistogramIntoWords(histogram):
    """Turn a colour histogram into a space-free "word" string.

    Bins are ranked by count (descending) and the top 95% of bins are
    emitted as words of the form ``x<bin-index>``.

    Bug fixed: the original used ``++count``, which in Python is a double
    unary plus and never increments, so the 95% cut-off was never applied
    and every bin was emitted.
    """
    indexed = {}
    # Pair each bin index with its count so sorting keeps the index.
    for key, value in enumerate(histogram):
        indexed[key] = value
    ranked = sorted(indexed.items(), key=operator.itemgetter(1), reverse=True)
    cutoff = int(len(ranked) * 0.95)
    words = ''
    for count, (key, value) in enumerate(ranked):
        if count == cutoff:
            break
        words += 'x' + str(key)
    return words
def generateSDLCDescriptorSample(image):
    """Build the SDLC text descriptor for a PIL image: one word per grid
    block, combining the block's layout region with its colour words."""
    # Quantize to a palette image so histograms are over palette indices.
    lowColorImg = image.convert('P')
    yLeft = 0
    yRight = Y_BLOCK_SIZE
    imageDescription = []
    # Walk the BLOCK_SIZE x BLOCK_SIZE grid left-to-right, top-to-bottom.
    # NOTE(review): under Python 3 X/Y_BLOCK_SIZE become floats and these
    # EXTENT coordinates would need int() — this code assumes Python 2.
    for y in range(0, BLOCK_SIZE):
        xLeft = 0
        xRight = X_BLOCK_SIZE
        for x in range(0, BLOCK_SIZE):
            imageBlock = lowColorImg.transform((BLOCK_SIZE, BLOCK_SIZE), Image.EXTENT, (xLeft, yLeft, xRight, yRight))
            imageHistogram = imageBlock.histogram()
            # Region letter (A-E) prefixes the block's colour words.
            blockDescription = getImageRegion(xRight, yRight) + convertHistogramIntoWords(imageHistogram)
            imageDescription.append(blockDescription)
            xLeft += X_BLOCK_SIZE
            xRight += X_BLOCK_SIZE
        yLeft += Y_BLOCK_SIZE
        yRight += Y_BLOCK_SIZE
    return ' '.join(imageDescription)
def persistSample(path, sdlc):
    """Write an SDLC descriptor to *path* in binary mode.

    Fix: uses a context manager so the file handle is closed even if the
    write raises (the original leaked the handle on error).
    """
    with open(path, "wb") as fo:
        fo.write(sdlc)
def main(argv):
    # Generate .sdlc descriptor files for every image in the query and
    # DafitiPosthaus collections.  (Python 2 only: print statements.)
    print "Welcome to SDLC Descriptor Python implementation."
    imageCollectionPath = "data/colecaoDafitiPosthaus"
    queriesCollectionPath = "data/queries"
    print "Verifying datafiPosthaus and query collections."
    imageList = [ f for f in listdir(imageCollectionPath) if isfile(join(imageCollectionPath, f)) ]
    queryImageList = [ f for f in listdir(queriesCollectionPath) if isfile(join(queriesCollectionPath, f)) and re.match(r'[0-9]+.*\.jpg', f) ]
    print "Running SDLC Descriptor over DafitiPosthaus collection."
    print "This will take a time, make sure to have at least 20GB of free space on disk."
    # NOTE(review): the progress messages appear misordered relative to
    # the loops — this first loop processes the *query* collection, yet
    # the messages above/below mention the DafitiPosthaus/"second"
    # collection; confirm the intended ordering.
    for imagePath in queryImageList:
        imageFile = queriesCollectionPath + "/" + imagePath;
        try:
            im = Image.open(imageFile)
            sdlcImage = generateSDLCDescriptorSample(im)
            sdlcImagePath = queriesCollectionPath + "/" + imagePath.split(".")[0] + ".sdlc"
            persistSample(sdlcImagePath, sdlcImage)
        except IOError:
            # Unreadable/corrupt images are skipped silently.
            pass
    print "Finished the second collection."
    print "Checkout for files with extersion .sdlc on each collection folder."
    for imagePath in imageList:
        imageFile = imageCollectionPath + "/" + imagePath;
        try:
            im = Image.open(imageFile)
            sdlcImage = generateSDLCDescriptorSample(im)
            sdlcImagePath = imageCollectionPath + "/" + imagePath.split(".")[0] + ".sdlc"
            persistSample(sdlcImagePath, sdlcImage)
        except IOError:
            pass
    print "Finished the first collection."
    print "Running SDLC Descriptor over query collection."
| 4,177 | 1,390 |
import tensorflow as tf
from tensorflow.contrib import learn
from betahex.features import Features
from betahex.training.common import make_train_model, make_policy_input_fn, accuracy
from betahex.models import MODEL
tf.logging.set_verbosity(tf.logging.INFO)
def main(unused_argv):
    """Supervised policy training for the 13x13 betahex model.

    Trains in 2000-step chunks for up to 40 rounds, evaluating after
    each chunk; degenerate evaluations count as "fouls" and more than
    three fouls abort training early.
    """
    # Load training and eval data
    feat = Features(13, MODEL['features'])
    model_fn = make_train_model(
        feat,
        policy_filters=MODEL['filters'],
        policy_shape=MODEL['shape'],
        learning_rate=2e-3,
        learn_rate_decay=.98,
        optimizer="Adam",
        regularization_scale=MODEL['regularization_scale']
    )
    # Checkpoint every 1000 steps (by step count, not wall time).
    config = learn.RunConfig(
        save_checkpoints_steps=1000,
        save_checkpoints_secs=None,
        save_summary_steps=100
    )
    est = learn.Estimator(
        model_fn=model_fn,
        model_dir="data/tf/models/supervised/%s-l2e-3-d.98adam" % MODEL['name'],
        config=config
    )
    # Batch sizes: 64 for training, 32 for evaluation.
    train_in = make_policy_input_fn(feat, ["data/tf/features/train.tfrecords"], 64)
    eval_in = make_policy_input_fn(feat, ["data/tf/features/eval.tfrecords"], 32)
    fouls = 0
    for i in range(40):
        est.fit(
            input_fn=train_in,
            steps=2000
        )
        metrics = {
            "accuracy":
                learn.MetricSpec(
                    metric_fn=accuracy, prediction_key="classes")
        }
        eval_result = est.evaluate(input_fn=eval_in, metrics=metrics, steps=200)
        # Near-zero accuracy or exploding loss counts as a foul.
        if eval_result['accuracy'] < 1e-2 or eval_result['loss'] > 16:
            fouls += 1
        if fouls > 3:
            break
if __name__ == '__main__':
tf.app.run()
| 1,649 | 573 |
# -*- coding: utf-8 -*-
"""
BSD 3-Clause License
Copyright 2021 National Technology & Engineering Solutions of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights in this software.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
##############################################################################
# Import standard Python libraries
import sys
import numpy as np
#import datetime
#from copy import deepcopy
from pathlib import Path
import pandas as pd
# Import custom libraries
import M2TUtils
import M2TFuncs
###############################################################################
###############################################################################
# Input Data Notes
# custIDInput: list of str (customers) - the list of customer IDs as strings
# transLabelsTrue: numpy array of int (1,customers) - the transformer labels for each customer as integers. This is the ground truth transformer labels
# transLabelsErrors: numpy array of int (1,customers) - the transformer labels for each customer which may contain errors.
# In the sample data, customer_3 transformer was changed from 1 to 2 and customer_53 transformer was changed from 23 to 22
# voltageInput: numpy array of float (measurements,customers) - the raw voltage AMI measurements for each customer in Volts
# pDataInput: numpy array of float (measurements, customers) - the real power measurements for each customer in Watts
# qDataInput: numpy array of float (measurements, customers) - the reactive power measurements for each customer in VAr
# Note that the indexing of all variables above should match in the customer index, i.e. custIDInput[0], transLabelsInput[0,0], voltageInput[:,0], pDataInput[:,0], and qDataInput[:,0] should all be the same customer
###############################################################################
# Load Sample data
# (See the Input Data Notes above for the meaning and indexing of each
# array; all arrays share the same customer ordering.)
currentDirectory = Path.cwd()
filePath = Path(currentDirectory.parent,'SampleData')
filename = Path(filePath,'VoltageData_AMI.npy')
voltageInput = np.load(filename)
filename = Path(filePath,'RealPowerData_AMI.npy')
pDataInput = np.load(filename)
filename = Path(filePath,'ReactivePowerData_AMI.npy')
qDataInput = np.load(filename)
filename = Path(filePath,'TransformerLabelsTrue_AMI.npy')
transLabelsTrue = np.load(filename)
filename = Path(filePath,'TransformerLabelsErrors_AMI.npy')
transLabelsErrors = np.load(filename)
filename = Path(filePath,'CustomerIDs_AMI.npy')
custIDInput = list(np.load(filename))
###############################################################################
###############################################################################
# Data pre-processing
# Convert the raw voltage measurements into per unit and difference (delta voltage) representation
vPU = M2TUtils.ConvertToPerUnit_Voltage(voltageInput)
vDV = M2TUtils.CalcDeltaVoltage(vPU)
##############################################################################
#
# Error Flagging Section - Correlation Coefficient Analysis
# Calculate CC Matrix
ccMatrix,noVotesIndex,noVotesIDs = M2TUtils.CC_EnsMedian(vDV,windowSize=384,custID=custIDInput)
# The function CC_EnsMedian takes the median CC across windows in the dataset.
# This is mainly done to deal with the issue of missing measurements in the dataset
# If your data does not have missing measurements you could use numpy.corrcoef directly
# Do a sweep of possible CC Thresholds and rank the flagged results
# (thresholds from 0.25 to 0.91 in ~0.01 steps)
notMemberVector = [0.25,0.26,0.27,0.28,0.29,0.30,0.31,0.32,0.33,0.34,0.35,0.36,0.37,0.38,0.39,0.4,0.41,0.42,0.43,0.44,0.45,0.46,0.47,0.48,0.49,0.50,0.51,0.52,0.53,0.54,0.55,0.56,0.57,0.58,0.59,0.60,0.61,0.62,0.63,0.64,0.65,0.66,0.67,0.68,0.69,0.70,0.71,0.72,0.73,0.74,0.75,0.76,0.78,0.79,0.80,0.81,0.82,0.83,0.84,0.85,0.86,0.87,0.88,0.90,0.91]
allFlaggedTrans, allNumFlagged, rankedFlaggedTrans, rankedTransThresholds = M2TFuncs.RankFlaggingBySweepingThreshold(transLabelsErrors,notMemberVector,ccMatrix)
# Plot the number of flagged transformers for all threshold values
M2TUtils.PlotNumFlaggedTrans_ThresholdSweep(notMemberVector,allNumFlagged,transLabelsErrors,savePath=-1)
# The main output from this Error Flagging section is rankedFlaggedTrans which
# contains the list of flagged transformers ranked by correlation coefficient.
# Transformers at the beginning of the list were flagged with lower CC, indicating
# higher confidence that those transformers do indeed have errors.
##############################################################################
#
# Transformer Assignment Section - Linear Regression Steps
#
# Calculate the pairwise linear regression
r2Affinity,rDist,xDist,regRDistIndiv,regXDistIndiv,mseMatrix = M2TUtils.ParamEst_LinearRegression(voltageInput,pDataInput,qDataInput)
additiveFactor = 0.02
minMSE, mseThreshold = M2TUtils.FindMinMSE(mseMatrix,additiveFactor)
#This sets the mse threshold based on adding a small amount to the smallest MSE value in the pairwise MSE matrix
# Alternatively you could set the mse threshold manually
#mseThreshold = 0.3
# Plot CDF for adjusted reactance distance
# Pairs whose regression MSE exceeds the threshold get their reactance
# distance replaced by the maximum observed distance.
replacementValue = np.max(np.max(xDist))
xDistAdjusted = M2TFuncs.AdjustDistFromThreshold(mseMatrix,xDist,mseThreshold, replacementValue)
# Select a particular set of ranked results using a correlation coefficient threshold
notMemberThreshold=0.5
flaggingIndex = np.where(np.array(notMemberVector)==notMemberThreshold)[0][0]
flaggedTrans = allFlaggedTrans[flaggingIndex]
predictedTransLabels,allChangedIndices,allChangedOrgTrans,allChangedPredTrans = M2TFuncs.CorrectFlaggedTransErrors(flaggedTrans,transLabelsErrors,custIDInput,ccMatrix,notMemberThreshold, mseMatrix,xDistAdjusted,reactanceThreshold=0.046)
# predictedTransLabels: numpy array of int (1,customers) - the predicted labels
# for each customer. Positive labels will be unchanged from the original
# set of transformer labels. Negative labels will be new transformer groupings
# which should be the correct groups of customers served by a particular
# transformer but will require mapping back to a particular physical transformer.
# In the sample data customer_4 was injected with an incorrect label and should now be grouped with customer_5 and customer_6
# customer_53 was also injected with an incorrect label and should now be grouped with customer_54 and customer_55
print('Meter to Transformer Pairing Algorithm Results')
M2TUtils.PrettyPrintChangedCustomers(predictedTransLabels,transLabelsErrors,custIDInput)
# This function calculates two transformer level metrics of accuracy that we have been using
# incorrectTrans is a list of incorrect transformers where incorrect means customers added or omitted to the correct grouping
# This defines Transformer Accuracy, i.e. the number of correct transformers out of the total transformers
# incorrectPairedIDs lists the customers from incorrect trans which allows us to define
# Customer Pairing Accuracy which is the number of customers in the correct groupings, i.e. no customers added or omitted from the grouping
incorrectTrans,incorrectPairedIndices, incorrectPairedIDs= M2TUtils.CalcTransPredErrors(predictedTransLabels,transLabelsTrue,custIDInput,singleCustMarker=-999)
print('')
print('Ground Truth Results')
print('Transformers with incorrect groupings:')
print(incorrectTrans)
# In the sample data, these will be empty because all customers were correctly grouped together by their service transformer.
# Write output to a csv file
df = pd.DataFrame()
df['customer ID'] = custIDInput
df['Original Transformer Labels (with errors)'] = transLabelsErrors[0,:]
df['Predicted Transformer Labels'] = predictedTransLabels[0,:]
df['Actual Transformer Labels'] = transLabelsTrue[0,:]
df.to_csv('outputs_PredictedTransformerLabels.csv')
print('Predicted transformer labels written to outputs_PredictedTransformerLabels.csv')
df = pd.DataFrame()
df['Ranked Flagged Transformers'] = flaggedTrans
df.to_csv('outputs_RankedFlaggedTransformers.csv')
print('Flagged and ranked transformers written to outputs_RankedFlaggedTransformers.csv')
| 9,747 | 3,099 |
import math
import numpy as np
import pykokkos as pk
@pk.workload
class Advance_cycle:
    # PyKokkos workload: advance every in-flight particle by one sampled
    # flight step on a 1-D mesh (one sweep of the Monte Carlo transport loop).
    def __init__(self, num_part, p_pos_x, p_pos_y, p_pos_z, p_dir_y, p_dir_z, p_dir_x, p_mesh_cell, p_speed, p_time, dx, mesh_total_xsec, L, p_dist_travled, p_end_trans, rands):
        # Per-particle position components (length num_part).
        self.p_pos_x: pk.View1D[pk.double] = p_pos_x
        self.p_pos_y: pk.View1D[pk.double] = p_pos_y
        self.p_pos_z: pk.View1D[pk.double] = p_pos_z
        # Per-particle direction cosines.
        self.p_dir_y: pk.View1D[pk.double] = p_dir_y
        self.p_dir_z: pk.View1D[pk.double] = p_dir_z
        self.p_dir_x: pk.View1D[pk.double] = p_dir_x
        self.p_mesh_cell: pk.View1D[int] = p_mesh_cell
        self.p_speed: pk.View1D[pk.double] = p_speed
        self.p_time: pk.View1D[pk.double] = p_time
        self.dx: pk.double = dx  # mesh cell width
        self.L: pk.double = L    # right-hand slab boundary
        #print(dx)
        #print(L)
        self.num_part: int = num_part
        self.mesh_total_xsec: pk.View1D[pk.double] = mesh_total_xsec
        self.p_dist_travled: pk.View1D[pk.double] = p_dist_travled  # distance moved this step
        self.p_end_trans: pk.View1D[int] = p_end_trans  # 1 once a particle finished this sweep
        self.rands: pk.View1D[pk.double] = rands        # pre-drawn uniforms, one per particle

    @pk.main
    def run(self):
        # One work unit per particle.
        pk.parallel_for(self.num_part, self.advanceCycle_wu)

    @pk.workunit
    def advanceCycle_wu(self, i: int):
        # Tiny nudge so a particle moved to a cell face lands strictly inside
        # the neighbouring cell rather than exactly on the boundary.
        kicker: pk.double = 1e-8
        if (self.p_end_trans[i] == 0):
            if (self.p_pos_x[i] < 0): #exited rhs
                self.p_end_trans[i] = 1
            elif (self.p_pos_x[i] >= self.L): #exited lhs
                self.p_end_trans[i] = 1
            else:
                # Exponential flight distance from the current cell's total
                # cross-section: -ln(u)/sigma_t.
                dist: pk.double = -math.log(self.rands[i]) / self.mesh_total_xsec[self.p_mesh_cell[i]]
                #pk.printf('%d %f %f %f\n', i, dist, rands[i], mesh_total_xsec[p_mesh_cell[i]])
                #p_dist_travled[i] = dist
                x_loc: pk.double = (self.p_dir_x[i] * dist) + self.p_pos_x[i]
                # Left/right faces of the particle's current cell.
                LB: pk.double = self.p_mesh_cell[i] * self.dx
                RB: pk.double = LB + self.dx
                if (x_loc < LB): #move partilce into cell at left
                    self.p_dist_travled[i] = (LB - self.p_pos_x[i])/self.p_dir_x[i] + kicker
                    self.p_mesh_cell[i] -= 1
                elif (x_loc > RB): #move particle into cell at right
                    self.p_dist_travled[i] = (RB - self.p_pos_x[i])/self.p_dir_x[i] + kicker
                    self.p_mesh_cell[i] += 1
                else: #move particle in cell
                    # Full sampled flight fits in the cell: this particle's
                    # step (and its sweep) is complete.
                    self.p_dist_travled[i] = dist
                    self.p_end_trans[i] = 1
                #pk.printf('%d: x pos before step %f\n', i, p_pos_x[i])
                self.p_pos_x[i] = self.p_dir_x[i]*self.p_dist_travled[i] + self.p_pos_x[i]
                self.p_pos_y[i] = self.p_dir_y[i]*self.p_dist_travled[i] + self.p_pos_y[i]
                self.p_pos_z[i] = self.p_dir_z[i]*self.p_dist_travled[i] + self.p_pos_z[i]
                #pk.printf('%d: x pos after step: %f should be: %f\n', i, p_pos_x[i], (temp_x))
                # NOTE(review): time advances by the full sampled `dist` even
                # when the actual move was clipped at a cell face -- confirm
                # this is intended.
                self.p_time[i] += dist/self.p_speed[i]
@pk.workload
class DistTraveled:
    # PyKokkos workload: accumulate per-cell path-length tallies and report
    # whether every particle has finished the current transport sweep.
    def __init__(self, num_part, max_mesh_index, mesh_dist_traveled_pk, mesh_dist_traveled_squared_pk, p_dist_travled, mesh, p_end_trans, clever_out):
        self.num_part: int = num_part
        self.max_mesh_index: int = max_mesh_index
        # Running tallies of distance (and squared distance) per mesh cell.
        self.mesh_dist_traveled_pk: pk.View1D[pk.double] = mesh_dist_traveled_pk
        self.mesh_dist_traveled_squared_pk: pk.View1D[pk.double] = mesh_dist_traveled_squared_pk
        self.p_dist_travled: pk.View1D[pk.double] = p_dist_travled
        self.mesh: pk.View1D[int] = mesh  # cell index each particle occupied this step
        self.p_end_trans: pk.View1D[int] = p_end_trans
        # clever_out[0]: 1 iff all particles are done; clever_out[1]: count done.
        self.clever_out: pk.View1D[int] = clever_out

    @pk.main
    def distTraveled_main(self):
        end_flag: int = 1
        cur_cell: int = 0
        summer: int = 0
        for i in range(self.num_part):
            cur_cell = int(self.mesh[i])
            # Only interior cells are tallied.
            if (0 < cur_cell) and (cur_cell < self.max_mesh_index):
                self.mesh_dist_traveled_pk[cur_cell] += self.p_dist_travled[i]
                self.mesh_dist_traveled_squared_pk[cur_cell] += self.p_dist_travled[i]**2
            if self.p_end_trans[i] == 0:
                end_flag = 0
            # BUG FIX: the original used the bare names `p_end_trans` and
            # `clever_out` below, which are undefined in this scope
            # (NameError); they must be accessed via self like every other
            # member in this method.
            summer += self.p_end_trans[i]
        self.clever_out[0] = end_flag
        self.clever_out[1] = summer
#@pk.workunit
#def CellSum
# for i in range(num_parts)
#@profile
def Advance(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, dx, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time,
            num_part, mesh_total_xsec, mesh_dist_traveled, mesh_dist_traveled_squared, L):
    # Drive repeated Advance_cycle sweeps until every particle has either
    # completed a full flight step or left the slab, tallying path lengths
    # per mesh cell after each sweep. Mutates the View arguments in place.
    max_mesh_index = int(len(mesh_total_xsec)-1)
    p_end_trans: pk.View1D[int] = pk.View([num_part], int) #flag
    p_end_trans.fill(0)
    p_dist_travled: pk.View1D[pk.double] = pk.View([num_part], pk.double)
    clever_out: pk.View1D[int] = pk.View([4], int)
    end_flag = 0
    cycle_count = 0
    while end_flag == 0:
        #allocate randoms
        summer = 0
        rands_np = np.random.random([num_part])
        rands = pk.from_numpy(rands_np)
        #vector of indicies for particle transport
        # NOTE(review): `p` is never used afterwards -- the workloads launch
        # their own parallel_for; candidate for removal.
        p = pk.RangePolicy(pk.get_default_space(), 0, num_part)
        p_dist_travled.fill(0)
        pre_p_mesh = p_mesh_cell
        L = float(L)
        #space = pk.ExecutionSpace.OpenMP
        pk.execute(pk.ExecutionSpace.OpenMP, Advance_cycle(num_part, p_pos_x, p_pos_y, p_pos_z, p_dir_y, p_dir_z, p_dir_x, p_mesh_cell, p_speed, p_time, dx, mesh_total_xsec, L, p_dist_travled, p_end_trans, rands))#pk for number still in transport
        pk.execute(pk.ExecutionSpace.OpenMP,
                   DistTraveled(num_part, max_mesh_index, mesh_dist_traveled, mesh_dist_traveled_squared, p_dist_travled, pre_p_mesh, p_end_trans, clever_out))
        end_flag = clever_out[0]
        summer = clever_out[1]
        #print(cycle_count)
        # Safety valve: bail out rather than loop forever.
        if (cycle_count > int(1e3)):
            print("************ERROR**********")
            print(" Max itter hit")
            print(p_end_trans)
            print()
            print()
            return()
        cycle_count += 1
        # NOTE(review): the format string only uses argument {1}; the
        # cycle_count argument ({0}) is never printed.
        print("Advance Complete:......{1}% ".format(cycle_count, int(100*summer/num_part)), end = "\r")
    print()
@pk.workload
class StillIn:
    # PyKokkos workload: kill particles that have left the slab through
    # either face and count the exits per side.
    def __init__(self, p_pos_x, surface_distances, p_alive, num_part, clever_out):
        self.p_pos_x: pk.View1D[pk.double] = p_pos_x
        # clever_out[0] = left-exit tally, clever_out[1] = right-exit tally.
        self.clever_out: pk.View1D[int] = clever_out
        self.surface_distances: pk.View1D[pk.double] = surface_distances
        self.p_alive: pk.View1D[int] = p_alive
        self.num_part: int = num_part

    @pk.main
    def run(self):
        tally_left: int = 0
        tally_right: int = 0
        for i in range(self.num_part):
            #exit at left
            if self.p_pos_x[i] <= 0:
                tally_left += 1
                self.p_alive[i] = 0
            # NOTE(review): the right boundary is hard-coded as 1 even though
            # surface_distances is passed in; presumably this should be the
            # last surface distance -- confirm before using a slab with L != 1.
            elif self.p_pos_x[i] >= 1:
                tally_right += 1
                self.p_alive[i] = 0
        self.clever_out[0] = tally_left
        self.clever_out[1] = tally_right
def speedTestAdvance():
    """Benchmark kernels.Advance with a large (1e8) particle population.

    NOTE(review): this driver still references names not defined in this
    module (kernels, dx, mesh_total_xsec, mesh_dist_traveled,
    mesh_dist_traveled_squared, surface_distances); they must exist in the
    surrounding script before this can run.
    """
    # Position
    num_part = int(1e8)
    # BUG FIX: was "phase_parts = num_parts", an undefined name (NameError);
    # the intended source is num_part.
    phase_parts = num_part
    p_pos_x_np = np.zeros(phase_parts, dtype=float)
    p_pos_y_np = np.zeros(phase_parts, dtype=float)
    p_pos_z_np = np.zeros(phase_parts, dtype=float)
    p_pos_x = pk.from_numpy(p_pos_x_np)
    p_pos_y = pk.from_numpy(p_pos_y_np)
    p_pos_z = pk.from_numpy(p_pos_z_np)
    # Direction
    p_dir_x_np = np.zeros(phase_parts, dtype=float)
    p_dir_y_np = np.zeros(phase_parts, dtype=float)
    p_dir_z_np = np.zeros(phase_parts, dtype=float)
    p_dir_x = pk.from_numpy(p_dir_x_np)
    p_dir_y = pk.from_numpy(p_dir_y_np)
    p_dir_z = pk.from_numpy(p_dir_z_np)
    # Speed
    p_speed_np = np.zeros(phase_parts, dtype=float)
    p_speed = pk.from_numpy(p_speed_np)
    # Time
    p_time_np = np.zeros(phase_parts, dtype=float)
    p_time = pk.from_numpy(p_time_np)
    # Region
    p_mesh_cell_np = np.zeros(phase_parts, dtype=np.int32)
    p_mesh_cell = pk.from_numpy(p_mesh_cell_np)
    # Flags
    p_alive_np = np.full(phase_parts, False, dtype=np.int32)
    p_alive = pk.from_numpy(p_alive_np)
    kernels.Advance(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, dx, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, num_part, mesh_total_xsec, mesh_dist_traveled, mesh_dist_traveled_squared, surface_distances[len(surface_distances)-1])
"""
def test_Advance():
L = 1
dx = .25
N_m = 4
num_part = 6
p_pos_x = np.array([-.01, 0, .1544, .2257, .75, 1.1])
p_pos_y = 2.1*np.ones(num_part)
p_pos_z = 3.4*np.ones(num_part)
p_mesh_cell = np.array([-1, 0, 0, 1, 3, 4], dtype=int)
p_dir_x = np.ones(num_part)
p_dir_x[0] = -1
p_dir_y = np.zeros(num_part)
p_dir_z = np.zeros(num_part)
p_speed = np.ones(num_part)
p_time = np.zeros(num_part)
p_alive = np.ones(num_part, bool)
p_alive[5] = False
particle_speed = 1
mesh_total_xsec = np.array([0.1,1,.1,100])
mesh_dist_traveled_squared = np.zeros(N_m)
mesh_dist_traveled = np.zeros(N_m)
[p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, mesh_dist_traveled, mesh_dist_traveled_squared] = Advance(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, dx, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, num_part, mesh_total_xsec, mesh_dist_traveled, mesh_dist_traveled_squared, L)
assert (np.sum(mesh_dist_traveled) > 0)
assert (np.sum(mesh_dist_traveled_squared) > 0)
assert (p_pos_x[0] == -.01)
assert (p_pos_x[5] == 1.1)
assert (p_pos_x[1:4].all() > .75)
"""
def test_StillIn():
    # NOTE(review): stale test -- StillIn above is now a pk.workload class
    # whose constructor takes (p_pos_x, surface_distances, p_alive,
    # num_part, clever_out) and is driven via pk.execute; calling it like a
    # function and unpacking three return values will fail. Kept as-is for
    # reference until the test is ported to the workload API.
    num_part = 7
    surface_distances = [0,.25,.75,1]
    p_pos_x = np.array([-.01, 0, .1544, .2257, .75, 1.1, 1])
    p_alive = np.ones(num_part, bool)
    [p_alive, tally_left, tally_right] = StillIn(p_pos_x, surface_distances, p_alive, num_part)
    assert(p_alive[0] == False)
    assert(p_alive[5] == False)
    assert(tally_left == 2)
    assert(tally_right == 2)
    assert(p_alive[2:4].all() == True)
# Script entry point: run the large-scale Advance benchmark.
if __name__ == '__main__':
    speedTestAdvance()
from django.conf import settings
from ..defaults import AUTHENTICATORS
from ..utils import get_module
# Resolved authenticator modules, populated by collect_authenticators().
authenticators = []
# Flattened, lower-cased argument names across all authenticators.
all_args = []


def collect_authenticators():
    """Resolve the configured authenticator modules and cache their arg names.

    Reads GRAPHENE_ADDONS_AUTHENTICATORS from Django settings, falling back
    to the historical misspelled key and then to the package defaults.
    """
    global authenticators, all_args
    # BUG FIX (backward-compatible): 'GRAHENE_ADDONS_AUTHENTICATORS' is a
    # typo of 'GRAPHENE_ADDONS_AUTHENTICATORS'. Accept the correct key
    # first, but keep honouring the misspelled one so existing settings
    # files continue to work.
    configured = getattr(
        settings,
        'GRAPHENE_ADDONS_AUTHENTICATORS',
        getattr(settings, 'GRAHENE_ADDONS_AUTHENTICATORS', AUTHENTICATORS),
    )
    authenticators = list(map(get_module, configured))
    for authenticator in authenticators:
        # Normalise argument names for case-insensitive matching.
        authenticator.args = list(map(str.lower, authenticator.args))
    all_args = [arg for authenticator in authenticators for arg in authenticator.args]
| 514 | 152 |
import face_recognition as fr
def compare_faces(file1, file2):
    """
    Compare the first face found in each image.

    Returns True when the faces match, False when they do not match or
    when either image contains no detectable face.
    """
    # Load the jpg files into numpy arrays
    image1 = fr.load_image_file(file1)
    image2 = fr.load_image_file(file2)
    # Get the face encodings for each face in each image file.
    encodings1 = fr.face_encodings(image1)
    encodings2 = fr.face_encodings(image2)
    # BUG FIX: the original indexed [0] unconditionally, raising IndexError
    # whenever an image had no detectable face; treat "no face" as "no match".
    if not encodings1 or not encodings2:
        return False
    # Assume there is only 1 face in each image, so use the first encoding.
    results = fr.compare_faces([encodings1[0]], encodings2[0])
    return results[0]
# Each face is tuple of (Name,sample image)
# Gallery of known people: (display name, path to a reference image).
known_faces = [('Stark','sample_images/stark.jpg'),
               ('Hannah','sample_images/hannah.jpg'),
               ]
def face_rec(file):
    """
    Return the name of the first known person matching *file*,
    or 'Unknown' when nobody matches.
    """
    # Lazily test each reference image; stop at the first match.
    matches = (name for name, known_file in known_faces
               if compare_faces(known_file, file))
    return next(matches, 'Unknown')
| 1,123 | 345 |
import numpy as np
import pandas as pd
import pyproj as pyproj
import shapely as shapely
from shapely.geometry import Point
from haversine import haversine
import src.config.column_names as col_names
def correlate_and_save(crime_df: pd.DataFrame,
                       census_df: pd.DataFrame,
                       file_name: str,
                       correlation_mode: str):
    """Annotate each crime record with its nearest census zipcode and persist.

    'euclidean' selects projected planar distance; any other mode falls back
    to haversine (great-circle) distance. The annotated crime frame is
    written to *file_name* without the index.
    """
    matcher = (add_zip_code_column_using_euclidean
               if correlation_mode == 'euclidean'
               else add_zip_code_column_using_haversine)
    matcher(crime_df, census_df)
    crime_df.to_csv(file_name, index=False)
# Finds nearest zipcode geolocation to the crime location, then adds this zipcode to the crime dataset
# Finds nearest zipcode geolocation to the crime location, then adds this zipcode to the crime dataset
def add_zip_code_column_using_haversine(crime_df: pd.DataFrame, census_df: pd.DataFrame):
    """Assign each crime row the zipcode of the nearest census location,
    using great-circle (haversine) distance. Mutates crime_df in place."""
    crime_df[col_names.ZIP_CODE] = np.nan
    # PERF FIX: materialise the census (lat, lon, zip) triples once instead
    # of re-running census_df.iterrows() for every crime row (the original
    # was O(crimes x census) iterrows calls).
    census_points = [
        (census_row[col_names.LATITUDE],
         census_row[col_names.LONGITUDE],
         census_row[col_names.ZIP_CODE])
        for _, census_row in census_df.iterrows()
    ]
    for index, row in crime_df.iterrows():
        nearest_zip_code = np.nan
        nearest_zip_code_distance = -1
        crime_point = (row[col_names.LATITUDE], row[col_names.LONGITUDE])
        for lat, lon, zip_code in census_points:
            distance = haversine(crime_point, (lat, lon))
            # Keep the first census point achieving the minimum distance,
            # matching the original strict "<" comparison.
            if nearest_zip_code_distance == -1 or distance < nearest_zip_code_distance:
                nearest_zip_code = zip_code
                nearest_zip_code_distance = distance
        crime_df.loc[index, col_names.ZIP_CODE] = nearest_zip_code
def add_zip_code_column_using_euclidean(crime_df: pd.DataFrame, census_df: pd.DataFrame):
    # Assign each crime row the zipcode of the nearest census location,
    # measured as planar distance after projecting both points into the
    # Los Angeles state-plane CRS (EPSG:6423). Mutates crime_df in place.
    crime_df[col_names.ZIP_CODE] = np.nan
    wgs84_proj = pyproj.CRS('EPSG:4326')
    los_angeles_proj = pyproj.CRS('EPSG:6423')
    # NOTE(review): with always_xy=True the transformer expects (lon, lat)
    # input, but Point(...) below is built as (latitude, longitude) -- the
    # axes may be swapped. Relative nearest-neighbour results could still be
    # consistent since both datasets are transformed the same way; confirm.
    project_los_angeles = pyproj.Transformer.from_crs(wgs84_proj, los_angeles_proj, always_xy=True).transform
    # Project every census point once, up front.
    census_list = list()
    for census_index, census_row in census_df.iterrows():
        point2_transformed = shapely.ops.transform(project_los_angeles,
                                                   Point(census_row[col_names.LATITUDE], census_row[col_names.LONGITUDE]))
        census_list.append((census_row, point2_transformed))
    for index, row in crime_df.iterrows():
        nearest_zip_code = np.nan
        nearest_zip_code_distance = -1
        point1_transformed = shapely.ops.transform(project_los_angeles, Point(row[col_names.LATITUDE], row[col_names.LONGITUDE]))
        # Linear scan keeping the first minimum-distance census point.
        for census_data in census_list:
            distance = point1_transformed.distance(census_data[1])
            if nearest_zip_code_distance == -1 or distance < nearest_zip_code_distance:
                nearest_zip_code = census_data[0][col_names.ZIP_CODE]
                nearest_zip_code_distance = distance
        crime_df.loc[index, col_names.ZIP_CODE] = nearest_zip_code
def merge_crime_and_census(crime_df: pd.DataFrame, census_df: pd.DataFrame, file_name: str):
    """Inner-join crime and census frames on zipcode, drop redundant
    columns, and write the result to *file_name* (no index column)."""
    merged_df = crime_df.merge(census_df, on=col_names.ZIP_CODE, how='inner')
    # The join duplicates the coordinate columns; neither copy is needed.
    merged_df = merged_df.drop(columns=[
        'Latitude_x', 'Latitude_y', 'Longitude_x', 'Longitude_y',
        col_names.TOTAL_HOUSEHOLDS, col_names.AVERAGE_HOUSEHOLD_SIZE,
    ])
    merged_df.to_csv(file_name, index=False)
| 3,354 | 1,116 |
def increment(*numbers):
    """Demonstrate *args packing: print the tuple of received arguments."""
    collected = numbers
    print(collected)
def dance(*numbers):
    """Return the product of all positional arguments.

    Uses math.prod, which matches the original manual loop exactly,
    including the empty-product identity: dance() == 1.
    """
    import math  # local import keeps this standalone snippet self-contained
    return math.prod(numbers)
# Demo: prints the packed tuple (3, 4, 5, 6), then the product 720.
increment(3,4,5,6)
print(dance(1,2,3,4,5,6))
| 197 | 76 |
from flask_restful import Resource
from flask_restful import reqparse
from sqlalchemy.exc import SQLAlchemyError
from config import Configuration
from models.tag import TagModel as TM
class Tag(Resource):
    """REST resource for creating, renaming and deleting individual tags.

    Every verb reads the tag name from the request body via `parser`.
    """
    parser = reqparse.RequestParser()
    parser.add_argument('name',
                        type=str,
                        required=True,
                        help='This field cannot be blank.')

    def post(self):
        # Create a new tag; rejects over-long names and duplicates.
        # NOTE(review): both rejection branches return HTTP 200 with only a
        # message body -- presumably these should be 400/409; confirm the
        # API contract before changing.
        data = Tag.parser.parse_args()
        name = data['name']
        if len(name) > Configuration.MAX_TAG_NAME_SIZE:
            return {'message': 'A name\'s length is more than {}'.format(Configuration.MAX_TAG_NAME_SIZE)}
        if TM.query.filter(TM.name == name).first():
            return {'message': 'Tag \'{}\' already exists'.format(name)}
        tag = TM(name=name)
        try:
            tag.save_to_db()
        except SQLAlchemyError as e:
            err = str(e.__class__.__name__)
            return {'message': '{}'.format(err)}, 500
        return tag.get_json(), 201

    def put(self):
        # Upsert a tag by name.
        # NOTE(review): when the tag already exists, the inner query filters
        # on the very same name and therefore always matches, so the rename
        # branch (tag.name = name) is unreachable and every PUT on an
        # existing tag returns "already exists". This looks like a logic
        # bug; confirm the intended update semantics before fixing.
        data = Tag.parser.parse_args()
        name = data['name']
        if len(name) > Configuration.MAX_TAG_NAME_SIZE:
            return {'message': 'A tag\'s length is more than {}'.format(Configuration.MAX_TAG_NAME_SIZE)}
        tag = TM.find_by_name(name)
        if not tag:
            tag = TM(name=name)
        else:
            if not TM.query.filter(TM.name == name).first():
                tag.name = name
            else:
                return {'message': 'Tag name \'{}\' already exists'.format(data['name'])}
        try:
            tag.save_to_db()
        except SQLAlchemyError as e:
            err = str(e.__class__.__name__)
            return {'message': '{}'.format(err)}, 500
        return tag.get_json(), 201

    def delete(self):
        # Delete a tag by name; responds 200 with a message either way.
        data = Tag.parser.parse_args()
        name = data['name']
        tag = TM.find_by_name(name)
        if tag:
            try:
                tag.delete_from_db()
            except SQLAlchemyError as e:
                err = str(e.__class__.__name__)
                return {'message': '{}'.format(err)}, 500
            return {'message': 'Tag was deleted'}
        return {'message': 'Tag with name: \'{}\' was not found'.format(name)}
class TagList(Resource):
    """Read-only collection endpoint returning every stored tag."""

    def get(self):
        # Serialise each tag via its own JSON representation.
        all_tags = TM.query.all()
        serialized = [tag.get_json() for tag in all_tags]
        return {'tags': serialized}
| 2,386 | 718 |
# Generated by Django 3.0.7 on 2020-06-06 16:06
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Comments, Likes and
    # UploadTorrents tables. Generated code -- prefer adding new migrations
    # over editing this one.

    initial = True

    dependencies = [
    ]

    operations = [
        # Comments on posts; user/post referenced by plain integer ids
        # rather than foreign keys.
        migrations.CreateModel(
            name='Comments',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_id', models.IntegerField()),
                ('post_id', models.IntegerField()),
                ('user_name', models.CharField(max_length=50)),
                ('comment', models.CharField(max_length=1000)),
                ('user_avatar', models.CharField(max_length=250)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        # Per-user like/unlike records for posts.
        migrations.CreateModel(
            name='Likes',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_id', models.IntegerField()),
                ('post_id', models.IntegerField()),
                ('liked', models.BooleanField()),
                ('unliked', models.BooleanField()),
                ('liked_by', models.CharField(blank=True, max_length=50)),
                ('unliked_by', models.CharField(blank=True, max_length=50)),
            ],
        ),
        # Uploaded torrents with static-served image/file fields.
        migrations.CreateModel(
            name='UploadTorrents',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('torrent_name', models.CharField(max_length=50)),
                ('torrent_description', models.CharField(max_length=250)),
                ('torrent_image', models.ImageField(null=True, upload_to='static/torrent_images/')),
                ('torrent_file', models.FileField(upload_to='static/torrent-files/')),
                ('likes', models.IntegerField(default=False)),
                ('unlikes', models.IntegerField(default=False)),
                ('uploader_name', models.CharField(max_length=50)),
                ('user_id', models.IntegerField(default=False)),
            ],
        ),
    ]
| 2,198 | 610 |
"""
结合所有的 Manager,实现text-in text-out的交互式 agent 的接口
"""
import os
import sys
import time
import argparse
import logging
# Project root of this module; make the repo importable and pin the GPU used.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, '../..'))
os.environ["CUDA_VISIBLE_DEVICES"]="1"
from DM.DST.StateTracking import DialogStateTracker
from DM.policy.RuleMapping import RulePolicy
from data.DataManager import DataManager
from NLU.NLUManager import NLUManager
from NLG.NLGManager import rule_based_NLG
# CLI flag: when true, log turn-by-turn details to the session log file.
# NOTE(review): argparse's type=bool treats any non-empty string as True
# (e.g. "-p False" still yields True) -- confirm this is intended.
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--print', type=bool, default=True, help='print details')
FLAGS= parser.parse_args()

# Mock profile for the demo user; keys/values are consumed by the dialog
# state tracker (keys are Chinese business-domain field names).
UserPersonal = {
    "已购业务": ["180元档幸福流量年包", "18元4G飞享套餐升级版"], # this should really hold the complete business-info dicts
    "套餐使用情况": "剩余流量 11.10 GB,剩余通话 0 分钟,话费余额 110.20 元,本月已产生话费 247.29 元",
    "号码": "18811369685",
    "归属地" : "北京",
    "品牌": "动感地带",
    "是否转品牌过渡期": "否",
    "话费查询": "话费余额 110.20 元",
    "流量查询": "剩余流量 11.10 GB",
    "订购时间": "订购时间 2017-04-04, 生效时间 2017-05-01",
    "是否停机": "否",
    "话费充值": "请登录网上营业厅、微厅或 APP 充值",
    "流量充值": "请登录网上营业厅、微厅或 APP 充值",
    "账单查询": "请登录网上营业厅、微厅或 APP 查询"
}

# Checkpoint / resource locations for each NLU sub-module.
NLU_save_path_dict = {
    'domain': os.path.join(BASE_DIR, 'NLU/DomDect/model/ckpt'),
    'useract': os.path.join(BASE_DIR, 'NLU/UserAct/model/ckpt'),
    'slotfilling': os.path.join(BASE_DIR, 'NLU/SlotFilling/model/ckpt'),
    'entity': os.path.join(BASE_DIR, 'NLU/ER/entity_list.txt'),
    'sentiment': os.path.join(BASE_DIR, 'NLU/SentiDect')
}
class DialogAgent:
    """Interactive text-in / text-out dialog agent.

    Wires together NLU, dialog state tracking, a rule-based policy and
    template NLG; logs every turn to a per-user session log and appends
    the full transcript on shutdown.
    """

    def __init__(self):
        self.history_savedir = None  # per-user transcript file, set by create_user()
        self.detail_savedir = None   # per-session detail log, set by create_user()
        self.logger = None
        self.user = self.create_user()
        self.rule_policy = RulePolicy()
        self.dst = DialogStateTracker(UserPersonal, FLAGS.print, self.logger)
        self.data_manager = DataManager(os.path.join(BASE_DIR, 'data/tmp'))
        self.nlu_manager = NLUManager(NLU_save_path_dict)
        # self.nlg_template = NLG_template
        self.turn_num = 1
        self.dialog_history = []

    def create_user(self):
        # Prompt for a user name and prepare that user's transcript and
        # per-session log files under user/<name>/.
        user_name = input("请输入您的用户名:")
        user_path = os.path.join(BASE_DIR, 'user', user_name)
        log_path = os.path.join(user_path, 'log')
        if not os.path.exists(user_path):
            os.mkdir(user_path)
            os.mkdir(log_path)
        self.history_savedir = user_path + '/dialogs.txt'
        # Session log is named after the start timestamp.
        log_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
        self.detail_savedir = log_path +'/' + log_name + '.log'
        self.logger = self.create_logger(self.detail_savedir)
        return user_name

    def create_logger(self, logdir):
        # Build a logger that mirrors INFO messages to the console and to
        # the session log file at *logdir*.
        fmt = '%(message)s'
        # datefmt = "%y-%m-%d %H:%M:%S"
        logging.basicConfig(level=logging.INFO,
                            format=fmt)
        # datefmt=datefmt)
        logger = logging.getLogger('mylogger')
        logger.setLevel(logging.INFO)
        fh = logging.FileHandler(logdir)
        fh.setLevel(logging.INFO)
        logger.addHandler(fh)
        return logger

    def run(self):
        # Main interactive loop: read a user utterance, run NLU + state
        # tracking, generate a reply, and record the turn. Ctrl-C or a
        # farewell keyword ends the session.
        if FLAGS.print:
            self.logger.info('对话记录时间:'+time.strftime("%Y-%m-%d %H:%M:%S",
                                                          time.localtime()))
        try:
            while True:
                user_utter = input("用户输入:")
                if FLAGS.print:
                    with open(self.detail_savedir, 'a') as f:
                        f.write('-------------- Turn ' + str(self.turn_num) + '--------------\n')
                        f.write('用户:' + user_utter + '\n')
                self.dialog_history.append('用户:' + user_utter)
                # Reset keywords: start the dialog state over.
                if user_utter in ['restart' , '重来' , '重新开始']:
                    self.dst = DialogStateTracker(UserPersonal, FLAGS.print, self.logger)
                    self.rule_policy = RulePolicy()
                    if FLAGS.print:
                        self.logger.info('对话状态已重置')
                    else:
                        print('对话状态已重置')
                    continue
                # Farewell keywords: close the session cleanly.
                if '再见' in user_utter or '结束' in user_utter or '谢谢' in user_utter:
                    self.close()
                    break
                nlu_results = self.nlu_manager.get_NLU_results(user_utter, self.data_manager)
                self.dst.update(nlu_results, self.rule_policy, self.data_manager)
                reply = rule_based_NLG(self.dst)
                if FLAGS.print:
                    self.logger.info('系统:' + reply + '\n')
                else:
                    print('系统:', reply, '\n')
                self.dialog_history.append('系统:' + reply)
                self.turn_num += 1
        except KeyboardInterrupt:
            self.close()

    def close(self):
        # Release NLU resources, say goodbye, and append the whole session
        # transcript to the user's history file.
        self.nlu_manager.close()
        reply = '感谢您的使用,再见!'
        if FLAGS.print:
            self.logger.info('系统:' + reply + '\n')
        else:
            print('系统:', reply, '\n')
        with open(os.path.join(BASE_DIR, self.history_savedir), 'a') as f:
            f.write('对话记录时间:')
            f.write(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())+'\n\n')
            for dialog in self.dialog_history:
                dialog = '\n'.join(dialog.split())
                f.write(dialog+'\n\n')
            f.write('系统:感谢您的使用,再见!\n')
            f.write('————————————————————————————————\n')
# Script entry point: build the agent (prompts for a user name) and start
# the interactive loop.
if __name__ == '__main__':
    agent = DialogAgent()
    agent.run()
| 5,493 | 2,177 |
# -*- coding: utf-8 -*-
from __future__ import print_function
""" CLU’s Custom exception classes live here """
class BadDotpathWarning(Warning):
    """Warns that converting a filesystem path to a dotpath produced an
    invalid result — most likely characters such as dashes slipped in."""
class CDBError(Exception):
    """Raised when something goes wrong with a compilation database."""
class ConfigurationError(Exception):
    """Raised for errors occurring during macro configuration."""
class ExecutionError(Exception):
    """Raised when executing a shell command fails."""
class ExportError(NameError):
    """Raised while preparing an item for export."""
class ExportWarning(Warning):
    """Warns of a non-fatal condition encountered during an export."""
class FilesystemError(Exception):
    """Raised for errors encountered while mucking about with the filesystem."""
class KeyValueError(ValueError):
    """Raised from within the clu.keyvalue API."""
class Nondeterminism(Exception):
    """Raised for "heisenbug"-style problems that do not reproduce
    deterministically."""
class UnusedValueWarning(Warning):
    """Warns that an AppDirs instance was initialized with a value (or
    values) that the platform we are currently running on never consults."""
# Public export list for `from clu.exceptions import *` and module dir().
__all__ = ('BadDotpathWarning',
           'CDBError',
           'ConfigurationError',
           'ExecutionError', 'FilesystemError',
           'ExportError', 'ExportWarning',
           'KeyValueError',
           'Nondeterminism',
           'UnusedValueWarning')

def __dir__():
    # Same contract as the original lambda: dir() of this module lists
    # exactly the public names above.
    return list(__all__)
from setuptools import find_packages, setup
# Minimal packaging manifest: metadata only, no install dependencies declared.
# NOTE(review): 'occurance' is a typo ('occurrence') in the published
# description string; left unchanged since it is released metadata.
setup(
    name='src',
    packages=find_packages(),
    version='0.1.0',
    description='Prediction of likely occurance of Vehicle Accident',
    author='sea karki',
    license='',
)
| 231 | 74 |
#!/usr/bin/env python
import sys, time, os
sys.path.append('../src')
import numpy as np
import matplotlib.pylab as plt
import hla
from hla import caget, caput
def compare(o1, o2):
    # Python 2 script. Compare two Orm measurements element by element:
    # BPM records, trim records, then each matrix entry; saves a raw
    # response-curve plot for every entry disagreeing by more than 5%.
    print o1
    print o2
    npoint, nbpm, ntrim = np.shape(o1._rawmatrix)
    print "checking bpm"
    for i in range(nbpm):
        # bpm name
        if o1.bpm[i][0] != o2.bpm[i][0] or \
           o1.bpm[i][1] != o2.bpm[i][1] or \
           o1.bpm[i][2] != o2.bpm[i][2]:
            print i, o1.bpm[i], o2.bpm[i]
    print "checking trim"
    for i in range(ntrim):
        # bpm name
        if o1.trim[i][0] != o2.trim[i][0] or \
           o1.trim[i][1] != o2.trim[i][1] or \
           o1.trim[i][2] != o2.trim[i][2]:
            print i, o1.trim[i], o2.trim[i]
    print "checking m"
    ndiff = 0
    for i in range(nbpm):
        #if ndiff > 20: break
        for j in range(ntrim):
            # Skip masked entries and cross-plane pairs.
            if o1._mask[i,j] and o2._mask[i,j]: continue
            if o1.bpm[i][1] != o1.trim[j][1]: continue
            if o1._mask[i,j] or o2._mask[i,j]:
                print "skip", i,j,o1.bpm[i][0], o1.trim[j][0]
                continue
            # Ignore near-zero elements where a relative test is meaningless.
            if abs(o1.m[i,j]) < 5e-3: continue
            if o2._mask[i,j]: continue
            # Relative disagreement above 5%: report and plot both raw curves.
            if abs((o1.m[i,j] - o2.m[i,j])/o2.m[i,j]) > .05:
                print i, j, o1.bpm[i][0], o1.trim[j][0], \
                    o1.m[i,j], o2.m[i,j]
                plt.clf()
                plt.plot(o1._rawkick[j,1:-1], o1._rawmatrix[1:-1,i,j], '-o')
                plt.plot(o2._rawkick[j,1:-1], o2._rawmatrix[1:-1,i,j], '-x')
                plt.title("%s/%s (%s/%s) %.2e %.2e" % (
                    o1.bpm[i][0], o1.trim[j][0], o1.bpm[i][1],
                    o1.trim[j][1], o1.m[i,j], o2.m[i,j]))
                plt.savefig('orm-compare-t%04d-b%04d.png' % (j,i))
                ndiff = ndiff + 1
def filter_orm(f):
    # Load an ORM measurement file and run its linearity diagnostics.
    orm = hla.measorm.Orm([], [])
    orm.load(f)
    orm.checkLinearity()
def merge_orm(f1, f2):
    # Load two ORM measurement files and report their differences.
    # The actual merge/save steps are currently disabled below.
    orm1 = hla.measorm.Orm([], [])
    orm2 = hla.measorm.Orm([], [])
    orm1.load(f1)
    orm2.load(f2)
    compare(orm1, orm2)
    #orm1.update(orm2)
    #orm1.save("orm.pkl")
def mask_orm(f, sk):
    """Mask every trim column listed in *sk* and rewrite the file in place."""
    orm = hla.measorm.Orm([], [])
    orm.load(f)
    for trim_index in sk:
        orm._mask[:, trim_index] = 1
    orm.save(f)
def test_orbit(f):
orm2 = hla.measorm.Orm([], [])
if not os.path.exists(f): return True
orm2.load(f)
print orm2
print "delay: ", orm2.TSLEEP
npoint, nbpm, ntrim = np.shape(orm2._rawmatrix)
for i in range(5):
#itrim = np.random.randint(ntrim)
itrim = 0
while True:
ibpm = np.random.randint(nbpm)
if orm2.trim[itrim][1] == orm2.bpm[ibpm][1]: break
#print hla.getOrbit()
bpmrb = orm2.bpm[ibpm][2]
trimsp = orm2.trim[itrim][3]
x0 = caget(bpmrb)
k = caget(trimsp)
dk = 1e-4
caput(trimsp, k + dk)
time.sleep(orm2.TSLEEP)
dx = orm2.m[ibpm,itrim]*dk
x1 = caget(bpmrb)
print trimsp, "% .2e" % k, bpmrb, \
"% .4e % .4e % .4e % .4e" % (x0, x1, x1-x0, dx)
caput(trimsp, k)
time.sleep(orm2.TSLEEP)
plt.clf()
plt.plot(orm2._rawkick[itrim, 1:-1], orm2._rawmatrix[1:-1,ibpm,itrim],
'-o')
plt.plot([k, k+dk], [x0, x1], '-x')
plt.grid(True)
plt.title("%s.%s/%s.%s" % (orm2.bpm[ibpm][0], orm2.bpm[ibpm][1],
orm2.trim[itrim][0], orm2.trim[itrim][1]))
plt.savefig('orm-test-%03d.png' % i)
return True
plt.clf()
plt.plot(ratio, '-o')
plt.ylabel("[(x1-x0)-dx]/x0")
plt.savefig("orm-orbit-reproduce-1.png")
plt.clf()
plt.plot(x0, '--', label='orbit 0')
plt.plot(x1, '-', label='orbit 1')
plt.plot(x0+dx, 'x', label='ORM predict')
plt.ylabel("orbit")
plt.savefig("orm-orbit-reproduce-2.png")
for i in range(len(bpm)):
if x0[i]+dx[i] - x1[i] > 1e-4:
print "Not agree well:", i,bpm[i], x0[i]+dx[i], x1[i]
print "Done", time.time()
def update_orm(f):
    # Load an ORM file and re-run its linearity diagnostics; the
    # re-measurement and save steps are currently disabled below.
    orm = hla.measorm.Orm([], [])
    if not os.path.exists(f): return True
    orm.load(f)
    npoint, nbpm, ntrim = np.shape(orm._rawmatrix)
    # Unique BPM names (order unspecified due to set()).
    bpm = list(set([b[0] for i,b in enumerate(orm.bpm)]))
    #orm.measure_update(bpm=bpm, trim = ['CXHG2C30A', 'CXHG2C06A', 'CXHG2C10A', 'CXL1G6C12B', 'CXHG2C14A', 'CXHG2C18A', 'CXHG2C20A', 'CXHG2C22A', 'CXHG2C24A', 'CXHG2C26A', 'FYL1G1C21A'], verbose=1)
    orm.checkLinearity(verbose=1)
    #orm.save("orm-full-update.pkl")
def correct_orbit(f):
    # Python 2 script. One-shot least-squares orbit correction: reset the
    # trims, select the BPMX members of cells C00-C09, solve m*dk = -v for
    # the trim kicks, apply them, and save before/after orbit plots.
    orm = hla.orm.Orm([], [])
    if not os.path.exists(f): return True
    orm.load(f)
    hla.reset_trims()
    time.sleep(5)
    npoint, nbpm, ntrim = np.shape(orm._rawmatrix)
    bpm = []
    # Collect the BPMX group members of the first ten cells.
    for c in range(10):
        cc = "C%02d" % c
        b = hla.getGroupMembers(['*', 'BPMX', cc], op='intersection')
        print cc,b
        bpm.extend(b)
    print len(bpm), bpm
    ntrim_used = ntrim
    m = np.zeros((len(bpm), ntrim_used), 'd')
    # sel marks which ORM rows correspond to the selected BPMs.
    sel = [0] * nbpm
    bpmpv = []
    bpmfullpv = [b[2] for i,b in enumerate(orm.bpm)]
    for i,b in enumerate(orm.bpm):
        if not b[0] in bpm: continue
        sel[i] = 1
        bpmpv.append(b[2])
    # Read the current orbit (full and selected) and plot it.
    v0 = np.array(hla.caget(bpmfullpv))
    v = np.array(hla.caget(bpmpv))
    plt.clf()
    plt.plot(v0, 'r-')
    plt.plot(v, 'ro')
    plt.savefig("orbit-00.png")
    # Restrict the response matrix to the selected BPM rows.
    m = np.compress(sel, orm.m[:,:ntrim_used], axis=0)
    print np.shape(m)
    #u,s,vh = np.linalg.svd(m)
    #print u, s, vh
    # Least-squares kicks that cancel the measured orbit.
    dk, resids, rank, s = np.linalg.lstsq(m, -1.0*v)
    print dk
    trimpv = [t[3] for i,t in enumerate(orm.trim)]
    print [t[0] for i,t in enumerate(orm.trim)]
    hla.caput(trimpv[:ntrim_used], dk)
    # Watch the orbit settle and save a snapshot every two seconds.
    for i in range(1,10):
        time.sleep(2)
        plt.clf()
        v1 = np.array(hla.caget(bpmfullpv))
        plt.plot(v1, '-o')
        plt.savefig("orbit-%02d.png" % i)
if __name__ == "__main__":
#filter_orm('../test/dat/orm-full-0179.pkl')
#filter_orm('../test/dat/orm-full-0181.pkl')
#filter_orm('../test/dat/orm-full.pkl')
#filter_orm('../test/dat/orm-sub1.pkl')
#mask_orm('../test/dat/orm-full-0179.pkl', [52, 90, 141, 226, 317, 413])
#merge_orm('../test/dat/orm-full-0179.pkl',
# '../test/dat/orm-full-0181.pkl')
#merge_orm('../test/dat/orm-full-0181.pkl', 'orm.pkl')
#test_orbit('../test/dat/orm-full-0181.pkl')
#update_orm('../test/dat/orm-full-0184.pkl')
#update_orm('orm-full-update.pkl')
correct_orbit('orm-full-update.pkl')
| 6,543 | 3,025 |
import numpy as np
import cv2
#sample function - dummy callback function
def nothing(x):
    """No-op callback for createTrackbar; slider values are polled in the
    main loop instead of being handled here."""
    return None
#capture live video
cap = cv2.VideoCapture(0)  # default webcam

#window for trackbars
cv2.namedWindow('Track')

#defining trackbars to control HSV values of given video stream
# L_* sliders start fully open at 0, H_* sliders at 255.
cv2.createTrackbar('L_HUE', 'Track', 0, 255, nothing)
cv2.createTrackbar('L_Sat', 'Track', 0, 255, nothing)
cv2.createTrackbar('L_Val', 'Track', 0, 255, nothing)
cv2.createTrackbar('H_HUE', 'Track', 255, 255, nothing)
cv2.createTrackbar('H_Sat', 'Track', 255, 255, nothing)
cv2.createTrackbar('H_Val', 'Track', 255, 255, nothing)

# IDIOM FIX: test the boolean directly instead of "== True".
while cap.isOpened():
    #read the video feed
    _, frame = cap.read()
    cv2.imshow('Actual_Feed', frame)
    #get current trackbar positions for every frame
    l_hue = cv2.getTrackbarPos('L_HUE', 'Track')
    l_sat = cv2.getTrackbarPos('L_Sat', 'Track')
    l_val = cv2.getTrackbarPos('L_Val', 'Track')
    h_hue = cv2.getTrackbarPos('H_HUE', 'Track')
    h_sat = cv2.getTrackbarPos('H_Sat', 'Track')
    h_val = cv2.getTrackbarPos('H_Val', 'Track')
    #convert the captured frame into HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) #Hue(0), Saturation(1) and Value(2)
    # print(hsv.shape)
    #trim video feed HSV to a range
    lower_bound = np.array([l_hue, l_sat, l_val])
    upper_bound = np.array([h_hue, h_sat, h_val])
    mask = cv2.inRange(hsv, lower_bound,upper_bound )
    # Keep only the pixels inside the selected HSV band.
    frame = cv2.bitwise_and(frame, frame,mask=mask)
    cv2.imshow('frame', frame)
    cv2.imshow('mask', mask)
    # cv2.imshow('converted', hsv)
    key = cv2.waitKey(1)
    if key==27:  # ESC quits
        break

cap.release()
cv2.destroyAllWindows()
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from typing import Tuple
from absl import flags
from absl.testing import absltest
import grpc
from framework import xds_url_map_testcase
from framework.test_app import client_app
# Type aliases re-exported from the framework for brevity.
HostRule = xds_url_map_testcase.HostRule
PathMatcher = xds_url_map_testcase.PathMatcher
GcpResourceManager = xds_url_map_testcase.GcpResourceManager
DumpedXdsConfig = xds_url_map_testcase.DumpedXdsConfig
RpcTypeUnaryCall = xds_url_map_testcase.RpcTypeUnaryCall
RpcTypeEmptyCall = xds_url_map_testcase.RpcTypeEmptyCall
XdsTestClient = client_app.XdsTestClient
ExpectedResult = xds_url_map_testcase.ExpectedResult

logger = logging.getLogger(__name__)
flags.adopt_module_key_flags(xds_url_map_testcase)

# The first batch of RPCs don't count towards the result of test case. They are
# meant to prove the communication between driver and client is fine.
_NUM_RPCS = 10
# How long assertRpcStatusCode observes traffic when validating distributions.
_LENGTH_OF_RPC_SENDING_SEC = 16
# We are using sleep to synchronize test driver and the client... Even though
# the client is sending at QPS rate, we can't assert that exactly QPS *
# SLEEP_DURATION number of RPC is finished. The final completed RPC might be
# slightly more or less.
_NON_RANDOM_ERROR_TOLERANCE = 0.01
# For random generator involved test cases, we want to be more loose about the
# final result. Otherwise, we will need more test duration (sleep duration) and
# more accurate communication mechanism. The accurate of random number
# generation is not the intention of this test.
_ERROR_TOLERANCE = 0.2
# Application-level RPC deadline used by the delay-injection test cases.
_DELAY_CASE_APPLICATION_TIMEOUT_SEC = 1
# Upper bound on how long _wait_until_backlog_cleared polls.
_BACKLOG_WAIT_TIME_SEC = 20
def _build_fault_injection_route_rule(abort_percentage: int = 0,
                                      delay_percentage: int = 0):
    """Build a URL-map route rule that fault-injects UnaryCall RPCs.

    Aborts carry HTTP status 401; delays are a fixed 20 seconds.  Percentages
    are integers in [0, 100].
    """
    fault_policy = {
        'abort': {
            'httpStatus': 401,
            'percentage': abort_percentage,
        },
        'delay': {
            'fixedDelay': {
                'seconds': '20'
            },
            'percentage': delay_percentage,
        }
    }
    return {
        'priority': 0,
        'matchRules': [{
            'fullPathMatch': '/grpc.testing.TestService/UnaryCall'
        }],
        'service': GcpResourceManager().default_backend_service(),
        'routeAction': {
            'faultInjectionPolicy': fault_policy
        },
    }
def _wait_until_backlog_cleared(test_client: XdsTestClient,
                                timeout: int = _BACKLOG_WAIT_TIME_SEC):
    """ Wait until the completed RPC is close to started RPC.

    For delay injected test cases, there might be a backlog of RPCs due to slow
    initialization of the client. E.g., if initialization took 20s and qps is
    25, then there will be a backlog of 500 RPCs. In normal test cases, this is
    fine, because RPCs will fail immediately. But for delay injected test cases,
    the RPC might linger much longer and affect the stability of test results.

    Raises:
        RuntimeError: if the backlog has not cleared within *timeout* seconds.
    """
    logger.info('Waiting for RPC backlog to clear for %d seconds', timeout)
    deadline = time.time() + timeout
    while time.time() < deadline:
        stats = test_client.get_load_balancer_accumulated_stats()
        ok = True
        for rpc_type in [RpcTypeUnaryCall, RpcTypeEmptyCall]:
            started = stats.num_rpcs_started_by_method.get(rpc_type, 0)
            completed = stats.num_rpcs_succeeded_by_method.get(
                rpc_type, 0) + stats.num_rpcs_failed_by_method.get(rpc_type, 0)
            # The backlog is considered healthy when started and completed RPC
            # counts differ by at most 1.1 x QPS.  (NOTE(review): an earlier
            # comment claimed 1.5 x QPS while the code uses 1.1 — the code is
            # authoritative here; confirm which threshold was intended.)
            if abs(started - completed) > xds_url_map_testcase.QPS.value * 1.1:
                logger.info(
                    'RPC backlog exist: rpc_type=%s started=%s completed=%s',
                    rpc_type, started, completed)
                time.sleep(_DELAY_CASE_APPLICATION_TIMEOUT_SEC)
                ok = False
            else:
                logger.info(
                    'RPC backlog clear: rpc_type=%s started=%s completed=%s',
                    rpc_type, started, completed)
        if ok:
            # Both backlog of both types of RPCs is clear, success, return.
            return
    raise RuntimeError('failed to clear RPC backlog in %s seconds' % timeout)
class TestZeroPercentFaultInjection(xds_url_map_testcase.XdsUrlMapTestCase):
    """Fault injection configured at 0%: all UnaryCalls must still succeed."""

    @staticmethod
    def url_map_change(
            host_rule: HostRule,
            path_matcher: PathMatcher) -> Tuple[HostRule, PathMatcher]:
        rule = _build_fault_injection_route_rule(abort_percentage=0,
                                                delay_percentage=0)
        path_matcher["routeRules"] = [rule]
        return host_rule, path_matcher

    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        self.assertNumEndpoints(xds_config, 1)
        route = xds_config.rds['virtualHosts'][0]['routes'][0]
        filter_config = route['typedPerFilterConfig'][
            'envoy.filters.http.fault']
        delay = filter_config['delay']
        abort = filter_config['abort']
        # Envoy encodes 0% as an absent numerator over a MILLION denominator.
        self.assertEqual('20s', delay['fixedDelay'])
        self.assertEqual(0, delay['percentage'].get('numerator', 0))
        self.assertEqual('MILLION', delay['percentage']['denominator'])
        self.assertEqual(401, abort['httpStatus'])
        self.assertEqual(0, abort['percentage'].get('numerator', 0))
        self.assertEqual('MILLION', abort['percentage']['denominator'])

    def rpc_distribution_validate(self, test_client: XdsTestClient):
        self.configure_and_send(test_client,
                                rpc_types=(RpcTypeUnaryCall,),
                                num_rpcs=_NUM_RPCS)
        expected = ExpectedResult(rpc_type=RpcTypeUnaryCall,
                                  status_code=grpc.StatusCode.OK,
                                  ratio=1)
        self.assertRpcStatusCode(test_client,
                                 expected=(expected,),
                                 length=_LENGTH_OF_RPC_SENDING_SEC,
                                 tolerance=_NON_RANDOM_ERROR_TOLERANCE)
class TestNonMatchingFaultInjection(xds_url_map_testcase.XdsUrlMapTestCase):
    """EMPTY_CALL is not fault injected, so it should succeed."""

    @staticmethod
    def client_init_config(rpc: str, metadata: str):
        # Python interop client will stuck if the traffic is slow (in this case,
        # 20s injected). The purpose of this test is examining the un-injected
        # traffic is not impacted, so it's fine to just send un-injected
        # traffic.
        return 'EmptyCall', metadata

    @staticmethod
    def url_map_change(
            host_rule: HostRule,
            path_matcher: PathMatcher) -> Tuple[HostRule, PathMatcher]:
        rule = _build_fault_injection_route_rule(abort_percentage=100,
                                                delay_percentage=100)
        path_matcher["routeRules"] = [rule]
        return host_rule, path_matcher

    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        self.assertNumEndpoints(xds_config, 1)
        routes = xds_config.rds['virtualHosts'][0]['routes']
        # The first route rule for UNARY_CALL is fault injected
        self.assertEqual("/grpc.testing.TestService/UnaryCall",
                         routes[0]['match']['path'])
        filter_config = routes[0]['typedPerFilterConfig'][
            'envoy.filters.http.fault']
        delay = filter_config['delay']
        abort = filter_config['abort']
        self.assertEqual('20s', delay['fixedDelay'])
        self.assertEqual(1000000, delay['percentage']['numerator'])
        self.assertEqual('MILLION', delay['percentage']['denominator'])
        self.assertEqual(401, abort['httpStatus'])
        self.assertEqual(1000000, abort['percentage']['numerator'])
        self.assertEqual('MILLION', abort['percentage']['denominator'])
        # The second route rule for all other RPCs is untouched
        self.assertNotIn('envoy.filters.http.fault',
                         routes[1].get('typedPerFilterConfig', {}))

    def rpc_distribution_validate(self, test_client: XdsTestClient):
        expected = ExpectedResult(rpc_type=RpcTypeEmptyCall,
                                  status_code=grpc.StatusCode.OK,
                                  ratio=1)
        self.assertRpcStatusCode(test_client,
                                 expected=(expected,),
                                 length=_LENGTH_OF_RPC_SENDING_SEC,
                                 tolerance=_NON_RANDOM_ERROR_TOLERANCE)
@absltest.skip('20% RPC might pass immediately, reason unknown')
class TestAlwaysDelay(xds_url_map_testcase.XdsUrlMapTestCase):
    """100% delay injection: every UnaryCall should hit its deadline."""

    @staticmethod
    def url_map_change(
            host_rule: HostRule,
            path_matcher: PathMatcher) -> Tuple[HostRule, PathMatcher]:
        rule = _build_fault_injection_route_rule(abort_percentage=0,
                                                delay_percentage=100)
        path_matcher["routeRules"] = [rule]
        return host_rule, path_matcher

    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        self.assertNumEndpoints(xds_config, 1)
        route = xds_config.rds['virtualHosts'][0]['routes'][0]
        delay = route['typedPerFilterConfig'][
            'envoy.filters.http.fault']['delay']
        self.assertEqual('20s', delay['fixedDelay'])
        self.assertEqual(1000000, delay['percentage']['numerator'])
        self.assertEqual('MILLION', delay['percentage']['denominator'])

    def rpc_distribution_validate(self, test_client: XdsTestClient):
        self.configure_and_send(test_client,
                                rpc_types=(RpcTypeUnaryCall,),
                                num_rpcs=_NUM_RPCS,
                                app_timeout=_DELAY_CASE_APPLICATION_TIMEOUT_SEC)
        # Drain any RPCs queued up during client initialization first.
        _wait_until_backlog_cleared(test_client)
        expected = ExpectedResult(
            rpc_type=RpcTypeUnaryCall,
            status_code=grpc.StatusCode.DEADLINE_EXCEEDED,
            ratio=1)
        self.assertRpcStatusCode(test_client,
                                 expected=(expected,),
                                 length=_LENGTH_OF_RPC_SENDING_SEC,
                                 tolerance=_NON_RANDOM_ERROR_TOLERANCE)
class TestAlwaysAbort(xds_url_map_testcase.XdsUrlMapTestCase):
    """100% abort injection: every UnaryCall should fail UNAUTHENTICATED."""

    @staticmethod
    def url_map_change(
            host_rule: HostRule,
            path_matcher: PathMatcher) -> Tuple[HostRule, PathMatcher]:
        rule = _build_fault_injection_route_rule(abort_percentage=100,
                                                delay_percentage=0)
        path_matcher["routeRules"] = [rule]
        return host_rule, path_matcher

    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        self.assertNumEndpoints(xds_config, 1)
        route = xds_config.rds['virtualHosts'][0]['routes'][0]
        abort = route['typedPerFilterConfig'][
            'envoy.filters.http.fault']['abort']
        self.assertEqual(401, abort['httpStatus'])
        self.assertEqual(1000000, abort['percentage']['numerator'])
        self.assertEqual('MILLION', abort['percentage']['denominator'])

    def rpc_distribution_validate(self, test_client: XdsTestClient):
        self.configure_and_send(test_client,
                                rpc_types=(RpcTypeUnaryCall,),
                                num_rpcs=_NUM_RPCS)
        # HTTP 401 maps to gRPC UNAUTHENTICATED.
        expected = ExpectedResult(
            rpc_type=RpcTypeUnaryCall,
            status_code=grpc.StatusCode.UNAUTHENTICATED,
            ratio=1)
        self.assertRpcStatusCode(test_client,
                                 expected=(expected,),
                                 length=_LENGTH_OF_RPC_SENDING_SEC,
                                 tolerance=_NON_RANDOM_ERROR_TOLERANCE)
class TestDelayHalf(xds_url_map_testcase.XdsUrlMapTestCase):
    """50% delay injection: roughly half of UnaryCalls should time out."""

    @staticmethod
    def url_map_change(
            host_rule: HostRule,
            path_matcher: PathMatcher) -> Tuple[HostRule, PathMatcher]:
        rule = _build_fault_injection_route_rule(abort_percentage=0,
                                                delay_percentage=50)
        path_matcher["routeRules"] = [rule]
        return host_rule, path_matcher

    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        self.assertNumEndpoints(xds_config, 1)
        route = xds_config.rds['virtualHosts'][0]['routes'][0]
        delay = route['typedPerFilterConfig'][
            'envoy.filters.http.fault']['delay']
        self.assertEqual('20s', delay['fixedDelay'])
        self.assertEqual(500000, delay['percentage']['numerator'])
        self.assertEqual('MILLION', delay['percentage']['denominator'])

    def rpc_distribution_validate(self, test_client: XdsTestClient):
        self.configure_and_send(test_client,
                                rpc_types=(RpcTypeUnaryCall,),
                                num_rpcs=_NUM_RPCS,
                                app_timeout=_DELAY_CASE_APPLICATION_TIMEOUT_SEC)
        _wait_until_backlog_cleared(test_client)
        # Randomized injection: allow the looser tolerance.
        expected = ExpectedResult(
            rpc_type=RpcTypeUnaryCall,
            status_code=grpc.StatusCode.DEADLINE_EXCEEDED,
            ratio=0.5)
        self.assertRpcStatusCode(test_client,
                                 expected=(expected,),
                                 length=_LENGTH_OF_RPC_SENDING_SEC,
                                 tolerance=_ERROR_TOLERANCE)
class TestAbortHalf(xds_url_map_testcase.XdsUrlMapTestCase):
    """50% abort injection: roughly half of UnaryCalls should fail."""

    @staticmethod
    def url_map_change(
            host_rule: HostRule,
            path_matcher: PathMatcher) -> Tuple[HostRule, PathMatcher]:
        rule = _build_fault_injection_route_rule(abort_percentage=50,
                                                delay_percentage=0)
        path_matcher["routeRules"] = [rule]
        return host_rule, path_matcher

    def xds_config_validate(self, xds_config: DumpedXdsConfig):
        self.assertNumEndpoints(xds_config, 1)
        route = xds_config.rds['virtualHosts'][0]['routes'][0]
        abort = route['typedPerFilterConfig'][
            'envoy.filters.http.fault']['abort']
        self.assertEqual(401, abort['httpStatus'])
        self.assertEqual(500000, abort['percentage']['numerator'])
        self.assertEqual('MILLION', abort['percentage']['denominator'])

    def rpc_distribution_validate(self, test_client: XdsTestClient):
        self.configure_and_send(test_client,
                                rpc_types=(RpcTypeUnaryCall,),
                                num_rpcs=_NUM_RPCS)
        # Randomized injection: allow the looser tolerance.
        expected = ExpectedResult(
            rpc_type=RpcTypeUnaryCall,
            status_code=grpc.StatusCode.UNAUTHENTICATED,
            ratio=0.5)
        self.assertRpcStatusCode(test_client,
                                 expected=(expected,),
                                 length=_LENGTH_OF_RPC_SENDING_SEC,
                                 tolerance=_ERROR_TOLERANCE)
if __name__ == '__main__':
    # Run all test cases in this module via absl's unittest runner.
    absltest.main()
| 15,934 | 4,779 |
import scrapy
from ..items import BasicHouseItem
from ..utils import generate_url
class FundaListingsSpider(scrapy.Spider):
    """Crawl funda.nl search-result pages, yielding one BasicHouseItem per
    listing found on the page."""

    name = 'funda_listings'
    allowed_domains = ["funda.nl"]
    custom_settings = {
        "FEED_EXPORT_FIELDS": ["address", "zipcode", "price", "house_size", "plot_size", "rooms", "url", "image", "funda_id"]
    }

    def start_requests(self):
        # generate_url() builds the search URL from project configuration.
        return [scrapy.FormRequest(generate_url())]

    @staticmethod
    def _stripped(text):
        """Strip surrounding whitespace, tolerating a missing (None) match."""
        return text.strip() if text is not None else None

    def parse(self, response):
        """Extract every listing on the results page into a BasicHouseItem."""
        result_blocks = response.css("ol.search-results")
        for result_block in result_blocks:
            results = result_block.css("li.search-result")
            for result in results:
                url = result.css("div.search-result__header-title-col a::attr(href)").get()
                # .strip() used to be called unconditionally and raised
                # AttributeError whenever the selector matched nothing.
                address = self._stripped(result.css("h2.search-result__header-title::text").get())
                zipcode = self._stripped(result.css("h4.search-result__header-subtitle::text").get())
                price = result.css("span.search-result-price::text").get()
                properties = result.css(".search-result-kenmerken")
                house_size = properties.css("span[title*='Gebruiksoppervlakte wonen']::text").get()
                plot_size = properties.css("span[title*='Perceeloppervlakte']::text").get()
                rooms_nodes = properties.css("li::text")
                # Guard against listings with an empty kenmerken list
                # (previously [-1] raised IndexError).
                rooms = rooms_nodes[-1].get() if rooms_nodes else None
                image = result.css(".search-result-image img::attr(src)").get()
                funda_id = result.css("div.search-result__header-title-col a::attr(data-search-result-item-anchor)").get()

                items = BasicHouseItem()
                items['url'] = f"https://www.funda.nl/{url}"
                items['address'] = address
                items['zipcode'] = zipcode
                items['price'] = price
                items['house_size'] = house_size
                items['plot_size'] = plot_size
                items['rooms'] = rooms
                items['image'] = image
                items['funda_id'] = funda_id
                yield items
| 2,008 | 579 |
from wfdb.io.record import (Record, MultiRecord, rdheader, rdrecord, rdsamp,
wrsamp, dl_database, edf2mit, mit2edf, wav2mit,
mit2wav, wfdb2mat, csv2mit, sampfreq, signame,
wfdbdesc, wfdbtime, sigavg)
from wfdb.io.annotation import (Annotation, rdann, wrann, show_ann_labels,
show_ann_classes, ann2rr, rr2ann, csv2ann,
rdedfann, mrgann)
from wfdb.io.download import get_dbs, get_record_list, dl_files, set_db_index_url
from wfdb.plot.plot import plot_items, plot_wfdb, plot_all_records
from wfdb.plot.plot_plotly import plot_items_pl, plot_wfdb_pl, plot_all_records_pl
from wfdb.version import __version__
| 754 | 265 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Widen both reportimage URL fields to 800 characters.

    dependencies = [
        ('reports', '0102_recordspec_group_key'),
    ]

    operations = [
        migrations.AlterField(
            model_name='reportimage',
            name=url_field,
            field=models.URLField(max_length=800),
            preserve_default=True,
        )
        for url_field in ('image_url', 'thumbnail_url')
    ]
| 647 | 196 |
"""
You are given a digital number written down on a sheet of paper.
Your task is to figure out if you rotate the given sheet of paper by 180 degrees would the number still look exactly the same.
input: "1"
output: false
input: "29562"
output: true
input: "77"
output: false
"""
def digital_number(number):
    """
    >>> digital_number('1')
    False
    >>> digital_number('29562')
    True
    >>> digital_number('77')
    False
    >>> digital_number('000')
    True

    A seven-segment number survives a 180-degree rotation only when every
    digit maps to another digit under rotation and the reversed, mapped
    string equals the original:
    https://en.wikipedia.org/wiki/Seven-segment_display_character_representations

    In short:
    0 -> 0, 2 -> 2, 5 -> 5, 6 -> 9, 8 -> 8, 9 -> 6
    Other digits can't be rotated into any digit at all.
    """
    rotation = {'0': '0', '2': '2', '5': '5', '6': '9', '8': '8', '9': '6'}
    rotated = "".join(rotation.get(ch, "x") for ch in reversed(number))
    return rotated == number
if __name__ == "__main__":
    # Run the doctests embedded in digital_number's docstring.
    import doctest
    doctest.testmod()
import argparse
import os, json, sys
import azureml.core
from azureml.core import Workspace
from azureml.core import Experiment
from azureml.core.model import Model
import azureml.core
from azureml.core import Run
from azureml.core.webservice import AciWebservice, Webservice
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core.image import ContainerImage
from azureml.core import Image
# Entry banner so the pipeline log shows which step is executing.
print("In evaluate.py")

parser = argparse.ArgumentParser("evaluate")
# Name of the registered AzureML model to evaluate against the deployed one.
parser.add_argument("--model_name", type=str, help="model name", dest="model_name", required=True)
# Name to give the container image if the model gets packaged.
parser.add_argument("--image_name", type=str, help="image name", dest="image_name", required=True)
# Directory where eval_info.json will be written for downstream steps.
parser.add_argument("--output", type=str, help="eval output directory", dest="output", required=True)
args = parser.parse_args()

print("Argument 1: %s" % args.model_name)
print("Argument 2: %s" % args.image_name)
print("Argument 3: %s" % args.output)
# Resolve the AzureML workspace from the submitted pipeline run's context.
run = Run.get_context()
ws = run.experiment.workspace
print('Workspace configuration succeeded')

# Pick the most recently created registration of the model under evaluation.
model_list = Model.list(ws, name=args.model_name)
# max() replaces sorted(..., reverse=True)[0]: same winner, no list copy.
latest_model = max(model_list, key=lambda x: x.created_time)
latest_model_id = latest_model.id
latest_model_name = latest_model.name
latest_model_version = latest_model.version
latest_model_path = latest_model.get_model_path(latest_model_name, _workspace=ws)
print('Latest model id: ', latest_model_id)
print('Latest model name: ', latest_model_name)
print('Latest model version: ', latest_model_version)
print('Latest model path: ', latest_model_path)

# Accuracy is read back from the training run that registered the model
# (the run id is stored in the model's tags).
latest_model_run_id = latest_model.tags.get("run_id")
print('Latest model run id: ', latest_model_run_id)
latest_model_run = Run(run.experiment, run_id=latest_model_run_id)
latest_model_accuracy = latest_model_run.get_metrics().get("acc")
print('Latest model accuracy: ', latest_model_accuracy)
# Find any webservice already serving this model so we can compare accuracy.
ws_list = Webservice.list(ws, model_name=latest_model_name)
print('webservice list')
print(ws_list)

deploy_model = False
current_model = None
if ws_list:
    webservice = ws_list[0]
    try:
        image_id = webservice.tags['image_id']
        image = Image(ws, id=image_id)
        current_model = image.models[0]
        print('Found current deployed model!')
    except Exception:
        # Narrowed from a bare except: so SystemExit/KeyboardInterrupt
        # propagate; any lookup failure still falls back to deploying.
        deploy_model = True
        print('Image id tag not found!')
else:
    deploy_model = True
    print('No deployed webservice for model: ', latest_model_name)

current_model_accuracy = -1  # undefined: guarantees a first deploy wins below
if current_model is not None:
    current_model_run = Run(run.experiment, run_id=current_model.tags.get("run_id"))
    current_model_accuracy = current_model_run.get_metrics().get("acc")
    print('accuracies')
    print(latest_model_accuracy, current_model_accuracy)
    if latest_model_accuracy > current_model_accuracy:
        deploy_model = True
        print('Current model performs better and will be deployed!')
    else:
        print('Current model does NOT perform better and thus will NOT be deployed!')
# Record the evaluation outcome for downstream pipeline steps.
eval_info = {}
eval_info["model_name"] = latest_model_name
eval_info["model_version"] = latest_model_version
eval_info["model_path"] = latest_model_path
eval_info["model_acc"] = latest_model_accuracy
eval_info["deployed_model_acc"] = current_model_accuracy
eval_info["deploy_model"] = deploy_model
eval_info["image_name"] = args.image_name
eval_info["image_id"] = ""

os.makedirs(args.output, exist_ok=True)
eval_filepath = os.path.join(args.output, 'eval_info.json')

if not deploy_model:
    # Persist the verdict and stop this step without failing the pipeline.
    with open(eval_filepath, "w") as f:
        json.dump(eval_info, f)
    print('eval_info.json saved')
    print('Model did not meet the accuracy criteria and will not be deployed!')
    print('Exiting')
    sys.exit(0)

# Continue to package Model and create image
print('Model accuracy has met the criteria!')
print('Proceeding to package model and create the image...')

print('Updating scoring file with the correct model name')
with open('score.py') as f:
    data = f.read()
with open('score_fixed.py', "w") as f:
    f.write(data.replace('MODEL-NAME', args.model_name))  # replace the placeholder MODEL-NAME
print('score_fixed.py saved')

# create a Conda dependencies environment file
print("Creating conda dependencies file locally...")
conda_packages = ['numpy']
pip_packages = ['tensorflow==1.12.2', 'keras==2.2.4', 'azureml-sdk', 'azureml-monitoring']
mycondaenv = CondaDependencies.create(conda_packages=conda_packages, pip_packages=pip_packages)
conda_file = 'scoring_dependencies.yml'
with open(conda_file, 'w') as f:
    f.write(mycondaenv.serialize_to_string())

# create container image configuration
print("Creating container image configuration...")
image_config = ContainerImage.image_configuration(execution_script='score_fixed.py',
                                                  runtime='python', conda_file=conda_file)
print("Creating image...")
image = Image.create(name=args.image_name, models=[latest_model], image_config=image_config, workspace=ws)
# wait for image creation to finish
image.wait_for_creation(show_output=True)

eval_info["image_id"] = image.id
with open(eval_filepath, "w") as f:
    json.dump(eval_info, f)
print('eval_info.json saved')
| 5,196 | 1,655 |
from tools import instruction_helpers
from tools.errors import InstructionNotFound
from logs.logconfig import log_config
LOG = log_config()
class Instruction:
    """Read a single MIPS assembly instruction from stdin and encode it as a
    binary string (Python 2 module: uses raw_input)."""

    binary_instruction = ""  # class-level default; overwritten per instance

    def __init__(self):
        inst = raw_input("Please type a single command\n")
        self.opcode = None
        self.binary_instruction = self.parse_instruction(inst)

    def parse_instruction(self, instruction=""):
        """Encode *instruction* (e.g. "ADD $1, $2, $3") into its binary form.

        Sets ``self.opcode`` as a side effect and returns the binary string.

        Raises:
            InstructionNotFound: if the mnemonic is not a known type.
            NotImplementedError: for J-type instructions. Previously the
                J branch fell through and crashed with a NameError because
                ``binary_instruction`` was never assigned.
        """
        instruction = instruction.replace(",", "")
        instruction = instruction.upper()
        instruction_parts = instruction.split(" ")
        LOG.info('<note> INSTRUCTION UNITS ARE {}'.format(instruction_parts))
        self.opcode = instruction_parts[0]
        # renamed from 'type' to avoid shadowing the builtin
        inst_type = instruction_helpers.type_finder(opcode=self.opcode)
        if inst_type == "I":
            LOG.info('<note> FOUND I TYPE INSTRUCTION')
            opcode = instruction_helpers.i_type_look_up.get(instruction_parts[0]).get('OPCODE')
            rs = instruction_helpers.to_binary(instruction_parts[2])
            rt = instruction_helpers.to_binary(instruction_parts[1])
            imm = instruction_helpers.immediate_to_binary(instruction_parts[3])
            binary_instruction = "{}{}{}{}".format(opcode, rs, rt, imm)
        elif inst_type == "R":
            LOG.info('<note> FOUND R TYPE INSTRUCTION')
            opcode = instruction_helpers.r_type_look_up.get(instruction_parts[0]).get('OPCODE')
            rs = instruction_helpers.to_binary(instruction_parts[2])
            rt = instruction_helpers.to_binary(instruction_parts[3])
            rd = instruction_helpers.to_binary(instruction_parts[1])
            func_code = instruction_helpers.r_type_look_up.get(instruction_parts[0]).get('FUNCTION')
            # shamt is fixed at "00000" for the supported R-type instructions
            binary_instruction = "{}{}{}{}{}{}".format(opcode, rs, rt, rd, "00000", func_code)
        elif inst_type == "J":
            LOG.info('<note>FOUND J TYPE INSTRUCTION')
            raise NotImplementedError("J type instruction encoding is not implemented")
        else:
            raise InstructionNotFound
        LOG.info('<note> BINARY INSTRUCTION {}\n'.format(binary_instruction))
        return binary_instruction
| 2,048 | 631 |
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from chameleon.decorators import terms_required
from django.contrib import messages
from django.http import (
Http404,
HttpResponseForbidden,
HttpResponse,
HttpResponseRedirect,
HttpResponseNotAllowed,
JsonResponse,
)
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from django import forms
from datetime import datetime
from django.conf import settings
from .models import Project, ProjectExtras
from projects.serializer import ProjectExtrasJSONSerializer
from django.contrib.auth.models import User
from django.views.decorators.http import require_POST
from .forms import (
ProjectCreateForm,
ProjectAddUserForm,
AllocationCreateForm,
EditNicknameForm,
AddBibtexPublicationForm,
)
from django.db import IntegrityError
import re
import logging
import json
from keystoneclient.v3 import client as ks_client
from keystoneauth1 import adapter
from django.conf import settings
import uuid
import sys
from chameleon.keystone_auth import admin_ks_client, sync_projects, get_user
from util.project_allocation_mapper import ProjectAllocationMapper
logger = logging.getLogger("projects")
def project_pi_or_admin_or_superuser(user, project):
    """Return True when *user* may administer *project*: a superuser, a member
    of the "Allocation Admin" group, or the project's PI."""
    if user.is_superuser:
        return True
    is_allocation_admin = (
        user.groups.filter(name="Allocation Admin").count() == 1
    )
    if is_allocation_admin:
        return True
    return user.username == project.pi.username
def project_member_or_admin_or_superuser(user, project, project_user):
    """Return True when *user* is a project admin/PI (see
    project_pi_or_admin_or_superuser) or appears in *project_user*."""
    if project_pi_or_admin_or_superuser(user, project):
        return True
    return any(user.username == member.username for member in project_user)
@login_required
def user_projects(request):
    """Render the list of projects the logged-in user belongs to."""
    username = request.user.username
    mapper = ProjectAllocationMapper(request)
    user = mapper.get_user(username)
    context = {
        "is_pi_eligible": user["piEligibility"].lower() == "eligible",
        "username": username,
        "projects": mapper.get_user_projects(username, to_pytas_model=True),
    }
    return render(request, "projects/user_projects.html", context)
@login_required
def view_project(request, project_id):
    """Display a project's detail page and handle membership/nickname updates.

    GET renders the project with its allocations and member list.  POST is
    honored only for the PI, allocation admins, or superusers, and supports
    three actions: ``add_user``, ``del_user`` and ``nickname``.

    Raises:
        Http404: if the project does not exist or is not a Chameleon project.
        PermissionDenied: if the requesting user is not a member or admin.
    """
    mapper = ProjectAllocationMapper(request)
    try:
        project = mapper.get_project(project_id)
        if project.source != "Chameleon":
            raise Http404("The requested project does not exist!")
    except Exception as e:
        logger.error(e)
        raise Http404("The requested project does not exist!")
    form = ProjectAddUserForm()
    nickname_form = EditNicknameForm()
    pubs_form = AddBibtexPublicationForm()
    if request.POST and project_pi_or_admin_or_superuser(request.user, project):
        form = ProjectAddUserForm()
        if "add_user" in request.POST:
            form = ProjectAddUserForm(request.POST)
            if form.is_valid():
                try:
                    add_username = form.cleaned_data["username"]
                    if mapper.add_user_to_project(project, add_username):
                        # keep Keystone in sync with the new membership
                        sync_project_memberships(request, add_username)
                        messages.success(
                            request, f'User "{add_username}" added to project!'
                        )
                        form = ProjectAddUserForm()
                except Exception as e:
                    logger.exception("Failed adding user")
                    messages.error(
                        request,
                        (
                            "Unable to add user. Confirm that the username is "
                            "correct and corresponds to a current Chameleon user."
                        ),
                    )
            else:
                messages.error(
                    request,
                    (
                        "There were errors processing your request. "
                        "Please see below for details."
                    ),
                )
        elif "del_user" in request.POST:
            try:
                del_username = request.POST["username"]
                # Ensure that it's not possible to remove the PI
                if del_username == project.pi.username:
                    raise PermissionDenied(
                        "Removing the PI from the project is not allowed."
                    )
                if mapper.remove_user_from_project(project, del_username):
                    sync_project_memberships(request, del_username)
                    messages.success(
                        request, 'User "%s" removed from project' % del_username
                    )
            except PermissionDenied as exc:
                messages.error(request, exc)
            except Exception:
                # narrowed from a bare ``except:`` so system-exiting
                # exceptions (SystemExit, KeyboardInterrupt) propagate
                logger.exception("Failed removing user")
                messages.error(
                    request,
                    "An unexpected error occurred while attempting "
                    "to remove this user. Please try again",
                )
        elif "nickname" in request.POST:
            nickname_form = edit_nickname(request, project_id)
    users = mapper.get_project_members(project)
    if not project_member_or_admin_or_superuser(request.user, project, users):
        raise PermissionDenied
    # Allocation timestamps may come back as ISO-8601 strings; normalize them
    # to datetimes for the template.
    for a in project.allocations:
        if a.start and isinstance(a.start, str):
            a.start = datetime.strptime(a.start, "%Y-%m-%dT%H:%M:%SZ")
        if a.dateRequested:
            if isinstance(a.dateRequested, str):
                a.dateRequested = datetime.strptime(
                    a.dateRequested, "%Y-%m-%dT%H:%M:%SZ"
                )
        if a.dateReviewed:
            if isinstance(a.dateReviewed, str):
                a.dateReviewed = datetime.strptime(a.dateReviewed, "%Y-%m-%dT%H:%M:%SZ")
        if a.end:
            if isinstance(a.end, str):
                a.end = datetime.strptime(a.end, "%Y-%m-%dT%H:%M:%SZ")
    # Merge portal user details (email, names) into the membership list.
    user_mashup = []
    for u in users:
        user = {
            "username": u.username,
            "role": u.role,
        }
        try:
            portal_user = User.objects.get(username=u.username)
            user["email"] = portal_user.email
            user["first_name"] = portal_user.first_name
            user["last_name"] = portal_user.last_name
        except User.DoesNotExist:
            logger.info("user: " + u.username + " not found")
        user_mashup.append(user)
    return render(
        request,
        "projects/view_project.html",
        {
            "project": project,
            "project_nickname": project.nickname,
            "users": user_mashup,
            "is_pi": request.user.username == project.pi.username,
            "form": form,
            "nickname_form": nickname_form,
            "pubs_form": pubs_form,
        },
    )
def set_ks_project_nickname(chargeCode, nickname):
    """Rename the Keystone project matching *chargeCode* to *nickname* in
    every configured OpenStack region."""
    for region in settings.OPENSTACK_AUTH_REGIONS:
        ks_admin = admin_ks_client(region=region)
        candidates = ks_admin.projects.list(domain=ks_admin.user_domain_id)
        match = next(
            (p for p in candidates
             if getattr(p, "charge_code", None) == chargeCode),
            None,
        )
        logger.info(
            "Assigning nickname {0} to project with charge code {1} at {2}".format(
                nickname, chargeCode, region
            )
        )
        if match:
            ks_admin.projects.update(match, name=nickname)
            logger.info(
                "Successfully assigned nickname {0} to project with charge code {1} at {2}".format(
                    nickname, chargeCode, region
                )
            )
def sync_project_memberships(request, username):
    """Re-sync a user's Keystone project memberships.

    This calls utils.auth.keystone_auth.sync_projects under the hood, which
    will dynamically create missing projects as well.

    Args:
        request (Request): the parent request; used for region detection.
        username (str): the username to sync memberships for.

    Return:
        List[keystone.Project]: a list of Keystone projects the user is a
        member of.  An empty list on any failure.
    """
    mapper = ProjectAllocationMapper(request)
    try:
        ks_admin = admin_ks_client(request=request)
        ks_user = get_user(ks_admin, username)
        if not ks_user:
            logger.error(
                (
                    "Could not fetch Keystone user for {}, skipping membership syncing".format(
                        username
                    )
                )
            )
            # previously a bare ``return`` (None); now consistent with the
            # exception path below, which returns a list
            return []
        active_projects = mapper.get_user_projects(
            username, alloc_status=["Active"], to_pytas_model=True
        )
        return sync_projects(ks_admin, ks_user, active_projects)
    except Exception as e:
        logger.error("Could not sync project memberships for %s: %s", username, e)
        return []
@login_required
@terms_required("project-terms")
def create_allocation(request, project_id, allocation_id=-1):
    """Render and process the allocation request form for a project.

    Only PI-eligible users may request allocations.  A positive
    ``allocation_id`` treats the request as a renewal/resubmission of that
    existing allocation; otherwise a new allocation is requested.
    """
    mapper = ProjectAllocationMapper(request)
    user = mapper.get_user(request.user.username)
    if user["piEligibility"].lower() != "eligible":
        messages.error(
            request,
            "Only PI Eligible users can request allocations. If you would "
            "like to request PI Eligibility, please "
            '<a href="/user/profile/edit/">submit a PI Eligibility '
            "request</a>.",
        )
        return HttpResponseRedirect(reverse("projects:user_projects"))
    project = mapper.get_project(project_id)
    allocation = None
    allocation_id = int(allocation_id)
    if allocation_id > 0:
        for a in project.allocations:
            if a.id == allocation_id:
                allocation = a
    # goofiness that we should clean up later; requires data cleansing
    # (abstract/justification/funding are packed into one description field)
    abstract = project.description
    if "--- Supplemental details ---" in abstract:
        additional = abstract.split("\n\n--- Supplemental details ---\n\n")
        abstract = additional[0]
        additional = additional[1].split("\n\n--- Funding source(s) ---\n\n")
        justification = additional[0]
        if len(additional) > 1:
            funding_source = additional[1]
        else:
            funding_source = ""
    elif allocation:
        justification = allocation.justification
        if "--- Funding source(s) ---" in justification:
            parts = justification.split("\n\n--- Funding source(s) ---\n\n")
            justification = parts[0]
            funding_source = parts[1]
        else:
            funding_source = ""
    else:
        justification = ""
        funding_source = ""
    if request.POST:
        form = AllocationCreateForm(
            request.POST,
            initial={
                "description": abstract,
                "supplemental_details": justification,
                "funding_source": funding_source,
            },
        )
        if form.is_valid():
            allocation = form.cleaned_data.copy()
            allocation["computeRequested"] = 20000
            # Also update the project
            project.description = allocation.pop("description", None)
            supplemental_details = allocation.pop("supplemental_details", None)
            # downgraded from logger.error: this is diagnostic output, not an error
            logger.debug(supplemental_details)
            funding_source = allocation.pop("funding_source", None)
            # supplemental details are required; substitute a placeholder
            if not supplemental_details:
                supplemental_details = "(none)"
            logger.debug(supplemental_details)
            if funding_source:
                allocation[
                    "justification"
                ] = "%s\n\n--- Funding source(s) ---\n\n%s" % (
                    supplemental_details,
                    funding_source,
                )
            else:
                allocation["justification"] = supplemental_details
            allocation["projectId"] = project_id
            allocation["requestorId"] = mapper.get_portal_user_id(request.user.username)
            allocation["resourceId"] = "39"
            if allocation_id > 0:
                allocation["id"] = allocation_id
            try:
                logger.info(
                    "Submitting allocation request for project %s: %s"
                    % (project.id, allocation)
                )
                updated_project = mapper.save_project(project.as_dict())
                mapper.save_allocation(
                    allocation, project.chargeCode, request.get_host()
                )
                messages.success(request, "Your allocation request has been submitted!")
                return HttpResponseRedirect(
                    reverse("projects:view_project", args=[updated_project["id"]])
                )
            except Exception:
                # narrowed from a bare ``except:`` so system-exiting
                # exceptions propagate
                logger.exception("Error creating allocation")
                form.add_error(
                    "__all__", "An unexpected error occurred. Please try again"
                )
        else:
            form.add_error(
                "__all__",
                "There were errors processing your request. "
                "Please see below for details.",
            )
    else:
        form = AllocationCreateForm(
            initial={
                "description": abstract,
                "supplemental_details": justification,
                "funding_source": funding_source,
            }
        )
    context = {
        "form": form,
        "project": project,
        "alloc_id": allocation_id,
        "alloc": allocation,
    }
    return render(request, "projects/create_allocation.html", context)
@login_required
@terms_required("project-terms")
def create_project(request):
    """Create a new project plus its initial startup allocation.

    Only PI-eligible users may create projects. On success the user is
    redirected to the new project's detail page; on failure the form is
    re-rendered with errors attached.
    """
    mapper = ProjectAllocationMapper(request)
    form_args = {"request": request}
    user = mapper.get_user(request.user.username)
    if user["piEligibility"].lower() != "eligible":
        messages.error(
            request,
            "Only PI Eligible users can create new projects. "
            "If you would like to request PI Eligibility, please "
            '<a href="/user/profile/edit/">submit a PI Eligibility '
            "request</a>.",
        )
        return HttpResponseRedirect(reverse("projects:user_projects"))
    if request.POST:
        form = ProjectCreateForm(request.POST, **form_args)
        if form.is_valid():
            # title, description, typeId, fieldId
            project = form.cleaned_data.copy()
            # let's check that any provided nickname is unique
            project["nickname"] = project["nickname"].strip()
            nickname_valid = (
                project["nickname"]
                and ProjectExtras.objects.filter(nickname=project["nickname"]).count()
                < 1
                and Project.objects.filter(nickname=project["nickname"]).count() < 1
            )
            if not nickname_valid:
                form.add_error("__all__", "Project nickname unavailable")
                return render(request, "projects/create_project.html", {"form": form})
            project.pop("accept_project_terms", None)
            # pi
            pi_user_id = mapper.get_portal_user_id(request.user.username)
            project["piId"] = pi_user_id
            # allocations
            allocation = {
                "resourceId": 39,
                "requestorId": pi_user_id,
                "computeRequested": 20000,
            }
            supplemental_details = project.pop("supplemental_details", None)
            funding_source = project.pop("funding_source", None)
            # Justification is required downstream, so substitute a
            # placeholder when the user left it blank.
            if not supplemental_details:
                supplemental_details = "(none)"
            if funding_source:
                allocation[
                    "justification"
                ] = "%s\n\n--- Funding source(s) ---\n\n%s" % (
                    supplemental_details,
                    funding_source,
                )
            else:
                allocation["justification"] = supplemental_details
            project["allocations"] = [allocation]
            # startup
            project["typeId"] = 2
            # source
            project["source"] = "Chameleon"
            try:
                created_project = mapper.save_project(project, request.get_host())
                logger.info("newly created project: " + json.dumps(created_project))
                messages.success(request, "Your project has been created!")
                return HttpResponseRedirect(
                    reverse("projects:view_project", args=[created_project["id"]])
                )
            except Exception:
                # Narrowed from a bare `except:` so SystemExit and
                # KeyboardInterrupt are no longer swallowed.
                logger.exception("Error creating project")
                form.add_error(
                    "__all__", "An unexpected error occurred. Please try again"
                )
        else:
            form.add_error(
                "__all__",
                "There were errors processing your request. "
                "Please see below for details.",
            )
    else:
        form = ProjectCreateForm(**form_args)
    return render(request, "projects/create_project.html", {"form": form})
@login_required
def edit_project(request):
    """Render the static project-edit page (no form processing here)."""
    return render(request, "projects/edit_project.html", {})
@require_POST
def edit_nickname(request, project_id):
    """Handle a POST that updates a project's nickname.

    NOTE(review): this function returns an EditNicknameForm rather than an
    HttpResponse — presumably it is called as a helper by another view that
    embeds the returned form; confirm against callers.
    """
    mapper = ProjectAllocationMapper(request)
    project = mapper.get_project(project_id)
    # Only the PI (or staff) may rename the project.
    if not project_pi_or_admin_or_superuser(request.user, project):
        messages.error(request, "Only the project PI can update nickname.")
        return EditNicknameForm()
    form = EditNicknameForm(request.POST)
    # NOTE(review): Django's base Form.is_valid() takes no arguments, so
    # EditNicknameForm presumably overrides is_valid(request) — confirm.
    if form.is_valid(request):
        # try to update nickname
        try:
            nickname = form.cleaned_data["nickname"]
            ProjectAllocationMapper.update_project_nickname(project_id, nickname)
            # Reset the form so the UI shows a clean field after success.
            form = EditNicknameForm()
            set_ks_project_nickname(project.chargeCode, nickname)
            messages.success(request, "Update Successful")
        # NOTE(review): bare except hides unrelated failures (e.g. network
        # errors) behind a "Nickname not available" message.
        except:
            messages.error(request, "Nickname not available")
    else:
        messages.error(request, "Nickname not available")
    return form
def get_extras(request):
    """JSON API endpoint returning all ProjectExtras rows.

    Access is guarded by a shared secret token compared against
    ``settings.PROJECT_EXTRAS_API_TOKEN``; mismatch or absence of either
    token yields 403.
    """
    # dict.get already returns None when the key is absent; the previous
    # `x if x else None` conditional was redundant.
    provided_token = request.GET.get("token")
    stored_token = getattr(settings, "PROJECT_EXTRAS_API_TOKEN", None)
    if not provided_token or not stored_token or provided_token != stored_token:
        logger.error("Project Extras json api Access Token validation failed")
        return HttpResponseForbidden()
    logger.info("Get all project extras json endpoint requested")
    response = {"status": "success"}
    try:
        serializer = ProjectExtrasJSONSerializer()
        response["message"] = ""
        extras = json.loads(serializer.serialize(ProjectExtras.objects.all()))
        response["result"] = extras
    except ProjectExtras.DoesNotExist:
        # NOTE(review): QuerySet.all() does not raise DoesNotExist, so this
        # branch looks unreachable — confirm before removing.
        response["message"] = "Does not exist."
        response["result"] = None
    return JsonResponse(response)
| 19,377 | 5,144 |
class Solution:
    def findTheDistanceValue(self, arr1: List[int], arr2: List[int], d: int) -> int:
        """Count elements of arr1 having no element of arr2 within
        absolute distance d.

        Fix: the previous `any(c for c in arr2 if abs(c-element) <= d)`
        yielded the *values* of arr2, so a matching element equal to 0 was
        falsy and wrongly ignored (e.g. arr1=[5], arr2=[0], d=10 returned
        1 instead of 0). `all(...)` on the condition itself is correct.
        """
        distance = 0
        for element in arr1:
            if all(abs(c - element) > d for c in arr2):
                distance += 1
        return distance
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# pipe.py
#
# Copyright 2014 Giorgio Gilestro <gg@kozak>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# Listen from pipefile
# e.g.: echo "TEST COMMAND" > /tmp/pipefile
import os, tempfile
import logging
import threading
class pipe():
    """Listens on a named FIFO and forwards each message to a queue.

    Messages are written externally, e.g.:
        echo "TEST COMMAND" > /tmp/pipefile
    """
    def __init__(self, pipefile, queue, actions):
        """
        Create the FIFO (if needed) and start the listening thread.

        :param pipefile: path of the FIFO to listen on
        :param queue: queue object receiving ("pipe", message) tuples
        :param actions: shared actions registry; a "pipe" entry is added
        """
        self.pipefile = pipefile
        self.queue = queue
        actions["pipe"] = {}
        self.__makefifo()
        self.listening_thread = threading.Thread(target=self.listen_from_pipe)
        #self.listening_thread.daemon = True
        self.isListening = True
        self.listening_thread.start()
    def transmit(self, received):
        """Forward a received message to the queue, tagged with its origin."""
        cmd = ("pipe", received)
        self.queue.put(cmd)
    def __makefifo(self):
        """Create the FIFO; return True on success, False on failure
        (typically because the file already exists).
        """
        try:
            os.mkfifo(self.pipefile)
            logging.debug("Listening to FIFO Pipe at %s" % self.pipefile)
            return True
        except OSError:
            # Narrowed from a bare `except:` — mkfifo failures are OSError,
            # and the bare form also swallowed KeyboardInterrupt/SystemExit.
            logging.debug("Error creating FIFO Pipe %s. File already existing?" % self.pipefile)
            return False
    def listen_from_pipe(self):
        """Blockingly read messages from the FIFO until isListening is cleared."""
        while self.isListening:
            logging.debug("Listening from PIPE %s" % self.pipefile)
            # open() blocks until a writer connects to the FIFO.
            with open(self.pipefile) as fifo:
                self.transmit(fifo.read().strip())
if __name__ == '__main__':
    # Smoke test. pipe() requires (pipefile, queue, actions): the previous
    # two-argument call raised TypeError (missing 'actions'). A plain dict
    # stands in for the actions registry; "none" keeps the original
    # placeholder queue (only used if a message actually arrives).
    p = pipe("pipefile", "none", {})
| 2,193 | 687 |
import os
import sys
# A Python script that can be used to determine which files that require
# patching have been touched between two points in the repo.
def shell(command):
    """Run *command* through the shell and return its captured stdout."""
    with os.popen(command) as stream:
        return stream.read()
def get_patches():
    """Map each patch name under android-patches/patches to the list of
    repo-relative files it patches.

    Paths look like ``android-patches/patches/<name>/<file...>``; entries
    with fewer than three slashes are ignored.
    """
    patches = {}
    listing = shell('find android-patches/patches -type f')
    for path in listing.splitlines():
        slashes = [i for (i, ch) in enumerate(path) if ch == '/']
        if len(slashes) < 3:
            continue
        name = path[slashes[1] + 1:slashes[2]]
        filename = path[slashes[2] + 1:]
        patches.setdefault(name, []).append(filename)
    return patches
def get_touched_files(branch_from, branch_to):
    """Return the files changed between two git revisions.

    Takes the last tab-separated field of each ``git diff --name-status``
    line (the new path for renames).
    """
    diff = shell('git diff --name-status {0} {1}'.format(branch_from, branch_to))
    return [line.split('\t')[-1] for line in diff.splitlines()]
if __name__ == '__main__':
    # Usage: android-patch.py <commit> <commit>
    if len(sys.argv) != 3:
        sys.stderr.write('Usage: android-patch.py <commit> <commit>')
        sys.exit(1)
    patches = get_patches()
    touched_files = set(get_touched_files(sys.argv[1], sys.argv[2]))
    for patch_name in sorted(patches.keys()):
        # Files that belong to this patch AND changed between the two commits.
        patched_and_touched = [file for file in patches[patch_name] \
            if file in touched_files]
        if len(patched_and_touched) > 0:
            # Underlined patch name (ANSI escape), then its touched files.
            print('\033[4m{0}\033[0m'.format(patch_name))
            for file in patched_and_touched:
                print('* {0}'.format(file))
| 1,608 | 523 |
from pyautofinance.common.strategies.bracket_strategy import BracketStrategy
class LiveTradingTestStrategy(BracketStrategy):
    """Trivial strategy for exercising live trading plumbing: it always
    signals a long entry and never a short one.
    """
    def _open_long_condition(self) -> bool:
        """Unconditionally open long positions."""
        return True
    def _open_short_condition(self) -> bool:
        """Never open short positions."""
        return False
| 259 | 78 |
import json
from docopt import docopt
from bigip_utils.logger import logger
from bigip_utils.bigip import *
#
# This script enforces all attack signatures that are ready to be enforced:
# https://support.f5.com/csp/article/K60640453?utm_source=f5support&utm_medium=RSS
#
__doc__ = """
Usage:
enforce-ready-signatures.py [-hvndsb] [-p POLICY_NAME] -l LIST_FILE
Options:
-h --help Show this screen.
-v --version Show version.
-n --dry-run Show actions. Do not execute them.
-s --sync Sync devices after changes.
-b --backup-config Create and download a UCS file.
-d --dev-devices-only Skip non DEV devices.
-l LIST_FILE --list-file=LIST_FILE CSV file with list of bigips. Format: hostname,ip,username,password
-p POLICY_NAME --policy-name=POLICY_NAME Name of a policy to act on. [default: all]
"""
VERSION = "0.2"
def enforce_ready_signatures(bigip, id):
    """Turn off staging for every attack signature on policy *id* that has
    no pending suggestions and is past its enforcement-readiness period.

    Returns the number of signatures switched to enforced.
    """
    endpoint = f'https://{bigip.ip}/mgmt/tm/asm/policies/{id}/signatures'
    query = {
        '$select': '',
        '$filter': 'hasSuggestions eq false AND wasUpdatedWithinEnforcementReadinessPeriod eq false and performStaging eq true',
    }
    payload = json.dumps({'performStaging': 'false'})
    response = bigip.patch(endpoint, params=query, data=payload)
    return int(response.get('totalItems', 0))
def get_ready_signatures_count(bigip, id):
    """Count the staged signatures on policy *id* that are ready to be
    enforced, without fetching them all.

    Queries with ``$top=1`` and reads ``totalPages`` — presumably one item
    per page, so the page count equals the matching-signature count.
    """
    endpoint = f'https://{bigip.ip}/mgmt/tm/asm/policies/{id}/signatures'
    query = {
        '$select': '',
        '$filter': 'hasSuggestions eq false AND wasUpdatedWithinEnforcementReadinessPeriod eq false and performStaging eq true',
        '$top': '1',
    }
    response = bigip.get(endpoint, params=query)
    return int(response.get('totalPages', 0))
def process_device(bigip, dry_run=True, policy=None, sync_device_group=None):
    """Enforce ready attack signatures on the blocking ASM policies of a device.

    :param bigip: connected BigIP device object
    :param dry_run: when True, only log what would be enforced
    :param policy: policy name to act on, or 'all' for every policy
    :param sync_device_group: device group to sync after changes (or None)
    :returns: total number of signatures enforced across processed policies
    """
    policies_virtuals = get_virtuals_asm_policies(bigip)
    policies=bigip.get_asm_policies()
    enforced_signatures_count = 0
    ready_signatures = {}
    for i in policies:
        # Parent (template) policies are never enforced directly.
        if(i['type'] == 'parent'):
            continue
        policy_id = i['id']
        policy_name = i['name']
        policy_virtuals = policies_virtuals[policy_name]
        # Skip policies not selected by the --policy-name option.
        if not policy == 'all' and not policy == policy_name:
            continue
        # Only policies in blocking mode are processed.
        if(i['enforcementMode'] == 'blocking'):
            ready_signatures[policy_name] = get_ready_signatures_count(
                bigip, policy_id)
            if ready_signatures[policy_name] and dry_run:
                logger.info(
                    f"{bigip.hostname}: [DRY-RUN] : {policy_name}: Enforcing {ready_signatures[policy_name]} ready attack signatures. VIPs={len(policy_virtuals)}")
            elif ready_signatures[policy_name]:
                logger.info(
                    f"{bigip.hostname}: {policy_name}: Enforcing {ready_signatures[policy_name]} ready attack signatures. VIPs={len(policy_virtuals)}")
                count = enforce_ready_signatures(bigip, policy_id)
                if count:
                    # Changes only take effect after the policy is re-applied.
                    r = apply_asm_policy(bigip, policy_id)
                    if not r:
                        logger.error(
                            f"{bigip.hostname}: Applying policy {policy_name} did not complete successfully.")
                    enforced_signatures_count += count
    # Sync the HA device group only when something actually changed.
    if enforced_signatures_count and sync_device_group:
        logger.info(f"{bigip.hostname}: Syncing device group.")
        sync_devices(bigip, device_group=sync_device_group)
    return enforced_signatures_count
if __name__ == "__main__":
arguments = docopt(__doc__, version=VERSION)
devices_file = arguments['--list-file']
dry_run = arguments['--dry-run']
dev_only = arguments['--dev-devices-only']
policy_name = arguments['--policy-name']
sync = arguments['--sync']
backup_config = arguments['--backup-config']
for (hostname, ip, username, password) in get_bigips(devices_file, dev_only=dev_only):
b = BigIP(hostname, username, password, ip=ip, verify_ssl=False)
logger.info(
f"{b.hostname}: Started. Policy: {policy_name} Dry-Run: {dry_run}")
proceed = True
check_active(b)
device_group = get_asm_sync_group(b)
if not device_group and not check_standalone(b):
logger.error(
f"{b.hostname}: Could not find ASM device group name. {device_group}")
proceed = False
elif device_group:
logger.info(f"{b.hostname}: Sync Device Group: {device_group}")
if (not b.token):
logger.warning(
f'{b.hostname}: Unable to obtain authentication token')
proceed = False
if not check_active(b):
logger.warning(f'{b.hostname}: Not active, skipping device.')
proceed = False
enforced_signatures_count = 0
get_ucs(b,overwrite=True)
if proceed:
if backup_config and not dry_run:
get_ucs(b,overwrite=True)
enforced_signatures_count = process_device(
b, dry_run=dry_run, policy=policy_name, sync_device_group=device_group)
logger.info(
f"{b.hostname}: Finished. enforced signatures count: {enforced_signatures_count}")
logger.info("Done.")
| 5,624 | 1,709 |
# sysinstall.py vi:ts=4:sw=4:expandtab:
#
# Copyright (c) 2006-2008 Three Rings Design, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright owner nor the names of contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import copy
import os
import string
import farb
class ConfigSection(object):
    """
    Base class providing shared serialization helpers for install.cfg(8)
    configuration sections.
    """
    def _serializeOptions(self, output):
        """
        Write every install.cfg option of this section to *output*.

        Concrete subclasses MUST define a ``sectionOptions`` class attribute
        listing all valid options in the order sysinstall(8) expects. Each
        option name that exists as an attribute on ``self`` is emitted as
        ``name=value``; missing attributes are silently skipped.

        @param output: Open, writable file handle
        """
        for name in self.sectionOptions:
            if not hasattr(self, name):
                continue
            output.write('%s=%s\n' % (name, getattr(self, name)))
    def _serializeCommands(self, output, commands=None):
        """
        Write out one command per line.

        @param output: Open, writable file handle
        @param commands: Commands to output. Defaults to ``sectionCommands``.
        """
        for entry in (commands or self.sectionCommands):
            output.write('%s\n' % (entry,))
class NetworkConfig(ConfigSection):
    """
    install.cfg(8) network configuration section.
    """
    # Option names, in the order sysinstall(8) expects them.
    sectionOptions = (
        'hostname',      # New Server's Host Name
        'domainname',    # New Server's Domain Name
        'netDev',        # Network Interface
        'nfs',           # NFS Installation Media
        'tryDHCP'        # DHCP an address
    )
    # Default option values
    tryDHCP = 'YES'
    # Section commands
    sectionCommands = (
        'mediaSetNFS',
    )
    def __init__(self, section, config):
        """
        Build the network configuration for one installation.

        @param section: ZConfig Installation section
        @param config: ZConfig Farbot Config
        """
        # Per-install options
        self.hostname = section.hostname
        self.domainname = section.domain
        self.netDev = section.networkdevice
        # FarBot-wide options: NFS media is <host>:<installroot>/<release>
        self.nfshost = config.Releases.nfshost
        self.nfspath = os.path.join(config.Releases.installroot, section.release.lower())
        self.nfs = '%s:%s' % (self.nfshost, self.nfspath)
    def serialize(self, output):
        """Emit this section's options followed by its commands."""
        self._serializeOptions(output)
        self._serializeCommands(output)
class DistSetConfig(ConfigSection):
    """
    install.cfg(8) distribution set configuration section.
    """
    # Section option names
    sectionOptions = (
        'dists',    # Install these distribution sets
    )
    # Section commands
    sectionCommands = (
        'distSetCustom',
    )
    def __init__(self, release, config):
        """
        Initialize distribution set configuration for a given
        installation.

        @param release: ZConfig Release section
        @param config: ZConfig Farbot Config
        """
        # Flatten lists of dists, source dists, and kernel dists, inserting the
        # sub lists after src or kernels. Not sure if it really necessary to have
        # those sub lists in that exact location, but let's be safe.
        #
        # Fix: use ' '.join() instead of string.join(); the latter was removed
        # from the string module in Python 3 and defaulted to a single-space
        # separator anyway, so behavior is identical on Python 2.
        self.dists = copy.copy(release.dists)
        if self.dists.count('src') > 0:
            self.dists.insert(self.dists.index('src') + 1, ' '.join(release.sourcedists))
        if self.dists.count('kernels') > 0:
            self.dists.insert(self.dists.index('kernels') + 1, ' '.join(release.kerneldists))
        self.dists = ' '.join(self.dists)
    def serialize(self, output):
        """Emit this section's options followed by its commands."""
        self._serializeOptions(output)
        self._serializeCommands(output)
class DiskLabelConfig(ConfigSection):
    """
    install.cfg(8) FreeBSD labels (partition) configuration section.
    """
    # Option names are generated per-partition in __init__.
    # Section commands
    sectionCommands = (
        'diskLabelEditor',
    )
    def __init__(self, section, diskDevice):
        """
        Build a disk label configuration from a partition map.

        @param section: ZConfig PartitionMap section
        @param diskDevice: Device to label (eg ad0s1)
        """
        self.sectionOptions = []
        self.diskDevice = diskDevice
        for part in section.Partition:
            # Option name is <device>-<partition number>.
            option = '%s-%s' % (self.diskDevice, part.getSectionName())
            self.sectionOptions.append(option)
            # Value is "<type> <size> <mount>", plus a trailing 1 to enable
            # soft updates.
            if part.softupdates:
                setattr(self, option, "%s %d %s 1" % (part.type, part.size, part.mount))
            else:
                setattr(self, option, "%s %d %s" % (part.type, part.size, part.mount))
        # Ensure that partitions are in order (1 ... 9)
        self.sectionOptions.sort()
    def serialize(self, output):
        """Emit this section's options followed by its commands."""
        self._serializeOptions(output)
        self._serializeCommands(output)
class DiskPartitionConfig(ConfigSection):
    """
    install.cfg(8) BIOS partition configuration section.
    """
    # Section option names
    sectionOptions = (
        'disk',         # Disk to partition
        'partition',    # Partitioning method
        'bootManager',  # Boot manager to install
    )
    # Always consume the entire disk ...
    partition = 'all'
    # ... with the standard boot manager.
    bootManager = 'standard'
    # Section commands
    sectionCommands = (
        'diskPartitionEditor',
    )
    def __init__(self, section, config):
        """
        Build a disk partition configuration for one Disk section.

        @param section: ZConfig Disk section
        @param config: ZConfig Farbot Config
        """
        self.disk = section.getSectionName()
        # Locate the named partition map and label slice 1 of this disk.
        # NOTE(review): if no map matches, diskLabelConfig stays None and
        # serialize() will raise AttributeError — the original comment said
        # "complain loudly", but nothing does; confirm intended.
        self.diskLabelConfig = None
        for candidate in config.Partitions.PartitionMap:
            if (section.partitionmap.lower() == candidate.getSectionName()):
                self.diskLabelConfig = DiskLabelConfig(candidate, self.disk + 's1')
                break
    def serialize(self, output):
        """Emit options and commands, then the nested label section."""
        self._serializeOptions(output)
        self._serializeCommands(output)
        self.diskLabelConfig.serialize(output)
class SystemCommandConfig(ConfigSection):
    """
    install.cfg(8) system command configuration section.
    """
    # Section option names
    sectionOptions = (
        'command',  # Command name and arguments
    )
    # Section commands
    sectionCommands = (
        'system',
    )
    def __init__(self, cmd):
        """
        Record the shell command to run during installation.

        @param cmd: ZConfig command key value
        """
        self.cmd = cmd
        # Stringify so non-string command values serialize cleanly.
        self.command = "%s" % (cmd,)
    def serialize(self, output):
        """Emit this section's options followed by its commands."""
        self._serializeOptions(output)
        self._serializeCommands(output)
class PackageConfig(SystemCommandConfig):
    """
    install.cfg(8) package install configuration section.

    Sysinstall's own dependency handling relies on an INDEX that does not
    necessarily reflect reality, so we bypass it completely: packages are
    installed post-install by shelling out to pkg_add(8) through a
    SystemCommand wrapper script.
    """
    # Path of the helper script inside the install media.
    installPackageScript = os.path.join('/dist', os.path.basename(farb.INSTALL_PACKAGE_SH))
    def __init__(self, section):
        """
        Build the install command for a single package.

        @param section: ZConfig Package section
        """
        # Runs: /dist/install_package.sh <package name>
        self.package = section.package
        super(PackageConfig, self).__init__('%s %s' % (self.installPackageScript, self.package))
class InstallationConfig(ConfigSection):
    """
    InstallationConfig instances represent a
    complete install.cfg file for sysinstall(8)
    """
    # Section option names
    sectionOptions = (
        'debug',
        'nonInteractive',
        'noWarn'
    )
    # Defaults
    debug = 'YES'
    nonInteractive = 'YES'
    noWarn = 'YES'
    # Commands needed to start up the interactive partitioner
    interactivePartitionCommands = (
        'diskInteractive="YES"',  # Partition and label disks interactively
        'diskPartitionEditor',    # Run disk partition (MBR) editor
        'diskLabelEditor'         # Run disk label editor
    )
    # Pre-package commands
    prePackageCommands = (
        'diskLabelCommit',  # Write disk labels to disk
        'installCommit'     # Write install distribution to disk
    )
    # Section commands
    sectionCommands = (
        'shutdown',
    )
    def __init__(self, section, config):
        """
        Initialize a new installation configuration.

        @param section: ZConfig Installation section
        @param config: ZConfig Farbot Config
        """
        self.name = section.getSectionName()
        # Network configuration
        self.networkConfig = NetworkConfig(section, config)
        # Distribution sets.
        # NOTE(review): if no release matches, distSetConfig is never set
        # and serialize() raises AttributeError — confirm intended.
        for release in config.Releases.Release:
            if release.getSectionName() == section.release.lower():
                self.distSetConfig = DistSetConfig(release, config)
                break
        # Disks (Partitions and Labels)
        self.diskPartitionConfigs = []
        for disk in section.Disk:
            diskPartitionConfig = DiskPartitionConfig(disk, config)
            self.diskPartitionConfigs.append(diskPartitionConfig)
        # Packages.
        # Fix: packages are now collected only from an actually matching
        # package set. Previously the package loop ran after the search
        # loop against whatever set it stopped on, so a packageset name
        # that matched nothing silently pulled in packages from the *last*
        # set (and the foundPset flag was never checked).
        self.packageConfigs = []
        for psetName in section.packageset:
            for pset in config.PackageSets.PackageSet:
                if (psetName.lower() == pset.getSectionName()):
                    for package in pset.Package:
                        pkgc = PackageConfig(package)
                        self.packageConfigs.append(pkgc)
                    break
        # System Commands
        self.systemCommandConfigs = []
        if (section.PostInstall):
            for cmd in section.PostInstall.command:
                systemCommandConfig = SystemCommandConfig(cmd)
                self.systemCommandConfigs.append(systemCommandConfig)
    def serialize(self, output):
        """Write the complete install.cfg for this installation to *output*."""
        # Global configuration options
        self._serializeOptions(output)
        # Network configuration
        self.networkConfig.serialize(output)
        # Select distribution sets
        self.distSetConfig.serialize(output)
        # Disk formatting
        for disk in self.diskPartitionConfigs:
            disk.serialize(output)
        # If we have no diskPartitionConfigs, partition interactively
        if len(self.diskPartitionConfigs) == 0:
            self._serializeCommands(output, commands=self.interactivePartitionCommands)
        # Commit installation to disk
        self._serializeCommands(output, commands=self.prePackageCommands)
        # Packages
        for pkgc in self.packageConfigs:
            pkgc.serialize(output)
        # System Commands
        for scc in self.systemCommandConfigs:
            scc.serialize(output)
        # Global commands
        self._serializeCommands(output)
| 13,358 | 3,631 |
"""
This module is for fetching information from dynamo about different sites
"""
from .inventory import _get_inventory
def _small_query(sql, *args):
    """
    Open an inventory connection, run a single query, and close it again.

    The connection is now closed in a ``finally`` block, so it is no longer
    leaked when the query raises. The explicit ``sql`` parameter replaces
    the old ``args[0]`` unpacking; all callers pass positionally, so the
    signature change is call-compatible.

    :param str sql: The query to run
    :param args: Parameters to bind into the query
    :returns: Result of the query
    :rtype: list
    """
    mysql_reg = _get_inventory()
    try:
        return mysql_reg.query(sql, *args)
    finally:
        mysql_reg.close()
def site_list():
    """
    :returns: The list of sites dynamo is storing
    :rtype: list
    """
    sql = 'SELECT name FROM sites'
    return _small_query(sql)
_READY = None  # Cached set of sites in "ready" status (filled lazily)
def ready_sites():
    """
    :returns: Set of sites that are in ready status
    :rtype: set
    """
    global _READY  # pylint: disable=global-statement
    if _READY is None:
        rows = _small_query('SELECT name FROM sites WHERE status = "ready"')
        _READY = set(rows)
    return _READY
def get_gfal_location(site):
    """
    :param str site: A site that we want to list with GFAL
    :returns: The host and path needed by the gfal-ls command
    :rtype: str
    """
    rows = _small_query('SELECT backend FROM sites WHERE name=%s', site)
    return rows[0]
| 1,233 | 383 |
import shutil
import os
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from .models import BookCover, get_book_cover_image_path
class Command(BaseCommand):
    help = ""
    def handle(self, *args, **options):
        """Re-shard book cover images into the covers/<x>/<y>/ layout.

        Copies every image whose stored name does not already match the
        sharded layout to its new location, updates the model, and removes
        the old file. Progress is printed every 1000 covers.
        """
        qs = BookCover.objects.exclude(image__iregex=r"^covers/[a-z0-9_-]/[a-z0-9_-]/")
        print(qs.count())
        counter = 0
        for cover in qs:
            path = cover.image.path
            # Repair truncated ".peg" suffixes while extracting the basename.
            name = cover.image.name.split("/")[-1].replace(".peg", ".jpeg")
            new_path = get_book_cover_image_path(cover, name)
            new_path_abs = os.path.join(settings.MEDIA_ROOT, new_path)
            # exist_ok=True only tolerates "already exists"; the previous
            # blanket OSError catch also hid permission errors.
            os.makedirs(os.path.dirname(new_path_abs), exist_ok=True)
            try:
                shutil.copyfile(path, new_path_abs)
            except shutil.SameFileError:
                pass
            cover.image.name = new_path
            cover.save()
            # Guard: when source and destination are the same file (the
            # SameFileError case above), removing `path` would delete the
            # only remaining copy of the image.
            if os.path.abspath(path) != os.path.abspath(new_path_abs):
                os.remove(path)
            counter += 1
            if counter % 1000 == 0:
                print(counter)
| 1,163 | 362 |
"""The admin_history database table.
This is a stopgap representation of changes to the admin table until we have a
group system and a group-based authorization system up and running.
"""
from __future__ import annotations
from datetime import datetime
from sqlalchemy import Column, DateTime, Enum, Index, Integer, String
from sqlalchemy.dialects import postgresql
from ..models.history import AdminChange
from .base import Base
__all__ = ["AdminHistory"]
class AdminHistory(Base):
    """Row recording one change to the admin table (see module docstring)."""
    __tablename__ = "admin_history"
    # Surrogate primary key.
    id: int = Column(Integer, primary_key=True)
    # Username whose admin status was changed.
    username: str = Column(String(64), nullable=False)
    # What happened (add/remove — see AdminChange enum).
    action: AdminChange = Column(Enum(AdminChange), nullable=False)
    # Username of the admin who made the change.
    actor: str = Column(String(64), nullable=False)
    # Client IP of the actor; native INET type on PostgreSQL.
    ip_address: str = Column(
        String(64).with_variant(postgresql.INET, "postgresql"), nullable=False
    )
    # When the change happened.
    event_time: datetime = Column(DateTime, nullable=False)
    # Composite index supporting time-ordered history queries.
    __table_args__ = (Index("admin_history_by_time", "event_time", "id"),)
| 1,002 | 295 |
import traceback
from ..models import ServerError
def build_handled_query(query):
    """Wrap *query* so unexpected exceptions are logged and converted into a
    ServerError result instead of propagating to the GraphQL layer.

    :param query: callable to wrap
    :returns: a callable with the same signature as *query*
    """
    def handled_query(*args, **kwargs):
        # Fix: parameter names were swapped (*kwargs, **args) — harmless at
        # runtime but misleading; now conventional.
        try:
            return query(*args, **kwargs)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit still propagate.
            print('Stack Trace ==>', traceback.format_exc())
            return ServerError()
    return handled_query
| 316 | 85 |
import re
from typing import Any, Dict, Sequence, List, Optional, Tuple
from .base import Parser
from .components import InterestingFinding
from .results import WPScanResults
#################### CLI PARSER ######################
class WPScanCliParser(Parser):
    """Main interface to parse WPScan CLI output.
    - wpscan_output: WPScan output as string.
    - false_positives_strings: List of false positive strings.
    """
    def __init__(self, wpscan_output:str,
                 false_positives_strings:Optional[Sequence[str]]=None) -> None:
        if not wpscan_output:
            wpscan_output = ""
        # Parser config: false positives string and verbosity (not available with cli parser)
        parser_config = dict(
            false_positives_strings=false_positives_strings, show_all_details=False
        )
        super().__init__({'output':wpscan_output}, **parser_config)
        # Parse once up front; the getters below just return the cached lists.
        self._infos, self._warnings, self._alerts = self.parse_cli(wpscan_output)
    def get_infos(self) -> Sequence[str]:
        """ Return all the parsed infos"""
        return self._infos
    def get_warnings(self) -> Sequence[str]:
        """ Return all the parsed warnings"""
        return self._warnings
    def get_alerts(self)-> Sequence[str]:
        """ Return all the parsed alerts"""
        return self._alerts
    def _parse_cli_toogle(self, line:str, warning_on:bool, alert_on:bool) -> Tuple[bool, bool]:
        """Inspect one output line and raise the warning/alert toggles.

        Toggles are sticky across lines of the same message; they are reset
        by the caller at each message boundary.
        """
        # Color parsing: "33m" is the ANSI yellow prefix (warning),
        # "31m" is red (alert).
        if "33m[!]" in line:
            warning_on = True
        elif "31m[!]" in line:
            alert_on = True
        # No color parsing Warnings string are hard coded here
        elif "[!]" in line and any(
            [
                m in line
                for m in [
                    "The version is out of date",
                    "No WPVulnDB API Token given",
                    "You can get a free API token",
                ]
            ]
        ):
            warning_on = True
        elif "[!]" in line:
            alert_on = True
        # Both method with color and no color apply supplementary proccessing
        # Warning for insecure Wordpress and based on interesting findings strings
        if any(
            string in line
            for string in ["Insecure", "Outdated"]
            + InterestingFinding.INTERESTING_FINDING_WARNING_STRINGS
        ):
            warning_on = True
        # Trigger alert based on interesting finding alert strings
        if any(
            string in line
            for string in InterestingFinding.INTERESTING_FINDING_ALERT_STRINGS
        ):
            alert_on = True
        # Lower voice of Vulnerabilities found but not plugin version
        if "The version could not be determined" in line and alert_on:
            alert_on = False
            warning_on = True
        return (warning_on, alert_on)
    def _ignore_false_positives(self, infos:List[str], warnings:List[str], alerts:List[str]) -> Tuple[List[str], List[str], List[str]]:
        """Process false positives"""
        for alert in warnings + alerts:
            if self.is_false_positive(alert):
                # The item lives in exactly one of the two lists; try alerts
                # first and fall back to warnings.
                try:
                    alerts.remove(alert)
                except ValueError:
                    warnings.remove(alert)
                infos.append("[False positive]\n{}".format(alert))
        return infos, warnings, alerts
    def parse_cli(self, wpscan_output:str) -> Tuple[List[str], List[str], List[str]]:
        """Parse the ( messages, warnings, alerts ) from WPScan CLI output string.
        Return results as tuple( messages, warnings, alerts )."""
        # Init scan messages
        (messages, warnings, alerts) = ([], [], [])
        # Init messages toogles
        warning_on, alert_on = False, False
        message_lines = []
        current_message = ""
        # Every blank ("") line will be considered as a message separator
        # (the appended "" guarantees the final message is flushed).
        for line in wpscan_output.splitlines() + [""]:
            # Parse all output lines and build infos, warnings and alerts
            line = line.strip()
            # Parse line
            warning_on, alert_on = self._parse_cli_toogle(line, warning_on, alert_on)
            # Remove colorization anyway after parsing
            line = re.sub(r"(\x1b|\[[0-9][0-9]?m)", "", line)
            # Append line to message. Handle the begin of the message case
            message_lines.append(line)
            # Build message
            current_message = "\n".join(
                [m for m in message_lines if m not in ["", "|"]]
            ).strip()
            # Message separator just a white line.
            # Only if the message if not empty.
            if line.strip() not in [""] or current_message.strip() == "":
                continue
            # End of the message
            # Post process message to separate ALERTS into different messages of same status and add rest of the infos to warnings
            if (alert_on or warning_on) and any(
                s in current_message
                for s in ["vulnerabilities identified", "vulnerability identified"]
            ):
                # Split the message into "|"-delimited sub-messages so each
                # vulnerability becomes a separate alert/warning.
                messages_separated = []
                msg: List[str] = []
                for l in message_lines + ["|"]:
                    if l.strip() == "|":
                        messages_separated.append(
                            "\n".join([m for m in msg if m not in ["", "|"]])
                        )
                        msg = []
                    msg.append(l)
                # Append Vulnerabilities messages to ALERTS and other infos in one message
                vulnerabilities = [
                    m for m in messages_separated if "| [!] Title" in m.splitlines()[0]
                ]
                # Add the plugin infos to warnings or false positive if every vulnerabilities are ignore
                plugin_infos = "\n".join(
                    [
                        m
                        for m in messages_separated
                        if "| [!] Title" not in m.splitlines()[0]
                    ]
                )
                if (
                    len([v for v in vulnerabilities if not self.is_false_positive(v)])
                    > 0
                    and "The version could not be determined" in plugin_infos
                ):
                    warnings.append(
                        plugin_infos + "\nAll known vulnerabilities are listed"
                    )
                else:
                    messages.append(plugin_infos)
                if alert_on:
                    alerts.extend(vulnerabilities)
                elif warning_on:
                    warnings.extend(vulnerabilities)
            elif warning_on:
                warnings.append(current_message)
            # NOTE(review): an alert message WITHOUT the "vulnerabilities
            # identified" marker falls through to plain messages here —
            # confirm that is intended (upstream variants append it to
            # alerts instead).
            else:
                messages.append(current_message)
            # Reset the accumulator for the next message.
            message_lines = []
            current_message = ""
            # Reset Toogle Warning/Alert
            warning_on, alert_on = False, False
        return self._ignore_false_positives(messages, warnings, alerts)
    def get_error(self) -> Optional[str]:
        """Return the "Scan Aborted" line(s) as an error string, or None."""
        if "Scan Aborted" in self.data.get('output', ''):
            return "WPScan failed: {}".format(
                "\n".join(
                    line for line in self.data.get('output', '').splitlines() if "Scan Aborted" in line
                )
            )
        else:
            return None
    def get_results(self) -> WPScanResults:
        """
        Returns a dictionnary structure like
        ::
            {
            'infos':[],
            'warnings':[],
            'alerts':[],
            'summary':{
                'table':None,
                'line':'WPScan result summary: alerts={}, warnings={}, infos={}, error={}'
            },
            'error':None
            }
        """
        results = WPScanResults()
        results["infos"] = self.get_infos()
        results["warnings"] = self.get_warnings()
        results["alerts"] = self.get_alerts()
        results["summary"]["line"] = self.get_summary_line()
        results["error"] = self.get_error()
        return results
| 8,153 | 2,154 |
from typing import Any, Dict, Union
from django.conf import settings
from django.http.response import HttpResponseBase, HttpResponseRedirect
from django.views.generic import TemplateView
from django.contrib.auth.views import LoginView, redirect_to_login
# NOTE(review): this subclass deliberately shadows the imported
# django.contrib.auth.views.LoginView name — it only swaps the template.
class LoginView(LoginView):
    # Custom template used to render the login form.
    template_name = "login_form.html"
class IndexView(TemplateView):
    """Landing page of the application; only superusers may access it."""

    template_name = "index.html"

    def dispatch(
        self, request, *args, **kwargs
    ) -> Union[HttpResponseRedirect, HttpResponseBase]:
        # Anyone who is not a superuser is bounced to the login page.
        if self.request.user.is_superuser is False:  # type: ignore
            return redirect_to_login("/", "/login/")
        return super().dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs) -> Dict[str, Any]:
        context = super().get_context_data(**kwargs)
        # Derive the websocket endpoint from the configured Centrifugo host
        # (https -> wss, http -> ws), appending the port when configured.
        websocket_uri = settings.CENTRIFUGO_HOST.replace("https", "wss").replace("http", "ws")
        if settings.CENTRIFUGO_PORT is not None:
            websocket_uri = f"{websocket_uri}:{settings.CENTRIFUGO_PORT}"
        context["uri"] = websocket_uri
        return context
| 1,095 | 319 |
import pytest
from mcanitexgen.animation.generator import Animation, GeneratorError
def frame(index: int, time: int):
    """Build the dict representation of a single animation frame."""
    return dict(index=index, time=time)
class Test_append:
    """Behaviour of Animation.append."""

    def test(self):
        target = Animation(0, 10, [frame(0, 10)])
        target.append(Animation(10, 20, [frame(0, 10)]))
        assert target == Animation(0, 20, [frame(0, 10), frame(0, 10)])

    @pytest.mark.parametrize(
        "anim1, anim2, result",
        [
            # Gap of one tick between the animations: the last frame of the
            # first animation is stretched to cover it.
            (
                Animation(0, 10, [frame(0, 10)]),
                Animation(11, 20, [frame(0, 9)]),
                Animation(0, 20, [frame(0, 11), frame(0, 9)]),
            ),
            # Larger gap: same stretching rule applies.
            (
                Animation(0, 10, [frame(0, 10)]),
                Animation(30, 40, [frame(0, 10)]),
                Animation(0, 40, [frame(0, 30), frame(0, 10)]),
            ),
        ],
    )
    def test_fill_time_gap_between_animations(
        self, anim1: Animation, anim2: Animation, result: Animation
    ):
        anim1.append(anim2)
        assert anim1 == result

    def test_time_ranges_overlap(self):
        earlier = Animation(0, 10, [frame(0, 10)])
        overlapping = Animation(5, 15, [frame(0, 10)])
        with pytest.raises(GeneratorError, match=".*starts before the other.*"):
            earlier.append(overlapping)
class Test_add_frame:
    """Behaviour of Animation.add_frame."""

    @pytest.mark.parametrize(
        "animation, frame_index, frame_start, frame_end, expected",
        [
            (Animation(0, 0), 0, 0, 10, Animation(0, 10, [frame(0, 10)])),
        ],
    )
    def test(self, animation: Animation, frame_index, frame_start, frame_end, expected):
        animation.add_frame(frame_index, frame_start, frame_end)
        assert animation == expected

    @pytest.mark.parametrize(
        "animation, frame_index, frame_start, frame_end, expected",
        [
            # An empty animation adopts the start of its first frame.
            (Animation(0, 0), 0, 10, 25, Animation(10, 25, [frame(0, 15)])),
            (Animation(10, 10), 0, 20, 30, Animation(20, 30, [frame(0, 10)])),
        ],
    )
    def test_add_frame_with_start_to_empty_animation(
        self, animation: Animation, frame_index, frame_start, frame_end, expected
    ):
        animation.add_frame(frame_index, frame_start, frame_end)
        assert animation == expected

    @pytest.mark.parametrize(
        "animation, frame_index, frame_start, frame_end, expected",
        [
            # The previous frame is stretched to fill the time gap.
            (
                Animation(0, 10, [frame(0, 10)]),
                0,
                20,
                30,
                Animation(0, 30, [frame(0, 20), frame(0, 10)]),
            ),
            (
                Animation(20, 40, [frame(0, 5), frame(0, 5)]),
                0,
                60,
                70,
                Animation(20, 70, [frame(0, 5), frame(0, 25), frame(0, 10)]),
            ),
        ],
    )
    def test_fill_time_gap(self, animation: Animation, frame_index, frame_start, frame_end, expected):
        animation.add_frame(frame_index, frame_start, frame_end)
        assert animation == expected

    @pytest.mark.parametrize(
        "start, end",
        [
            # Zero-length, reversed, and negative ranges are all rejected.
            (0, 0),
            (10, 10),
            (12, 11),
            (-4, -5),
            (-6, -5),
        ],
    )
    def test_invalid_start_and_end(self, start, end):
        empty_animation = Animation(0, 0)
        with pytest.raises(
            GeneratorError, match=f"Illegal start and end for frame: '{start}' '{end}'"
        ):
            empty_animation.add_frame(0, start, end)
| 3,220 | 1,201 |
#! /usr/bin/env python
"""
This is a "quick and dirty" solution to getting polarization data through the pipeline.
This script creates new fits files with independent polarization states.
Make sure you have plenty of diskspace.
"""
from __future__ import print_function

import argparse
import os
import subprocess
from time import sleep

from astropy.io import fits
from tqdm import tqdm
# Command-line interface: list of raw files plus two optional switches.
parser = argparse.ArgumentParser(description='Process MIRC-X raw data files')
parser.add_argument("--no-warn", action="store_true")
parser.add_argument("--crop-bad", action="store_true")
parser.add_argument("files", nargs="+", help="File(s) to process")
args = parser.parse_args()
if not args.no_warn:
    print("Warning: Make sure you have plenty of disk space; this is going to hurt.")
    print("(Hint: ^C while you still can! Sleeping 10 seconds for your benefit.)")
    # Grace period so the user can abort before any data is written.
    sleep(10)
# One output directory per polarization state.
for dir in ["pol1", "pol2"]:
    try:
        os.mkdir(dir)
    except FileExistsError:
        # mkdir raises FileExistsError for an existing directory *or* a
        # plain file of the same name; only the directory case is benign.
        if os.path.isdir(dir):
            print("Warning: directory `" + dir + "` already exists")
        else:
            raise FileExistsError("Looks like you have a file named `" + dir + "`; please remove it.")
def polstate(file, state):
    """Extract one polarization state from a FITS file.

    Writes a copy of `file` containing only the detector rows of the given
    polarization `state` (1 or 2) to pol<state>/<file>, fpack-compresses it,
    and removes the uncompressed copy.

    Raises ValueError for a bad `state` or when the file does not look like
    a two-region polarization data file.
    """
    # Validate up front instead of after the file has already been modified.
    if state not in (1, 2):
        raise ValueError("`state` (2nd arg of fcn `polstate`) must have the value either 1 or 2")
    f = fits.open(file)
    try:
        f[0].header["POLSTATE"] = state
        f[0].header["CONF_NA"] = "H_PRISM50" # TEMPORARY FIX
        rows = f[0].header["CROPROWS"].split(",")
        if len(rows) != 2:
            raise ValueError("There must be exactly 2 detector regions. Is this a polarization data file?")
        # SECURITY/REVIEW: eval() on a FITS header value — acceptable only for
        # trusted instrument files; replace with a real parser if inputs may
        # ever be untrusted.
        span = 1 - eval(rows[0]) # 50-50 chance it should be rows[1]
        if state == 1:
            f[0].data = f[0].data[:,:,:span,:]
        else:
            if args.crop_bad:
                # Drop the last two (bad) rows of the second region.
                f[0].data = f[0].data[:,:,span:-2,:]
            else:
                f[0].data = f[0].data[:,:,span:,:]
        path = "pol" + str(state) + "/" + file
        f.writeto(path)
    finally:
        # Close the HDU list even when header parsing or writeto fails.
        f.close()
    # List-form invocation avoids shell-quoting problems with odd filenames.
    subprocess.run(["fpack", path])
    os.remove(path)
# Process every input file; .fz inputs are funpacked first and the
# temporary uncompressed copy removed afterwards.
for file in tqdm(args.files):
    was_packed = file.endswith(".fz")
    if was_packed:
        os.system("funpack " + file)
        file = file[:-3]
    polstate(file, 1)
    polstate(file, 2)
    if was_packed:
        os.remove(file)
| 2,214 | 761 |
from holoprot.models.trainer import Trainer
| 44 | 14 |
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...context import get_context
from ...serialize import Int64Field, KeyField
from ...tiles import TilesError
from ..operands import DataFrameOperand, DataFrameOperandMixin
class HeadOptimizedDataSource(DataFrameOperand, DataFrameOperandMixin):
    """Data source operand optimized for ``head(nrows)`` pulls.

    Tiling is two-phase: the first pass tiles normally but raises
    ``TilesError`` carrying the first chunk so that only it gets executed;
    on the second (iterative) pass the executed chunk's real shape tells
    whether it already contains the requested number of rows.
    """
    __slots__ = '_tiled',
    # Data source op that optimized for head,
    # First, it will try to trigger first_chunk.head() and raise TilesError,
    # When iterative tiling is triggered,
    # check if the first_chunk.head() meets requirements.
    _nrows = Int64Field('nrows')
    # for chunk
    _first_chunk = KeyField('first_chunk')

    @property
    def nrows(self):
        # Number of head rows requested; None disables the head optimization.
        return self._nrows

    @property
    def first_chunk(self):
        # Chunk submitted during the first tiling pass (None before that pass).
        return getattr(self, '_first_chunk', None)

    @classmethod
    def _tile(cls, op): # pragma: no cover
        # Concrete data sources implement the actual tiling here.
        raise NotImplementedError

    @classmethod
    def _tile_head(cls, op: "HeadOptimizedDataSource"):
        if op.first_chunk is None:
            # First pass: tile normally, remember the result on the op, and
            # ask the scheduler to execute just the first chunk.
            op._tiled = tiled = cls._tile(op)
            chunks = tiled[0].chunks
            err = TilesError('HeadOrTailOptimizeDataSource requires '
                             'some dependencies executed first')
            op._first_chunk = chunk = chunks[0]
            err.partial_tiled_chunks = [chunk.data]
            raise err
        else:
            # Second pass: the first chunk has been executed; fetch its
            # actual shape from the chunk metadata.
            tiled = op._tiled
            chunks = tiled[0].chunks
            del op._tiled
            ctx = get_context()
            chunk_shape = ctx.get_chunk_metas([op.first_chunk.key])[0].chunk_shape
            # reset first chunk
            op._first_chunk = None
            for c in chunks:
                c.op._first_chunk = None
            if chunk_shape[0] == op.nrows:
                # the first chunk has enough data
                tiled[0]._nsplits = tuple((s,) for s in chunk_shape)
                chunks[0]._shape = chunk_shape
                tiled[0]._chunks = chunks[:1]
                tiled[0]._shape = chunk_shape
            else:
                # Not enough rows in the first chunk: fall back to slicing
                # the full tiled result down to nrows.
                for chunk in tiled[0].chunks:
                    chunk.op._nrows = None
                # otherwise
                tiled = [tiled[0].iloc[:op.nrows]._inplace_tile()]
            return tiled

    @classmethod
    def tile(cls, op: "HeadOptimizedDataSource"):
        # Route through the head-optimized path only when nrows was set.
        if op.nrows is not None:
            return cls._tile_head(op)
        else:
            return cls._tile(op)
class ColumnPruneSupportedDataSourceMixin(DataFrameOperandMixin):
    """Mixin marking a data source as supporting column-pruning optimization."""
    __slots__ = ()

    def get_columns(self): # pragma: no cover
        """Return the columns this source currently selects."""
        raise NotImplementedError

    def set_pruned_columns(self, columns): # pragma: no cover
        """Restrict the source to ``columns`` after pruning analysis."""
        raise NotImplementedError
| 3,233 | 972 |
# -*- coding: utf-8 -*-
"""
author: pwxcoo
date: 2018-02-05
description: 抓取下载成语并保存
"""
import requests, json
from bs4 import BeautifulSoup
def downloader(url):
    """
    Download all idiom ("chengyu") entries linked from one index page.

    Returns a list of dicts with keys word / pinyin / explanation /
    derivation / example, or None when the index page itself cannot be
    fetched (individual entry failures are skipped).
    """
    response = requests.get(url)
    if response.status_code != 200:
        print(f'{url} is failed!')
        return
    print(f'{url} is parsing')
    html = BeautifulSoup(response.content.decode('gbk', errors='ignore'), "lxml")
    # The second-to-last table of the index page holds the entry links.
    table = html.find_all('table')[-2]
    prefix = 'http://www.zd9999.com'
    words = [prefix + a.get('href') for a in table.find_all('a')]
    res = []
    for word_url in words:
        response = requests.get(word_url)
        # Fixed log message: the old f'{[words[i]]}' printed list brackets.
        print(f'{word_url} is parsing')
        if response.status_code != 200:
            print(f'{word_url} is failed!')
            continue
        wordhtml = BeautifulSoup(response.content.decode('gbk', errors='ignore'), "lxml")
        explanation = wordhtml.find_all('table')[-3].find_all('tr')
        # Hoist the repeated inner find_all('tr') out of the field lookups.
        detail_rows = explanation[1].find_all('tr')
        res.append({
            'word': explanation[0].text.strip(),
            'pinyin': detail_rows[0].find_all('td')[1].text.strip(),
            'explanation': detail_rows[1].find_all('td')[1].text.strip(),
            'derivation': detail_rows[2].find_all('td')[1].text.strip(),
            'example': detail_rows[3].find_all('td')[1].text.strip(),
        })
    return res
if __name__ == '__main__':
    # Page 1 has no numeric suffix; pages 2..198 follow index_{i}.htm.
    # downloader() returns None on a failed fetch; guard against it so a
    # single bad page no longer crashes the whole run with `res += None`.
    res = downloader('http://www.zd9999.com/cy/') or []
    for i in range(2, 199):
        page = downloader(f'http://www.zd9999.com/cy/index_{i}.htm')
        if page:
            res += page
    print(len(res))
    with open('chengyu.json', mode='w+', encoding='utf-8') as json_file:
        json.dump(res, json_file, ensure_ascii=False)
import os
import sys
import copy
import ipdb
import json
import mat73
import numpy as np
import scipy.io as sio
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from lib.camera.camera import CameraInfoPacket, catesian2homogenous
# Global re-orientation applied to world coordinates throughout this script.
rot = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]])  # rotate along the x axis 90 degrees
# Pinhole intrinsics (pixel units) for each of the 14 MPI-INF-3DHP cameras.
mpii_3dhp_cameras_intrinsic_params = [
    {
        'id': '0',
        'center': [1024.704, 1051.394],
        'focal_length': [1497.693, 1497.103],
        'res_w': 2048,
        'res_h': 2048,
        'azimuth': 70,
    },
    {
        'id': '1',
        'center': [1030.519, 1052.626],
        'focal_length': [1495.217, 1495.52],
        'res_w': 2048,
        'res_h': 2048,
        'azimuth': 70,
    },
    {
        'id': '2',
        'center': [983.8873, 987.5902],
        'focal_length': [1495.587, 1497.828],
        'res_w': 2048,
        'res_h': 2048,
        'azimuth': 70,
    },
    {
        'id': '3',
        'center': [1029.06, 1041.409],
        'focal_length': [1495.886, 1496.033],
        'res_w': 2048,
        'res_h': 2048,
        'azimuth': 70,
    },
    {
        'id': '4',
        'center': [987.6075, 1019.069],
        'focal_length': [1490.952, 1491.108],
        'res_w': 2048,
        'res_h': 2048,
        'azimuth': 70,
    },
    {
        'id': '5',
        'center': [1012.331, 998.5009],
        'focal_length': [1500.414, 1499.971],
        'res_w': 2048,
        'res_h': 2048,
        'azimuth': 70,
    },
    {
        'id': '6',
        'center': [999.7319, 1010.251],
        'focal_length': [1498.471, 1498.8],
        'res_w': 2048,
        'res_h': 2048,
        'azimuth': 70,
    },
    {
        'id': '7',
        'center': [987.2716, 976.8773],
        'focal_length': [1498.831, 1499.674],
        'res_w': 2048,
        'res_h': 2048,
        'azimuth': 70,
    },
    {
        'id': '8',
        'center': [1017.387, 1043.032],
        'focal_length': [1500.172, 1500.837],
        'res_w': 2048,
        'res_h': 2048,
        'azimuth': 70,
    },
    {
        'id': '9',
        'center': [1010.423, 1037.096],
        'focal_length': [1501.554, 1501.9],
        'res_w': 2048,
        'res_h': 2048,
        'azimuth': 70,
    },
    {
        'id': '10',
        'center': [1041.614, 997.0433],
        'focal_length': [1498.423, 1498.585],
        'res_w': 2048,
        'res_h': 2048,
        'azimuth': 70,
    },
    {
        'id': '11',
        'center': [1009.802, 999.9984],
        'focal_length': [1495.779, 1493.703],
        'res_w': 2048,
        'res_h': 2048,
        'azimuth': 70,
    },
    {
        'id': '12',
        'center': [1000.56, 1014.975],
        'focal_length': [1501.326, 1501.491],
        'res_w': 2048,
        'res_h': 2048,
        'azimuth': 70,
    },
    {
        'id': '13',
        'center': [1005.702, 1004.214],
        'focal_length': [1496.961, 1497.378],
        'res_w': 2048,
        'res_h': 2048,
        'azimuth': 70,
    }
]
# Extrinsics per camera: 'R' is the 3x3 rotation and 'translation' the
# camera translation (values ~3-5; presumably meters — confirm against the
# mm-to-m conversion used for read_cali above).
mpii_3dhp_cameras_extrinsic_params = [
    {
        'translation': [-0.5628666, 1.3981379999999999, 3.852623],
        'R':
            [
                [0.9650164, 0.00488022, 0.262144],
                [-0.004488356, -0.9993728, 0.0351275],
                [0.262151, -0.03507521, -0.9643893]
            ]
    },
    {
        'translation': [-1.429856, 0.7381779, 4.897966],
        'R':
            [
                [0.6050639, -0.02184232, 0.7958773],
                [-0.22647, -0.9630526, 0.1457429],
                [0.7632883, -0.2684261, -0.587655]
            ]
    },
    {
        'translation': [0.05725702, 1.307287, 2.7998220000000003],
        'R':
            [
                [-0.3608179, -0.009492658, 0.932588],
                [-0.0585942, -0.9977421, -0.03282591],
                [0.9307939, -0.06648842, 0.359447]
            ]
    },
    {
        'translation': [-0.2848168, 0.8079184, 3.1771599999999998],
        'R':
            [
                [-0.0721105, -0.04817664, 0.9962325],
                [-0.4393254, -0.8951841, -0.07508985],
                [0.895429, -0.443085, 0.04338695]
            ]
    },
    {
        'translation': [-1.563911, 0.8019607999999999, 3.5173159999999997],
        'R':
            [
                [0.3737275, 0.09688602, 0.9224646],
                [-0.009716132, -0.9940662, 0.1083427],
                [0.9274878, -0.04945343, -0.3705685]
            ]
    },
    {
        'translation': [0.35841340000000005, 0.9945657999999999, 3.439832],
        'R':
            [
                [-0.3521056, 0.01328985, -0.9358659],
                [-0.04961938, -0.9987582, 0.004485628],
                [-0.9346441, 0.0480165, 0.3523278]
            ]
    },
    {
        'translation': [0.5694388, 0.528871, 3.6873690000000003],
        'R':
            [
                [-0.9150326, -0.04843184, 0.4004618],
                [-0.1804886, -0.8386868, -0.5138369],
                [0.3607481, -0.5424563, 0.7586845]
            ]
    },
    {
        'translation': [1.378866, 1.270781, 2.631567],
        'R':
            [
                [-0.9995936, 0.02847456, 0.001368653],
                [-0.02843213, -0.9992908, 0.0246889],
                [0.002070688, 0.02463995, 0.9996943]
            ]
    },
    {
        'translation': [0.2213543, 0.65987, 3.644688],
        'R':
            [
                [0.000575281, 0.06160985, -0.9981001],
                [0.2082146, -0.9762325, -0.06013997],
                [-0.978083, -0.2077844, -0.01338968]
            ]
    },
    {
        'translation': [0.38862169999999996, 0.1375452, 4.216635],
        'R':
            [
                [0.04176839, 0.00780962, -0.9990969],
                [0.5555364, -0.831324, 0.01672664],
                [-0.8304425, -0.5557333, -0.03906159]
            ]
    },
    {
        'translation': [1.167962, 0.6176362000000001, 4.472351],
        'R':
            [
                [-0.8970265, 0.1361548, -0.4204822],
                [0.09417118, -0.8706428, -0.4828178],
                [-0.4318278, -0.4726976, 0.7681679]
            ]
    },
    {
        'translation': [0.1348272, 0.2515094, 4.570244],
        'R':
            [
                [0.9170455, 0.1972746, -0.3465695],
                [0.1720879, 0.5882171, 0.7901813],
                [0.3597408, -0.7842726, 0.5054733]
            ]
    },
    {
        'translation': [0.4124695, 0.5327588, 4.887095],
        'R':
            [
                [-0.7926738, 0.1323657, 0.5951031],
                [-0.396246, 0.6299778, -0.66792],
                [-0.4633114, -0.7652499, -0.4469175]
            ]
    },
    {
        'translation': [0.8671278, 0.8274571999999999, 3.985159],
        'R':
            [
                [-0.8701088, -0.09522671, -0.4835728],
                [0.4120245, 0.3978655, -0.8197188],
                [0.270456, -0.9124883, -0.3069505]
            ]
    }
]
# Every (subject, sequence, camera) combination used for training, named
# "S<k>_Seq<j>_<camera>", plus the test sequences (note TS2 is absent).
subjects = [
    'S1_Seq1_0', 'S1_Seq1_1', 'S1_Seq1_2', 'S1_Seq1_3', 'S1_Seq1_4', 'S1_Seq1_5', 'S1_Seq1_6', 'S1_Seq1_7',
    'S1_Seq1_8', 'S1_Seq1_9', 'S1_Seq1_10', 'S1_Seq1_11', 'S1_Seq1_12', 'S1_Seq1_13', 'S1_Seq2_0', 'S1_Seq2_1',
    'S1_Seq2_2', 'S1_Seq2_3', 'S1_Seq2_4', 'S1_Seq2_5', 'S1_Seq2_6', 'S1_Seq2_7', 'S1_Seq2_8', 'S1_Seq2_9',
    'S1_Seq2_10', 'S1_Seq2_11', 'S1_Seq2_12', 'S1_Seq2_13', 'S2_Seq1_0', 'S2_Seq1_1', 'S2_Seq1_2', 'S2_Seq1_3',
    'S2_Seq1_4', 'S2_Seq1_5', 'S2_Seq1_6', 'S2_Seq1_7', 'S2_Seq1_8', 'S2_Seq1_9', 'S2_Seq1_10', 'S2_Seq1_11',
    'S2_Seq1_12', 'S2_Seq1_13', 'S2_Seq2_0', 'S2_Seq2_1', 'S2_Seq2_2', 'S2_Seq2_3', 'S2_Seq2_4', 'S2_Seq2_5',
    'S2_Seq2_6', 'S2_Seq2_7', 'S2_Seq2_8', 'S2_Seq2_9', 'S2_Seq2_10', 'S2_Seq2_11', 'S2_Seq2_12', 'S2_Seq2_13',
    'S3_Seq1_0', 'S3_Seq1_1', 'S3_Seq1_2', 'S3_Seq1_3', 'S3_Seq1_4', 'S3_Seq1_5', 'S3_Seq1_6', 'S3_Seq1_7',
    'S3_Seq1_8', 'S3_Seq1_9', 'S3_Seq1_10', 'S3_Seq1_11', 'S3_Seq1_12', 'S3_Seq1_13', 'S3_Seq2_0', 'S3_Seq2_1',
    'S3_Seq2_2', 'S3_Seq2_3', 'S3_Seq2_4', 'S3_Seq2_5', 'S3_Seq2_6', 'S3_Seq2_7', 'S3_Seq2_8', 'S3_Seq2_9',
    'S3_Seq2_10', 'S3_Seq2_11', 'S3_Seq2_12', 'S3_Seq2_13', 'S4_Seq1_0', 'S4_Seq1_1', 'S4_Seq1_2', 'S4_Seq1_3',
    'S4_Seq1_4', 'S4_Seq1_5', 'S4_Seq1_6', 'S4_Seq1_7', 'S4_Seq1_8', 'S4_Seq1_9', 'S4_Seq1_10', 'S4_Seq1_11',
    'S4_Seq1_12', 'S4_Seq1_13', 'S4_Seq2_0', 'S4_Seq2_1', 'S4_Seq2_2', 'S4_Seq2_3', 'S4_Seq2_4', 'S4_Seq2_5',
    'S4_Seq2_6', 'S4_Seq2_7', 'S4_Seq2_8', 'S4_Seq2_9', 'S4_Seq2_10', 'S4_Seq2_11', 'S4_Seq2_12', 'S4_Seq2_13',
    'S5_Seq1_0', 'S5_Seq1_1', 'S5_Seq1_2', 'S5_Seq1_3', 'S5_Seq1_4', 'S5_Seq1_5', 'S5_Seq1_6', 'S5_Seq1_7',
    'S5_Seq1_8', 'S5_Seq1_9', 'S5_Seq1_10', 'S5_Seq1_11', 'S5_Seq1_12', 'S5_Seq1_13', 'S5_Seq2_0', 'S5_Seq2_1',
    'S5_Seq2_2', 'S5_Seq2_3', 'S5_Seq2_4', 'S5_Seq2_5', 'S5_Seq2_6', 'S5_Seq2_7', 'S5_Seq2_8', 'S5_Seq2_9',
    'S5_Seq2_10', 'S5_Seq2_11', 'S5_Seq2_12', 'S5_Seq2_13', 'S6_Seq1_0', 'S6_Seq1_1', 'S6_Seq1_2', 'S6_Seq1_3',
    'S6_Seq1_4', 'S6_Seq1_5', 'S6_Seq1_6', 'S6_Seq1_7', 'S6_Seq1_8', 'S6_Seq1_9', 'S6_Seq1_10', 'S6_Seq1_11',
    'S6_Seq1_12', 'S6_Seq1_13', 'S6_Seq2_0', 'S6_Seq2_1', 'S6_Seq2_2', 'S6_Seq2_3', 'S6_Seq2_4', 'S6_Seq2_5',
    'S6_Seq2_6', 'S6_Seq2_7', 'S6_Seq2_8', 'S6_Seq2_9', 'S6_Seq2_10', 'S6_Seq2_11', 'S6_Seq2_12', 'S6_Seq2_13',
    'S7_Seq1_0', 'S7_Seq1_1', 'S7_Seq1_2', 'S7_Seq1_3', 'S7_Seq1_4', 'S7_Seq1_5', 'S7_Seq1_6', 'S7_Seq1_7',
    'S7_Seq1_8', 'S7_Seq1_9', 'S7_Seq1_10', 'S7_Seq1_11', 'S7_Seq1_12', 'S7_Seq1_13', 'S7_Seq2_0', 'S7_Seq2_1',
    'S7_Seq2_2', 'S7_Seq2_3', 'S7_Seq2_4', 'S7_Seq2_5', 'S7_Seq2_6', 'S7_Seq2_7', 'S7_Seq2_8', 'S7_Seq2_9',
    'S7_Seq2_10', 'S7_Seq2_11', 'S7_Seq2_12', 'S7_Seq2_13', 'S8_Seq1_0', 'S8_Seq1_1', 'S8_Seq1_2', 'S8_Seq1_3',
    'S8_Seq1_4', 'S8_Seq1_5', 'S8_Seq1_6', 'S8_Seq1_7', 'S8_Seq1_8', 'S8_Seq1_9', 'S8_Seq1_10', 'S8_Seq1_11',
    'S8_Seq1_12', 'S8_Seq1_13', 'S8_Seq2_0', 'S8_Seq2_1', 'S8_Seq2_2', 'S8_Seq2_3', 'S8_Seq2_4', 'S8_Seq2_5',
    'S8_Seq2_6', 'S8_Seq2_7', 'S8_Seq2_8', 'S8_Seq2_9', 'S8_Seq2_10', 'S8_Seq2_11', 'S8_Seq2_12', 'S8_Seq2_13',
    'TS1', 'TS3', 'TS4'
]
# Per-subject camera metadata: merge the camera's extrinsics and intrinsics
# into one dict, wrapped in a single-element list.
camera_params = dict()
for sbj in subjects:
    if sbj.startswith('S'):
        # Training subjects ("S<k>_Seq<j>_<cid>") carry their camera id in
        # the trailing name component.
        cid = int(sbj.split('_')[-1])
        meta = dict()
        meta.update(mpii_3dhp_cameras_extrinsic_params[cid])
        meta.update(mpii_3dhp_cameras_intrinsic_params[cid])
        camera_params[sbj] = [meta]
    if sbj.startswith('T'):
        # Every test sequence was recorded with camera 8.
        meta = dict()
        meta.update(mpii_3dhp_cameras_extrinsic_params[8])
        meta.update(mpii_3dhp_cameras_intrinsic_params[8])
        camera_params[sbj] = [meta]
def read_ann(ann_file, mode):
    """Load an annotation .mat file.

    :param ann_file: path to the annotation file
    :param mode: 'train' (MATLAB v5, read by scipy) or 'test' (v7.3, read by mat73)
    :return: dict-like annotation structure
    :raises ValueError: for any other mode (previously returned None silently)
    """
    if mode == 'train':
        return sio.loadmat(ann_file)
    if mode == 'test':
        return mat73.loadmat(ann_file)
    raise ValueError("mode must be 'train' or 'test', got {!r}".format(mode))
def read_cali(cali_file, vid_idx, mode):
    """Parse camera calibration for the requested cameras.

    :param cali_file: path to a camera.calibration file
    :param vid_idx: iterable of camera/video indices to extract
    :param mode: only 'train' is implemented; 'test' raises NotImplementedError
    :return: (Ks, Rs, Ts) lists — K 3x3 intrinsics, R 3x3 rotation
             (composed with inv(rot)), T translation in meters
    """
    Ks, Rs, Ts = [], [], []
    if mode == 'train':
        # Each camera occupies a 7-line record; line 5 holds the flattened
        # 4x4 intrinsic matrix and line 6 the flattened 4x4 [R|T] extrinsic.
        # `with` closes the handle even on parse errors (was leaked before).
        with open(cali_file, 'r') as cali_fh:
            content = cali_fh.readlines()
        for vid_i in vid_idx:
            K = np.array([float(s) for s in content[vid_i * 7 + 5][11:-2].split()])
            K = np.reshape(K, (4, 4))[:3, :3]
            RT = np.array([float(s) for s in content[vid_i * 7 + 6][11:-2].split()])
            RT = np.reshape(RT, (4, 4))
            R = RT[:3, :3]
            # Compose with the inverse of the module-level x-axis rotation `rot`.
            R = R @ np.linalg.inv(rot)
            T = RT[:3, 3] / 1000  # mm to m
            Ks.append(K)
            Rs.append(R)
            Ts.append(T)
    if mode == 'test':
        raise NotImplementedError
    return Ks, Rs, Ts
if __name__ == '__main__':
    # REFERENCE: https://github.com/nkolot/SPIN/blob/master/datasets/preprocess/mpi_inf_3dhp.py
    data_root = '/ssd/yzhan/data/benchmark/3D/mpi_inf_3dhp'
    res_w = 2048
    res_h = 2048
    # train
    train_subjects = ['S1', 'S2', 'S3', 'S4', 'S5', 'S6', 'S7', 'S8']
    sequences = ['Seq1', 'Seq2']
    video_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
    train_kpt_idx = [4, 23, 24, 25, 18, 19, 20, 3, 5, 6, 7, 9, 10, 11, 14, 15, 16]
    # test
    test_subjects = ['TS1', 'TS3', 'TS4']  # drop TS2, due to inaccurate extrinsic
    test_kpt_idx = [14, 8, 9, 10, 11, 12, 13, 15, 1, 16, 0, 5, 6, 7, 2, 3, 4]
    METADATA = {
        'layout': '3dhp',
        'num_joints': 17,
        'keypoints_symmetry': [[4, 5, 6, 11, 12, 13], [1, 2, 3, 14, 15, 16]]
    }
    data_3d = {}
    data_2d = {}
    intrinsics = {}
    extrinsics = {}
    # ---- training subjects: collect per-camera 2D/3D joints + calibration ----
    for sbj in train_subjects:
        for seq in sequences:
            ann_meta = read_ann(os.path.join(data_root, sbj, seq, 'annot.mat'), mode='train')
            valid_cameras = ann_meta['cameras'].reshape(-1).tolist()
            valid_frames = ann_meta['frames'].reshape(-1).tolist()
            kpts_2d = ann_meta['annot2']
            kpts_3d = ann_meta['annot3']
            Ks, Rs, Ts = read_cali(os.path.join(data_root, sbj, seq, 'camera.calibration'), video_list, mode='train')
            assert len(Ks) == len(Rs) == len(Ts) == len(valid_cameras), 'camera miss match'
            for cam_idx in valid_cameras:
                subject = '{}_{}_{}'.format(sbj, seq, cam_idx)
                joints_2d = kpts_2d[cam_idx, 0][:len(valid_frames)].reshape(len(valid_frames), -1, 2)[:, train_kpt_idx]
                joints_3d = kpts_3d[cam_idx, 0][:len(valid_frames)].reshape(len(valid_frames), -1, 3)[:, train_kpt_idx]
                joints_3d /= 1000  # mm to m
                valid_joints_2d = list()
                valid_joints_3d = list()
                valid_file_names = list()
                num_invalid_frame = 0
                for frame_idx in range(len(valid_frames)):
                    joint_2d = joints_2d[frame_idx]
                    joint_3d = joints_3d[frame_idx]
                    # Drop frames where any keypoint projects outside the image
                    # or the corresponding frame image is missing on disk.
                    x_in = np.logical_and(joint_2d[:, 0] < res_w, joint_2d[:, 0] >= 0)
                    y_in = np.logical_and(joint_2d[:, 1] < res_h, joint_2d[:, 1] >= 0)
                    ok_pts = np.logical_and(x_in, y_in)
                    if np.sum(ok_pts) < len(train_kpt_idx):
                        num_invalid_frame += 1
                        continue
                    frame_name = os.path.join(data_root, sbj, seq, 'imageSequence',
                                              'video_{}'.format(cam_idx), 'img_%06d.jpg' % (frame_idx + 1))
                    if not os.path.exists(frame_name):
                        num_invalid_frame += 1
                        continue
                    valid_joints_2d.append(joint_2d)
                    valid_joints_3d.append(joint_3d)
                    valid_file_names.append('img_%06d.jpg' % (frame_idx + 1))
                print('sbj -> {}, seq -> {}, camera -> {}, total frames -> {}, invalid frames -> {}'.format(
                    sbj, seq, cam_idx, len(valid_frames), num_invalid_frame)
                )
                valid_joints_2d = np.array(valid_joints_2d)
                valid_joints_3d = np.array(valid_joints_3d)
                assert valid_joints_2d.shape[0] == valid_joints_3d.shape[0] == len(valid_frames) - num_invalid_frame
                data_3d.setdefault(subject, dict())
                data_3d[subject].setdefault('Action', list())
                data_3d[subject]['Action'] = valid_joints_3d
                data_2d.setdefault(subject, dict())
                data_2d[subject].setdefault('Action', list())
                data_2d[subject]['Action'].append(
                    {
                        'file_name': valid_file_names,
                        'positions_2d': valid_joints_2d
                    }
                )
                intrinsics.setdefault(subject, [Ks[cam_idx].tolist()])
                extrinsics.setdefault(subject, [Rs[cam_idx].tolist(), Ts[cam_idx].tolist()])
    # ---- test subjects: single fixed camera (id 8) ----
    for sbj in test_subjects:
        ann_meta = read_ann(os.path.join(data_root, sbj, 'annot_data.mat'), mode='test')
        valid_frames = ann_meta['valid_frame'].reshape(-1).tolist()
        kpts_2d = ann_meta['annot2'].transpose(2, 1, 0)[:, test_kpt_idx]
        kpts_3d = ann_meta['annot3'].transpose(2, 1, 0)[:, test_kpt_idx]
        kpts_3d /= 1000  # mm to m
        valid_joints_2d = list()
        valid_joints_3d = list()
        valid_file_names = list()
        num_invalid_frame = 0
        for frame_idx, flag in enumerate(valid_frames):
            if flag == 0:
                num_invalid_frame += 1
                continue
            joint_2d = kpts_2d[frame_idx]
            joint_3d = kpts_3d[frame_idx]
            x_in = np.logical_and(joint_2d[:, 0] < res_w, joint_2d[:, 0] >= 0)
            y_in = np.logical_and(joint_2d[:, 1] < res_h, joint_2d[:, 1] >= 0)
            ok_pts = np.logical_and(x_in, y_in)
            if np.sum(ok_pts) < len(train_kpt_idx):
                num_invalid_frame += 1
                continue
            frame_name = os.path.join(data_root, sbj, 'imageSequence', 'img_%06d.jpg' % (frame_idx + 1))
            if not os.path.exists(frame_name):
                num_invalid_frame += 1
                continue
            valid_joints_2d.append(joint_2d)
            valid_joints_3d.append(joint_3d)
            valid_file_names.append('img_%06d.jpg' % (frame_idx + 1))
        print('sbj -> {}, total frames -> {}, invalid frames -> {}'.format(
            sbj, len(valid_frames), num_invalid_frame)
        )
        valid_joints_2d = np.array(valid_joints_2d)
        valid_joints_3d = np.array(valid_joints_3d)
        try:
            assert valid_joints_2d.shape[0] == valid_joints_3d.shape[0] == len(valid_frames) - num_invalid_frame
        except AssertionError:  # narrowed from bare except: only the assert can fail here
            ipdb.set_trace()
        data_3d.setdefault(sbj, dict())
        data_3d[sbj].setdefault('Action', list())
        data_3d[sbj]['Action'] = valid_joints_3d
        data_2d.setdefault(sbj, dict())
        data_2d[sbj].setdefault('Action', list())
        data_2d[sbj]['Action'].append(
            {
                'file_name': valid_file_names,
                'positions_2d': valid_joints_2d
            }
        )
    # ---- build CameraInfoPacket objects from the static camera tables ----
    _cameras = copy.deepcopy(camera_params)
    for cameras in _cameras.values():
        for i, cam in enumerate(cameras):
            for k, v in cam.items():
                if k not in ['id', 'res_w', 'res_h']:
                    cam[k] = np.array(v, dtype='float32')
    camera_info = dict()
    for subject in _cameras:
        camera_info.setdefault(subject, list())
        for cam in _cameras[subject]:
            if 'translation' not in cam:
                continue
            # np.float alias was removed in NumPy 1.20; use the concrete dtype.
            K = np.eye(3, dtype=np.float64)
            K[0, 0] = cam['focal_length'][0]
            K[1, 1] = cam['focal_length'][1]
            K[0, 2] = cam['center'][0]
            K[1, 2] = cam['center'][1]
            R = cam['R']
            t = np.array(cam['translation'], dtype=np.float64).reshape(3, 1)
            camera_info[subject].append(CameraInfoPacket(P=None, K=K, R=R, t=t,
                                                         res_w=cam['res_w'], res_h=cam['res_h'],
                                                         azimuth=cam['azimuth'],
                                                         dist_coeff=None, undistort=False))
    # Same cameras, but with R composed with inv(rot) to match the rotated world frame.
    new_camera_info = dict()
    for subject in _cameras:
        new_camera_info.setdefault(subject, list())
        for cam in _cameras[subject]:
            if 'translation' not in cam:
                continue
            K = np.eye(3, dtype=np.float64)
            K[0, 0] = cam['focal_length'][0]
            K[1, 1] = cam['focal_length'][1]
            K[0, 2] = cam['center'][0]
            K[1, 2] = cam['center'][1]
            R = cam['R']
            R = R @ np.linalg.inv(rot)
            t = np.array(cam['translation'], dtype=np.float64).reshape(3, 1)
            if subject.startswith('S'):
                cid = int(subject.split('_')[-1])
            else:
                cid = 8
            # NOTE(review): Ks/Rs/Ts here are leftovers from the *last*
            # training sequence parsed above; this check only holds if the
            # calibration is identical across subjects — confirm.
            try:
                assert np.sum(K - Ks[cid]) < 1e-3
            except AssertionError:  # narrowed from bare except
                ipdb.set_trace()
            assert np.sum(R - Rs[cid]) < 1e-6
            assert np.sum(t.reshape(3) - Ts[cid]) < 1e-6
            new_camera_info[subject].append(CameraInfoPacket(P=None, K=K, R=R, t=t,
                                                             res_w=cam['res_w'], res_h=cam['res_h'],
                                                             azimuth=cam['azimuth'],
                                                             dist_coeff=None, undistort=False))
    # ---- re-express 3D poses in the rotated world frame and sanity-check ----
    for ky in subjects:
        joint_2d, file_names = data_2d[ky]['Action'][0]['positions_2d'], data_2d[ky]['Action'][0]['file_name']
        joint_3d = data_3d[ky]['Action']
        cam = camera_info[ky][0]
        new_cam = new_camera_info[ky][0]
        world_3d = cam.camera2world(joint_3d)
        world_3d_update = world_3d.copy()
        for idx in range(world_3d.shape[0]):
            world_3d_update[idx] = (rot @ world_3d[idx].T).T
        projected_2d = new_cam.project(catesian2homogenous(world_3d_update))
        error = np.sum(joint_2d - projected_2d)
        print('{} error: {}'.format(ky, error/world_3d_update.shape[0]))
        data_3d[ky]['Action'] = world_3d_update
    np.savez(os.path.join(data_root, 'data_2d_3dhp_gt.npz'), metadata=METADATA, positions_2d=data_2d)
    np.savez(os.path.join(data_root, 'data_3d_3dhp.npz'), positions_3d=data_3d)
    # Close the json handles deterministically (previously leaked).
    with open(os.path.join(data_root, 'intrinsic.json'), 'w') as json_fh:
        json.dump(intrinsics, json_fh, indent=4)
    with open(os.path.join(data_root, 'extrinsic.json'), 'w') as json_fh:
        json.dump(extrinsics, json_fh, indent=4)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.LeadsOrderInfo import LeadsOrderInfo
class KoubeiServindustryLeadsRecordBatchqueryResponse(AlipayResponse):
    """Response wrapper for koubei.servindustry.leads.record.batchquery."""

    def __init__(self):
        super(KoubeiServindustryLeadsRecordBatchqueryResponse, self).__init__()
        self._leads_order_info_list = None
        self._total_count = None

    @property
    def leads_order_info_list(self):
        return self._leads_order_info_list

    @leads_order_info_list.setter
    def leads_order_info_list(self, value):
        # Accept a list mixing LeadsOrderInfo objects and plain dicts;
        # dicts are converted via from_alipay_dict.
        if isinstance(value, list):
            self._leads_order_info_list = [
                item if isinstance(item, LeadsOrderInfo) else LeadsOrderInfo.from_alipay_dict(item)
                for item in value
            ]

    @property
    def total_count(self):
        return self._total_count

    @total_count.setter
    def total_count(self, value):
        self._total_count = value

    def parse_response_content(self, response_content):
        response = super(KoubeiServindustryLeadsRecordBatchqueryResponse, self).parse_response_content(response_content)
        # Route each present field through its property setter.
        for key in ('leads_order_info_list', 'total_count'):
            if key in response:
                setattr(self, key, response[key])
| 1,512 | 474 |
from django.contrib.admin.filters import RelatedFieldListFilter, AllValuesFieldListFilter
from django.db import models
from django.db.models.query_utils import Q
from django.utils.translation import ugettext as _
from django.utils.encoding import smart_unicode
class CellFilter(object):
    """Base class for per-column cell filters shown as a dropdown menu."""
    title = ""
    # Lookup operator -> menu caption, as rendered in the cell menu.
    menu_labels = {'lt': _('Less than'),
                   'gt': _('Greater than'),
                   'lte': _('Less or equals than'),
                   'gte': _('Greater or equals than'),
                   'exact': _('Equals to'),
                   'not': _('Not equals to'),
                   'rem': _('Remove filter')}

    def __init__(self, field, request, params, model, model_admin, field_path, column=None):
        self.column = column or field_path or field.name
        self.col_operators = model_admin.cell_filter_operators.get(field.name, ('exact', 'not'))
        self.seed = field_path

    def __repr__(self):
        return "<%s for `%s` as %s>" % (self.__class__.__name__, self.column, id(self))

    def is_active(self, cl):
        # Active when any of our expected querystring keys is in play.
        currently_applied = cl.params.keys()
        for candidate in self.expected_parameters():
            if candidate in currently_applied:
                return True

    def has_output(self):
        return True

    def get_menu_item_for_op(self, op):
        caption = CellFilter.menu_labels.get(op)
        return caption, '%s__%s' % (self.seed, op)

    def expected_parameters(self):
        # One querystring parameter per configured operator.
        return ['%s__%s' % (self.seed, op) for op in self.col_operators]
class ChoicesCellFilter(CellFilter, AllValuesFieldListFilter):
    """Cell filter for fields with choices; behaviour comes from the bases."""
    pass
class BooleanCellFilter(CellFilter, AllValuesFieldListFilter):
    """Cell filter for boolean columns: menu shows Yes/No instead of operators."""

    def __init__(self, field, request, params, model, model_admin, field_path, column=None):
        self.col_operators = model_admin.cell_filter_operators.get(field.name, ('exact', 'not'))
        super(BooleanCellFilter, self).__init__(field, request, params, model, model_admin, field_path, column)

    def get_menu_item_for_op(self, op):
        # 'exact' (or the bare seed) means "is True"; anything else negates.
        if op in ('exact', ''):
            return _('Yes'), self.seed
        return _('No'), '%s__not' % self.seed

    def expected_parameters(self):
        # The bare seed covers 'exact'; each remaining operator gets a
        # suffixed parameter.
        parameters = [self.seed]
        parameters.extend('%s__%s' % (self.seed, op)
                          for op in self.col_operators if op != 'exact')
        return parameters
class FieldCellFilter(CellFilter, AllValuesFieldListFilter):
    """Generic cell filter for plain model fields."""

    def get_menu_item_for_op(self, op):
        caption = CellFilter.menu_labels.get(op)
        # 'exact' uses the bare seed; other operators get a lookup suffix.
        if op == 'exact':
            return caption, self.seed
        return caption, '%s__%s' % (self.seed, op)

    def expected_parameters(self):
        parameters = [self.seed]
        parameters.extend('%s__%s' % (self.seed, op)
                          for op in self.col_operators if op != 'exact')
        return parameters
class RelatedFieldCellFilter(RelatedFieldListFilter, CellFilter):
    """Cell filter for ForeignKey / related columns."""

    def __init__(self, field, request, params, model, model_admin, field_path, column=None):
        super(RelatedFieldCellFilter, self).__init__(field, request, params, model, model_admin, field_path)
        self.column = column or field_path or field.name
        self.col_operators = model_admin.cell_filter_operators.get(field.name, ('exact', 'not'))
        # Drop the trailing lookup component of lookup_kwarg to obtain the seed.
        self.seed = "__".join(self.lookup_kwarg.split('__')[:-1])
class AllValuesComboFilter(AllValuesFieldListFilter):
    """All-values filter rendered as a combobox."""
    template = 'iadmin/filters/combobox.html'
class RelatedFieldComboFilter(RelatedFieldListFilter):
    """Related-field filter rendered as a combobox."""
    template = 'iadmin/filters/fieldcombobox.html'
class RelatedFieldRadioFilter(RelatedFieldListFilter):
    """Related-field filter rendered as radio buttons."""
    template = 'iadmin/filters/fieldradio.html'
class RelatedFieldCheckBoxFilter(RelatedFieldListFilter):
    """Related-field filter rendered as checkboxes; selected values are OR-ed."""
    template = 'iadmin/filters/fieldcheckbox.html'

    def __init__(self, field, request, params, model, model_admin, field_path):
        super(RelatedFieldCheckBoxFilter, self).__init__(field, request, params, model, model_admin, field_path)
        # Checkboxes allow several values for the same lookup key.
        self.lookup_val = request.GET.getlist(self.lookup_kwarg, [])

    def queryset(self, request, queryset):
        if not len(self.lookup_val):
            return queryset
        # OR together one Q object per selected value.
        filters = []
        for val in self.lookup_val:
            filters.append(Q(**{self.lookup_kwarg: val}))
        query = filters.pop()
        for item in filters:
            query |= item
        return queryset.filter(query)

    def choices(self, cl):
        from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
        # "All": selected when neither a value nor isnull is active.
        yield {
            'selected': not len(self.lookup_val) and not self.lookup_val_isnull,
            'query_string': cl.get_query_string({},
                [self.lookup_kwarg, self.lookup_kwarg_isnull]),
            'display': _('All'),
        }
        # Explicit "None" entry toggling the isnull lookup.
        yield {
            'selected': self.lookup_val_isnull,
            'query_string': cl.get_query_string({self.lookup_kwarg_isnull: 1},
                [self.lookup_kwarg, self.lookup_kwarg_isnull]),
            'display': _('None'),
        }
        # One checkbox per related object.
        for pk_val, val in self.lookup_choices:
            yield {
                'selected': smart_unicode(pk_val) in self.lookup_val,
                'query_string': cl.get_query_string({
                    self.lookup_kwarg: pk_val,
                }, [self.lookup_kwarg_isnull]),
                'display': val,
            }
        # Extra empty-value entry for nullable relations.
        if (isinstance(self.field, models.related.RelatedObject)
            and self.field.field.null or hasattr(self.field, 'rel')
            and self.field.null):
            yield {
                'selected': bool(self.lookup_val_isnull),
                'query_string': cl.get_query_string({
                    self.lookup_kwarg_isnull: 'True',
                }, [self.lookup_kwarg]),
                'display': EMPTY_CHANGELIST_VALUE,
            }
| 6,125 | 1,817 |
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
import settings
# Engine bound to the connection URL configured in settings.API_DATA.
engine = create_engine(settings.API_DATA)
# Thread-local session registry: each thread gets its own Session object.
db_session = scoped_session(sessionmaker(bind=engine))
Base = declarative_base()
# Enable `Model.query` convenience lookups on all declarative models.
Base.query = db_session.query_property()
| 332 | 101 |
# https://www.hackerrank.com/challenges/np-arrays/problem
import numpy
def arrays(arr):
    """Return *arr* as a float ndarray with its element order reversed."""
    values = numpy.array(arr, dtype=float)
    return numpy.flipud(values)
| 132 | 50 |
# -*- coding:utf-8 -*-
"""
PyCfg - R. Souweine, 2016.
"""
import sys
import os
import ConfigParser
from datetime import datetime
class Cfg():
    """Typed configuration-file reader built on ConfigParser (Python 2)."""
    def __init__(self, cfg_file, cfg_sections, debug=False):
        """
        Read a standard configuration file.

        Every option found in the requested sections is exposed as an
        attribute of this Cfg instance.
        """
        if os.path.isfile(cfg_file):
            self.cfg_file = cfg_file
            self.cfg_sections = cfg_sections
            self.config = ConfigParser.ConfigParser()
            self.config.read(cfg_file)
            self.debug = debug
            # Dict of parameters from the requested config sections only.
            config_params = {}
            for section in self.config.sections():
                if section in self.cfg_sections:
                    config_params.update(self.ConfigSectionMap(self.config, section))
            # Expose each parameter as an attribute of the Cfg object.
            for key in config_params:
                setattr(self, key, config_params[key])
        else:
            # error(exit=True) calls sys.exit(), so execution stops here
            # when the file is missing.
            self.error("%s is doesn't exists." % cfg_file, exit=True)
        if self.debug is True:
            print "Fichier de configuration lu."
    def ConfigSectionMap(self, config_object, section):
        """
        Map one config-file section to an option -> value dict.
        """
        section_dict = {}
        options = config_object.options(section)
        for option in options:
            try:
                section_dict[option] = config_object.get(section, option)
                if section_dict[option] == -1:
                    self.warning("skip: %s" % option)
            except:
                # Bare except (kept as-is): any failure stores None.
                self.warning("exception on %s!" % option)
                section_dict[option] = None
        return section_dict
    def show_config(self):
        """Print all configuration parameters, hiding internal attributes."""
        parametres_caches = ("cfg_file", "cfg_sections", "config", "debug", "None")
        print "Paramètres de configuration"
        print "----------------------------"
        for key in self.__dict__:
            if key not in (parametres_caches) and key is not None:
                print key, "-", self.__dict__[key]
        print ""
    def info(self, msg):
        # Timestamped informational message.
        print "%s PgDb > %s" % (datetime.now().strftime('%Y-%m-%d %H:%M'), msg)
    def warning(self, msg):
        # Timestamped warning message.
        print "%s PgDb > WARNING: %s" % (datetime.now().strftime('%Y-%m-%d %H:%M'), msg)
    def error(self, msg, exit=False):
        # Timestamped error message; optionally terminates the process.
        print "%s PgDb > ERROR: %s" % (datetime.now().strftime('%Y-%m-%d %H:%M'), msg)
        if exit is True:
            sys.exit()
# Quick manual check: load the global config and dump its parameters.
if __name__ == "__main__":
    c = Cfg("configs/global.cfg", ["inv", "general"])
    # NOTE(review): show_config() returns None, so this also prints "None"
    # after the parameter listing — confirm intended.
    print c.show_config()
from .utils import get_filters, get_values, get_relations, set_values, set_relations
def default_resolve(schema, model, data, **kwargs):
    """Return the queryset matching the filters extracted from *data*."""
    return model.objects.filter(**get_filters(schema, data))
def default_execute(schema, data, raw_data, **kwargs):
    """Apply values/relations from *raw_data* to every model in *data*.

    Each instance is updated and saved; the (mutated) input sequence is
    returned unchanged.
    """
    values = get_values(schema, raw_data)
    relations = get_relations(schema, raw_data)
    for instance in data:
        set_values(instance, values)
        set_relations(instance, relations)
        instance.save()
    return data
| 504 | 155 |
from math import ceil, floor, trunc
x = 1.4
y = 2.6
# floor/ceil/trunc differ on negatives: floor goes toward -infinity,
# ceil toward +infinity, trunc toward zero. Print each pair of results
# for positive then negated inputs (same output order as before).
for rounding in (floor, ceil, trunc):
    print(rounding(x), rounding(y))
    print(rounding(-x), rounding(-y))
| 212 | 97 |
#!/usr/bin/env python
import requests
import json
# Base URL of the document store under test (Python 2 smoke-test script).
store = 'http://localhost:8080/source'
print "post a document"
doc = {'name': 'tom'}
headers = {'Content-Type': 'application/json'}
r = requests.post(store, headers=headers, data=json.dumps(doc))
print r.status_code
# The store replies with the new document's id and revision.
docid = r.json()['id']
revid = r.json()['rev']
print "get the document"
r = requests.get(store + '/' + docid)
print r.status_code
print "delete the document"
# Delete requires the current revision — presumably CouchDB-style MVCC;
# TODO confirm against the store's API.
r = requests.delete(store + '/' + docid + '?rev=' + revid)
print r.status_code
| 513 | 186 |
import numpy as np
import tensorflow as tf
from tensorflow_probability.python.distributions import seed_stream
import tensorflow_probability as tfp
import mvg_distributions.covariance_representations as cov_rep
from mvg_distributions.gamma import SqrtGamma
tfd = tfp.distributions
tfb = tfp.bijectors
class SqrtGammaGaussian(tfd.Distribution):
    def __init__(self, df, log_diag_scale, add_mode_correction=False, validate_args=False, allow_nan_stats=True,
                 name="SqrtGammaGaussian"):
        """
        Square root Gamma-Gaussian distribution, this is equivalent to a Cholesky-Wishart distribution with a
        diagonal scale matrix. Thus it has the same hyper-parameters as the Cholesky-Wishart distribution.
        This distribution expects as input Cholesky Precision matrices. Moreover, it assumes that the diagonal
        elements in the matrix are log(values).
        Args:
            The distribution is defined for a batch (b) of M (pxp) matrices, forming a tensor of [b, p, p]
            df: degrees of freedom, a tensor of [b], the values in it must be df > p - 1
            log_diag_scale: a tensor of [b, p] with the log diagonal values of the matrix S
            add_mode_correction: bool, if using the distribution as a prior, setting this to True will add
                a correction factor to log_diag_scale, such that the log_prob will have the maximum in S
            validate_args: passed through to tfd.Distribution
            allow_nan_stats: passed through to tfd.Distribution
            name: name scope for the distribution's ops
        """
        parameters = locals()
        with tf.name_scope(name=name):
            df = tf.convert_to_tensor(df)
            log_diag_scale = tf.convert_to_tensor(log_diag_scale)
            assert df.shape.ndims == 1
            assert log_diag_scale.shape.ndims == 2
            self._df = df
            self._log_diag_scale = log_diag_scale
            graph_parents = [df, log_diag_scale]
            # Event size p; fall back to a dynamic shape op when the static
            # dimension is unknown at graph-construction time.
            self.p = self.log_diag_scale.shape[1].value
            if self.p is None:
                self.p = tf.shape(self.log_diag_scale)[1]
            # May mutate self._log_diag_scale (see _mode_correction_factor).
            self._mode_correction_factor(add_mode_correction)
            # Component distributions built lazily by the properties below.
            self._sqrt_gamma_dist = None
            self._normal_dist = None
        super().__init__(dtype=self.df.dtype, reparameterization_type=tf.distributions.FULLY_REPARAMETERIZED,
                         validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters,
                         graph_parents=graph_parents, name=name)
    @property
    def sqrt_gamma_dist(self):
        # Lazily-built SqrtGamma over the (log) diagonal entries.
        if self._sqrt_gamma_dist is None:
            half_df = 0.5 * self.df  # [b]
            # 0.0 to 0.5 - 0.5 p, then add 0.5 * df to all
            a = np.linspace(0.0, 0.5 - 0.5 * self.p, self.p, dtype=np.float32)  # [n]
            a = a[np.newaxis, :] + half_df[:, tf.newaxis]  # [b, n]
            b = 0.5 / tf.exp(self.log_diag_scale)  # [b, n]
            self._sqrt_gamma_dist = SqrtGamma(concentration=a, rate=b)
        return self._sqrt_gamma_dist
    @property
    def normal_dist(self):
        # Lazily-built Normal over the strictly-lower-triangular entries.
        if self._normal_dist is None:
            sqrt_diag_scale = tf.exp(0.5 * self.log_diag_scale)
            sqrt_diag_scale = tf.tile(sqrt_diag_scale[:, :, tf.newaxis], (1, 1, self.p))  # [b, n, n]
            self._normal_dist = tfd.Normal(loc=0, scale=sqrt_diag_scale)  # [b, n, n]
        return self._normal_dist
    @property
    def log_diag_scale(self):
        # [b, p] log diagonal of the scale matrix S (possibly mode-corrected).
        return self._log_diag_scale
    @property
    def df(self):
        # [b] degrees of freedom.
        return self._df
    def _mode_correction_factor(self, add_mode_correction):
        # Shift log_diag_scale so that log_prob peaks at S (prior usage).
        if add_mode_correction:
            # corrected_diag_scale = diag_scale * ((p - 1)/(tf.range(p) * (1 - p) + (p - 1) * (df - 1)))
            correction_factor = tf.log(self.p - 1.)
            p_range = tf.range(self.p, dtype=self._log_diag_scale.dtype)[tf.newaxis, :]
            correction_factor -= tf.log(p_range * (1. - self.p) + (self.p - 1.) * (self.df[:, tf.newaxis] - 1.))
            self._log_diag_scale += correction_factor
    def _log_prob_sqrt_gamma(self, x):
        # Log-density of the diagonal entries (assumed stored as log values).
        log_diag_prob = self.sqrt_gamma_dist.log_prob(tf.matrix_diag_part(x))
        return tf.reduce_sum(log_diag_prob, axis=1)
    def _log_prob_normal(self, x):
        # Log-density of the strictly-lower-triangular entries; the mask
        # zeroes out the upper triangle and the diagonal.
        log_off_diag_prob = self.normal_dist.log_prob(x)
        off_diag_mask = tf.ones(shape=tf.shape(x))
        off_diag_mask = tf.matrix_band_part(off_diag_mask, -1, 0)
        off_diag_mask = tf.matrix_set_diag(off_diag_mask, tf.zeros(shape=tf.shape(x)[:-1]))
        log_off_diag_prob *= off_diag_mask
        return tf.reduce_sum(log_off_diag_prob, axis=[1, 2])
    def _log_prob(self, x):
        # Total log-density: diagonal (gamma) + off-diagonal (normal) parts.
        log_diag_prob = self._log_prob_sqrt_gamma(x)
        log_off_diag_prob = self._log_prob_normal(x)
        return log_diag_prob + log_off_diag_prob
    def _batch_shape_tensor(self):
        return tf.shape(self.log_diag_scale)[0]
    def _batch_shape(self):
        return self.log_diag_scale.shape[0:1]
    def _event_shape_tensor(self):
        event_dim = tf.shape(self.log_diag_scale)[1]
        return tf.stack([event_dim, event_dim])
    def _event_shape(self):
        event_dim = self.log_diag_scale.shape[1]
        return tf.TensorShape([event_dim, event_dim])
    def _sample_n(self, n, seed=None):
        """Draw n lower-triangular samples; diagonal holds SqrtGamma draws."""
        stream = seed_stream.SeedStream(seed=seed, salt="Wishart")
        # Sample a normal full matrix
        x = self.normal_dist.sample(sample_shape=n, seed=stream())
        # Sample the log diagonal
        log_g = self.sqrt_gamma_dist.sample(sample_shape=n, seed=stream())
        # Discard the upper triangular part
        x = tf.matrix_band_part(x, -1, 0)
        # Set the diagonal
        x = tf.matrix_set_diag(x, log_g)
        return x
class SparseSqrtGammaGaussian(SqrtGammaGaussian):
    def __init__(self, df, log_diag_scale, add_mode_correction=False, validate_args=False, allow_nan_stats=True,
                 name="SparseSqrtGammaGaussian"):
        """
        Sparse square root Gamma-Gaussian distribution, this is equivalent to a Cholesky-Wishart distribution with a
        diagonal scale matrix and with a sparsity correction factor. Thus it has the same hyper-parameters as the
        Cholesky-Wishart distribution.
        Args:
            The distribution is defined for a batch (b) of M (pxp) matrices, forming a tensor of [b, p, p]
            df: degrees of freedom, a tensor of [b], the values in it must be df > p - 1
            log_diag_scale: a tensor of [b, p] with the log diagonal values of the matrix S
            add_mode_correction: bool, if using the distribution as a prior, setting this to True will add
                a correction factor to log_diag_scale, such that the log_prob will have the maximum in S
            validate_args: passed through to SqrtGammaGaussian
            allow_nan_stats: passed through to SqrtGammaGaussian
            name: name scope for the distribution's ops
        """
        super().__init__(df, log_diag_scale, add_mode_correction=add_mode_correction, validate_args=validate_args,
                         allow_nan_stats=allow_nan_stats, name=name)
    @staticmethod
    def _convert_to_cov_obj(value):
        # Normalise *value* into a covariance-representation object plus the
        # shape that the resulting log_prob should be reshaped to.
        if not isinstance(value, cov_rep.PrecisionConvCholFilters):
            value = tf.convert_to_tensor(value, name="value")
            log_prob_shape = ()
            if value.shape.ndims == 2:
                # Add batch dimension
                value = tf.expand_dims(value, axis=0)
            if value.shape.ndims == 3:
                log_prob_shape = tf.shape(value)[0:1]
            if value.shape.ndims == 4:
                # Collapse batch and sample dimension
                shape = tf.shape(value)
                log_prob_shape = shape[0:2]
                new_shape = [log_prob_shape[0] * log_prob_shape[1]]
                new_shape = tf.concat((new_shape, shape[2:]), axis=0)
                value = tf.reshape(value, new_shape)
            value = cov_rep.PrecisionCholesky(chol_precision=value)
        else:
            log_prob_shape = value.sample_shape[0:1]
        return value, log_prob_shape
    @property
    def normal_dist(self):
        # Unlike the base class, the scale is kept as a [b, n, 1] column so
        # it broadcasts against per-row aligned filter values.
        if self._normal_dist is None:
            sqrt_diag_scale = tf.exp(0.5 * self.log_diag_scale)
            sqrt_diag_scale = sqrt_diag_scale[:, :, tf.newaxis]  # [b, n, 1]
            self._normal_dist = tfd.Normal(loc=0, scale=sqrt_diag_scale)  # [b, n, 1]
        return self._normal_dist
    def _call_log_prob(self, value, name, **kwargs):
        # Override: convert *value* to a covariance object before scoring,
        # and restore the caller's batch/sample shape on the result.
        with self._name_scope(name):
            value, log_prob_shape = self._convert_to_cov_obj(value)
            try:
                log_prob = self._log_prob(value)
                return tf.reshape(log_prob, log_prob_shape)
            except NotImplementedError as original_exception:
                try:
                    log_prob = tf.log(self._prob(value))
                    return tf.reshape(log_prob, log_prob_shape)
                except NotImplementedError:
                    raise original_exception
    def _log_prob_sqrt_gamma(self, x):
        # x is a covariance object exposing the log-diagonal directly.
        log_diag_prob = self.sqrt_gamma_dist.log_prob(x.log_diag_chol_precision)
        return tf.reduce_sum(log_diag_prob, axis=1)
    def _log_prob_normal(self, x):
        if isinstance(x, cov_rep.PrecisionConvCholFilters):
            nb = x.recons_filters_precision.shape[2].value
            # Get the elements in matrix [b, n, n] after they've been aligned per row, this is a [b, n, nb] tensor
            # that if it were reshaped to [b, n_w, n_h, n_b], the vector [b, i, j, :] contain the values of
            # the kth row in the matrix, where k corresponds to the i,j pixel.
            # For each row, we discard the leading zeros and the diagonal element
            off_diag_elements_aligned = x.recons_filters_precision_aligned[:, :, nb // 2 + 1:]
            log_off_diag_prob = self.normal_dist.log_prob(off_diag_elements_aligned)
            # Some elements in recons_filters_precision get zeroed out due to the zero padding for elements out of the
            # image in the convolution operator, thus they are not part of the Cholesky matrix.
            # Do not take into account those elements for the log probability computation
            off_diag_mask_aligned = x.off_diag_mask_compact_aligned()
            # log_off_diag_prob is [b, n, nb // 2 + 1], off_diag_mask is [n, nb]
            log_off_diag_prob *= off_diag_mask_aligned[tf.newaxis, :, nb // 2 + 1:]
            log_off_diag_prob = tf.reduce_sum(log_off_diag_prob, axis=[1, 2])
        else:
            # Dense fallback: score the full Cholesky matrix as in the base.
            log_off_diag_prob = super()._log_prob_normal(x.chol_precision)
        return log_off_diag_prob
| 10,463 | 3,473 |
import json
from datetime import datetime
from django.utils.timezone import utc
from .base import AnymailBaseWebhookView
from ..signals import AnymailTrackingEvent, EventType, RejectReason, tracking
class SendinBlueTrackingWebhookView(AnymailBaseWebhookView):
    """Handler for SendinBlue delivery and engagement tracking webhooks"""

    esp_name = "SendinBlue"
    signal = tracking

    def parse_events(self, request):
        """Decode the webhook body; SendinBlue posts one event per request."""
        payload = json.loads(request.body.decode('utf-8'))
        return [self.esp_to_anymail_event(payload)]

    # SendinBlue's webhook payload data doesn't seem to be documented anywhere.
    # There's a list of webhook events at https://apidocs.sendinblue.com/webhooks/#3.
    event_types = {
        # Map SendinBlue event type: Anymail normalized (event type, reject reason)
        "request": (EventType.QUEUED, None),  # received even if message won't be sent (e.g., before "blocked")
        "delivered": (EventType.DELIVERED, None),
        "hard_bounce": (EventType.BOUNCED, RejectReason.BOUNCED),
        "soft_bounce": (EventType.BOUNCED, RejectReason.BOUNCED),
        "blocked": (EventType.REJECTED, RejectReason.BLOCKED),
        "spam": (EventType.COMPLAINED, RejectReason.SPAM),
        "invalid_email": (EventType.BOUNCED, RejectReason.INVALID),
        "deferred": (EventType.DEFERRED, None),
        "opened": (EventType.OPENED, None),  # see also unique_opened below
        "click": (EventType.CLICKED, None),
        "unsubscribe": (EventType.UNSUBSCRIBED, None),
        "list_addition": (EventType.SUBSCRIBED, None),  # shouldn't occur for transactional messages
        "unique_opened": (EventType.OPENED, None),  # you'll *also* receive an "opened"
    }

    def esp_to_anymail_event(self, esp_event):
        """Translate one raw SendinBlue payload into an AnymailTrackingEvent."""
        event_type, reject_reason = self.event_types.get(
            esp_event.get("event"), (EventType.UNKNOWN, None))

        # SendinBlue supplies "ts", "ts_event" and "date" fields, which seem to be based on the
        # timezone set in the account preferences (and possibly with inconsistent DST adjustment).
        # "ts_epoch" is the only field that seems to be consistently UTC; it's in milliseconds.
        timestamp = None
        if "ts_epoch" in esp_event:
            try:
                timestamp = datetime.fromtimestamp(esp_event["ts_epoch"] / 1000.0, tz=utc)
            except ValueError:
                timestamp = None

        if 'tags' in esp_event:
            # If `tags` param set on send, webhook payload includes 'tags' array field.
            tags = esp_event['tags']
        elif 'tag' in esp_event:
            # If `X-Mailin-Tag` header set on send, webhook payload includes single 'tag' string.
            # (If header not set, webhook 'tag' will be the template name for template sends.)
            tags = [esp_event['tag']]
        else:
            tags = []

        try:
            metadata = json.loads(esp_event["X-Mailin-custom"])
        except (KeyError, TypeError):
            metadata = {}

        return AnymailTrackingEvent(
            description=None,
            esp_event=esp_event,
            event_id=None,  # SendinBlue doesn't provide a unique event id
            event_type=event_type,
            message_id=esp_event.get("message-id"),
            metadata=metadata,
            mta_response=esp_event.get("reason"),
            recipient=esp_event.get("email"),
            reject_reason=reject_reason,
            tags=tags,
            timestamp=timestamp,
            user_agent=None,
            click_url=esp_event.get("link"),
        )
| 3,606 | 1,081 |
"""Custom strategy"""
from urllib.parse import urlencode
from django.db import transaction
from social_django.strategy import DjangoStrategy
from main import features
from profiles.models import LegalAddress, Profile
class BootcampDjangoStrategy(DjangoStrategy):
    """Abstract strategy for the bootcamp app"""

    def redirect_with_partial(self, url, backend, partial_token):
        """Redirect to the specified url with a partial token"""
        # Carry the backend name and partial-pipeline token in the
        # querystring so the receiver can resume the auth pipeline.
        qs = urlencode({"backend": backend, "partial_token": partial_token.token})
        return self.redirect(self.build_absolute_uri(f"{url}?{qs}"))

    def is_api_enabled(self):
        """Returns True if the social auth api is enabled"""
        return features.is_enabled(features.SOCIAL_AUTH_API)

    def is_api_request(self):
        """Returns True if the request is being executed in an API context"""
        # Concrete subclasses must decide this.
        raise NotImplementedError("is_api_request must be implemented")
class DefaultStrategy(BootcampDjangoStrategy):
    """Default strategy for standard social auth requests"""

    def is_api_request(self):
        """Returns True if the request is being executed in an API context"""
        return False

    def create_user(self, *args, **kwargs):
        """Create the user along with its LegalAddress and Profile rows.

        Wrapped in a transaction so a failure creating either related row
        rolls back the new user as well.
        """
        with transaction.atomic():
            user = super().create_user(*args, **kwargs)
            LegalAddress.objects.create(user=user)
            Profile.objects.create(user=user)
            return user
class DjangoRestFrameworkStrategy(BootcampDjangoStrategy):
    """Strategy specific to handling DRF requests"""

    def __init__(self, storage, drf_request=None, tpl=None):
        self.drf_request = drf_request
        # DjangoStrategy expects the underlying django HttpRequest, not the
        # DRF wrapper around it.
        django_request = drf_request._request  # pylint: disable=protected-access
        super().__init__(storage, request=django_request, tpl=tpl)

    def is_api_request(self):
        """Returns True if the request is being executed in an API context"""
        return True

    def request_data(self, merge=True):
        """Returns the request data"""
        if self.drf_request:
            # DRF stores json payload data in request.data, not in
            # request.POST or request.GET like PSA expects.
            return self.drf_request.data
        return {}
| 2,232 | 606 |
# Package version string.
__version__ = '0.0.2'
from pathlib import Path
# Absolute path of the directory containing this module, for locating
# bundled resources relative to the package.
APP_DIR = Path(__file__).parent
| 83 | 34 |
# -*- coding: utf-8 -*-
from addons.base.tests.base import OAuthAddonTestCaseMixin, AddonTestCase
from addons.swift.provider import SwiftProvider
from addons.swift.serializer import SwiftSerializer
from addons.swift.tests.factories import SwiftAccountFactory
class SwiftAddonTestCase(OAuthAddonTestCaseMixin, AddonTestCase):
    # Test-case wiring for the OpenStack Swift storage addon.
    ADDON_SHORT_NAME = 'swift'
    ExternalAccountFactory = SwiftAccountFactory
    Provider = SwiftProvider
    Serializer = SwiftSerializer
    # No live Swift client is used in these tests.
    client = None
    # Fake folder (Swift container) settings consumed by the mixin's tests.
    folder = {
        'path': 'container',
        'name': 'container',
        'id': 'container'
    }
| 580 | 178 |
# -*- coding: utf-8 -*-
'''
Mac hardware information, generated by system_profiler.
This is a separate grains module because it has a dependency on plistlib.
'''
import logging
import salt.utils
import salt.modules.cmdmod
log = logging.getLogger(__name__)
__virtualname__ = 'mac_sp'
# plistlib is needed to parse system_profiler's XML output; record its
# availability so __virtual__ can gate loading of this grains module.
try:
    import plistlib
    has_libs = True
except ImportError:
    has_libs = False
def __virtual__():
    '''
    Only load this grains module on macOS when plistlib is available.
    '''
    if not (salt.utils.platform.is_darwin() and has_libs):
        return False
    return __virtualname__
# Chicken and egg problem, SaltStack style
# __salt__ is already populated with grains by this stage, so call the
# quiet cmdmod helpers directly instead of going through __salt__.
cmdmod = {
    'cmd.run': salt.modules.cmdmod._run_quiet,
    # 'cmd.retcode': salt.modules.cmdmod._retcode_quiet,
    'cmd.run_all': salt.modules.cmdmod._run_all_quiet
}
def _get_spdatatype(sp_data_type):
    '''
    Run system_profiler for a single data type and return its XML output.

    Running with all types slows down execution a bit, so be picky about
    what you need.
    '''
    cmd = 'system_profiler {0} -xml'.format(sp_data_type)
    return cmdmod['cmd.run'](cmd)
def hardware():
'''
Get general hardware information.
Provided by SPHardwareDataType (/System/Library/SystemProfiler/SPPlatformReporter.spreporter)
'''
sp_hardware = _get_spdatatype('SPHardwareDataType')
| 1,311 | 429 |
import json
import requests
import discord
from discord.ext import commands
class Fun_insult(commands.Cog):
    """Cog that fetches a random insult and posts it as an embed."""

    def __init__(self, client):
        self.client = client

    @commands.command()
    async def insult(self, ctx):
        '''insult command'''
        _res = requests.get(url='https://evilinsult.com/generate_insult.php?lang=en&type=json')
        if _res.status_code == 200:
            try:
                _data = _res.json()
                _insult = _data['insult']
            except (ValueError, KeyError):
                # Narrowed from a bare `except:` — only malformed JSON or a
                # missing 'insult' key should abort; programming errors and
                # cancellation now surface instead of being swallowed.
                return False
            _embed = discord.Embed(color=0x42c42b, title=_insult)
            _embed.set_author(name='Bhendi Bot')
            _embed.set_thumbnail(url='https://media.giphy.com/media/2pjspMQCi70k/giphy.gif')
            await ctx.send(ctx.author.mention, embed=_embed)
def setup(client):
    """Extension entry point: register the Fun_insult cog with *client*."""
    client.add_cog(Fun_insult(client))
| 901 | 319 |
"""
G2_RIGHTS.
This module defines Monitor class to monitor CPU and memory utilization.
Pre-requisite non-standard Python module(s):
psutil
"""
import time
import threading
import psutil
class Monitor():
    """Samples system-wide CPU and memory utilization on a timer."""

    def __init__(self, interval=1):
        """ Monitor constructor.

        Args:
            interval (int): the polling interval.

        Attributes:
            interval (int): The polling interval.
            readings (list): Observations as (timestamp, cpu%, mem%) tuples.
            _running (bool): State of monitor.
        """
        self.interval = interval
        self.readings = []
        self._running = False

    def start(self):
        """Arm the monitor; sampling begins once monitor() is called."""
        self._running = True

    def monitor(self):
        """Record one (timestamp, cpu%, mem%) sample and reschedule itself.

        Does nothing (and stops rescheduling) once stop() has been called.
        """
        if self._running:
            now = time.time()
            # System-wide CPU utilization as a percentage.
            cpu_pct = psutil.cpu_percent()
            # Memory usage percent = (total - available) * 100.0 / total.
            mem_pct = dict(psutil.virtual_memory()._asdict())['percent']
            self.readings.append((now, cpu_pct, mem_pct))
            # Re-arm a one-shot timer rather than looping in a thread.
            timer = threading.Timer(self.interval, self.monitor)
            timer.start()

    def stop(self):
        """Stop a monitor.
        """
        self._running = False

    def writeReadings(self, filepath):
        """Write CPU and memory usage to a CSV file, header row included.

        Args:
            filepath (str): Path to output file.
        """
        with open(filepath, "w") as fs:
            fs.write("timestamp,cpuPercent,memoryPercent" + '\n')
            for stamp, cpu_pct, mem_pct in self.readings:
                fs.write(str(stamp) + ',' + str(cpu_pct) + ',' + str(mem_pct) + '\n')
| 1,873 | 521 |
# Module which defines how the I/O system works
import sys, os
from Registers import *
from random import *
from binascii import *
class IO_supervisor(Registers):
    # Simulates byte-oriented I/O devices backed by native OS files.
    # NOTE(review): Python 2 only — relies on `<>`, integer `/` division,
    # and assigning a 1-char str to a bytearray index. The exception names
    # address_out_of_range / IO_fault / io_shutdown presumably come from
    # the `Registers` star import — confirm.
    def __init__(self,my_registers):
        self.i_o = 'stuff'
        self.num_devices = 2
        # native OS files simulate devices
        working_directory = os.getcwd()
        device00 = os.path.join(working_directory,'device00')
        device01 = os.path.join(working_directory,'device01')
        self.device_names = [device00, device01]
        self.files = [open(self.device_names[0],'r+'),\
                      open(self.device_names[1],'r+')]
        self.device_statuses = [1,1]
        self.num_channels = 16
        self.devices_per_channel = 16
        self.my_registers = my_registers
        # Lookup table for nibble -> hex character conversion.
        self.hex_digits = "0123456789ABCDEF"
    def read_character(self,device):
        # low level direct fetch of the next byte from device # device
        # device is between 0 and 255
        # NOTE: files are sequences of 2 character hexidecimal numbers
        if device < 0 or device >255:
            raise address_out_of_range
        my_buffer = bytearray(1)
        hex_string = self.files[device].read(2)
        if len(hex_string) <> 2:
            raise IO_fault
        # convert two hex digits to a byte
        my_buffer[0] = unhexlify(hex_string)
        return my_buffer[0]
    def test_device(self,device):
        # Randomly simulate a device status and return the matching
        # condition-code value from the registers object.
        a = randint(-2,1)
        self.device_statuses[device] = a
        result = bytearray(1)
        if a == -1:
            result[0] = self.my_registers.CC_less_than
        elif a == 0:
            result[0] = self.my_registers.CC_equal_to
        else:
            result[0] = self.my_registers.CC_greater_than
        return result[0]
    def write_character(self,device,char):
        # low level direct write of the next byte to device # device
        # device is between 0 and 255
        # NOTE: files are sequences of 2 character hexidecimal numbers
        if device < 0 or device >255:
            raise address_out_of_range
        # break char into low and high half-bytes
        # (relies on Python 2 integer division semantics of `/`)
        ##        print("in write_character, char is",char)
        ##        print("((char/16)*16) is",((char/16)*16))
        low_4_bits = char - ((char/16)*16)
        ##        print("low 4 bits is ",low_4_bits)
        high_4_bits = (char - low_4_bits)/16
        ##        print("high 4 bits is ",high_4_bits)
        # convert to 2 character hex string
        hex_string = self.hex_digits[high_4_bits] + self.hex_digits[low_4_bits]
        ##        print("hex string is ",hex_string)
        self.files[device].write(hex_string)
    def shutdown(self):
        # Close all simulated device files.
        for i in self.files: i.close()
        ##        raise io_shutdown
        return
    def restart(self):
        # Re-open the simulated device files after a shutdown.
        self.files = [open(self.device_names[0],'r+'),\
                      open(self.device_names[1],'r+')]
        return
| 2,888 | 954 |