signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def bulk_log(self, log_message=u"Еще одна пачка обработана", total=None, part_log_time_minutes=5):
    """Return a BulkLogger instance for processing lists of data.

    :param log_message: text emitted each time the periodic log fires
    :param total: total number of objects, if known in advance
    :param part_log_time_minutes: how often (in minutes) to attempt logging
    :return: BulkLogger
    """
    return BulkLogger(
        log=self.log,
        log_message=log_message,
        total=total,
        part_log_time_minutes=part_log_time_minutes,
    )
def parse(self, fd):
    """very simple parser - but why would we want it to be complex?"""

    def resolve_args(args):
        # FIXME break this out, it's in common with the templating stuff elsewhere
        root = self.sections[0]
        substitutions = {
            '<' + name + '>': value
            for name, value in root.get_variables().items()
        }
        resolved = []
        for arg in args:
            for token, replacement in substitutions.items():
                arg = arg.replace(token, replacement)
            resolved.append(arg)
        return resolved

    def handle_section_defn(keyword, parts):
        # Returns True when the line opened a new section.
        if keyword == '@HostAttrs':
            if len(parts) != 1:
                raise ParserException('usage: @HostAttrs <hostname>')
            if self.sections[0].has_pending_with():
                raise ParserException('@with not supported with @HostAttrs')
            self.sections.append(HostAttrs(parts[0]))
            return True
        if keyword == 'Host':
            if len(parts) != 1:
                raise ParserException('usage: Host <hostname>')
            self.sections.append(Host(parts[0], self.sections[0].pop_pending_with()))
            return True

    def handle_vardef(root, keyword, parts):
        # Returns True when the line was a variable definition.
        if keyword == '@with':
            root.add_pending_with(parts)
            return True

    def handle_set_args(_, parts):
        if len(parts) == 0:
            raise ParserException('usage: @args arg-name ...')
        if not self.is_include():
            return
        if self._args is None or len(self._args) != len(parts):
            raise ParserException('required arguments not passed to include {url} ({parts})'.format(url=self._url, parts=', '.join(parts)))
        root = self.sections[0]
        for key, value in zip(parts, self._args):
            root.set_value(key, value)

    def handle_set_value(_, parts):
        if len(parts) != 2:
            raise ParserException('usage: @set <key> <value>')
        root = self.sections[0]
        root.set_value(*resolve_args(parts))

    def handle_add_type(section, parts):
        if len(parts) != 1:
            raise ParserException('usage: @is <HostAttrName>')
        section.add_type(parts[0])

    def handle_via(section, parts):
        if len(parts) != 1:
            raise ParserException('usage: @via <Hostname>')
        section.add_line(
            'ProxyCommand',
            ('ssh {args} nc %h %p 2> /dev/null'.format(args=pipes.quote(resolve_args(parts)[0])),),
        )

    def handle_identity(section, parts):
        if len(parts) != 1:
            raise ParserException('usage: @identity <name>')
        section.add_identity(resolve_args(parts)[0])

    def handle_include(_, parts):
        if len(parts) == 0:
            raise ParserException('usage: @include <https://...|/path/to/file.sedge> [arg ...]')
        url = parts[0]
        parsed_url = urllib.parse.urlparse(url)
        if parsed_url.scheme == 'https':
            req = requests.get(url, verify=self._verify_ssl)
            text = req.text
        elif parsed_url.scheme == 'file':
            with open(parsed_url.path) as fd:
                text = fd.read()
        elif parsed_url.scheme == '':
            path = os.path.expanduser(url)
            with open(path) as fd:
                text = fd.read()
        else:
            raise SecurityException('error: @includes may only use paths or https:// or file:// URLs')
        subconfig = SedgeEngine(
            self._key_library,
            StringIO(text),
            self._verify_ssl,
            url=url,
            args=resolve_args(parts[1:]),
            parent_keydefs=self.keydefs,
            via_include=True,
        )
        self.includes.append((url, subconfig))

    def handle_keydef(_, parts):
        if len(parts) < 2:
            raise ParserException('usage: @key <name> [fingerprint]...')
        name, fingerprints = parts[0], parts[1:]
        self.keydefs[name] = fingerprints

    def handle_keyword(section, keyword, parts):
        # Dispatch table for the '@' expansion keywords.
        handlers = {
            '@set': handle_set_value,
            '@args': handle_set_args,
            '@is': handle_add_type,
            '@via': handle_via,
            '@include': handle_include,
            '@key': handle_keydef,
            '@identity': handle_identity,
        }
        if keyword in handlers:
            handlers[keyword](section, parts)
            return True

    for line in (t.strip() for t in fd):
        if line.startswith('#') or line == '':
            continue
        keyword, parts = SedgeEngine.parse_config_line(line)
        if handle_section_defn(keyword, parts):
            continue
        if handle_vardef(self.sections[0], keyword, parts):
            continue
        current_section = self.sections[-1]
        if handle_keyword(current_section, keyword, parts):
            continue
        if keyword.startswith('@'):
            raise ParserException("unknown expansion keyword {}".format(keyword))
        # use other rather than parts to avoid messing up user
        # whitespace; we don't handle quotes in here as we don't
        # need to
        current_section.add_line(keyword, parts)
def flags(self):
    """Return set of flags."""
    # Each TIFF.FILE_FLAGS entry has a matching ``is_<name>`` attribute on
    # this object; collect the lower-cased names whose attribute is truthy.
    return {
        name.lower()
        for name in sorted(TIFF.FILE_FLAGS)
        if getattr(self, 'is_' + name)
    }
def addToLayout(self, analysis, position=None):
    """Adds the analysis passed in to the worksheet's layout"""
    # TODO Redux
    layout = self.getLayout()
    container_uid = self.get_container_for(analysis)
    is_routine = (IRequestAnalysis.providedBy(analysis)
                  and not IDuplicateAnalysis.providedBy(analysis))
    if is_routine:
        occupied_uids = [slot['container_uid'] for slot in layout]
        if container_uid in occupied_uids:
            # Reuse the slot already assigned to this container.
            position = next(int(slot['position']) for slot in layout
                            if slot['container_uid'] == container_uid)
        elif not position:
            # Pick the first free position, counting from 1.
            taken = [0] + [int(slot['position']) for slot in layout]
            position = next(pos for pos in range(1, max(taken) + 2)
                            if pos not in taken)
    new_slot = {
        'position': position,
        'type': self.get_analysis_type(analysis),
        'container_uid': container_uid,
        'analysis_uid': api.get_uid(analysis),
    }
    self.setLayout(layout + [new_slot])
def visit(folder, provenance_id, step_name, previous_step_id=None, config=None, db_url=None, is_organised=True):
    """Record all files from a folder into the database.

    Note:
        If a file has been copied from a previous processing step without any
        transformation, it will be detected (by hash) and marked in the DB.
        The type of file will be detected and stored in the DB (NIFTI, DICOM,
        ...). If a file (e.g. a DICOM file) contains some meta-data, those
        will be stored in the DB.

    :param folder: folder path.
    :param provenance_id: provenance label.
    :param step_name: name of the processing step that produced the folder.
    :param previous_step_id: (optional) previous processing step ID. If not
        defined, we assume this is the first processing step.
    :param config: list of flags:
        - 'boost': (optional) consider that all files from a same folder
          share the same meta-data; about 2x faster.
        - 'session_id_by_patient': use PatientID + StudyID as a session ID
          (for data sets whose study IDs are only unique per patient).
        - 'visit_id_in_patient_id': try to split PatientID into VisitID and
          PatientID (for data sets that mix patient IDs and visit IDs).
        - 'visit_id_from_path': take the visit ID from the folder hierarchy
          instead of DICOM meta-data (e.g. useful for PPMI).
        - 'repetition_from_path': take the repetition ID from the folder
          hierarchy instead of DICOM meta-data (e.g. useful for PPMI).
    :param db_url: (optional) database URL. If not defined, it looks for an
        Airflow configuration file.
    :param is_organised: (optional) disable when scanning a folder that has
        not been organised yet (should only affect NIFTI files).
    :return: processing step ID.
    """
    config = config if config else []
    logging.info("Visiting %s", folder)
    logging.info("-> is_organised=%s", str(is_organised))
    logging.info("-> config=%s", str(config))
    logging.info("Connecting to database...")
    db_conn = connection.Connection(db_url)
    step_id = _create_step(db_conn, step_name, provenance_id, previous_step_id)
    previous_files_hash = _get_files_hash_from_step(db_conn, previous_step_id)
    checked = dict()

    # Hoist the flag lookups out of the per-file loop.
    boost = 'boost' in config
    sid_by_patient = 'session_id_by_patient' in config
    vid_in_patient_id = 'visit_id_in_patient_id' in config
    vid_from_path = 'visit_id_from_path' in config
    rep_from_path = 'repetition_from_path' in config

    def process_file(file_path):
        logging.debug("Processing '%s'" % file_path)
        file_type = _find_type(file_path)
        if "DICOM" == file_type:
            is_copy = _hash_file(file_path) in previous_files_hash
            leaf_folder = os.path.split(file_path)[0]
            if leaf_folder not in checked or not boost:
                # BUG FIX: the original passed 'visit_id_in_patient_id' twice;
                # the second of the pair is the visit_id_from_path flag
                # documented above.
                ret = dicom_import.dicom2db(file_path, file_type, is_copy, step_id, db_conn,
                                            sid_by_patient, vid_in_patient_id,
                                            vid_from_path, rep_from_path)
                try:
                    checked[leaf_folder] = ret['repetition_id']
                except KeyError:  # TODO: Remove it when dicom2db will be more stable
                    logging.warning("Cannot find repetition ID !")
            else:
                dicom_import.extract_dicom(file_path, file_type, is_copy, checked[leaf_folder], step_id)
        elif "NIFTI" == file_type and is_organised:
            is_copy = _hash_file(file_path) in previous_files_hash
            nifti_import.nifti2db(file_path, file_type, is_copy, step_id, db_conn,
                                  sid_by_patient, vid_in_patient_id)
        elif file_type:
            is_copy = _hash_file(file_path) in previous_files_hash
            others_import.others2db(file_path, file_type, is_copy, step_id, db_conn)

    if sys.version_info.major == 3 and sys.version_info.minor < 5:
        # glob's recursive '**' needs Python >= 3.5; fall back to os.walk.
        matches = []
        for root, dirnames, filenames in os.walk(folder):
            for filename in fnmatch.filter(filenames, '*'):
                matches.append(os.path.join(root, filename))
        for file_path in matches:
            process_file(file_path)
    else:
        for file_path in glob.iglob(os.path.join(folder, "**/*"), recursive=True):
            process_file(file_path)
    logging.info("Closing database connection...")
    db_conn.close()
    return step_id
def add(self, child):
    """Adds a typed child object to the event handler.

    @param child: Child object to be added.
    """
    # Only Action children are supported; anything else is a model error.
    if not isinstance(child, Action):
        raise ModelError('Unsupported child element')
    self.add_action(child)
def flash_file(self, path, addr, on_progress=None, power_on=False):
    """Flashes the target device.

    The given ``on_progress`` callback will be called as
    ``on_progress(action, progress_string, percentage)`` periodically as the
    data is written to flash. The action is one of ``Compare``, ``Erase``,
    ``Verify``, ``Flash``.

    Args:
        self (JLink): the ``JLink`` instance
        path (str): absolute path to the source file to flash
        addr (int): start address on flash which to write the data
        on_progress (function): callback to be triggered on flash progress
        power_on (boolean): whether to power the target before flashing

    Returns:
        Integer value greater than or equal to zero. Has no significance.

    Raises:
        JLinkException: on hardware errors.
    """
    # Register (or clear) the flash-programming progress callback.
    if on_progress is None:
        self._dll.JLINK_SetFlashProgProgressCallback(0)
    else:
        func = enums.JLinkFunctions.FLASH_PROGRESS_PROTOTYPE(on_progress)
        self._dll.JLINK_SetFlashProgProgressCallback(func)

    # Optionally power the target before flashing.
    if power_on:
        self.power_on()

    # Halt the target before flashing. Wrapped in try/except because the
    # 'halted()' check itself may fail with an exception.
    try:
        if not self.halted():
            self.halt()
    except errors.JLinkException:
        pass

    # Program the target.
    bytes_flashed = self._dll.JLINK_DownloadFile(path.encode(), addr)
    if bytes_flashed < 0:
        raise errors.JLinkFlashException(bytes_flashed)
    return bytes_flashed
def start(self, channel):
    """Start running this virtual device including any necessary worker threads.

    Args:
        channel (IOTilePushChannel): the channel with a stream and trace
            routine for streaming and tracing data through a VirtualInterface
    """
    super(TileBasedVirtualDevice, self).start(channel)
    # Start every hosted tile on the same channel.
    for hosted_tile in self._tiles.values():
        hosted_tile.start(channel=channel)
def Read(self, expected_ids, read_data=True):
    """Read ADB messages and return FileSync packets."""
    if self.send_idx:
        self._Flush()

    # Pull one filesync packet header off the recv buffer. Header is (ID, ...).
    header_data = self._ReadBuffered(self.recv_header_len)
    header = struct.unpack(self.recv_header_format, header_data)
    command_id = self.wire_to_id[header[0]]

    if command_id not in expected_ids:
        if command_id == b'FAIL':
            reason = (self.recv_buffer.decode('utf-8', errors='ignore')
                      if self.recv_buffer else '')
            raise usb_exceptions.AdbCommandFailureException('Command failed: {}'.format(reason))
        raise adb_protocol.InvalidResponseError('Expected one of %s, got %s' % (expected_ids, command_id))

    if not read_data:
        return command_id, header[1:]

    # Header is (ID, ..., size): the last field is the payload size.
    payload_size = header[-1]
    payload = self._ReadBuffered(payload_size)
    return command_id, header[1:-1], payload
def _sig_handler(self, signum, stack):
    '''Handle process INT signal.'''
    # BUG FIX: the original logged "Got SIGINT." before checking signum,
    # claiming SIGINT receipt for any delivered signal.
    if signum == signal.SIGINT:
        log_debug("Got SIGINT.")
        LLNetReal.running = False
        if self._pktqueue.qsize() == 0:
            # put dummy pkt in queue to unblock a possibly stuck user thread
            self._pktqueue.put((None, None, None))
def parse(self, inputstring, parser, preargs, postargs):
    """Use the parser to parse the inputstring with appropriate setup and teardown."""
    self.reset()
    pre_procd = None
    with logger.gather_parsing_stats():
        try:
            pre_procd = self.pre(inputstring, **preargs)
            parsed = parse(parser, pre_procd)
            out = self.post(parsed, **postargs)
        except ParseBaseException as err:
            raise self.make_parse_err(err)
        except CoconutDeferredSyntaxError as err:
            # A deferred syntax error before pre-processing finished is a bug.
            internal_assert(pre_procd is not None, "invalid deferred syntax error in pre-processing", err)
            raise self.make_syntax_err(err, pre_procd)
        except RuntimeError as err:
            # Most likely a recursion-limit failure inside the parser.
            raise CoconutException(
                str(err),
                extra="try again with --recursion-limit greater than the current " + str(sys.getrecursionlimit()),
            )
    if self.strict:
        for name in self.unused_imports:
            if name != "*":
                logger.warn("found unused import", name, extra="disable --strict to dismiss")
    return out
def create_main_frame(self):
    """Build the central widget: matplotlib canvas, navigation toolbar and
    the save/legend/detail/slider controls, laid out in box sizers."""
    self.resize(800, 1000)
    self.main_frame = QWidget()
    self.dpi = 128
    self.fig = Figure((12, 11), dpi=self.dpi)
    self.fig.subplots_adjust(hspace=0.5, wspace=0.5, left=0.1, bottom=0.2, right=0.7, top=0.9)
    # 8 * np.sqrt(3)
    # BUG FIX: the canvas and axes were created twice; the duplicate left a
    # stray orphan FigureCanvas parented to main_frame. Create them once.
    self.canvas = FigureCanvas(self.fig)
    self.canvas.setParent(self.main_frame)
    self.axes = self.fig.add_subplot(111)
    self.axes.axis('off')
    self.axes.set_xlim(-10, 110)
    self.axes.set_ylim(-105 * np.sqrt(3) / 2, 105 * np.sqrt(3) / 2)
    # Create the navigation toolbar, tied to the canvas
    self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
    # Other GUI controls
    self.save_button = QPushButton('&Save')
    self.save_button.clicked.connect(self.saveImgFile)
    self.legend_cb = QCheckBox('&Legend')
    self.legend_cb.setChecked(True)
    self.legend_cb.stateChanged.connect(self.QAPF)  # int
    self.slider_left_label = QLabel('Plutonic')
    self.slider_right_label = QLabel('Volcanic')
    self.slider = QSlider(Qt.Horizontal)
    self.slider.setRange(0, 1)
    self.slider.setValue(0)
    self.slider.setTracking(True)
    self.slider.setTickPosition(QSlider.TicksBothSides)
    self.slider.valueChanged.connect(self.QAPF)  # int
    self.detail_cb = QCheckBox('&Detail')
    self.detail_cb.setChecked(True)
    self.detail_cb.stateChanged.connect(self.QAPF)  # int
    # Layout with box sizers
    self.hbox = QHBoxLayout()
    for w in [self.save_button, self.detail_cb, self.legend_cb,
              self.slider_left_label, self.slider, self.slider_right_label]:
        self.hbox.addWidget(w)
        self.hbox.setAlignment(w, Qt.AlignVCenter)
    self.vbox = QVBoxLayout()
    self.vbox.addWidget(self.mpl_toolbar)
    self.vbox.addWidget(self.canvas)
    self.vbox.addLayout(self.hbox)
    self.textbox = GrowingTextEdit(self)
    self.vbox.addWidget(self.textbox)
    self.main_frame.setLayout(self.vbox)
    self.setCentralWidget(self.main_frame)
    w = self.width()
    # BUG FIX: setMinimumWidth requires an int; w / 10 is a float on
    # Python 3 and raises TypeError under PyQt5.
    self.slider.setMinimumWidth(int(w / 10))
    self.slider_left_label.setMinimumWidth(int(w / 10))
    self.slider_right_label.setMinimumWidth(int(w / 10))
def get(self):
    """Return sbo arch"""
    current = self.arch
    if current.startswith("i") and current.endswith("86"):
        # ix86 family collapses to the generic x86 label.
        self.arch = self.x86
    # NOTE(review): this branch inspects self.meta.arch while the first
    # branch inspects self.arch — looks inconsistent; confirm which source
    # of truth is intended.
    elif self.meta.arch.startswith("arm"):
        self.arch = self.arm
    return self.arch
def _drawContents(self, currentRti=None):
    """Draws the attributes of the currentRTI"""
    table = self.table
    table.setUpdatesEnabled(False)  # batch the cell updates for speed
    try:
        table.clearContents()
        verticalHeader = table.verticalHeader()
        verticalHeader.setSectionResizeMode(QtWidgets.QHeaderView.Fixed)

        attributes = {} if currentRti is None else currentRti.attributes
        table.setRowCount(len(attributes))
        for row, (attrName, attrValue) in enumerate(sorted(attributes.items())):
            attrStr = to_string(attrValue, decode_bytes='utf-8')
            try:
                type_str = type_name(attrValue)
            except Exception as ex:
                # Never let a bad value break the whole table; show a marker.
                logger.exception(ex)
                type_str = "<???>"

            nameItem = QtWidgets.QTableWidgetItem(attrName)
            nameItem.setToolTip(attrName)
            table.setItem(row, self.COL_ATTR_NAME, nameItem)

            valItem = QtWidgets.QTableWidgetItem(attrStr)
            valItem.setToolTip(attrStr)
            table.setItem(row, self.COL_VALUE, valItem)

            table.setItem(row, self.COL_ELEM_TYPE, QtWidgets.QTableWidgetItem(type_str))
            table.resizeRowToContents(row)
        verticalHeader.setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
    finally:
        table.setUpdatesEnabled(True)
def get_package_from_string(txt, paths=None):
    """Get a package given a string.

    Args:
        txt (str): String such as 'foo', 'bah-1.3'.
        paths (list of str, optional): paths to search for package, defaults
            to `config.packages_path`.

    Returns:
        `Package` instance, or None if no package was found.
    """
    parsed = VersionedObject(txt)
    return get_package(parsed.name, parsed.version, paths=paths)
def label_from_instance(self, obj):
    """Creates labels which represent the tree level of each node when
    generating option labels."""
    # Repeat the indicator once per tree level of the node.
    level = getattr(obj, obj._mptt_meta.level_attr)
    indicator = mark_safe(conditional_escape(self.level_indicator) * level)
    return mark_safe(u'%s %s' % (indicator, conditional_escape(smart_unicode(obj))))
def do_help(self, arg):
    """Sets up the header for the help command that explains the background on how to use
    the script generally. Help for each command then stands alone in the context of this
    documentation. Although we could have documented this on the wiki, it is better served
    when shipped with the shell."""
    if arg == "":
        # Only show the long overview for a bare 'help'; per-command help is
        # delegated untouched to cmd.Cmd below.
        lines = [
            ("The fortpy unit testing analysis shell makes it easy to analyze the results "
             "of multiple test cases, make plots of trends and tabulate values for use in "
             "other applications. This documentation will provide an overview of the basics. "
             "Use 'help <command>' to get specific command help."),
            ("Each fortpy shell session can hold the results of multiple unit tests. You can "
             "load a unit test's results into the session using one of the 'parse' commands. "
             "Once the test is loaded you can tabulate and plot results by setting test case "
             "filters ('filter'), and independent and dependent variables ('indep', 'dep')."
             "Switch between different unit tests loaded into the session using 'set'."),
            ("To make multiple plots/tables for the same unit test, create new analysis "
             "groups ('group'). "
             "Each group has its own set of properties that can be set (e.g. variables, plot "
             "labels for axes, filters for test cases, etc.) The possible properties that affect "
             "each command are listed in the specific help for that command."),
            ("You can save the state of a shell session using 'save' and then recover it at "
             "a later time using 'load'. When a session is re-loaded, all the variables and "
             "properties/settings for plots/tables are maintained and the latest state of the "
             "unit test's results are used. A console history is also maintained with bash-like "
             "commands (e.g. Ctrl-R for reverse history search, etc.) across sessions. You can "
             "manipulate its behavior with 'history'."),
        ]
        self._fixed_width_info(lines)
    cmd.Cmd.do_help(self, arg)
def _getSensorLimits(self):
    """Returns a list of 2-tuples, e.g. [(-3.14, 3.14), (-0.001, 0.001)],
    one tuple per parameter, giving min and max for that parameter."""
    limits = []
    limits.extend(self._getTotalDemandLimits())
    # Other limit sources are currently disabled: _getDemandLimits,
    # _getPriceLimits, _getVoltageSensorLimits, _getVoltageMagnitudeLimits,
    # _getVoltageAngleLimits, _getVoltageLambdaLimits, _getFlowLimits.
    logger.debug("Sensor limits: %s" % limits)
    return limits
def get_trace(self, reference=None):
    """Returns a generator of parents up to :reference:, including reference

    If :reference: is *None* root is assumed
    Closest ancestor goes first"""
    node = self
    while node:
        yield node
        if node == reference:
            # Reached the requested ancestor; stop without checking further.
            return
        node = node.parent
    # Walked past the root without meeting the reference.
    if reference is not None:
        raise Exception('Reference {} is not in Ancestry'.format(reference))
def open_file(self, fname, length=None, offset=None, swap=None, block=None, peek=None):
    '''Opens the specified file with all pertinent configuration settings.'''
    # Fall back to the instance-wide defaults for any unspecified option.
    if length is None:
        length = self.length
    if offset is None:
        offset = self.offset
    if swap is None:
        swap = self.swap_size
    return binwalk.core.common.BlockFile(fname,
                                         subclass=self.subclass,
                                         length=length,
                                         offset=offset,
                                         swap=swap,
                                         block=block,
                                         peek=peek)
def generate_public_key(self):
    """Generates a public key from the hex-encoded private key using elliptic
    curve cryptography. The private key is multiplied by a predetermined point
    on the elliptic curve called the generator point, G, resulting in the
    corresponding public key. The generator point is always the same for all
    Bitcoin users.

    Jacobian coordinates are used to represent the elliptic curve point G.
    https://en.wikibooks.org/wiki/Cryptography/Prime_Curve/Jacobian_Coordinates

    Bitcoin public keys are 65 bytes: a 0x04 prefix byte, then 32 bytes of
    X coordinate and 32 bytes of Y coordinate, typically encoded as
    130 hex characters.

    Returns:
        str: The public key in hexadecimal representation.
    """
    secret = int(self.private_key, 16)
    if secret >= self.N:
        raise Exception('Invalid private key.')
    generator = JacobianPoint(self.Gx, self.Gy, 1)
    point = generator * secret
    # Zero-pad each coordinate to 64 hex characters (32 bytes).
    x_hex = '{0:0{1}x}'.format(point.X, 64)
    y_hex = '{0:0{1}x}'.format(point.Y, 64)
    return '04' + x_hex + y_hex
def createSummaryFile(results, maf, prefix):
    """Create the final summary file containing plate bias results.

    :param results: the list of all the significant results.
    :param maf: the minor allele frequency of the significant results.
    :param prefix: the prefix of all the files.

    :type results: list
    :type maf: dict
    :type prefix: str

    :raises ProgramError: if the output file cannot be written.
    """
    o_filename = prefix + ".significant_SNPs.summary"
    try:
        # BUG FIX: replaced Python-2-only ``print >> file`` statements with
        # portable file.write calls producing identical output.
        with open(o_filename, "w") as o_file:
            o_file.write("\t".join(("chrom", "pos", "name", "maf", "p", "odds", "plate")) + "\n")
            for row in results:
                o_file.write("\t".join((row.chrom, row.pos, row.name,
                                        maf.get(row.name, "N/A"),
                                        row.p, row.odds, row.plate)) + "\n")
    except IOError:
        msg = "{}: cannot write file".format(o_filename)
        raise ProgramError(msg)
def count_comments_handler(sender, **kwargs):
    """Update Entry.comment_count when a public comment was posted."""
    comment = kwargs['comment']
    if not comment.is_public:
        return
    entry = comment.content_object
    if isinstance(entry, Entry):
        # F() keeps the increment atomic at the database level.
        entry.comment_count = F('comment_count') + 1
        entry.save(update_fields=['comment_count'])
def requestBusName(self, newName, allowReplacement=False, replaceExisting=False, doNotQueue=True, errbackUnlessAcquired=True):
    """Calls org.freedesktop.DBus.RequestName to request that the specified
    bus name be associated with the connection.

    @type newName: C{string}
    @param newName: Bus name to acquire
    @type allowReplacement: C{bool}
    @param allowReplacement: If True (defaults to False) and another
        application later requests this same name, the new requester will be
        given the name and this connection will lose ownership.
    @type replaceExisting: C{bool}
    @param replaceExisting: If True (defaults to False) and another
        application owns the name but specified allowReplacement at the time
        of the name acquisition, this connection will assume ownership of the
        bus name.
    @type doNotQueue: C{bool}
    @param doNotQueue: If True (defaults to True) the name request will fail
        if the name is currently in use. If False, the request will cause
        this connection to be queued for ownership of the requested name.
    @type errbackUnlessAcquired: C{bool}
    @param errbackUnlessAcquired: If True (defaults to True) an
        L{twisted.python.failure.Failure} will be returned if the name is
        not acquired.
    @rtype: L{twisted.internet.defer.Deferred}
    @returns: a Deferred to
    """
    # Assemble the RequestName flags bitmask.
    flags = 0
    if allowReplacement:
        flags |= 0x1  # DBUS_NAME_FLAG_ALLOW_REPLACEMENT
    if replaceExisting:
        flags |= 0x2  # DBUS_NAME_FLAG_REPLACE_EXISTING
    if doNotQueue:
        flags |= 0x4  # DBUS_NAME_FLAG_DO_NOT_QUEUE

    d = self.callRemote(
        '/org/freedesktop/DBus',
        'RequestName',
        interface='org.freedesktop.DBus',
        signature='su',
        body=[newName, flags],
        destination='org.freedesktop.DBus',
    )

    def on_result(r):
        if errbackUnlessAcquired and r not in (NAME_ACQUIRED, NAME_ALREADY_OWNER):
            raise error.FailedToAcquireName(newName, r)
        return r

    d.addCallback(on_result)
    return d
def _init_map(self):
    """stub"""
    super(SimpleDifficultyItemFormRecord, self)._init_map()
    # Seed the 'difficulty' text with the metadata-provided default value.
    defaults = self._difficulty_metadata['default_string_values']
    self.my_osid_object_form._my_map['texts']['difficulty'] = defaults[0]
def filter_index(collection, predicate=None, index=None):
    """Filter collection with predicate function and index.

    If index is not found, returns None.

    :param collection:
    :type collection: collection supporting iteration and slicing
    :param predicate: function to filter the collection with
    :type predicate: function
    :param index: position of a single element to retrieve
    :type index: int
    :return: filtered list, or single element of filtered list if index is defined
    :rtype: list or object
    """
    # Calling-convention shortcut: filter_index(coll, 3) means index=3.
    if isinstance(predicate, int) and index is None:
        predicate, index = None, predicate

    # Rebuild the same container type from the filtered elements.
    filtered = collection.__class__(filter(predicate, collection)) if predicate else collection

    if index is None:
        return filtered
    try:
        return filtered[index]
    except IndexError:
        return None
def asBinary(self):
    """Get |ASN.1| value as a text string of bits."""
    bits = binary.bin(self._value)[2:]
    # Left-pad with zeros so the result length matches the BitString length.
    return bits.zfill(len(self._value))
def quit(self):
    """This could be called from another thread, so let's do this via alarm"""
    def _exit_loop(*args):
        raise urwid.ExitMainLoop()
    # Stop workers without waiting, then exit the UI loop on the next tick.
    self.worker.shutdown(wait=False)
    self.ui_worker.shutdown(wait=False)
    self.loop.set_alarm_in(0, _exit_loop)
def interface_ip(interface):
    """Determine the IP assigned to us by the given network interface.

    Uses the SIOCGIFADDR ioctl (0x8915) on a throwaway UDP socket
    (Linux-specific).

    :param interface: interface name, e.g. "eth0" (truncated to 15 chars).
    :return: dotted-quad IPv4 address string.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        packed = fcntl.ioctl(
            sock.fileno(),
            0x8915,  # SIOCGIFADDR
            struct.pack('256s', interface[:15]),
        )
    finally:
        # BUG FIX: the original leaked this socket's file descriptor.
        sock.close()
    return socket.inet_ntoa(packed[20:24])
def hexists(self, name, key):
    """Returns ``True`` if the field exists, ``False`` otherwise.

    :param name: str the name of the redis key
    :param key: the member of the hash
    :return: Future()
    """
    with self.pipe as pipe:
        encoded_member = self.memberparse.encode(key)
        return pipe.hexists(self.redis_key(name), encoded_member)
def send_request(self, *args, **kwargs):
    """Intercept connection errors which suggest that a managed host has
    crashed and raise an exception indicating the location of the log"""
    try:
        return super(JSHost, self).send_request(*args, **kwargs)
    except RequestsConnectionError as e:
        looks_crashed = (self.manager and self.has_connected and self.logfile
                         and 'unsafe' not in kwargs)
        if looks_crashed:
            raise ProcessError(
                '{} appears to have crashed, you can inspect the log file at {}'.format(
                    self.get_name(),
                    self.logfile,
                )
            )
        raise six.reraise(RequestsConnectionError, RequestsConnectionError(*e.args), sys.exc_info()[2])
def start(name, call=None):
    '''
    Start a node

    CLI Examples:

    .. code-block:: bash

        salt-cloud -a start myinstance
    '''
    if call != 'action':
        # BUG FIX: the original message said "stop action" — copy/paste
        # error; this is the start action.
        raise SaltCloudSystemExit(
            'The start action must be called with -a or --action.'
        )
    log.info('Starting node %s', name)
    instanceId = _get_node(name)['InstanceId']
    params = {'Action': 'StartInstance', 'InstanceId': instanceId}
    result = query(params)
    return result
def _dateversion ( self ) : # type : ( ) - > int
"""Return the build / revision date as an integer " yyyymmdd " .""" | import re
if self . _head :
ma = re . search ( r'(?<=\()(.*)(?=\))' , self . _head )
if ma :
s = re . split ( r'[, ]+' , ma . group ( 0 ) )
if len ( s ) >= 3 : # month
month_names = ( 'Jan' , 'Feb' , 'Mar' , 'Apr' , 'May' , 'Jun' , 'Jul' , 'Aug' , 'Sep' , 'Oct' , 'Nov' , 'Dec' )
if s [ 0 ] in month_names :
m = month_names . index ( s [ 0 ] ) + 1
# date
if s [ 1 ] . isdigit ( ) :
d = int ( s [ 1 ] )
if 1 <= d <= 31 : # year
if s [ 2 ] . isdigit ( ) :
y = int ( s [ 2 ] )
if y >= 1 : # Return an integer as " yyyymmdd " .
return y * 10000 + m * 100 + d
raise ValueError ( 'failed to parse "{0}"' . format ( self . _head ) )
raise ValueError ( 'no first line' ) |
def merge_mhc_peptide_calls(job, antigen_predictions, transgened_files, univ_options):
    """Merge all the calls generated by spawn_antigen_predictors.

    :param dict antigen_predictions: The return value from running
        :meth:`spawn_antigen_predictors`
    :param dict transgened_files: The transgened peptide files
    :param dict univ_options: Universal options for ProTECT
    :return: merged binding predictions
        output_files:
            |- 'mhcii_merged_files.list': fsID
            +- 'mhci_merged_files.list': fsID
    :rtype: dict
    :raises RuntimeError: if neither MHCI nor MHCII produced any calls.
    """
    job.fileStore.logToMaster('Merging MHC calls')
    work_dir = os.getcwd()
    # Pull the 10-mer (MHCI) and 15-mer (MHCII) peptide fastas and their
    # name maps out of the job file store.
    pept_files = {
        '10_mer.faa': transgened_files['transgened_tumor_10_mer_peptides.faa'],
        '10_mer.faa.map': transgened_files['transgened_tumor_10_mer_peptides.faa.map'],
        '15_mer.faa': transgened_files['transgened_tumor_15_mer_peptides.faa'],
        '15_mer.faa.map': transgened_files['transgened_tumor_15_mer_peptides.faa.map']}
    pept_files = get_files_from_filestore(job, pept_files, work_dir)
    mhci_preds, mhcii_preds = antigen_predictions
    # Track whether either predictor class produced any non-empty result.
    mhci_called = mhcii_called = False
    # Merge MHCI calls
    # Read 10-mer pepts into memory
    peptides = read_peptide_file(pept_files['10_mer.faa'])
    with open(pept_files['10_mer.faa.map'], 'r') as mapfile:
        pepmap = json.load(mapfile)
    with open('/'.join([work_dir, 'mhci_merged_files.list']), 'w') as mhci_resfile:
        for key in mhci_preds:
            tumor_file = job.fileStore.readGlobalFile(mhci_preds[key]['tumor'])
            with open(tumor_file) as t_f:
                # NOTE(review): eval() on file contents is dangerous if the
                # file could ever come from an untrusted source — confirm
                # the stored payload really requires eval rather than
                # plain json.
                tumor_df = pandas.read_json(eval(t_f.read()))
            if tumor_df.empty:
                continue
            mhci_called = True
            # TODO: There must be a better way of doing this
            normal_df = _process_mhci(
                job.fileStore.readGlobalFile(mhci_preds[key]['normal']), normal=True)
            # Map each tumor peptide to the prediction of its normal
            # counterpart so both can be printed side by side.
            normal_dict = normal_df.set_index('pept')['tumor_pred']
            normal_preds = [normal_dict[x] for x in list(tumor_df['normal_pept'])]
            tumor_df['normal_pred'] = normal_preds
            for pred in tumor_df.itertuples():
                print_mhc_peptide(pred, peptides, pepmap, mhci_resfile)
    # Merge MHCII calls
    # read 15-mer pepts into memory
    peptides = read_peptide_file(pept_files['15_mer.faa'])
    with open(pept_files['15_mer.faa.map'], 'r') as mapfile:
        pepmap = json.load(mapfile)
    # Incorporate peptide names into the merged calls
    with open('/'.join([work_dir, 'mhcii_merged_files.list']), 'w') as mhcii_resfile:
        for key in mhcii_preds:
            if mhcii_preds[key]['predictor'] is None:
                continue
            mhcii_called = True
            tumor_file = job.fileStore.readGlobalFile(mhcii_preds[key]['tumor'])
            with open(tumor_file) as t_f:
                # NOTE(review): same eval() concern as above.
                tumor_df = pandas.read_json(eval(t_f.read()))
            if tumor_df.empty:
                continue
            # TODO: There must be a better way of doing this
            # Each MHCII predictor has its own output format; dispatch to
            # the matching normal-sample parser.
            if mhcii_preds[key]['predictor'] == 'Consensus':
                normal_df = _process_consensus_mhcii(
                    job.fileStore.readGlobalFile(mhcii_preds[key]['normal'][0]), normal=True)
            elif mhcii_preds[key]['predictor'] == 'Sturniolo':
                normal_df = _process_sturniolo_mhcii(
                    job.fileStore.readGlobalFile(mhcii_preds[key]['normal'][0]), normal=True)
            elif mhcii_preds[key]['predictor'] == 'netMHCIIpan':
                normal_df = _process_net_mhcii(
                    job.fileStore.readGlobalFile(mhcii_preds[key]['normal'][0]), normal=True)
            else:
                assert False
            normal_dict = normal_df.set_index('pept')['tumor_pred']
            normal_preds = [normal_dict[x] for x in list(tumor_df['normal_pept'])]
            tumor_df['normal_pred'] = normal_preds
            for pred in tumor_df.itertuples():
                print_mhc_peptide(pred, peptides, pepmap, mhcii_resfile,
                                  netmhc=mhcii_preds[key]['predictor'] == 'netMHCIIpan')
    if not (mhci_called or mhcii_called):
        raise RuntimeError('No peptides available for ranking')
    output_files = defaultdict()
    # Both result files are closed by now; only their .name is used.
    for mhc_file in [mhci_resfile.name, mhcii_resfile.name]:
        output_files[os.path.split(mhc_file)[1]] = job.fileStore.writeGlobalFile(mhc_file)
        export_results(job, output_files[os.path.split(mhc_file)[1]], mhc_file,
                       univ_options, subfolder='binding_predictions')
    return output_files
def on_connect(client):
    """Sample on_connect function.

    Handles new connections: announces the newcomer to everyone already
    connected, registers the client and greets it.

    NOTE(review): uses Python 2 ``print`` statement syntax; this code is
    Python 2 only.
    """
    print "++ Opened connection to %s" % client.addrport()
    broadcast('%s joins the conversation.\n' % client.addrport())
    CLIENT_LIST.append(client)
    client.send("Welcome to the Chat Server, %s.\n" % client.addrport())
def delete(access_key):
    """Delete an existing keypair.

    ACCESSKEY: ACCESSKEY for a keypair to delete.

    Exits the process with status 1 when the API call fails or reports
    an error; prints a confirmation message on success.
    """
    with Session() as session:
        try:
            result = session.KeyPair.delete(access_key)
        except Exception as exc:
            print_error(exc)
            sys.exit(1)
        if result['ok']:
            print('Key pair is deleted: ' + access_key + '.')
        else:
            print_fail('KeyPair deletion has failed: {0}'.format(result['msg']))
            sys.exit(1)
def _dump_json ( self , data , new_line = False ) :
"""Helper function to marshal object into JSON string .
Additionally a sha256sum of the created JSON string is generated .""" | # We do not want any spaces between keys and values in JSON
json_data = json . dumps ( data , separators = ( ',' , ':' ) )
if new_line :
json_data = "%s\n" % json_data
# Generate sha256sum of the JSON data , may be handy
sha = hashlib . sha256 ( json_data . encode ( 'utf-8' ) ) . hexdigest ( )
return json_data , sha |
def build(self, tokenlist):
    """Build a Wikicode object from a list tokens and return it.

    :param tokenlist: tokens to consume, in document order.
    :return: the Wikicode produced by popping the root of the stack.
    """
    self._tokens = tokenlist
    # Reverse so pop() takes tokens from the logical front in O(1).
    self._tokens.reverse()
    self._push()
    while self._tokens:
        node = self._handle_token(self._tokens.pop())
        self._write(node)
    return self._pop()
def share_application_with_accounts(application_id, account_ids, sar_client=None):
    """Share the application privately with given AWS account IDs.

    :param application_id: The Amazon Resource Name (ARN) of the application
    :type application_id: str
    :param account_ids: List of AWS account IDs, or *
    :type account_ids: list of str
    :param sar_client: The boto3 client used to access SAR
    :type sar_client: boto3.client
    :raises ValueError
    """
    if not (application_id and account_ids):
        raise ValueError('Require application id and list of AWS account IDs to share the app')
    # Fall back to a default SAR client when none was supplied.
    client = sar_client or boto3.client('serverlessrepo')
    # A Deploy-permission policy for the given accounts.
    policy = ApplicationPolicy(account_ids, [ApplicationPolicy.DEPLOY])
    policy.validate()
    client.put_application_policy(
        ApplicationId=application_id,
        Statements=[policy.to_statement()],
    )
def RY(angle, qubit):
    """Produces the RY gate::

        RY(phi) = [[cos(phi / 2), -sin(phi / 2)],
                   [sin(phi / 2),  cos(phi / 2)]]

    This gate is a single qubit Y-rotation.

    :param angle: The angle to rotate around the y-axis on the bloch sphere.
    :param qubit: The qubit apply the gate to.
    :returns: A Gate object.
    """
    return Gate(name="RY", params=[angle], qubits=[unpack_qubit(qubit)])
def set_state(self, site, timestamp=None):
    """Write status dict to client status file.

    Records (or, when ``timestamp`` is None, removes) the timestamp for
    ``site`` in the ``[incremental]`` section of ``self.status_file``.

    FIXME - should have some file lock to avoid race

    :param site: site whose state is being recorded.
    :param timestamp: value to store; ``None`` removes the entry.
    """
    parser = ConfigParser()
    parser.read(self.status_file)
    status_section = 'incremental'
    if not parser.has_section(status_section):
        parser.add_section(status_section)
    if timestamp is None:
        # No timestamp means "forget this site".
        parser.remove_option(status_section, self.config_site_to_name(site))
    else:
        parser.set(status_section, self.config_site_to_name(site), str(timestamp))
    # Fix: the `with` block already closes the file; the original's
    # extra explicit close() was redundant and has been removed.
    with open(self.status_file, 'w') as configfile:
        parser.write(configfile)
def _guess_type_from_validator(validator):
    """Utility method to return the declared type of an attribute or None.

    It handles _OptionalValidator and _AndValidator in order to unpack
    the validators.

    :param validator:
    :return: the type of attribute declared in an inner 'instance_of'
        validator (if any is found, the first one is used) or None if no
        inner 'instance_of' validator is found
    """
    if isinstance(validator, _OptionalValidator):
        # Optional wrapper: recurse into the wrapped validator.
        return _guess_type_from_validator(validator.validator)
    if isinstance(validator, _AndValidator):
        # And-sequence: the first inner validator yielding a type wins.
        for inner in validator.validators:
            guessed = _guess_type_from_validator(inner)
            if guessed is not None:
                return guessed
        return None
    if isinstance(validator, _InstanceOfValidator):
        # Found an instance_of validator: its type is the answer.
        return validator.type
    # Nothing recognised: the type could not be determined.
    return None
def _accumulate(data_list, no_concat=()):
    """Concatenate a list of dicts `(name, array)`.

    You can specify some names which arrays should not be concatenated.
    This is necessary with lists of plots with different sizes.
    """
    accumulator = Accumulator()
    for entry in data_list:
        for key, value in entry.items():
            accumulator.add(key, value)
    # Concatenated arrays for every name except the excluded ones.
    result = {}
    for key in accumulator.names:
        if key not in no_concat:
            result[key] = accumulator[key]
    # Some variables should not be concatenated but should be kept as
    # lists. This is when there can be several arrays of variable length
    # (NumPy doesn't support ragged arrays).
    for key in no_concat:
        result[key] = accumulator.get(key)
    return result
def func_args_as_dict(func, args, kwargs):
    """Return given function's positional and key value arguments as an
    ordered dictionary.
    """
    # getargspec was removed from Python 3's inspect in favour of
    # getfullargspec; pick the right one for the interpreter.
    if six.PY2:
        _getargspec = inspect.getargspec
    else:
        _getargspec = inspect.getfullargspec
    # Positional parameter names first, then any extra keyword names,
    # de-duplicated while preserving order.
    names = itertools.chain(_getargspec(func)[0], kwargs.keys())
    arg_names = list(OrderedDict.fromkeys(names))
    positional = list(six.moves.zip(arg_names, args))
    return OrderedDict(positional + list(kwargs.items()))
def set_order(self, order):
    """Takes a list of dictionaries. Those correspond to the arguments of
    `list.sort` and must contain the keys 'key' and 'reverse' (a boolean).

    You must call `set_labels` before this!
    """
    # Model columns: (reverse flag, sort key name).
    m = gtk.ListStore(bool, str)
    for item in order:
        m.append((item['reverse'], item['key']))
    # TODO fill with __labels missing in order.
    self.set_model(m)
def _resize(self, shape, format=None, internalformat=None):
    """Internal method for resize.

    Normalizes ``shape``, resolves which (internal)format to use —
    keeping the current one when the channel count still matches —
    validates the combination, then stores the new state and emits a
    GLIR ``SIZE`` command.

    :param shape: new texture shape; the last dimension is the channel
        count.
    :param format: texture format, or None to derive it from the shape.
    :param internalformat: internal format, or None to keep/derive it.
    :raises RuntimeError: if the texture is not resizable.
    :raises ValueError: if format/internalformat are invalid or do not
        match the channel count implied by ``shape``.
    """
    shape = self._normalize_shape(shape)
    # Check
    if not self._resizable:
        raise RuntimeError("Texture is not resizable")
    # Determine format
    if format is None:
        format = self._formats[shape[-1]]
        # Keep current format if channels match
        if self._format and self._inv_formats[self._format] == self._inv_formats[format]:
            format = self._format
    else:
        format = check_enum(format)
    if internalformat is None:
        # Keep current internalformat if channels match
        if self._internalformat and self._inv_internalformats[self._internalformat] == shape[-1]:
            internalformat = self._internalformat
    else:
        internalformat = check_enum(internalformat)
    # Check
    if format not in self._inv_formats:
        raise ValueError('Invalid texture format: %r.' % format)
    elif shape[-1] != self._inv_formats[format]:
        raise ValueError('Format does not match with given shape. '
                         '(format expects %d elements, data has %d)' %
                         (self._inv_formats[format], shape[-1]))
    if internalformat is None:
        # None is allowed: the backend will pick a default.
        pass
    elif internalformat not in self._inv_internalformats:
        raise ValueError(
            'Invalid texture internalformat: %r. Allowed formats: %r' %
            (internalformat, self._inv_internalformats))
    elif shape[-1] != self._inv_internalformats[internalformat]:
        raise ValueError('Internalformat does not match with given shape.')
    # Store and send GLIR command
    self._shape = shape
    self._format = format
    self._internalformat = internalformat
    self._glir.command('SIZE', self._id, self._shape, self._format, self._internalformat)
def pca(data: Union[AnnData, np.ndarray, spmatrix],
        n_comps: int = N_PCS,
        zero_center: Optional[bool] = True,
        svd_solver: str = 'auto',
        random_state: int = 0,
        return_info: bool = False,
        use_highly_variable: Optional[bool] = None,
        dtype: str = 'float32',
        copy: bool = False,
        chunked: bool = False,
        chunk_size: Optional[int] = None,
        ) -> Union[AnnData, np.ndarray, spmatrix]:
    """Principal component analysis [Pedregosa11]_.

    Computes PCA coordinates, loadings and variance decomposition. Uses the
    implementation of *scikit-learn* [Pedregosa11]_.

    Parameters
    ----------
    data
        The (annotated) data matrix of shape ``n_obs`` x ``n_vars``.
        Rows correspond to cells and columns to genes.
    n_comps
        Number of principal components to compute.
    zero_center
        If `True`, compute standard PCA from covariance matrix.
        If ``False``, omit zero-centering variables
        (uses :class:`~sklearn.decomposition.TruncatedSVD`),
        which allows to handle sparse input efficiently.
        Passing ``None`` decides automatically based on sparseness of the data.
    svd_solver
        SVD solver to use:
        ``'arpack'``
            for the ARPACK wrapper in SciPy (:func:`~scipy.sparse.linalg.svds`)
        ``'randomized'``
            for the randomized algorithm due to Halko (2009).
        ``'auto'`` (the default)
            chooses automatically depending on the size of the problem.
    random_state
        Change to use different initial states for the optimization.
    return_info
        Only relevant when not passing an :class:`~anndata.AnnData`:
        see "**Returns**".
    use_highly_variable
        Whether to use highly variable genes only, stored in
        ``.var['highly_variable']``.
        By default uses them if they have been determined beforehand.
    dtype
        Numpy data type string to which to convert the result.
    copy
        If an :class:`~anndata.AnnData` is passed, determines whether a copy
        is returned. Is ignored otherwise.
    chunked
        If ``True``, perform an incremental PCA on segments of ``chunk_size``.
        The incremental PCA automatically zero centers and ignores settings of
        ``random_seed`` and ``svd_solver``. If ``False``, perform a full PCA.
    chunk_size
        Number of observations to include in each chunk.
        Required if ``chunked=True`` was passed.

    Returns
    -------
    X_pca : :class:`scipy.sparse.spmatrix` or :class:`numpy.ndarray`
        If `data` is array-like and ``return_info=False`` was passed,
        this function only returns `X_pca`...
    adata : anndata.AnnData
        ...otherwise if ``copy=True`` it returns or else adds fields to ``adata``:

        ``.obsm['X_pca']``
            PCA representation of data.
        ``.varm['PCs']``
            The principal components containing the loadings.
        ``.uns['pca']['variance_ratio']``
            Ratio of explained variance.
        ``.uns['pca']['variance']``
            Explained variance, equivalent to the eigenvalues of the
            covariance matrix.
    """
    # chunked calculation is not randomized, anyways
    if svd_solver in {'auto', 'randomized'} and not chunked:
        logg.info(
            'Note that scikit-learn\'s randomized PCA might not be exactly '
            'reproducible across different computational platforms. For exact '
            'reproducibility, choose `svd_solver=\'arpack\'.` This will likely '
            'become the Scanpy default in the future.')
    data_is_AnnData = isinstance(data, AnnData)
    if data_is_AnnData:
        adata = data.copy() if copy else data
    else:
        # Wrap raw arrays so the rest of the code has a single code path.
        adata = AnnData(data)
    logg.info('computing PCA with n_comps =', n_comps, r=True)
    if adata.n_vars < n_comps:
        # Cannot request more components than variables.
        n_comps = adata.n_vars - 1
        logg.msg('reducing number of computed PCs to', n_comps,
                 'as dim of data is only', adata.n_vars, v=4)
    if use_highly_variable is True and 'highly_variable' not in adata.var.keys():
        raise ValueError(
            'Did not find adata.var[\'highly_variable\']. '
            'Either your data already only consists of highly-variable genes '
            'or consider running `pp.filter_genes_dispersion` first.')
    if use_highly_variable is None:
        # Default: restrict to highly variable genes iff they were annotated.
        use_highly_variable = True if 'highly_variable' in adata.var.keys() else False
    if use_highly_variable:
        logg.info('computing PCA on highly variable genes')
    adata_comp = adata[:, adata.var['highly_variable']] if use_highly_variable else adata
    if chunked:
        # Incremental PCA: always zero-centered; the other solver knobs
        # do not apply.
        if not zero_center or random_state or svd_solver != 'auto':
            logg.msg('Ignoring zero_center, random_state, svd_solver', v=4)
        from sklearn.decomposition import IncrementalPCA
        X_pca = np.zeros((adata_comp.X.shape[0], n_comps), adata_comp.X.dtype)
        pca_ = IncrementalPCA(n_components=n_comps)
        # First pass: fit on each chunk (densified if sparse).
        for chunk, _, _ in adata_comp.chunked_X(chunk_size):
            chunk = chunk.toarray() if issparse(chunk) else chunk
            pca_.partial_fit(chunk)
        # Second pass: transform each chunk into the fitted space.
        for chunk, start, end in adata_comp.chunked_X(chunk_size):
            chunk = chunk.toarray() if issparse(chunk) else chunk
            X_pca[start:end] = pca_.transform(chunk)
    else:
        if zero_center is None:
            # Auto: zero-center only for dense input.
            zero_center = not issparse(adata_comp.X)
        if zero_center:
            from sklearn.decomposition import PCA
            if issparse(adata_comp.X):
                logg.msg('    as `zero_center=True`, '
                         'sparse input is densified and may '
                         'lead to huge memory consumption', v=4)
                X = adata_comp.X.toarray()
                # Copying the whole adata_comp.X here, could cause memory problems
            else:
                X = adata_comp.X
            pca_ = PCA(n_components=n_comps, svd_solver=svd_solver, random_state=random_state)
        else:
            from sklearn.decomposition import TruncatedSVD
            logg.msg('    without zero-centering: \n'
                     '    the explained variance does not correspond to the exact statistical defintion\n'
                     '    the first component, e.g., might be heavily influenced by different means\n'
                     '    the following components often resemble the exact PCA very closely', v=4)
            pca_ = TruncatedSVD(n_components=n_comps, random_state=random_state)
            X = adata_comp.X
        X_pca = pca_.fit_transform(X)
    # Convert to the requested dtype only when it actually differs.
    if X_pca.dtype.descr != np.dtype(dtype).descr:
        X_pca = X_pca.astype(dtype)
    if data_is_AnnData:
        adata.obsm['X_pca'] = X_pca
        if use_highly_variable:
            # Loadings of non-variable genes stay zero.
            adata.varm['PCs'] = np.zeros(shape=(adata.n_vars, n_comps))
            adata.varm['PCs'][adata.var['highly_variable']] = pca_.components_.T
        else:
            adata.varm['PCs'] = pca_.components_.T
        adata.uns['pca'] = {}
        adata.uns['pca']['variance'] = pca_.explained_variance_
        adata.uns['pca']['variance_ratio'] = pca_.explained_variance_ratio_
        logg.info('    finished', t=True)
        logg.msg('and added\n'
                 '    \'X_pca\', the PCA coordinates (adata.obs)\n'
                 '    \'PC1\', \'PC2\', ..., the loadings (adata.var)\n'
                 '    \'pca_variance\', the variance / eigenvalues (adata.uns)\n'
                 '    \'pca_variance_ratio\', the variance ratio (adata.uns)', v=4)
        return adata if copy else None
    else:
        logg.info('    finished', t=True)
        if return_info:
            return X_pca, pca_.components_, pca_.explained_variance_ratio_, pca_.explained_variance_
        else:
            return X_pca
def _BuildUrl ( self , url , path_elements = None , extra_params = None ) :
"""Taken from : https : / / github . com / bear / python - twitter / blob / master / twitter / api . py # L3814 - L3836
: param url :
: param path _ elements :
: param extra _ params :
: return :""" | # Break url into constituent parts
( scheme , netloc , path , params , query , fragment ) = urlparse ( url )
# Add any additional path elements to the path
if path_elements : # Filter out the path elements that have a value of None
p = [ i for i in path_elements if i ]
if not path . endswith ( '/' ) :
path += '/'
path += '/' . join ( p )
# Add any additional query parameters to the query string
if extra_params and len ( extra_params ) > 0 :
extra_query = self . _EncodeParameters ( extra_params )
# Add it to the existing query
if query :
query += '&' + extra_query
else :
query = extra_query
# Return the rebuilt URL
return urlunparse ( ( scheme , netloc , path , params , query , fragment ) ) |
def fill_tree_from_xml(tag, ar_tree, namespace):  # type: (_Element, ArTree, str) -> None
    """Parse the xml tree into ArTree objects.

    Elements that carry a SHORT-NAME child become named nodes in the
    tree; anonymous wrapper elements are traversed transparently.
    """
    for child in tag:  # type: _Element
        name_elem = child.find('./' + namespace + 'SHORT-NAME')
        # long_name = child.find('./' + namespace + 'LONG-NAME')
        if child is not None:
            if name_elem is not None:
                # Named element: append a new child node and recurse into it.
                fill_tree_from_xml(child, ar_tree.append_child(name_elem.text, child), namespace)
            else:
                # Anonymous wrapper: recurse with the same parent node.
                fill_tree_from_xml(child, ar_tree, namespace)
def filter_transcription_factor(stmts_in, **kwargs):
    """Filter out RegulateAmounts where subject is not a transcription factor.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to filter.
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of filtered statements.
    """
    logger.info('Filtering %d statements to remove ' % len(stmts_in) +
                'amount regulations by non-transcription-factors...')
    # Load the known transcription-factor gene names from the bundled CSV
    # (second column, header row skipped).
    path = os.path.dirname(os.path.abspath(__file__))
    tf_table = read_unicode_csv(path + '/../resources/transcription_factors.csv')
    gene_names = [lin[1] for lin in list(tf_table)[1:]]
    stmts_out = []
    for st in stmts_in:
        keep = True
        if isinstance(st, RegulateAmount):
            # Drop amount regulations whose subject is missing or is not
            # a known transcription factor.
            keep = st.subj is not None and st.subj.name in gene_names
        if keep:
            stmts_out.append(st)
    logger.info('%d statements after filter...' % len(stmts_out))
    dump_pkl = kwargs.get('save')
    if dump_pkl:
        dump_statements(stmts_out, dump_pkl)
    return stmts_out
def fix_line_range(source_code, start, end, options):
    """Apply autopep8 (and docformatter) between the lines start and end of
    source.
    """
    # TODO confirm behaviour outside range (indexing starts at 1)
    start = max(start, 1)
    options.line_range = [start, end]
    from autopep8 import fix_code
    result = fix_code(source_code, options)
    try:
        if options.docformatter:
            from docformatter import format_code
            result = format_code(
                result,
                summary_wrap_length=options.max_line_length - 1,
                description_wrap_length=(options.max_line_length - 2 * options.indent_size),
                pre_summary_newline=options.pre_summary_newline,
                post_description_blank=options.post_description_blank,
                force_wrap=options.force_wrap,
                line_range=[start, end],
            )
    except AttributeError:
        # e.g. using autopep8.parse_args, pragma: no cover
        pass
    return result
def create(self):
    """Launches a new server instance.

    Delegates provisioning to the cloud consul; the server name is
    derived from the stack name and this server's name. Stores the
    provider's attributes in ``self.server_attrs`` and then waits
    ``post_launch_delay_s`` seconds before returning.
    """
    self.server_attrs = self.consul.create_server(
        "%s-%s" % (self.stack.name, self.name),
        self.disk_image_id,
        self.instance_type,
        self.ssh_key_name,
        tags=self.tags,
        availability_zone=self.availability_zone,
        timeout_s=self.launch_timeout_s,
        security_groups=self.security_groups,
        **self.provider_extras)
    # Give the instance a moment to settle before anyone uses it.
    log.debug('Post launch delay: %d s' % self.post_launch_delay_s)
    time.sleep(self.post_launch_delay_s)
def plotE(self, *args, **kwargs):
    """NAME:

       plotE

    PURPOSE:

       plot E(.) along the orbit

    INPUT:

       pot= Potential instance or list of instances in which the orbit was integrated

       d1= plot Ez vs d1: e.g., 't', 'z', 'R', 'vR', 'vT', 'vz'

       normed= if set, plot E(t)/E(0) rather than E(t)

       ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity)

       vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity)

       use_physical= use to override Object-wide default for using a physical scale for output

       +bovy_plot.bovy_plot inputs

    OUTPUT:

       figure to output device

    HISTORY:

       2010-07-10 - Written - Bovy (NYU)

       2014-06-16 - Changed to actually plot E rather than E/E0 - Bovy (IAS)
    """
    # Flatten nested potential lists before delegating to the raw orbit.
    if not kwargs.get('pot', None) is None:
        kwargs['pot'] = flatten_potential(kwargs.get('pot'))
    return self._orb.plotE(*args, **kwargs)
def _runcog(options, files, uncog=False):
    """Common function for the cog and runcog tasks.

    Configures a Cog processor from the task options and runs it over
    the given files, or over files matching the configured pattern under
    the doc root when no files are given. When ``uncog`` is True,
    generated content is stripped instead of (re)generated.
    """
    options.order('cog', 'sphinx', add_rest=True)
    c = Cog()
    if uncog:
        # Uncog mode: delete generated output rather than producing it.
        c.options.bNoGenerate = True
    c.options.bReplace = True
    c.options.bDeleteCode = options.get("delete_code", False)
    includedir = options.get('includedir', None)
    if includedir:
        include = Includer(includedir, cog=c,
                           include_markers=options.get("include_markers"))
        # load cog's namespace with our convenience functions.
        c.options.defines['include'] = include
        c.options.defines['sh'] = _cogsh(c)
    # Marker strings delimiting cog blocks in the processed files.
    c.options.sBeginSpec = options.get('beginspec', '[[[cog')
    c.options.sEndSpec = options.get('endspec', ']]]')
    c.options.sEndOutput = options.get('endoutput', '[[[end]]]')
    basedir = options.get('basedir', None)
    if basedir is None:
        basedir = (path(options.get('docroot', "docs")) /
                   options.get('sourcedir', ""))
    basedir = path(basedir)
    if not files:
        pattern = options.get("pattern", "*.rst")
        if pattern:
            files = basedir.walkfiles(pattern)
        else:
            files = basedir.walkfiles()
    for f in sorted(files):
        # dry() honours paver's dry-run mode, logging instead of running.
        dry("cog %s" % f, c.processOneFile, f)
def iglob(pathname, *, recursive=False):
    """Return an iterator which yields the paths matching a pathname pattern.

    The pattern may contain simple shell-style wildcards a la fnmatch.
    However, unlike fnmatch, filenames starting with a dot are special
    cases that are not matched by '*' and '?' patterns.

    If recursive is true, the pattern '**' will match any files and zero
    or more directories and subdirectories.
    """
    iterator = _iglob(pathname, recursive)
    if not (recursive and _isrecursive(pathname)):
        return iterator
    # A recursive '**' pattern first yields the empty string for the
    # starting directory itself; consume and discard it.
    first = next(iterator)
    assert not first
    return iterator
def _remove_redundancy_routers(self, context, router_ids, ports,
                               delete_ha_groups=False):
    """Deletes all interfaces of the specified redundancy routers
    and then the redundancy routers themselves.

    :param context: request context.
    :param router_ids: ids of the redundancy routers to remove.
    :param ports: router ports; one subnet interface is removed per port.
    :param delete_ha_groups: when True, also delete the HA group of each
        port (done only for the first router, since there is only one HA
        group per network).
    """
    subnets_info = [{'subnet_id': port['fixed_ips'][0]['subnet_id']}
                    for port in ports]
    for r_id in router_ids:
        for i in range(len(subnets_info)):
            self.remove_router_interface(context, r_id, subnets_info[i])
            LOG.debug("Removed interface on %(s_id)s to redundancy router "
                      "with %(r_id)s",
                      {'s_id': ports[i]['network_id'], 'r_id': r_id})
            # There is only one ha group per network so only delete once
            if delete_ha_groups and r_id == router_ids[0]:
                self._delete_ha_group(context, ports[i]['id'])
        # All interfaces gone; the router itself can now be deleted.
        self.delete_router(context, r_id)
        LOG.debug("Deleted redundancy router %s", r_id)
def _load_resource(self, source_r, abs_path=False):
    """The CSV package has no resources, so we just need to resolve the URLs
    to them. Usually, the CSV package is built from a file system package on
    a publically accessible server.

    :param source_r: source resource whose URL should be resolved.
    :param abs_path: unused here; presumably kept for interface
        compatibility with sibling loaders — TODO confirm.
    """
    r = self.doc.resource(source_r.name)
    # Rebase the resource URL onto this package's resource root.
    r.url = self.resource_root.join(r.url).inner
def tagcount(sam, out, genemap, output_evidence_table, positional, minevidence,
             cb_histogram, cb_cutoff, no_scale_evidence, subsample, sparse,
             parse_tags, gene_tags):
    '''Count up evidence for tagged molecules.

    Reads alignments from ``sam``, extracts cellular (CB) and molecular
    (MB/UMI) barcodes either from read names or from BAM tags, tallies
    per (cell, gene[, pos], umi) evidence, collapses it and writes a
    cell-by-gene count matrix (dense CSV or sparse MatrixMarket) to
    ``out``.
    '''
    from pysam import AlignmentFile
    from io import StringIO
    import pandas as pd
    from utils import weigh_evidence
    logger.info('Reading optional files')
    gene_map = None
    if genemap:
        # Optional transcript-id -> gene-id mapping (tsv).
        with open(genemap) as fh:
            try:
                gene_map = dict(p.strip().split() for p in fh)
            except ValueError:
                logger.error('Incorrectly formatted gene_map, need to be tsv.')
                sys.exit()
    # Evidence key: cell, gene[, position], umi.
    if positional:
        tuple_template = '{0},{1},{2},{3}'
    else:
        tuple_template = '{0},{1},{3}'
    if not cb_cutoff:
        cb_cutoff = 0
    if cb_histogram and cb_cutoff == "auto":
        cb_cutoff = guess_depth_cutoff(cb_histogram)
    cb_cutoff = int(cb_cutoff)
    cb_hist = None
    filter_cb = False
    if cb_histogram:
        # NOTE(review): header=-1 is deprecated/removed in newer pandas;
        # header=None is the modern spelling — confirm pandas version.
        cb_hist = pd.read_csv(cb_histogram, index_col=0, header=-1,
                              squeeze=True, sep="\t")
        total_num_cbs = cb_hist.shape[0]
        # Keep only barcodes above the depth cutoff.
        cb_hist = cb_hist[cb_hist > cb_cutoff]
        logger.info('Keeping {} out of {} cellular barcodes.'.format(
            cb_hist.shape[0], total_num_cbs))
        filter_cb = True
    # Barcodes embedded in the read name: ...:CELL_<CB>:UMI_<MB>
    parser_re = re.compile('.*:CELL_(?P<CB>.*):UMI_(?P<MB>.*)')
    if subsample:
        # Reservoir-sample at most `subsample` reads per cell barcode,
        # recording the alignment indices to keep.
        logger.info('Creating reservoir of subsampled reads ({} per cell)'.format(subsample))
        start_sampling = time.time()
        reservoir = collections.defaultdict(list)
        cb_hist_sampled = 0 * cb_hist
        cb_obs = 0 * cb_hist
        track = stream_bamfile(sam)
        current_read = 'none_observed_yet'
        for i, aln in enumerate(track):
            if aln.qname == current_read:
                # Only the first alignment of each read participates.
                continue
            current_read = aln.qname
            if parse_tags:
                CB = aln.get_tag('CR')
            else:
                match = parser_re.match(aln.qname)
                CB = match.group('CB')
            if CB not in cb_hist.index:
                continue
            cb_obs[CB] += 1
            if len(reservoir[CB]) < subsample:
                reservoir[CB].append(i)
                cb_hist_sampled[CB] += 1
            else:
                # Classic reservoir replacement step.
                s = pd.np.random.randint(0, cb_obs[CB])
                if s < subsample:
                    reservoir[CB][s] = i
        index_filter = set(itertools.chain.from_iterable(reservoir.values()))
        # NOTE(review): `sam_file` is not defined yet at this point (it is
        # opened further below); this line likely raises NameError when
        # subsample is used — confirm and fix upstream.
        sam_file.close()
        sampling_time = time.time() - start_sampling
        logger.info('Sampling done - {:.3}s'.format(sampling_time))
    evidence = collections.defaultdict(int)
    logger.info('Tallying evidence')
    start_tally = time.time()
    sam_mode = 'r' if sam.endswith(".sam") else 'rb'
    sam_file = AlignmentFile(sam, mode=sam_mode)
    targets = [x["SN"] for x in sam_file.header["SQ"]]
    track = sam_file.fetch(until_eof=True)
    count = 0
    unmapped = 0
    kept = 0
    nomatchcb = 0
    current_read = 'none_observed_yet'
    count_this_read = True
    missing_transcripts = set()
    for i, aln in enumerate(track):
        # Periodic progress logging.
        if count and not count % 1000000:
            logger.info("Processed %d alignments, kept %d." % (count, kept))
            logger.info("%d were filtered for being unmapped." % unmapped)
            if filter_cb:
                logger.info("%d were filtered for not matching known barcodes."
                            % nomatchcb)
        count += 1
        if aln.is_unmapped:
            unmapped += 1
            continue
        if gene_tags and not aln.has_tag('GX'):
            # No gene tag: treated the same as unmapped.
            unmapped += 1
            continue
        if aln.qname != current_read:
            current_read = aln.qname
            # Apply the subsampling decision once per read.
            if subsample and i not in index_filter:
                count_this_read = False
                continue
            else:
                count_this_read = True
        else:
            if not count_this_read:
                continue
        if parse_tags:
            CB = aln.get_tag('CR')
        else:
            match = parser_re.match(aln.qname)
            CB = match.group('CB')
        if filter_cb:
            if CB not in cb_hist.index:
                nomatchcb += 1
                continue
        if parse_tags:
            MB = aln.get_tag('UM')
        else:
            MB = match.group('MB')
        # Resolve the gene/transcript name for this alignment.
        if gene_tags:
            target_name = aln.get_tag('GX').split(',')[0]
        else:
            txid = sam_file.getrname(aln.reference_id)
            if gene_map:
                if txid in gene_map:
                    target_name = gene_map[txid]
                else:
                    missing_transcripts.add(txid)
                    target_name = txid
            else:
                target_name = txid
        e_tuple = tuple_template.format(CB, target_name, aln.pos, MB)
        # Scale evidence by number of hits
        if no_scale_evidence:
            evidence[e_tuple] += 1.0
        else:
            evidence[e_tuple] += weigh_evidence(aln.tags)
        kept += 1
    tally_time = time.time() - start_tally
    if missing_transcripts:
        logger.warn('The following transcripts were missing gene_ids, so we added them as the transcript ids: %s'
                    % str(missing_transcripts))
    logger.info('Tally done - {:.3}s, {:,} alns/min'.format(
        tally_time, int(60. * count / tally_time)))
    logger.info('Collapsing evidence')
    logger.info('Writing evidence')
    # Round-trip the evidence through a temp CSV so pandas can parse it.
    with tempfile.NamedTemporaryFile('w+t') as out_handle:
        for key in evidence:
            line = '{},{}\n'.format(key, evidence[key])
            out_handle.write(line)
        out_handle.flush()
        out_handle.seek(0)
        evidence_table = pd.read_csv(out_handle, header=None)
    del evidence
    evidence_query = 'evidence >= %f' % minevidence
    if positional:
        evidence_table.columns = ['cell', 'gene', 'umi', 'pos', 'evidence']
        collapsed = evidence_table.query(evidence_query).groupby(['cell', 'gene'])['umi', 'pos'].size()
    else:
        evidence_table.columns = ['cell', 'gene', 'umi', 'evidence']
        collapsed = evidence_table.query(evidence_query).groupby(['cell', 'gene'])['umi'].size()
    expanded = collapsed.unstack().T
    if gene_map:
        # This Series is just for sorting the index
        genes = pd.Series(index=set(gene_map.values()))
        genes = genes.sort_index()
        # Now genes is assigned to a DataFrame
        genes = expanded.ix[genes.index]
    elif gene_tags:
        expanded.sort_index()
        genes = expanded
    else:
        # make data frame have a complete accounting of transcripts
        targets = pd.Series(index=set(targets))
        targets = targets.sort_index()
        expanded = expanded.reindex(targets.index.values, fill_value=0)
        genes = expanded
    genes.fillna(0, inplace=True)
    genes = genes.astype(int)
    genes.index.name = "gene"
    logger.info('Output results')
    if subsample:
        cb_hist_sampled.to_csv('ss_{}_'.format(subsample) + os.path.basename(cb_histogram), sep='\t')
    if output_evidence_table:
        import shutil
        # NOTE(review): `buf` is never defined in this function (evidence
        # was written to a NamedTemporaryFile above, which is closed by
        # now); this branch raises NameError — confirm and fix upstream.
        buf.seek(0)
        with open(output_evidence_table, 'w') as etab_fh:
            shutil.copyfileobj(buf, etab_fh)
    if sparse:
        # Sparse output: matrix plus separate row/column name files.
        pd.Series(genes.index).to_csv(out + ".rownames", index=False, header=False)
        pd.Series(genes.columns.values).to_csv(out + ".colnames", index=False, header=False)
        with open(out, "w+b") as out_handle:
            scipy.io.mmwrite(out_handle, scipy.sparse.csr_matrix(genes))
    else:
        genes.to_csv(out)
def add_all(self, items, overflow_policy=OVERFLOW_POLICY_OVERWRITE):
    """Add every item in ``items`` to the tail of the Ringbuffer.

    A single add_all is likely to outperform repeated ``add`` calls thanks to
    better IO utilization and fewer executed operations. Items are appended
    in iteration order. When the Ringbuffer is full, behavior is governed by
    ``overflow_policy`` (:const:`OVERFLOW_POLICY_OVERWRITE` or
    :const:`OVERFLOW_POLICY_FAIL`).

    :param items: (Collection), the items to append.
    :param overflow_policy: (int), OverflowPolicy used when there is no space
        (optional).
    :return: (long), sequenceId of the last written item, or -1 if the last
        write failed.
    """
    check_not_empty(items, "items can't be empty")
    if len(items) > MAX_BATCH_SIZE:
        raise AssertionError("Batch size can't be greater than %d" % MAX_BATCH_SIZE)
    # Validate the whole batch before serializing anything.
    for entry in items:
        check_not_none(entry, "item can't be None")
    serialized_items = [self._to_data(entry) for entry in items]
    return self._encode_invoke(
        ringbuffer_add_all_codec,
        value_list=serialized_items,
        overflow_policy=overflow_policy,
    )
def print_table(rows, override_headers=None, uppercase_headers=True):
    """Pretty-print a list of dicts as an aligned table.

    All dict rows must share the same keys. A ``str`` entry in ``rows`` is
    printed verbatim (useful as a separator line); ``None`` prints a blank
    line.

    :param rows: list of dicts (plus optional ``str``/``None`` separator rows).
    :param override_headers: optional list of header labels to use instead of
        the dict keys.
    :param uppercase_headers: when True, header labels are upper-cased.
    """
    if not rows:
        return
    keys = list(rows[0].keys())
    headers = override_headers or keys
    if uppercase_headers:
        headers = [h.upper() for h in headers]
    rows = [dict(zip(keys, headers)), None] + rows
    # Column widths are computed from dict rows only. The previous filter,
    # hasattr(row, '__iter__'), also matched str separator rows and then
    # crashed on row[k] (string indices must be integers).
    lengths = [max(len(str(row[k])) for row in rows if isinstance(row, dict)) for k in keys]
    tmp = ['{%s:%i}' % (h, l) for h, l in zip(keys[:-1], lengths[:-1])]
    tmp.append('{%s}' % keys[-1])  # last column is not padded
    template = (' ' * 4).join(tmp)
    for row in rows:
        if isinstance(row, str):
            print(row)
        elif row is None:
            print()
        elif isinstance(row, dict):
            # Render None cells as the literal string 'None'.
            row = {k: v if v is not None else 'None' for k, v in row.items()}
            print(template.format(**row))
        else:
            print("Unhandled row type:", row)
def decrypt(self, encrypted_wrapped_data_key, encryption_context):
    """Decrypt a wrapped, encrypted, data key.

    :param encrypted_wrapped_data_key: Encrypted, wrapped, data key
    :type encrypted_wrapped_data_key:
        aws_encryption_sdk.internal.structures.EncryptedData
    :param dict encryption_context: Encryption context to use in decryption
    :returns: Plaintext of data key
    :rtype: bytes
    """
    key_type = self.wrapping_key_type
    if key_type is EncryptionKeyType.PUBLIC:
        raise IncorrectMasterKeyError("Public key cannot decrypt")
    if key_type is EncryptionKeyType.PRIVATE:
        # Asymmetric path: unwrap directly with the private key.
        return self._wrapping_key.decrypt(
            ciphertext=encrypted_wrapped_data_key.ciphertext,
            padding=self.wrapping_algorithm.padding,
        )
    # Symmetric path: the encryption context is bound in as associated data.
    associated_data = serialize_encryption_context(encryption_context=encryption_context)
    return decrypt(
        algorithm=self.wrapping_algorithm.algorithm,
        key=self._derived_wrapping_key,
        encrypted_data=encrypted_wrapped_data_key,
        associated_data=associated_data,
    )
def requires(self):
    """Index all pages: yield one IndexPage task per configured newspaper."""
    for newspaper_url in NEWSPAPERS:
        yield IndexPage(url=newspaper_url, date=self.date)
def sniff_extension(file_path, verbose=True):
    '''Determine a file's mimetype from its extension.

    Unknown extensions fall back to the ``txt`` mimetype (``text/plain``).

    :param file_path: the full path to the file to sniff
    :param verbose: when True, log the mapping via ``bot.info``
    :return: the mimetype string for the file
    '''
    mime_types = { "xls" : 'application/vnd.ms-excel' , "xlsx" : 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' , "xml" : 'text/xml' , "ods" : 'application/vnd.oasis.opendocument.spreadsheet' , "csv" : 'text/plain' , "tmpl" : 'text/plain' , "pdf" : 'application/pdf' , "php" : 'application/x-httpd-php' , "jpg" : 'image/jpeg' , "png" : 'image/png' , "gif" : 'image/gif' , "bmp" : 'image/bmp' , "txt" : 'text/plain' , "doc" : 'application/msword' , "js" : 'text/js' , "swf" : 'application/x-shockwave-flash' , "mp3" : 'audio/mpeg' , "zip" : 'application/zip' , "simg" : 'application/zip' , "rar" : 'application/rar' , "tar" : 'application/tar' , "arj" : 'application/arj' , "cab" : 'application/cab' , "html" : 'text/html' , "htm" : 'text/html' , "default" : 'application/octet-stream' , "folder" : 'application/vnd.google-apps.folder' , "img" : "application/octet-stream" }
    ext = os.path.basename(file_path).split('.')[-1]
    # Use identity comparison with None and plain truthiness; the previous
    # `== None` / `== True` comparisons were anti-idioms.
    mime_type = mime_types.get(ext)
    if mime_type is None:
        mime_type = mime_types['txt']
    if verbose:
        bot.info("%s --> %s" % (file_path, mime_type))
    return mime_type
def update(self):
    """Update layout to match the layout as described in the
    WindowArrangement."""
    # Rebuild the frame map from scratch each time (avoids leaking frames),
    # keeping the old map locally so unchanged windows can reuse their frame.
    previous_frames = self._frames
    self._frames = {}

    def build(node):
        if isinstance(node, window_arrangement.Window):
            # Reuse the frame for this (window, buffer) pair when possible.
            cache_key = (node, node.editor_buffer)
            frame = previous_frames.get(cache_key)
            if frame is None:
                frame, pt_window = self._create_window_frame(node.editor_buffer)
                # Link layout Window back to the arrangement.
                node.pt_window = pt_window
            self._frames[cache_key] = frame
            return frame
        if isinstance(node, window_arrangement.VSplit):
            return VSplit(
                [build(child) for child in node],
                padding=1,
                padding_char=self.get_vertical_border_char(),
                padding_style='class:frameborder',
            )
        if isinstance(node, window_arrangement.HSplit):
            return HSplit([build(child) for child in node])

    self._fc.content = build(self.window_arrangement.active_tab.root)
def is_absolute(self):
    """Return True if xllcorner == yllcorner == 0, indicating that points
    in question are absolute.
    """
    # FIXME (Ole): It is unfortunate that the decision about whether points
    # are absolute lies with the georeference object (Ross pointed this out).
    # The result is cached on the instance because the repeated allclose
    # calls used to dominate data-fitting time (40-50%); with the cached
    # flag, fitting is much faster (18 Mar 2009).
    try:
        return self.absolute
    except AttributeError:
        self.absolute = num.allclose([self.xllcorner, self.yllcorner], 0)
        return self.absolute
def folderName(self, folder):
    """gets/set the current folder"""
    # Root is represented by either the empty string or "/".
    is_root = folder in ("", "/")
    if not is_root and folder not in self.folders:
        # Unknown folder: leave state untouched (matches prior behavior).
        return
    self._currentURL = self._url if is_root else self._url + "/%s" % folder
    # Reset all cached metadata before re-initializing against the new URL.
    self._services = None
    self._description = None
    self._folderName = None
    self._webEncrypted = None
    self.__init()
    self._folderName = folder
def list(self, **params):
    """Retrieve all deals.

    Returns all deals available to the user according to the parameters
    provided.

    :calls: ``get /deals``
    :param dict params: (optional) Search options.
    :return: List of dictionaries that support attribute-style access, which
        represent a collection of Deals.
    :rtype: list
    """
    response = self.http_client.get("/deals", params=params)
    deals = response[2]
    # Normalize monetary values to Decimal.
    for record in deals:
        record['value'] = Coercion.to_decimal(record['value'])
    return deals
def read(self, count=None, block=None, consumer=None):
    """Read unseen messages from all streams in the consumer group.

    Wrapper for the :py:class:`Database.xreadgroup` method.

    :param int count: limit number of messages returned
    :param int block: milliseconds to block, 0 for indefinitely.
    :param consumer: consumer name; defaults to this group's consumer.
    :returns: a list of (stream key, messages) tuples, where messages is a
        list of (message id, data) 2-tuples.
    """
    active_consumer = self._consumer if consumer is None else consumer
    return self.database.xreadgroup(
        self.name, active_consumer, self._read_keys, count, block)
def tokenize(self, tokenizer=None):
    """Return a list of tokens, using ``tokenizer``.

    :param tokenizer: (optional) A tokenizer object. If None, defaults to
        this blob's default tokenizer.
    """
    if tokenizer is None:
        tokenizer = self.tokenizer
    return WordList(tokenizer.tokenize(self.raw))
def get_json_schema(filename):
    """Get a JSON Schema by filename.

    :param filename: schema file name, resolved relative to the ``schemas``
        directory.
    :return: the parsed schema (dicts/lists/scalars).
    """
    file_path = os.path.join("schemas", filename)
    with open(file_path) as f:
        # yaml.load without an explicit Loader is deprecated and can
        # construct arbitrary Python objects; safe_load restricts the
        # output to plain YAML types, which is all a schema needs.
        return yaml.safe_load(f)
def outputpairedstats(fname, writemode, name1, n1, m1, se1, min1, max1,
                      name2, n2, m2, se2, min2, max2, statname, stat, prob):
    """Print, or write to a file, stats for two groups.

    Uses the name, n, mean, sterr, min and max for each group, as well as the
    statistic name, its value, and the associated p-value.

    Usage:   outputpairedstats(fname, writemode,
                               name1, n1, mean1, stderr1, min1, max1,
                               name2, n2, mean2, stderr2, min2, max2,
                               statname, stat, prob)
    Returns: None
    """
    def _unwrap(value):
        # Legacy array scalars: extract the first element of a 0-d array so
        # round() below receives a plain number; anything else passes through.
        try:
            if value.shape == ():
                value = value[0]
        except Exception:
            pass
        return value

    # Coerce an array probability to its first element (legacy behavior).
    try:
        prob.shape
        prob = prob[0]
    except Exception:
        pass
    # Significance stars appended after the p-value.
    if prob < 0.001:
        suffix = ' ***'
    elif prob < 0.01:
        suffix = ' **'
    elif prob < 0.05:
        suffix = ' *'
    else:
        suffix = ''
    title = [['Name', 'N', 'Mean', 'SD', 'Min', 'Max']]
    lofl = title + [
        [name1, n1, round(m1, 3), round(math.sqrt(se1), 3), min1, max1],
        [name2, n2, round(m2, 3), round(math.sqrt(se2), 3), min2, max2],
    ]
    # BUGFIX: the original test used the Python-2-only name StringType,
    # which is a NameError on Python 3.
    if not isinstance(fname, str) or len(fname) == 0:
        print()
        print(statname)
        print()
        pstat.printcc(lofl)
        print()
        stat = _unwrap(stat)
        prob = _unwrap(prob)
        print('Test statistic = ', round(stat, 3), ' p = ', round(prob, 3), suffix)
        print()
    else:
        with open(fname, writemode) as outfile:
            outfile.write('\n' + statname + '\n\n')
        writecc(lofl, fname, 'a')
        stat = _unwrap(stat)
        prob = _unwrap(prob)
        with open(fname, 'a') as outfile:
            outfile.write(pstat.list2string(['\nTest statistic = ', round(stat, 4), ' p = ', round(prob, 4), suffix, '\n\n']))
    return None
def complete_query(self, name, query, page_size, language_codes=None, company_name=None, scope=None, type_=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ):
    """Complete the specified prefix with keyword suggestions.

    Intended for use by a job search auto-complete search box.

    Example:
        >>> from google.cloud import talent_v4beta1
        >>> client = talent_v4beta1.CompletionClient()
        >>> name = client.project_path('[PROJECT]')
        >>> response = client.complete_query(name, '', 0)

    Args:
        name (str): Required. Resource name of the project the completion is
            performed within, in the form "projects/{project_id}", e.g.
            "projects/api-test-project".
        query (str): Required. The query used to generate suggestions
            (max 255 characters).
        page_size (int): Required. Completion result count (max page size 10).
        language_codes (list[str]): Optional. BCP-47 language codes of the
            query, e.g. "en-US" or "sr-Latn" (each max 255 characters). For
            ``CompletionType.JOB_TITLE`` only open jobs with matching codes
            are returned; for ``COMPANY_NAME`` only companies having such
            jobs; ``COMBINED`` returns both.
        company_name (str): Optional. Restricts completion to the given
            company, "projects/{project_id}/companies/{company_id}".
        scope (~google.cloud.talent_v4beta1.types.CompletionScope): Optional.
            Defaults to ``CompletionScope.PUBLIC``.
        type_ (~google.cloud.talent_v4beta1.types.CompletionType): Optional.
            Completion topic; defaults to ``CompletionType.COMBINED``.
        retry (Optional[google.api_core.retry.Retry]): Retry strategy;
            ``None`` disables retrying.
        timeout (Optional[float]): Seconds to wait per attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            for the method.

    Returns:
        A :class:`~google.cloud.talent_v4beta1.types.CompleteQueryResponse`
        instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request failed.
        google.api_core.exceptions.RetryError: If retries were exhausted.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method with retry/timeout logic, once, on first use.
    inner = self._inner_api_calls.get("complete_query")
    if inner is None:
        inner = google.api_core.gapic_v1.method.wrap_method(
            self.transport.complete_query,
            default_retry=self._method_configs["CompleteQuery"].retry,
            default_timeout=self._method_configs["CompleteQuery"].timeout,
            client_info=self._client_info,
        )
        self._inner_api_calls["complete_query"] = inner
    request = completion_service_pb2.CompleteQueryRequest(
        name=name,
        query=query,
        page_size=page_size,
        language_codes=language_codes,
        company_name=company_name,
        scope=scope,
        type=type_,
    )
    return inner(request, retry=retry, timeout=timeout, metadata=metadata)
def request_token(self) -> None:
    """Request a new Client Credentials Flow authentication token from the
    Spotify API and store it in the ``token`` property of the object.

    Raises:
        requests.HTTPError: If an HTTP error occurred during the request.
    """
    credentials = HTTPBasicAuth(self._client_id, self._client_key)
    response = requests.post(
        self._TOKEN_URL,
        auth=credentials,
        data={"grant_type": self._GRANT_TYPE},
        verify=True,
    )
    response.raise_for_status()
    payload = response.json()
    self._token = payload
    # Remember the absolute expiry time so callers can refresh proactively.
    self._token_expires_at = time.time() + payload["expires_in"]
def dict_to_json(xcol, ycols, labels, value_columns):
    """Convert a list of dicts from datamodel query results to Google Chart
    JSON data.

    :param xcol: name of a string column used as the X axis of the chart
    :param ycols: list of numeric series column names
    :param labels: dict mapping column names to display labels
    :param value_columns: list of dicts with the values to convert
    """
    cols = [{'id': xcol, 'label': as_unicode(labels[xcol]), 'type': 'string'}]
    for ycol in ycols:
        cols.append({'id': ycol, 'label': as_unicode(labels[ycol]), 'type': 'number'})
    chart_rows = []
    for record in value_columns:
        xval = record[xcol]
        # Dates are stringified; everything else is passed through as-is.
        cells = [{'v': str(xval) if isinstance(xval, datetime.date) else xval}]
        for ycol in ycols:
            # Falsy values (None, 0, '') are rendered as 0.
            cells.append({'v': record[ycol] if record[ycol] else 0})
        chart_rows.append({'c': cells})
    return {'cols': cols, 'rows': chart_rows}
def value(self, new_value):
    """Set the value of this measurement.

    Raises:
        AttributeError: if the new value isn't of the correct units.
    """
    expected_unit = self.unit
    # Only enforce the unit when one has been declared on this measurement.
    if expected_unit != units.Undefined and new_value.unit != expected_unit:
        raise AttributeError("%s must be in %s" % (self.__class__, expected_unit))
    self._value = new_value
def or_filter(self, **filters):
    """Works like "filter" but joins given filters with OR operator.

    Args:
        **filters: Query filters as keyword arguments.

    Returns:
        Self. Queryset object.

    Example:
        >>> Person.objects.or_filter(age__gte=16, name__startswith='jo')
    """
    duplicate = copy.deepcopy(self)
    or_query = ("OR_QRY", filters)
    duplicate.adapter.add_query([or_query])
    return duplicate
def fixed_point_density_preserving(points, cells, *args, **kwargs):
    """Idea:
    Move interior mesh points into the weighted averages of the circumcenters
    of their adjacent cells. If a triangle cell switches orientation in the
    process, don't move quite so far.
    """
    def compute_new_points(mesh):
        # Circumcenters everywhere except at cells adjacent to the boundary,
        # where barycenters are used instead.
        targets = mesh.cell_circumcenters
        boundary_cell_ids = mesh.edges_cells[1][:, 0]
        targets[boundary_cell_ids] = mesh.cell_barycenters[boundary_cell_ids]
        return get_new_points_count_averaged(mesh, targets)

    mesh = MeshTri(points, cells)
    runner(compute_new_points, mesh, *args, **kwargs)
    return mesh.node_coords, mesh.cells["nodes"]
def append_rez_path(self):
    """Append rez path to $PATH (no-op when rez has no bin path)."""
    rez_bin = system.rez_bin_path
    if rez_bin:
        self.env.PATH.append(rez_bin)
def print_genl_hdr(ofd, start):
    """https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L821.

    Positional arguments:
    ofd -- function to call with arguments similar to `logging.debug`.
    start -- bytearray() or bytearray_ptr() instance.
    """
    header = genlmsghdr(start)
    # Mirror libnl's dump format for the generic netlink header fields.
    ofd(' [GENERIC NETLINK HEADER] %d octets', GENL_HDRLEN)
    ofd(' .cmd = %d', header.cmd)
    ofd(' .version = %d', header.version)
    ofd(' .unused = %#d', header.reserved)
def followers_qs(self, actor, flag=''):
    """Return a queryset of User objects who are following the given actor
    (eg my followers)."""
    check(actor)
    actor_type = ContentType.objects.get_for_model(actor)
    qs = self.filter(
        content_type=actor_type,
        object_id=actor.pk,
    ).select_related('user')
    # Optionally narrow to a single follow flag.
    return qs.filter(flag=flag) if flag else qs
def to_bytes(instance, encoding='utf-8', error='strict'):
    '''Convert an instance recursively to bytes.

    Strings are encoded with ``encoding``/``error``; lists, tuples and dicts
    are rebuilt with their contents converted; anything else is returned
    unchanged.

    :param instance: the object to convert
    :param encoding: codec passed to ``str.encode``
    :param error: error-handling scheme passed to ``str.encode``
    '''
    if isinstance(instance, bytes):
        return instance
    # str (and anything else with .encode) is encoded directly; this check
    # must come before the container checks so strings aren't iterated.
    if hasattr(instance, 'encode'):
        return instance.encode(encoding, error)
    if isinstance(instance, list):
        # The original wrapped these comprehensions in redundant
        # list()/tuple()/dict() constructors.
        return [to_bytes(item, encoding, error) for item in instance]
    if isinstance(instance, tuple):
        return tuple(to_bytes(item, encoding, error) for item in instance)
    if isinstance(instance, dict):
        return {to_bytes(key, encoding, error): to_bytes(value, encoding, error)
                for key, value in instance.items()}
    return instance
def disable_cpu(self, rg):
    '''Disable cpus.

    rg: range or list of threads to disable (a bare int is accepted and
    treated as a single-element list).
    '''
    # isinstance instead of type(rg) == int: works for int subclasses too.
    if isinstance(rg, int):
        rg = [rg]
    # Only touch CPUs that are actually online right now.
    to_disable = set(rg) & set(self.__get_ranges("online"))
    for cpu in to_disable:
        fpath = path.join("cpu%i" % cpu, "online")
        self.__write_cpu_file(fpath, b"0")
def updateJoin(self):
    """Updates the joining method used by the system."""
    text = self.uiJoinSBTN.currentAction().text()
    if text == 'AND':
        joiner = QueryCompound.Op.And
    else:
        joiner = QueryCompound.Op.Or
    # BUGFIX: previously this passed self.joiner() — the *old* joiner —
    # discarding the operator just selected above.
    self._containerWidget.setCurrentJoiner(joiner)
def __recognize_user_classes ( self , node : yaml . Node , expected_type : Type ) -> RecResult :
"""Recognize a user - defined class in the node .
This returns a list of classes from the inheritance hierarchy headed by expected _ type which match the given node and which do not have a registered derived class that matches the given node . So , the returned classes are the most derived matching classes that inherit from expected _ type .
This function recurses down the user ' s inheritance hierarchy .
Args :
node : The node to recognize .
expected _ type : A user - defined class .
Returns :
A list containing matched user - defined classes .""" | # Let the user override with an explicit tag
if node . tag in self . __registered_classes :
return [ self . __registered_classes [ node . tag ] ] , ''
# Depth-first over registered direct subclasses of expected_type, so that
# the most-derived matching classes win over their bases.
recognized_subclasses = [ ]
message = ''
for other_class in self . __registered_classes . values ( ) :
if expected_type in other_class . __bases__ :
sub_subclasses , msg = self . __recognize_user_classes ( node , other_class )
recognized_subclasses . extend ( sub_subclasses )
# Accumulate error text only from branches that matched nothing.
if len ( sub_subclasses ) == 0 :
message += msg
# No subclass matched: fall back to matching expected_type itself.
if len ( recognized_subclasses ) == 0 :
recognized_subclasses , msg = self . __recognize_user_class ( node , expected_type )
if len ( recognized_subclasses ) == 0 :
message += msg
if len ( recognized_subclasses ) == 0 :
# NOTE(review): this formats `msg` (the last sub-result only) rather
# than the accumulated `message` built above — confirm intent.
message = ( 'Failed to recognize a {}\n{}\nbecause of the following' ' error(s):\n{}' ) . format ( expected_type . __name__ , node . start_mark , indent ( msg , '    ' ) )
return [ ] , message
# Ambiguous: more than one most-derived class matched this node.
if len ( recognized_subclasses ) > 1 :
message = ( '{}{} Could not determine which of the following types' ' this is: {}' ) . format ( node . start_mark , os . linesep , recognized_subclasses )
return recognized_subclasses , message
return recognized_subclasses , ''
def get_sort_cmd(tmp_dir=None):
    """Retrieve GNU coreutils sort command, using version-sort if available.

    Recent versions of sort have alpha-numeric sorting, which provides more
    natural sorting of chromosomes (chr1, chr2) instead of (chr1, chr10).
    This also fixes versions of sort, like 8.22 in CentOS 7.1, that have
    broken sorting without version sorting specified.

    https://github.com/bcbio/bcbio-nextgen/issues/624
    https://github.com/bcbio/bcbio-nextgen/issues/1017
    """
    # Probe the installed sort's help text for version-sort support.
    version_sort_probe = subprocess.check_output(
        "sort --help | grep version-sort; exit 0", shell=True).strip()
    cmd = "sort -V" if version_sort_probe else "sort"
    # os.path.isdir already implies existence.
    if tmp_dir and os.path.isdir(tmp_dir):
        cmd += " -T %s" % tmp_dir
    return cmd
def import_from_stdlib(name):
    """Load a stdlib module from source into a fresh module object.

    Copied from pdbpp https://bitbucket.org/antocuni/pdb

    :param name: bare module name, e.g. ``'pdb'``; resolved as ``name + '.py'``
        in the same directory as the stdlib ``code`` module.
    :return: a new module object populated by executing that source.
    """
    import code  # arbitrary module which stays in the same dir as pdb
    import os
    import types
    stdlibdir = os.path.dirname(code.__file__)
    pyfile = os.path.join(stdlibdir, name + '.py')
    result = types.ModuleType(name)
    # BUGFIX: the source file was previously opened without being closed;
    # use a context manager so the handle is released deterministically.
    with open(pyfile) as fh:
        source = fh.read()
    exec(compile(source, pyfile, 'exec'), result.__dict__)
    return result
def count_letters_digits(input_string: str) -> tuple:
    """Count the alphabetic characters and numeric digits in a string.

    Args:
        input_string: A string that can contain any type of characters.

    Returns:
        A tuple of two integers: (alphabetic character count, digit count).

    Examples:
        >>> count_letters_digits('python')
        (6, 0)
        >>> count_letters_digits('python3.0')
        (6, 2)
    """
    alpha_total = sum(1 for ch in input_string if ch.isalpha())
    digit_total = sum(1 for ch in input_string if ch.isdigit())
    return (alpha_total, digit_total)
def _scope_vars(scope, trainable_only=False):
    """Get variables inside a scope.

    Parameters
    ----------
    scope: str or VariableScope
        scope in which the variables reside.
    trainable_only: bool
        whether or not to return only the variables that were marked as
        trainable.

    Returns
    -------
    vars: [tf.Variable]
        list of variables in `scope`.
    """
    collection_key = (tf.GraphKeys.TRAINABLE_VARIABLES
                      if trainable_only else tf.GraphKeys.VARIABLES)
    scope_name = scope if isinstance(scope, str) else scope.name
    return tf.get_collection(collection_key, scope=scope_name)
def check_array_struct(array):
    """Check to ensure arrays are symmetrical, for example:
    [[1, 2, 3], [1, 2]] is invalid.

    Raises HydraError when the array cannot be represented as a rectangular
    numpy array; returns None otherwise.
    """
    # If numpy fails to convert the list outright (modern numpy raises on
    # ragged input), the array is not symmetrical.
    try:
        arr = np.array(array)
    except Exception:
        raise HydraError("Array %s is not valid." % (array,))
    # Older numpy silently builds an object array whose elements are still
    # lists when the input is ragged — detect that case too. Guard the
    # arr[0] access: the original crashed with IndexError on empty or
    # scalar input.
    if arr.ndim > 0 and len(arr) > 0 and isinstance(arr[0], list):
        raise HydraError("Array %s is not valid." % (array,))
def autoLayout(self):
    """Automatically lays out the contents for this widget."""
    try:
        direction = self.currentSlide().scene().direction()
    except AttributeError:
        # No current slide/scene yet: fall back to the default direction.
        direction = QtGui.QBoxLayout.TopToBottom
    widget_size = self.size()
    self._slideshow.resize(widget_size)
    prev_btn = self._previousButton
    next_btn = self._nextButton
    # Buttons hug the top edge when laying out bottom-to-top, otherwise
    # the bottom edge, with a 9px margin.
    if direction == QtGui.QBoxLayout.BottomToTop:
        button_y = 9
    else:
        button_y = widget_size.height() - prev_btn.height() - 9
    prev_btn.move(9, button_y)
    next_btn.move(widget_size.width() - next_btn.width() - 9, button_y)
    # Update the layout for every slide.
    for index in range(self._slideshow.count()):
        self._slideshow.widget(index).scene().autoLayout(widget_size)
def rpc(self, cmd, **kwargs):
    """Generic helper function to call an RPC method.

    Looks up ``cmd`` on the client, prepends credentials when configured,
    and translates transport failures into backend errors.
    """
    method = getattr(self.client, cmd)
    try:
        if self.credentials is None:
            return method(kwargs)
        return method(self.credentials, kwargs)
    except socket.error as e:
        raise BackendConnectionError(e)
    except (xmlrpclib.ProtocolError, BadStatusLine) as e:
        log.error(e)
        raise BackendError("Error reaching backend.")
def create_withdrawal(self, asset, amount, private_key):
    """Create a withdrawal request by generating a withdrawal ID request
    from the Switcheo API.

    Execution of this function is as follows::

        create_withdrawal(asset="SWTH", amount=1.1, private_key=kp)

    The expected return result for this function is as follows::

        'id': 'a5a4d396-fa9f-4191-bf50-39a3d06d5e0d'

    :param asset: Script Hash of asset ID from the available products.
    :type asset: str
    :param amount: The amount of coins/tokens to be withdrawn.
    :type amount: float
    :param private_key: The Private Key (ETH) or KeyPair (NEO) for the wallet
        being used to sign the deposit message.
    :type private_key: KeyPair or str
    :return: Dictionary with the withdrawal ID generated by the Switcheo API.
    """
    chain = self.blockchain
    signable_params = {
        'blockchain': chain,
        'asset_id': asset,
        'amount': str(self.blockchain_amount[chain](amount)),
        'timestamp': get_epoch_milliseconds(),
        'contract_hash': self.contract_hash,
    }
    # Sign with the chain-specific signer before posting.
    api_params = self.sign_create_withdrawal_function[chain](signable_params, private_key)
    return self.request.post(path='/withdrawals', json_data=api_params)
def _extgrad(xarr, alpha=100, axis=None):
    '''Given an array xarr of values, return the gradient of the smooth
    min/max with respect to each entry in the array.'''
    # Softmax-style weights over the entries (shared exp computed once).
    exp_vals = np.exp(alpha * xarr)
    softmax_weights = exp_vals / np.sum(exp_vals, axis=axis, keepdims=True)
    # Correction term from differentiating through the smooth extremum.
    correction = 1 + alpha * (xarr - _extalg(xarr, alpha, axis=axis))
    return softmax_weights * correction
def sort_buses(self, tokens):
    """Sort the case's bus list in place by bus name (bus_no)."""
    buses = self.case.buses
    buses.sort(key=lambda bus: bus.name)
def contextDoc(self):
    """Get the doc from an xpathContext."""
    raw = libxml2mod.xmlXPathGetContextDoc(self._o)
    if raw is None:
        raise xpathError('xmlXPathGetContextDoc() failed')
    # Wrap the raw C object in the Python-level document class.
    return xmlDoc(_obj=raw)
def _destinations_in_two_columns(pdf, destinations, cutoff=3):
    """Check if the named destinations are organized along two columns
    (heuristic).

    @param pdf: a PdfFileReader object
    @param destinations: iterable of (name, destination) pairs

    'cutoff' is used to tune the heuristic: if 'cutoff' destinations in the
    would-be second column start at the same position, return True.
    """
    xpos_count = {}
    for _, dest in destinations:
        # PERF: compute the position once per destination; the original
        # called _destination_position twice (once to filter, once for x).
        position = _destination_position(pdf, dest)
        # Only consider refs that fall in the would-be second column.
        if position[1] != 1:
            continue
        xpos = position[3]
        xpos_count[xpos] = xpos_count.get(xpos, 0) + 1
        if xpos_count[xpos] >= cutoff:
            return True
    return False
def install_wic(self, wic_slot_id, wic):
    """Install a WIC on this adapter.

    :param wic_slot_id: WIC slot ID (integer)
    :param wic: WIC instance
    """
    self._wics[wic_slot_id] = wic
    # Dynamips WIC ports start on a multiple of 16 plus the port number:
    #   WIC1 port 1 = 16, WIC1 port 2 = 17
    #   WIC2 port 1 = 32, WIC2 port 2 = 33
    #   WIC3 port 1 = 48, WIC3 port 2 = 49
    first_port = 16 * (wic_slot_id + 1)
    for offset in range(wic.interfaces):
        self._ports[first_port + offset] = None
def path(self, which=None):
    """Extend ``nailgun.entity_mixins.Entity.path``.

    The format of the returned path depends on the value of ``which``:

    sync
        /products/<product_id>/sync

    ``super`` is called otherwise.
    """
    if which == 'sync':
        base = super(Product, self).path(which='self')
        return '{0}/{1}'.format(base, which)
    return super(Product, self).path(which)
def compute_video_metrics_from_png_files(output_dirs, problem_name, video_length, frame_shape):
    """Compute the average of all the metrics for one decoding.

    This function assumes that all the predicted and target frames have been
    saved on disk and that sorting them by name yields consecutive frames in
    order.

    Args:
        output_dirs: directories with all the saved frames.
        problem_name: prefix of the saved frames, usually the problem name.
        video_length: length of the videos.
        frame_shape: shape of each frame in HxWxC format.

    Returns:
        Dictionary which contains the average of each metric per frame.
    """
    psnr_per_decode = []
    ssim_per_decode = []
    for output_dir in output_dirs:
        output_files, target_files = get_target_and_output_filepatterns(
            output_dir, problem_name)
        dataset_args = get_zipped_dataset_from_png_files(
            output_files, target_files, video_length, frame_shape)
        psnr, ssim = compute_one_decoding_video_metrics(*dataset_args)
        psnr_per_decode.append(psnr)
        ssim_per_decode.append(ssim)
    all_results = {
        "PSNR": np.array(psnr_per_decode),
        "SSIM": np.array(ssim_per_decode),
    }
    return compute_all_metrics_statistics(all_results)
def convert_to_experiment_list(experiments):
    """Produce a list of Experiment objects.

    Converts input from dict, single experiment, or list of experiments to a
    list of experiments. If input is None, returns an empty list.

    Arguments:
        experiments (Experiment | list | dict): Experiments to run.

    Returns:
        List of experiments.

    Raises:
        TuneError: if the input is not convertible to a list of Experiments.
    """
    exp_list = experiments
    # Transform the input into a list if necessary. isinstance (rather than
    # `type(x) is ...`) also accepts subclasses such as OrderedDict.
    if experiments is None:
        exp_list = []
    elif isinstance(experiments, Experiment):
        exp_list = [experiments]
    elif isinstance(experiments, dict):
        exp_list = [
            Experiment.from_json(name, spec)
            for name, spec in experiments.items()
        ]
    # Validate exp_list.
    if isinstance(exp_list, list) and all(
            isinstance(exp, Experiment) for exp in exp_list):
        if len(exp_list) > 1:
            logger.warning("All experiments will be "
                           "using the same SearchAlgorithm.")
    else:
        raise TuneError("Invalid argument: {}".format(experiments))
    return exp_list
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.