idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
15,500 | def codebox ( msg = "" , title = " " , text = "" ) : return tb . textbox ( msg , title , text , codebox = 1 ) | Display some text in a monospaced font with no line wrapping . This function is suitable for displaying code and text that is formatted using spaces . |
15,501 | def _calculateEncodingKey ( comparator ) : encodingName = None for k , v in list ( _encodings . items ( ) ) : if v == comparator : encodingName = k break return encodingName | Gets the first key of all available encodings where the corresponding value matches the comparator . |
15,502 | def _initUI ( self ) : self . semicolonRadioButton = QtGui . QRadioButton ( 'Semicolon' ) self . commaRadioButton = QtGui . QRadioButton ( 'Comma' ) self . tabRadioButton = QtGui . QRadioButton ( 'Tab' ) self . otherRadioButton = QtGui . QRadioButton ( 'Other' ) self . commaRadioButton . setChecked ( True ) self . otherSeparatorLineEdit = QtGui . QLineEdit ( self ) self . otherSeparatorLineEdit . setEnabled ( False ) self . semicolonRadioButton . toggled . connect ( self . _delimiter ) self . commaRadioButton . toggled . connect ( self . _delimiter ) self . tabRadioButton . toggled . connect ( self . _delimiter ) self . otherRadioButton . toggled . connect ( self . _enableLine ) self . otherSeparatorLineEdit . textChanged . connect ( lambda : self . _delimiter ( True ) ) self . otherSeparatorLineEdit . setValidator ( DelimiterValidator ( self ) ) currentLayout = self . layout ( ) if currentLayout is not None : del currentLayout layout = QtGui . QHBoxLayout ( ) layout . addWidget ( self . semicolonRadioButton ) layout . addWidget ( self . commaRadioButton ) layout . addWidget ( self . tabRadioButton ) layout . addWidget ( self . otherRadioButton ) layout . addWidget ( self . otherSeparatorLineEdit ) self . setLayout ( layout ) | Creates the inital layout with all subwidgets . |
15,503 | def currentSelected ( self ) : if self . commaRadioButton . isChecked ( ) : return ',' elif self . semicolonRadioButton . isChecked ( ) : return ';' elif self . tabRadioButton . isChecked ( ) : return '\t' elif self . otherRadioButton . isChecked ( ) : return self . otherSeparatorLineEdit . text ( ) return | Returns the currently selected delimiter character . |
15,504 | def _openFile ( self ) : file_types = "Comma Separated Values (*.csv);;Text files (*.txt);;All Files (*)" ret = QtGui . QFileDialog . getOpenFileName ( self , self . tr ( 'open file' ) , filter = file_types ) if isinstance ( ret , tuple ) : ret = ret [ 0 ] if ret : self . _filenameLineEdit . setText ( ret ) self . _updateFilename ( ) | Opens a file dialog and sets a value for the QLineEdit widget . |
15,505 | def _updateFilename ( self ) : self . _filename = self . _filenameLineEdit . text ( ) self . _guessEncoding ( self . _filename ) self . _previewFile ( ) | Calls several methods after the filename changed . |
15,506 | def _guessEncoding ( self , path ) : if os . path . exists ( path ) and path . lower ( ) . endswith ( 'csv' ) : encoding = None if encoding is not None : if encoding . startswith ( 'utf' ) : encoding = encoding . replace ( '-' , '' ) encoding = encoding . replace ( '-' , '_' ) viewValue = _encodings . get ( encoding ) self . _encodingKey = encoding index = self . _encodingComboBox . findText ( viewValue . upper ( ) ) self . _encodingComboBox . setCurrentIndex ( index ) | Opens a file from the given path and checks the file encoding . |
15,507 | def _updateEncoding ( self , index ) : encoding = self . _encodingComboBox . itemText ( index ) encoding = encoding . lower ( ) self . _encodingKey = _calculateEncodingKey ( encoding ) self . _previewFile ( ) | Changes the value of the encoding combo box to the value of given index . |
15,508 | def _previewFile ( self ) : dataFrame = self . _loadCSVDataFrame ( ) dataFrameModel = DataFrameModel ( dataFrame , filePath = self . _filename ) dataFrameModel . enableEditing ( True ) self . _previewTableView . setModel ( dataFrameModel ) columnModel = dataFrameModel . columnDtypeModel ( ) columnModel . changeFailed . connect ( self . updateStatusBar ) self . _datatypeTableView . setModel ( columnModel ) | Updates the preview widgets with new models for both tab panes . |
15,509 | def _loadCSVDataFrame ( self ) : if self . _filename and os . path . exists ( self . _filename ) : encoding = self . _encodingKey or 'UTF_8' try : dataFrame = superReadFile ( self . _filename , sep = self . _delimiter , first_codec = encoding , header = self . _header ) dataFrame = dataFrame . apply ( fillNoneValues ) dataFrame = dataFrame . apply ( convertTimestamps ) except Exception as err : self . updateStatusBar ( str ( err ) ) print ( err ) return pandas . DataFrame ( ) self . updateStatusBar ( 'Preview generated.' ) return dataFrame self . updateStatusBar ( 'File could not be read.' ) return pandas . DataFrame ( ) | Loads the given csv file with pandas and generate a new dataframe . |
15,510 | def accepted ( self ) : model = self . _previewTableView . model ( ) if model is not None : df = model . dataFrame ( ) . copy ( ) dfModel = DataFrameModel ( df ) self . load . emit ( dfModel , self . _filename ) print ( ( "Emitted model for {}" . format ( self . _filename ) ) ) self . _resetWidgets ( ) self . accept ( ) | Successfully close the widget and return the loaded model . |
15,511 | def encode_pin ( self , pin , matrix = None ) : if matrix is None : _ , matrix = self . read_pin ( ) return "" . join ( [ str ( matrix . index ( p ) + 1 ) for p in pin ] ) | Transform correct PIN according to the displayed matrix . |
15,512 | def _xdr_read_asset ( unpacker ) : asset = messages . StellarAssetType ( type = unpacker . unpack_uint ( ) ) if asset . type == ASSET_TYPE_ALPHA4 : asset . code = unpacker . unpack_fstring ( 4 ) asset . issuer = _xdr_read_address ( unpacker ) if asset . type == ASSET_TYPE_ALPHA12 : asset . code = unpacker . unpack_fstring ( 12 ) asset . issuer = _xdr_read_address ( unpacker ) return asset | Reads a stellar Asset from unpacker |
15,513 | def _crc16_checksum ( bytes ) : crc = 0x0000 polynomial = 0x1021 for byte in bytes : for i in range ( 8 ) : bit = ( byte >> ( 7 - i ) & 1 ) == 1 c15 = ( crc >> 15 & 1 ) == 1 crc <<= 1 if c15 ^ bit : crc ^= polynomial return crc & 0xFFFF | Returns the CRC - 16 checksum of bytearray bytes |
15,514 | def b58encode ( v ) : long_value = 0 for c in v : long_value = long_value * 256 + c result = "" while long_value >= __b58base : div , mod = divmod ( long_value , __b58base ) result = __b58chars [ mod ] + result long_value = div result = __b58chars [ long_value ] + result nPad = 0 for c in v : if c == 0 : nPad += 1 else : break return ( __b58chars [ 0 ] * nPad ) + result | encode v which is a string of bytes to base58 . |
15,515 | def normalize_nfc ( txt ) : if isinstance ( txt , bytes ) : txt = txt . decode ( ) return unicodedata . normalize ( "NFC" , txt ) . encode ( ) | Normalize message to NFC and return bytes suitable for protobuf . This seems to be bitcoin - qt standard of doing things . |
15,516 | def get_protocol ( handle : Handle , want_v2 : bool ) -> Protocol : force_v1 = int ( os . environ . get ( "TREZOR_PROTOCOL_V1" , 1 ) ) if want_v2 and not force_v1 : return ProtocolV2 ( handle ) else : return ProtocolV1 ( handle ) | Make a Protocol instance for the given handle . |
15,517 | def _ping ( self ) -> bool : assert self . socket is not None resp = None try : self . socket . sendall ( b"PINGPING" ) resp = self . socket . recv ( 8 ) except Exception : pass return resp == b"PONGPONG" | Test if the device is listening . |
15,518 | def combine_keys ( pks : Iterable [ Ed25519PublicPoint ] ) -> Ed25519PublicPoint : P = [ _ed25519 . decodepoint ( pk ) for pk in pks ] combine = reduce ( _ed25519 . edwards_add , P ) return Ed25519PublicPoint ( _ed25519 . encodepoint ( combine ) ) | Combine a list of Ed25519 points into a global CoSi key . |
15,519 | def combine_sig ( global_R : Ed25519PublicPoint , sigs : Iterable [ Ed25519Signature ] ) -> Ed25519Signature : S = [ _ed25519 . decodeint ( si ) for si in sigs ] s = sum ( S ) % _ed25519 . l sig = global_R + _ed25519 . encodeint ( s ) return Ed25519Signature ( sig ) | Combine a list of signatures into a single CoSi signature . |
15,520 | def get_nonce ( sk : Ed25519PrivateKey , data : bytes , ctr : int = 0 ) -> Tuple [ int , Ed25519PublicPoint ] : h = _ed25519 . H ( sk ) bytesize = _ed25519 . b // 8 assert len ( h ) == bytesize * 2 r = _ed25519 . Hint ( h [ bytesize : ] + data + ctr . to_bytes ( 4 , "big" ) ) R = _ed25519 . scalarmult ( _ed25519 . B , r ) return r , Ed25519PublicPoint ( _ed25519 . encodepoint ( R ) ) | Calculate CoSi nonces for given data . These differ from Ed25519 deterministic nonces in that there is a counter appended at end . |
15,521 | def verify ( signature : Ed25519Signature , digest : bytes , pub_key : Ed25519PublicPoint ) -> None : _ed25519 . checkvalid ( signature , digest , pub_key ) | Verify Ed25519 signature . Raise exception if the signature is invalid . |
15,522 | def sign_with_privkey ( digest : bytes , privkey : Ed25519PrivateKey , global_pubkey : Ed25519PublicPoint , nonce : int , global_commit : Ed25519PublicPoint , ) -> Ed25519Signature : h = _ed25519 . H ( privkey ) a = _ed25519 . decodecoord ( h ) S = ( nonce + _ed25519 . Hint ( global_commit + global_pubkey + digest ) * a ) % _ed25519 . l return Ed25519Signature ( _ed25519 . encodeint ( S ) ) | Create a CoSi signature of digest with the supplied private key . This function needs to know the global public key and global commitment . |
15,523 | def _patch_prebuild ( cls ) : orig_run = cls . run def new_run ( self ) : self . run_command ( "prebuild" ) orig_run ( self ) cls . run = new_run | Patch a setuptools command to depend on prebuild |
15,524 | def get_default_client ( path = None , ui = None , ** kwargs ) : from . transport import get_transport from . ui import ClickUI transport = get_transport ( path , prefix_search = True ) if ui is None : ui = ClickUI ( ) return TrezorClient ( transport , ui , ** kwargs ) | Get a client for a connected Trezor device . |
15,525 | def sel_entries ( self ) : ENTIRE_RECORD = 0xff rsp = self . send_message_with_name ( 'GetSelInfo' ) if rsp . entries == 0 : return reservation_id = self . get_sel_reservation_id ( ) next_record_id = 0 while True : req = create_request_by_name ( 'GetSelEntry' ) req . reservation_id = reservation_id req . record_id = next_record_id req . offset = 0 self . max_req_len = ENTIRE_RECORD record_data = ByteBuffer ( ) while True : req . length = self . max_req_len if ( self . max_req_len != 0xff and ( req . offset + req . length ) > 16 ) : req . length = 16 - req . offset rsp = self . send_message ( req ) if rsp . completion_code == constants . CC_CANT_RET_NUM_REQ_BYTES : if self . max_req_len == 0xff : self . max_req_len = 16 else : self . max_req_len -= 1 continue else : check_completion_code ( rsp . completion_code ) record_data . extend ( rsp . record_data ) req . offset = len ( record_data ) if len ( record_data ) >= 16 : break next_record_id = rsp . next_record_id yield SelEntry ( record_data ) if next_record_id == 0xffff : break | Generator which returns all SEL entries . |
15,526 | def initiate_upgrade_action_and_wait ( self , components_mask , action , timeout = 2 , interval = 0.1 ) : try : self . initiate_upgrade_action ( components_mask , action ) except CompletionCodeError as e : if e . cc == CC_LONG_DURATION_CMD_IN_PROGRESS : self . wait_for_long_duration_command ( constants . CMDID_HPM_INITIATE_UPGRADE_ACTION , timeout , interval ) else : raise HpmError ( 'initiate_upgrade_action CC=0x%02x' % e . cc ) | Initiate Upgrade Action and wait for long running command . |
15,527 | def upload_binary ( self , binary , timeout = 2 , interval = 0.1 ) : block_number = 0 block_size = self . _determine_max_block_size ( ) for chunk in chunks ( binary , block_size ) : try : self . upload_firmware_block ( block_number , chunk ) except CompletionCodeError as e : if e . cc == CC_LONG_DURATION_CMD_IN_PROGRESS : self . wait_for_long_duration_command ( constants . CMDID_HPM_UPLOAD_FIRMWARE_BLOCK , timeout , interval ) else : raise HpmError ( 'upload_firmware_block CC=0x%02x' % e . cc ) block_number += 1 block_number &= 0xff | Upload all firmware blocks from binary and wait for long running command . |
15,528 | def finish_upload_and_wait ( self , component , length , timeout = 2 , interval = 0.1 ) : try : rsp = self . finish_firmware_upload ( component , length ) check_completion_code ( rsp . completion_code ) except CompletionCodeError as e : if e . cc == CC_LONG_DURATION_CMD_IN_PROGRESS : self . wait_for_long_duration_command ( constants . CMDID_HPM_FINISH_FIRMWARE_UPLOAD , timeout , interval ) else : raise HpmError ( 'finish_firmware_upload CC=0x%02x' % e . cc ) | Finish the firmware upload process and wait for long running command . |
15,529 | def activate_firmware_and_wait ( self , rollback_override = None , timeout = 2 , interval = 1 ) : try : self . activate_firmware ( rollback_override ) except CompletionCodeError as e : if e . cc == CC_LONG_DURATION_CMD_IN_PROGRESS : self . wait_for_long_duration_command ( constants . CMDID_HPM_ACTIVATE_FIRMWARE , timeout , interval ) else : raise HpmError ( 'activate_firmware CC=0x%02x' % e . cc ) except IpmiTimeoutError : pass | Activate the new uploaded firmware and wait for long running command . |
15,530 | def _decode_data ( self , data ) : self . major = data [ 0 ] if data [ 1 ] is 0xff : self . minor = data [ 1 ] elif data [ 1 ] <= 0x99 : self . minor = int ( data [ 1 : 2 ] . tostring ( ) . decode ( 'bcd+' ) ) else : raise DecodingError ( ) | data is array . array |
15,531 | def set_routing ( self , routing ) : if is_string ( routing ) : routing = ast . literal_eval ( routing ) self . routing = [ Routing ( * route ) for route in routing ] | Set the path over which a target is reachable . |
15,532 | def raw_command ( self , lun , netfn , raw_bytes ) : return self . interface . send_and_receive_raw ( self . target , lun , netfn , raw_bytes ) | Send the raw command data and return the raw response . |
15,533 | def _send_and_receive ( self , target , lun , netfn , cmdid , payload ) : self . _inc_sequence_number ( ) header = IpmbHeaderReq ( ) header . netfn = netfn header . rs_lun = lun header . rs_sa = target . ipmb_address header . rq_seq = self . next_sequence_number header . rq_lun = 0 header . rq_sa = self . slave_address header . cmd_id = cmdid if target . routing : tx_data = encode_bridged_message ( target . routing , header , payload , self . next_sequence_number ) else : tx_data = encode_ipmb_msg ( header , payload ) self . _send_ipmi_msg ( tx_data ) received = False while received is False : if not self . _q . empty ( ) : rx_data = self . _q . get ( ) else : rx_data = self . _receive_ipmi_msg ( ) if array ( 'B' , rx_data ) [ 5 ] == constants . CMDID_SEND_MESSAGE : rx_data = decode_bridged_message ( rx_data ) received = rx_filter ( header , rx_data ) if not received : self . _q . put ( rx_data ) return rx_data [ 6 : - 1 ] | Send and receive data using RMCP interface . |
15,534 | def send_and_receive_raw ( self , target , lun , netfn , raw_bytes ) : return self . _send_and_receive ( target = target , lun = lun , netfn = netfn , cmdid = array ( 'B' , raw_bytes ) [ 0 ] , payload = raw_bytes [ 1 : ] ) | Interface function to send and receive raw message . |
15,535 | def send_and_receive ( self , req ) : rx_data = self . _send_and_receive ( target = req . target , lun = req . lun , netfn = req . netfn , cmdid = req . cmdid , payload = encode_message ( req ) ) rsp = create_message ( req . netfn + 1 , req . cmdid , req . group_extension ) decode_message ( rsp , rx_data ) return rsp | Interface function to send and receive an IPMI message . |
15,536 | def delete_sdr ( self , record_id ) : reservation_id = self . reserve_device_sdr_repository ( ) rsp = self . send_message_with_name ( 'DeleteSdr' , reservation_id = reservation_id , record_id = record_id ) return rsp . record_id | Deletes the sensor record specified by record_id . |
15,537 | def get_sdr_data_helper ( reserve_fn , get_fn , record_id , reservation_id = None ) : if reservation_id is None : reservation_id = reserve_fn ( ) ( next_id , data ) = get_fn ( reservation_id , record_id , 0 , 5 ) header = ByteBuffer ( data ) record_id = header . pop_unsigned_int ( 2 ) record_version = header . pop_unsigned_int ( 1 ) record_type = header . pop_unsigned_int ( 1 ) record_payload_length = header . pop_unsigned_int ( 1 ) record_length = record_payload_length + 5 record_data = ByteBuffer ( data ) offset = len ( record_data ) max_req_len = 20 retry = 20 while True : retry -= 1 if retry == 0 : raise RetryError ( ) length = max_req_len if ( offset + length ) > record_length : length = record_length - offset try : ( next_id , data ) = get_fn ( reservation_id , record_id , offset , length ) except CompletionCodeError as e : if e . cc == constants . CC_CANT_RET_NUM_REQ_BYTES : max_req_len -= 4 if max_req_len <= 0 : retry = 0 else : raise CompletionCodeError ( e . cc ) record_data . extend ( data [ : ] ) offset = len ( record_data ) if len ( record_data ) >= record_length : break return ( next_id , record_data ) | Helper function to retrieve the sdr data using the specified functions . |
15,538 | def clear_repository_helper ( reserve_fn , clear_fn , retry = 5 , reservation = None ) : if reservation is None : reservation = reserve_fn ( ) reservation = _clear_repository ( reserve_fn , clear_fn , INITIATE_ERASE , retry , reservation ) time . sleep ( 0.5 ) reservation = _clear_repository ( reserve_fn , clear_fn , GET_ERASE_STATUS , retry , reservation ) | Helper function to start repository erasure and wait until finish . This helper is used by clear_sel and clear_sdr_repository . |
15,539 | def encode_ipmb_msg ( header , data ) : msg = array ( 'B' ) msg . fromstring ( header . encode ( ) ) if data is not None : a = array ( 'B' ) a . fromstring ( data ) msg . extend ( a ) msg . append ( checksum ( msg [ 3 : ] ) ) return msg . tostring ( ) | Encode an IPMB message . |
15,540 | def encode_send_message ( payload , rq_sa , rs_sa , channel , seq , tracking = 1 ) : req = create_request_by_name ( 'SendMessage' ) req . channel . number = channel req . channel . tracking = tracking data = encode_message ( req ) header = IpmbHeaderReq ( ) header . netfn = req . __netfn__ header . rs_lun = 0 header . rs_sa = rs_sa header . rq_seq = seq header . rq_lun = 0 header . rq_sa = rq_sa header . cmd_id = req . __cmdid__ return encode_ipmb_msg ( header , data + payload ) | Encode a send message command and embedd the message to be send . |
15,541 | def rx_filter ( header , data ) : rsp_header = IpmbHeaderRsp ( ) rsp_header . decode ( data ) data = array ( 'B' , data ) checks = [ ( checksum ( data [ 0 : 3 ] ) , 0 , 'Header checksum failed' ) , ( checksum ( data [ 3 : ] ) , 0 , 'payload checksum failed' ) , ( rsp_header . netfn , header . netfn | 1 , 'NetFn mismatch' ) , ( rsp_header . rs_lun , header . rs_lun , 'responder LUN mismatch' ) , ( rsp_header . rq_seq , header . rq_seq , 'sequence number mismatch' ) , ( rsp_header . cmd_id , header . cmd_id , 'command id mismatch' ) , ] match = True for left , right , msg in checks : if left != right : log ( ) . debug ( '{:s}: {:d} {:d}' . format ( msg , left , right ) ) match = False return match | Check if the message in rx_data matches to the information in header . |
15,542 | def _pack ( self ) : data = ByteBuffer ( ) if not hasattr ( self , '__fields__' ) : return data . array for field in self . __fields__ : field . encode ( self , data ) return data . array | Pack the message and return an array . |
15,543 | def _encode ( self ) : data = ByteBuffer ( ) if not hasattr ( self , '__fields__' ) : return data . tostring ( ) for field in self . __fields__ : field . encode ( self , data ) return data . tostring ( ) | Encode the message and return a bytestring . |
15,544 | def _decode ( self , data ) : if not hasattr ( self , '__fields__' ) : raise NotImplementedError ( 'You have to overwrite this method' ) data = ByteBuffer ( data ) cc = None for field in self . __fields__ : try : field . decode ( self , data ) except CompletionCodeError as e : cc = e . cc break if ( cc is None or cc == 0 ) and len ( data ) > 0 : raise DecodingError ( 'Data has extra bytes' ) | Decode the bytestring message . |
15,545 | def _send_and_receive ( self , target , lun , netfn , cmdid , payload ) : self . _inc_sequence_number ( ) header = IpmbHeaderReq ( ) header . netfn = netfn header . rs_lun = lun header . rs_sa = target . ipmb_address header . rq_seq = self . next_sequence_number header . rq_lun = 0 header . rq_sa = self . slave_address header . cmd_id = cmdid retries = 0 while retries < self . max_retries : try : self . _send_raw ( header , payload ) rx_data = self . _receive_raw ( header ) break except IpmiTimeoutError : log ( ) . warning ( 'I2C transaction timed out' ) , retries += 1 else : raise IpmiTimeoutError ( ) return rx_data . tostring ( ) [ 5 : - 1 ] | Send and receive data using aardvark interface . |
15,546 | def get_device_sdr ( self , record_id , reservation_id = None ) : ( next_id , record_data ) = get_sdr_data_helper ( self . reserve_device_sdr_repository , self . _get_device_sdr_chunk , record_id , reservation_id ) return sdr . SdrCommon . from_data ( record_data , next_id ) | Collects all data from the sensor device to get the SDR specified by record id . |
15,547 | def get_sensor_reading ( self , sensor_number , lun = 0 ) : rsp = self . send_message_with_name ( 'GetSensorReading' , sensor_number = sensor_number , lun = lun ) reading = rsp . sensor_reading if rsp . config . initial_update_in_progress : reading = None states = None if rsp . states1 is not None : states = rsp . states1 if rsp . states2 is not None : states |= ( rsp . states2 << 8 ) return ( reading , states ) | Returns the sensor reading at the assertion states for the given sensor number . |
15,548 | def set_sensor_thresholds ( self , sensor_number , lun = 0 , unr = None , ucr = None , unc = None , lnc = None , lcr = None , lnr = None ) : req = create_request_by_name ( 'SetSensorThresholds' ) req . sensor_number = sensor_number req . lun = lun thresholds = dict ( unr = unr , ucr = ucr , unc = unc , lnc = lnc , lcr = lcr , lnr = lnr ) for key , value in thresholds . items ( ) : if value is not None : setattr ( req . set_mask , key , 1 ) setattr ( req . threshold , key , value ) rsp = self . send_message ( req ) check_completion_code ( rsp . completion_code ) | Set the sensor thresholds that are not None |
15,549 | def _generate_feature ( feature_type , feature_size , signal_magnitude , thickness = 1 ) : if feature_size <= 2 : feature_type = 'cube' if feature_type == 'cube' : signal = np . ones ( ( feature_size , feature_size , feature_size ) ) elif feature_type == 'loop' : signal = np . zeros ( ( feature_size , feature_size , feature_size ) ) seq = np . linspace ( 0 , feature_size - 1 , feature_size ) xx , yy = np . meshgrid ( seq , seq ) xxmesh = ( xx - ( ( feature_size - 1 ) / 2 ) ) ** 2 yymesh = ( yy - ( ( feature_size - 1 ) / 2 ) ) ** 2 disk = xxmesh + yymesh outer_lim = disk [ int ( ( feature_size - 1 ) / 2 ) , 0 ] inner_lim = disk [ int ( ( feature_size - 1 ) / 2 ) , thickness ] outer = disk <= outer_lim inner = disk <= inner_lim loop = outer != inner if np . all ( inner is False ) : logger . warning ( 'Loop feature reduces to a disk because the loop ' 'is too thick' ) if np . all ( loop is False ) : loop = outer signal [ 0 : feature_size , 0 : feature_size , int ( np . round ( feature_size / 2 ) ) ] = loop elif feature_type == 'sphere' or feature_type == 'cavity' : seq = np . linspace ( 0 , feature_size - 1 , feature_size ) xx , yy , zz = np . meshgrid ( seq , seq , seq ) signal = ( ( xx - ( ( feature_size - 1 ) / 2 ) ) ** 2 + ( yy - ( ( feature_size - 1 ) / 2 ) ) ** 2 + ( zz - ( ( feature_size - 1 ) / 2 ) ) ** 2 ) outer_lim = signal [ int ( ( feature_size - 1 ) / 2 ) , int ( ( feature_size - 1 ) / 2 ) , 0 ] inner_lim = signal [ int ( ( feature_size - 1 ) / 2 ) , int ( ( feature_size - 1 ) / 2 ) , thickness ] if feature_type == 'sphere' : signal = signal <= outer_lim else : outer = signal <= outer_lim inner = signal <= inner_lim signal = outer != inner if np . all ( inner is False ) : logger . warning ( 'Cavity feature reduces to a sphere because ' 'the cavity is too thick' ) if np . all ( signal is False ) : signal = outer signal = signal * signal_magnitude return signal | Generate features corresponding to signal |
15,550 | def _insert_idxs ( feature_centre , feature_size , dimensions ) : x_idx = [ int ( feature_centre [ 0 ] - ( feature_size / 2 ) ) + 1 , int ( feature_centre [ 0 ] - ( feature_size / 2 ) + feature_size ) + 1 ] y_idx = [ int ( feature_centre [ 1 ] - ( feature_size / 2 ) ) + 1 , int ( feature_centre [ 1 ] - ( feature_size / 2 ) + feature_size ) + 1 ] z_idx = [ int ( feature_centre [ 2 ] - ( feature_size / 2 ) ) + 1 , int ( feature_centre [ 2 ] - ( feature_size / 2 ) + feature_size ) + 1 ] if 0 > x_idx [ 0 ] : x_idx [ 0 ] = 0 if 0 > y_idx [ 0 ] : y_idx [ 0 ] = 0 if 0 > z_idx [ 0 ] : z_idx [ 0 ] = 0 if dimensions [ 0 ] < x_idx [ 1 ] : x_idx [ 1 ] = dimensions [ 0 ] if dimensions [ 1 ] < y_idx [ 1 ] : y_idx [ 1 ] = dimensions [ 1 ] if dimensions [ 2 ] < z_idx [ 1 ] : z_idx [ 1 ] = dimensions [ 2 ] return x_idx , y_idx , z_idx | Returns the indices of where to put the signal into the signal volume |
15,551 | def generate_signal ( dimensions , feature_coordinates , feature_size , feature_type , signal_magnitude = [ 1 ] , signal_constant = 1 , ) : volume_signal = np . zeros ( dimensions ) feature_quantity = round ( feature_coordinates . shape [ 0 ] ) if len ( feature_size ) == 1 : feature_size = feature_size * feature_quantity if len ( feature_type ) == 1 : feature_type = feature_type * feature_quantity if len ( signal_magnitude ) == 1 : signal_magnitude = signal_magnitude * feature_quantity for signal_counter in range ( feature_quantity ) : if len ( feature_size ) > 1 : feature_centre = np . asarray ( feature_coordinates [ signal_counter , ] ) else : feature_centre = np . asarray ( feature_coordinates ) [ 0 ] signal = _generate_feature ( feature_type [ signal_counter ] , feature_size [ signal_counter ] , signal_magnitude [ signal_counter ] , ) if signal_constant == 0 : signal = signal * np . random . random ( [ feature_size [ signal_counter ] , feature_size [ signal_counter ] , feature_size [ signal_counter ] ] ) x_idx , y_idx , z_idx = _insert_idxs ( feature_centre , feature_size [ signal_counter ] , dimensions ) volume_signal [ x_idx [ 0 ] : x_idx [ 1 ] , y_idx [ 0 ] : y_idx [ 1 ] , z_idx [ 0 ] : z_idx [ 1 ] ] = signal return volume_signal | Generate volume containing signal |
15,552 | def generate_stimfunction ( onsets , event_durations , total_time , weights = [ 1 ] , timing_file = None , temporal_resolution = 100.0 , ) : if timing_file is not None : with open ( timing_file ) as f : text = f . readlines ( ) onsets = list ( ) event_durations = list ( ) weights = list ( ) for line in text : onset , duration , weight = line . strip ( ) . split ( ) upsampled_onset = float ( onset ) * temporal_resolution if np . allclose ( upsampled_onset , np . round ( upsampled_onset ) ) == 0 : warning = 'Your onset: ' + str ( onset ) + ' has more decimal ' 'points than the ' 'specified temporal ' 'resolution can ' 'resolve. This means' ' that events might' ' be missed. ' 'Consider increasing' ' the temporal ' 'resolution.' logger . warning ( warning ) onsets . append ( float ( onset ) ) event_durations . append ( float ( duration ) ) weights . append ( float ( weight ) ) if len ( event_durations ) == 1 : event_durations = event_durations * len ( onsets ) if len ( weights ) == 1 : weights = weights * len ( onsets ) if np . max ( onsets ) > total_time : raise ValueError ( 'Onsets outside of range of total time.' ) stimfunction = np . zeros ( ( int ( round ( total_time * temporal_resolution ) ) , 1 ) ) for onset_counter in list ( range ( len ( onsets ) ) ) : onset_idx = int ( np . floor ( onsets [ onset_counter ] * temporal_resolution ) ) offset_idx = int ( np . floor ( ( onsets [ onset_counter ] + event_durations [ onset_counter ] ) * temporal_resolution ) ) stimfunction [ onset_idx : offset_idx , 0 ] = [ weights [ onset_counter ] ] return stimfunction | Return the function for the timecourse events |
15,553 | def export_3_column ( stimfunction , filename , temporal_resolution = 100.0 ) : stim_counter = 0 event_counter = 0 while stim_counter < stimfunction . shape [ 0 ] : if stimfunction [ stim_counter , 0 ] != 0 : event_onset = str ( stim_counter / temporal_resolution ) weight = str ( stimfunction [ stim_counter , 0 ] ) event_duration = 0 while stimfunction [ stim_counter , 0 ] != 0 & stim_counter <= stimfunction . shape [ 0 ] : event_duration = event_duration + 1 stim_counter = stim_counter + 1 event_duration = str ( event_duration / temporal_resolution ) with open ( filename , "a" ) as file : file . write ( event_onset + '\t' + event_duration + '\t' + weight + '\n' ) event_counter = event_counter + 1 stim_counter = stim_counter + 1 | Output a tab separated three column timing file |
15,554 | def export_epoch_file ( stimfunction , filename , tr_duration , temporal_resolution = 100.0 ) : epoch_file = [ 0 ] * len ( stimfunction ) for ppt_counter in range ( len ( stimfunction ) ) : stimfunction_ppt = np . abs ( stimfunction [ ppt_counter ] ) > 0 stride = tr_duration * temporal_resolution stimfunction_downsampled = stimfunction_ppt [ : : int ( stride ) , : ] epochs = 0 conditions = stimfunction_ppt . shape [ 1 ] for condition_counter in range ( conditions ) : weight_change = ( np . diff ( stimfunction_downsampled [ : , condition_counter ] , 1 , 0 ) != 0 ) if stimfunction_downsampled [ 0 , condition_counter ] == 1 : weight_change [ 0 ] = True if stimfunction_downsampled [ - 1 , condition_counter ] == 1 : weight_change [ - 1 ] = True epochs += int ( np . max ( np . sum ( weight_change , 0 ) ) / 2 ) trs = stimfunction_downsampled . shape [ 0 ] epoch_file [ ppt_counter ] = np . zeros ( ( conditions , epochs , trs ) ) epoch_counter = 0 tr_counter = 0 while tr_counter < stimfunction_downsampled . shape [ 0 ] : for condition_counter in range ( conditions ) : if tr_counter < stimfunction_downsampled . shape [ 0 ] and stimfunction_downsampled [ tr_counter , condition_counter ] == 1 : epoch_file [ ppt_counter ] [ condition_counter , epoch_counter , tr_counter ] = 1 end_idx = np . where ( stimfunction_downsampled [ tr_counter : , condition_counter ] == 0 ) [ 0 ] [ 0 ] tr_idxs = list ( range ( tr_counter , tr_counter + end_idx ) ) epoch_file [ ppt_counter ] [ condition_counter , epoch_counter , tr_idxs ] = 1 tr_counter += end_idx epoch_counter += 1 tr_counter += 1 epoch_file [ ppt_counter ] = epoch_file [ ppt_counter ] . astype ( 'bool' ) np . save ( filename , epoch_file ) | Output an epoch file necessary for some inputs into brainiak |
15,555 | def _double_gamma_hrf ( response_delay = 6 , undershoot_delay = 12 , response_dispersion = 0.9 , undershoot_dispersion = 0.9 , response_scale = 1 , undershoot_scale = 0.035 , temporal_resolution = 100.0 , ) : hrf_length = 30 hrf = [ 0 ] * int ( hrf_length * temporal_resolution ) response_peak = response_delay * response_dispersion undershoot_peak = undershoot_delay * undershoot_dispersion for hrf_counter in list ( range ( len ( hrf ) - 1 ) ) : resp_pow = math . pow ( ( hrf_counter / temporal_resolution ) / response_peak , response_delay ) resp_exp = math . exp ( - ( ( hrf_counter / temporal_resolution ) - response_peak ) / response_dispersion ) response_model = response_scale * resp_pow * resp_exp undershoot_pow = math . pow ( ( hrf_counter / temporal_resolution ) / undershoot_peak , undershoot_delay ) undershoot_exp = math . exp ( - ( ( hrf_counter / temporal_resolution ) - undershoot_peak / undershoot_dispersion ) ) undershoot_model = undershoot_scale * undershoot_pow * undershoot_exp hrf [ hrf_counter ] = response_model - undershoot_model return hrf | Create the double gamma HRF with the timecourse evoked activity . Default values are based on Glover 1999 and Walvaert Durnez Moerkerke Verdoolaege and Rosseel 2011 |
15,556 | def apply_signal ( signal_function , volume_signal , ) : timepoints = signal_function . shape [ 0 ] timecourses = signal_function . shape [ 1 ] signal = np . zeros ( [ volume_signal . shape [ 0 ] , volume_signal . shape [ 1 ] , volume_signal . shape [ 2 ] , timepoints ] ) idxs = np . where ( volume_signal != 0 ) if timecourses == 1 : signal_function = np . matlib . repmat ( signal_function , 1 , len ( idxs [ 0 ] ) ) elif len ( idxs [ 0 ] ) != timecourses : raise IndexError ( 'The number of non-zero voxels in the volume and ' 'the number of timecourses does not match. Aborting' ) for idx_counter in range ( len ( idxs [ 0 ] ) ) : x = idxs [ 0 ] [ idx_counter ] y = idxs [ 1 ] [ idx_counter ] z = idxs [ 2 ] [ idx_counter ] signal_function_temp = signal_function [ : , idx_counter ] signal [ x , y , z , : ] = volume_signal [ x , y , z ] * signal_function_temp return signal | Combine the signal volume with its timecourse |
15,557 | def _calc_sfnr ( volume , mask , ) : brain_voxels = volume [ mask > 0 ] mean_voxels = np . nanmean ( brain_voxels , 1 ) order = 2 seq = np . linspace ( 1 , brain_voxels . shape [ 1 ] , brain_voxels . shape [ 1 ] ) detrend_poly = np . polyfit ( seq , brain_voxels . transpose ( ) , order ) detrend_voxels = np . zeros ( brain_voxels . shape ) for voxel in range ( brain_voxels . shape [ 0 ] ) : trend = detrend_poly [ 0 , voxel ] * seq ** 2 + detrend_poly [ 1 , voxel ] * seq + detrend_poly [ 2 , voxel ] detrend_voxels [ voxel , : ] = brain_voxels [ voxel , : ] - trend std_voxels = np . nanstd ( detrend_voxels , 1 ) sfnr_voxels = mean_voxels / std_voxels return np . mean ( sfnr_voxels ) | Calculate the the SFNR of a volume Calculates the Signal to Fluctuation Noise Ratio the mean divided by the detrended standard deviation of each brain voxel . Based on Friedman and Glover 2006 |
15,558 | def _calc_snr ( volume , mask , dilation = 5 , reference_tr = None , ) : if reference_tr is None : reference_tr = list ( range ( volume . shape [ 3 ] ) ) if dilation > 0 : mask_dilated = ndimage . morphology . binary_dilation ( mask , iterations = dilation ) else : mask_dilated = mask brain_voxels = volume [ mask > 0 ] [ : , reference_tr ] nonbrain_voxels = ( volume [ : , : , : , reference_tr ] ) . astype ( 'float64' ) if len ( brain_voxels . shape ) > 1 : brain_voxels = np . mean ( brain_voxels , 1 ) nonbrain_voxels = np . mean ( nonbrain_voxels , 3 ) nonbrain_voxels = nonbrain_voxels [ mask_dilated == 0 ] mean_voxels = np . nanmean ( brain_voxels ) std_voxels = np . nanstd ( nonbrain_voxels ) return mean_voxels / std_voxels | Calculate the the SNR of a volume Calculates the Signal to Noise Ratio the mean of brain voxels divided by the standard deviation across non - brain voxels . Specify a TR value to calculate the mean and standard deviation for that TR . To calculate the standard deviation of non - brain voxels we can subtract any baseline structure away first hence getting at deviations due to the system noise and not something like high baseline values in non - brain parts of the body . |
15,559 | def _calc_ARMA_noise ( volume , mask , auto_reg_order = 1 , ma_order = 1 , sample_num = 100 , ) : if len ( volume . shape ) > 1 : brain_timecourse = volume [ mask > 0 ] else : brain_timecourse = volume . reshape ( 1 , len ( volume ) ) voxel_idxs = list ( range ( brain_timecourse . shape [ 0 ] ) ) np . random . shuffle ( voxel_idxs ) if len ( voxel_idxs ) < sample_num : sample_num = len ( voxel_idxs ) auto_reg_rho_all = np . zeros ( ( sample_num , auto_reg_order ) ) ma_all = np . zeros ( ( sample_num , ma_order ) ) for voxel_counter in range ( sample_num ) : timecourse = brain_timecourse [ voxel_idxs [ voxel_counter ] , : ] demeaned_timecourse = timecourse - timecourse . mean ( ) try : model = ARMA ( demeaned_timecourse , [ auto_reg_order , ma_order ] ) model_fit = model . fit ( disp = False ) params = model_fit . params except ( ValueError , LinAlgError ) : params = np . ones ( auto_reg_order + ma_order + 1 ) * np . nan auto_reg_rho_all [ voxel_counter , : ] = params [ 1 : auto_reg_order + 1 ] ma_all [ voxel_counter , : ] = params [ auto_reg_order + 1 : ] auto_reg_rho = np . nanmean ( auto_reg_rho_all , 0 ) . tolist ( ) ma_rho = np . nanmean ( ma_all , 0 ) . tolist ( ) return auto_reg_rho , ma_rho | Calculate the the ARMA noise of a volume This calculates the autoregressive and moving average noise of the volume over time by sampling brain voxels and averaging them . |
15,560 | def calc_noise ( volume , mask , template , noise_dict = None , ) : if template . max ( ) > 1.1 : raise ValueError ( 'Template out of range' ) if mask is None : raise ValueError ( 'Mask not supplied' ) if noise_dict is None : noise_dict = { 'voxel_size' : [ 1.0 , 1.0 , 1.0 ] } elif 'voxel_size' not in noise_dict : noise_dict [ 'voxel_size' ] = [ 1.0 , 1.0 , 1.0 ] noise_dict [ 'max_activity' ] = np . nanmax ( np . mean ( volume , 3 ) ) noise_dict [ 'auto_reg_rho' ] , noise_dict [ 'ma_rho' ] = _calc_ARMA_noise ( volume , mask ) noise_dict [ 'auto_reg_sigma' ] = 1 noise_dict [ 'physiological_sigma' ] = 0 noise_dict [ 'task_sigma' ] = 0 noise_dict [ 'drift_sigma' ] = 0 noise_dict [ 'sfnr' ] = _calc_sfnr ( volume , mask , ) if volume . shape [ 3 ] > 100 : trs = np . random . choice ( volume . shape [ 3 ] , size = 100 , replace = False ) else : trs = list ( range ( 0 , volume . shape [ 3 ] ) ) fwhm = [ 0 ] * len ( trs ) for tr in range ( len ( trs ) ) : fwhm [ tr ] = _calc_fwhm ( volume [ : , : , : , trs [ tr ] ] , mask , noise_dict [ 'voxel_size' ] , ) noise_dict [ 'fwhm' ] = np . mean ( fwhm ) noise_dict [ 'snr' ] = _calc_snr ( volume , mask , ) return noise_dict | Calculates the noise properties of the volume supplied . This estimates what noise properties the volume has . For instance it determines the spatial smoothness the autoregressive noise system noise etc . Read the doc string for generate_noise to understand how these different types of noise interact . |
15,561 | def _generate_noise_system ( dimensions_tr , spatial_sd , temporal_sd , spatial_noise_type = 'gaussian' , temporal_noise_type = 'gaussian' , ) : def noise_volume ( dimensions , noise_type , ) : if noise_type == 'rician' : noise = stats . rice . rvs ( b = 0 , loc = 0 , scale = 1.527 , size = dimensions ) elif noise_type == 'exponential' : noise = stats . expon . rvs ( 0 , scale = 1 , size = dimensions ) elif noise_type == 'gaussian' : noise = np . random . randn ( np . prod ( dimensions ) ) . reshape ( dimensions ) return noise dimensions = np . asarray ( [ dimensions_tr [ 0 ] , dimensions_tr [ 1 ] , dimensions_tr [ 2 ] , 1 ] ) spatial_noise = noise_volume ( dimensions , spatial_noise_type ) temporal_noise = noise_volume ( dimensions_tr , temporal_noise_type ) spatial_noise *= spatial_sd temporal_noise *= temporal_sd temporal_noise_mean = np . mean ( temporal_noise , 3 ) . reshape ( dimensions [ 0 ] , dimensions [ 1 ] , dimensions [ 2 ] , 1 ) temporal_noise = temporal_noise - temporal_noise_mean system_noise = spatial_noise + temporal_noise return system_noise | Generate the scanner noise |
15,562 | def _generate_noise_temporal_task ( stimfunction_tr , motion_noise = 'gaussian' , ) : stimfunction_tr = stimfunction_tr != 0 if motion_noise == 'gaussian' : noise = stimfunction_tr * np . random . normal ( 0 , 1 , size = stimfunction_tr . shape ) elif motion_noise == 'rician' : noise = stimfunction_tr * stats . rice . rvs ( 0 , 1 , size = stimfunction_tr . shape ) noise_task = stimfunction_tr + noise noise_task = stats . zscore ( noise_task ) . flatten ( ) return noise_task | Generate the signal dependent noise |
15,563 | def _generate_noise_temporal_drift ( trs , tr_duration , basis = "discrete_cos" , period = 150 , ) : if basis == 'discrete_cos' : timepoints = np . linspace ( 0 , trs - 1 , trs ) timepoints = ( ( timepoints * tr_duration ) / period ) * 2 * np . pi duration = trs * tr_duration basis_funcs = int ( np . floor ( duration / period ) ) if basis_funcs == 0 : err_msg = 'Too few timepoints (' + str ( trs ) + ') to accurately ' 'model drift' logger . warning ( err_msg ) basis_funcs = 1 noise_drift = np . zeros ( ( timepoints . shape [ 0 ] , basis_funcs ) ) for basis_counter in list ( range ( 1 , basis_funcs + 1 ) ) : timepoints_basis = ( timepoints / basis_counter ) + ( np . random . rand ( ) * np . pi * 2 ) noise_drift [ : , basis_counter - 1 ] = np . cos ( timepoints_basis ) noise_drift = np . mean ( noise_drift , 1 ) elif basis == 'sine' : cycles = trs * tr_duration / period timepoints = np . linspace ( 0 , trs - 1 , trs ) phaseshift = np . pi * 2 * np . random . random ( ) phase = ( timepoints / ( trs - 1 ) * cycles * 2 * np . pi ) + phaseshift noise_drift = np . sin ( phase ) noise_drift = stats . zscore ( noise_drift ) return noise_drift | Generate the drift noise |
15,564 | def _generate_noise_temporal_autoregression ( timepoints , noise_dict , dimensions , mask , ) : auto_reg_rho = noise_dict [ 'auto_reg_rho' ] ma_rho = noise_dict [ 'ma_rho' ] auto_reg_order = len ( auto_reg_rho ) ma_order = len ( ma_rho ) if ma_order > auto_reg_order : msg = 'MA order (%d) is greater than AR order (%d). Cannot run.' % ( ma_order , auto_reg_order ) raise ValueError ( msg ) noise_autoregression = np . zeros ( ( dimensions [ 0 ] , dimensions [ 1 ] , dimensions [ 2 ] , len ( timepoints ) ) ) err_vols = np . zeros ( ( dimensions [ 0 ] , dimensions [ 1 ] , dimensions [ 2 ] , len ( timepoints ) ) ) for tr_counter in range ( len ( timepoints ) ) : noise = _generate_noise_spatial ( dimensions = dimensions , mask = mask , fwhm = noise_dict [ 'fwhm' ] , ) err_vols [ : , : , : , tr_counter ] = noise if tr_counter == 0 : noise_autoregression [ : , : , : , tr_counter ] = noise else : AR_vol = np . zeros ( ( dimensions [ 0 ] , dimensions [ 1 ] , dimensions [ 2 ] ) ) for pCounter in list ( range ( 1 , auto_reg_order + 1 ) ) : past_TR = int ( tr_counter - pCounter ) if tr_counter - pCounter >= 0 : past_vols = noise_autoregression [ : , : , : , past_TR ] AR_vol += past_vols * auto_reg_rho [ pCounter - 1 ] if ma_order >= pCounter : past_noise = err_vols [ : , : , : , past_TR ] AR_vol += past_noise * ma_rho [ pCounter - 1 ] noise_autoregression [ : , : , : , tr_counter ] = AR_vol + noise noise_autoregression = stats . zscore ( noise_autoregression , 3 ) return noise_autoregression | Generate the autoregression noise Make a slowly drifting timecourse with the given autoregression parameters . This can take in both AR and MA components |
15,565 | def _generate_noise_temporal_phys ( timepoints , resp_freq = 0.2 , heart_freq = 1.17 , ) : resp_phase = ( np . random . rand ( 1 ) * 2 * np . pi ) [ 0 ] heart_phase = ( np . random . rand ( 1 ) * 2 * np . pi ) [ 0 ] resp_rate = ( resp_freq * 2 * np . pi ) heart_rate = ( heart_freq * 2 * np . pi ) resp_radians = np . multiply ( timepoints , resp_rate ) + resp_phase heart_radians = np . multiply ( timepoints , heart_rate ) + heart_phase noise_phys = np . cos ( resp_radians ) + np . sin ( heart_radians ) noise_phys = stats . zscore ( noise_phys ) return noise_phys | Generate the physiological noise . Create noise representing the heart rate and respiration of the data . Default values based on Walvaert Durnez Moerkerke Verdoolaege and Rosseel 2011 |
15,566 | def _generate_noise_spatial ( dimensions , mask = None , fwhm = 4.0 , ) : if len ( dimensions ) == 4 : logger . warning ( '4 dimensions have been supplied, only using 3' ) dimensions = dimensions [ 0 : 3 ] if dimensions [ 0 ] != dimensions [ 1 ] or dimensions [ 1 ] != dimensions [ 2 ] : max_dim = np . max ( dimensions ) new_dim = ( max_dim , max_dim , max_dim ) else : new_dim = dimensions def _logfunc ( x , a , b , c ) : return ( np . log ( x + a ) / np . log ( b ) ) + c def _fftIndgen ( n ) : ascending = np . linspace ( 0 , int ( n / 2 ) , int ( n / 2 + 1 ) ) elements = int ( np . ceil ( n / 2 - 1 ) ) descending = np . linspace ( - elements , - 1 , elements ) return np . concatenate ( ( ascending , descending ) ) def _Pk2 ( idxs , sigma ) : amp_start = np . array ( ( 0 ) ) amp_end = np . sqrt ( np . sqrt ( np . sum ( idxs [ : , 1 : ] ** 2 , 0 ) ) ** ( - 1 * sigma ) ) amplitude = np . append ( amp_start , amp_end ) return amplitude spatial_sigma = _logfunc ( fwhm , - 0.36778719 , 2.10601011 , 2.15439247 ) noise = np . fft . fftn ( np . random . normal ( size = new_dim ) ) fft_vol = np . meshgrid ( _fftIndgen ( new_dim [ 0 ] ) , _fftIndgen ( new_dim [ 1 ] ) , _fftIndgen ( new_dim [ 2 ] ) ) fft_vec = np . asarray ( ( fft_vol [ 0 ] . flatten ( ) , fft_vol [ 1 ] . flatten ( ) , fft_vol [ 2 ] . flatten ( ) ) ) amp_vec = _Pk2 ( fft_vec , spatial_sigma ) amplitude = amp_vec . reshape ( new_dim ) noise_fft = ( np . fft . ifftn ( noise * amplitude ) ) . real noise_spatial = noise_fft [ : dimensions [ 0 ] , : dimensions [ 1 ] , : dimensions [ 2 ] ] if mask is not None : noise_spatial *= mask noise_spatial [ mask > 0 ] = stats . zscore ( noise_spatial [ mask > 0 ] ) else : grand_mean = ( noise_spatial ) . mean ( ) grand_std = ( noise_spatial ) . std ( ) noise_spatial = ( noise_spatial - grand_mean ) / grand_std return noise_spatial | Generate code for Gaussian Random Fields . |
15,567 | def _generate_noise_temporal ( stimfunction_tr , tr_duration , dimensions , template , mask , noise_dict ) : trs = len ( stimfunction_tr ) timepoints = list ( np . linspace ( 0 , ( trs - 1 ) * tr_duration , trs ) ) noise_volume = np . zeros ( ( dimensions [ 0 ] , dimensions [ 1 ] , dimensions [ 2 ] , trs ) ) if noise_dict [ 'drift_sigma' ] != 0 : noise = _generate_noise_temporal_drift ( trs , tr_duration , ) volume = np . ones ( dimensions ) noise_volume += np . multiply . outer ( volume , noise ) * noise_dict [ 'drift_sigma' ] if noise_dict [ 'physiological_sigma' ] != 0 : noise = _generate_noise_temporal_phys ( timepoints , ) volume = _generate_noise_spatial ( dimensions = dimensions , mask = mask , fwhm = noise_dict [ 'fwhm' ] , ) noise_volume += np . multiply . outer ( volume , noise ) * noise_dict [ 'physiological_sigma' ] if noise_dict [ 'auto_reg_sigma' ] != 0 : noise = _generate_noise_temporal_autoregression ( timepoints , noise_dict , dimensions , mask , ) noise_volume += noise * noise_dict [ 'auto_reg_sigma' ] if noise_dict [ 'task_sigma' ] != 0 and np . sum ( stimfunction_tr ) > 0 : noise = _generate_noise_temporal_task ( stimfunction_tr , ) volume = _generate_noise_spatial ( dimensions = dimensions , mask = mask , fwhm = noise_dict [ 'fwhm' ] , ) noise_volume += np . multiply . outer ( volume , noise ) * noise_dict [ 'task_sigma' ] noise_volume = stats . zscore ( noise_volume , 3 ) noise_volume [ np . isnan ( noise_volume ) ] = 0 return noise_volume | Generate the temporal noise Generate the time course of the average brain voxel . To change the relative mixing of the noise components change the sigma s specified below . |
15,568 | def _noise_dict_update ( noise_dict ) : default_dict = { 'task_sigma' : 0 , 'drift_sigma' : 0 , 'auto_reg_sigma' : 1 , 'auto_reg_rho' : [ 0.5 ] , 'ma_rho' : [ 0.0 ] , 'physiological_sigma' : 0 , 'sfnr' : 90 , 'snr' : 50 , 'max_activity' : 1000 , 'voxel_size' : [ 1.0 , 1.0 , 1.0 ] , 'fwhm' : 4 , 'matched' : 1 } for default_key in default_dict : if default_key not in noise_dict : noise_dict [ default_key ] = default_dict [ default_key ] return noise_dict | Update the noise dictionary parameters with default values in case any were missing |
15,569 | def _fit_spatial ( noise , noise_temporal , mask , template , spatial_sd , temporal_sd , noise_dict , fit_thresh , fit_delta , iterations , ) : dim_tr = noise . shape base = template * noise_dict [ 'max_activity' ] base = base . reshape ( dim_tr [ 0 ] , dim_tr [ 1 ] , dim_tr [ 2 ] , 1 ) mean_signal = ( base [ mask > 0 ] ) . mean ( ) target_snr = noise_dict [ 'snr' ] spat_sd_orig = np . copy ( spatial_sd ) iteration = 0 for iteration in list ( range ( iterations ) ) : new_snr = _calc_snr ( noise , mask ) diff_snr = abs ( new_snr - target_snr ) / target_snr if diff_snr < fit_thresh : logger . info ( 'Terminated SNR fit after ' + str ( iteration ) + ' iterations.' ) break spat_sd_new = mean_signal / new_snr spatial_sd -= ( ( spat_sd_new - spat_sd_orig ) * fit_delta ) if spatial_sd < 0 or np . isnan ( spatial_sd ) : spatial_sd = 10e-3 noise_system = _generate_noise_system ( dimensions_tr = dim_tr , spatial_sd = spatial_sd , temporal_sd = temporal_sd , ) noise = base + ( noise_temporal * temporal_sd ) + noise_system noise [ noise < 0 ] = 0 if iterations == 0 : logger . info ( 'No fitting iterations were run' ) elif iteration == iterations : logger . warning ( 'SNR failed to converge.' ) return noise , spatial_sd | Fit the noise model to match the SNR of the data |
15,570 | def _fit_temporal ( noise , mask , template , stimfunction_tr , tr_duration , spatial_sd , temporal_proportion , temporal_sd , noise_dict , fit_thresh , fit_delta , iterations , ) : dim_tr = noise . shape dim = dim_tr [ 0 : 3 ] base = template * noise_dict [ 'max_activity' ] base = base . reshape ( dim [ 0 ] , dim [ 1 ] , dim [ 2 ] , 1 ) mean_signal = ( base [ mask > 0 ] ) . mean ( ) temp_sd_orig = np . copy ( temporal_sd ) new_nd = copy . deepcopy ( noise_dict ) target_sfnr = noise_dict [ 'sfnr' ] target_ar = noise_dict [ 'auto_reg_rho' ] [ 0 ] for iteration in list ( range ( iterations ) ) : new_sfnr = _calc_sfnr ( noise , mask ) new_ar , _ = _calc_ARMA_noise ( noise , mask , len ( noise_dict [ 'auto_reg_rho' ] ) , len ( noise_dict [ 'ma_rho' ] ) , ) sfnr_diff = abs ( new_sfnr - target_sfnr ) / target_sfnr ar_diff = new_ar [ 0 ] - target_ar if ( abs ( ar_diff ) / target_ar ) < fit_thresh and sfnr_diff < fit_thresh : msg = 'Terminated AR fit after ' + str ( iteration ) + ' iterations.' logger . info ( msg ) break temp_sd_new = mean_signal / new_sfnr temporal_sd -= ( ( temp_sd_new - temp_sd_orig ) * fit_delta ) if temporal_sd < 0 or np . isnan ( temporal_sd ) : temporal_sd = 10e-3 temp_sd_system_new = np . sqrt ( ( temporal_sd ** 2 ) * temporal_proportion ) new_nd [ 'auto_reg_rho' ] [ 0 ] -= ( ar_diff * fit_delta ) if new_nd [ 'auto_reg_rho' ] [ 0 ] >= 1 : new_nd [ 'auto_reg_rho' ] [ 0 ] = 0.99 noise_temporal = _generate_noise_temporal ( stimfunction_tr , tr_duration , dim , template , mask , new_nd , ) noise_system = _generate_noise_system ( dimensions_tr = dim_tr , spatial_sd = spatial_sd , temporal_sd = temp_sd_system_new , ) noise = base + ( noise_temporal * temporal_sd ) + noise_system noise [ noise < 0 ] = 0 if iterations == 0 : logger . info ( 'No fitting iterations were run' ) elif iteration == iterations : logger . warning ( 'AR failed to converge.' ) return noise | Fit the noise model to match the SFNR and AR of the data |
15,571 | def load_images_from_dir ( in_dir : Union [ str , Path ] , suffix : str = "nii.gz" , ) -> Iterable [ SpatialImage ] : if isinstance ( in_dir , str ) : in_dir = Path ( in_dir ) files = sorted ( in_dir . glob ( "*" + suffix ) ) for f in files : logger . debug ( 'Starting to read file %s' , f ) yield nib . load ( str ( f ) ) | Load images from directory . |
15,572 | def load_images ( image_paths : Iterable [ Union [ str , Path ] ] ) -> Iterable [ SpatialImage ] : for image_path in image_paths : if isinstance ( image_path , Path ) : string_path = str ( image_path ) else : string_path = image_path logger . debug ( 'Starting to read file %s' , string_path ) yield nib . load ( string_path ) | Load images from paths . |
15,573 | def load_boolean_mask ( path : Union [ str , Path ] , predicate : Callable [ [ np . ndarray ] , np . ndarray ] = None ) -> np . ndarray : if not isinstance ( path , str ) : path = str ( path ) data = nib . load ( path ) . get_data ( ) if predicate is not None : mask = predicate ( data ) else : mask = data . astype ( np . bool ) return mask | Load boolean nibabel . SpatialImage mask . |
15,574 | def load_labels ( path : Union [ str , Path ] ) -> List [ SingleConditionSpec ] : condition_specs = np . load ( str ( path ) ) return [ c . view ( SingleConditionSpec ) for c in condition_specs ] | Load labels files . |
15,575 | def save_as_nifti_file ( data : np . ndarray , affine : np . ndarray , path : Union [ str , Path ] ) -> None : if not isinstance ( path , str ) : path = str ( path ) img = Nifti1Pair ( data , affine ) nib . nifti1 . save ( img , path ) | Create a Nifti file and save it . |
15,576 | def _mse_converged ( self ) : prior = self . global_prior_ [ 0 : self . prior_size ] posterior = self . global_posterior_ [ 0 : self . prior_size ] mse = mean_squared_error ( prior , posterior , multioutput = 'uniform_average' ) if mse > self . threshold : return False , mse else : return True , mse | Check convergence based on mean squared difference between prior and posterior |
15,577 | def _get_gather_offset ( self , size ) : gather_size = np . zeros ( size ) . astype ( int ) gather_offset = np . zeros ( size ) . astype ( int ) num_local_subjs = np . zeros ( size ) . astype ( int ) subject_map = { } for idx , s in enumerate ( np . arange ( self . n_subj ) ) : cur_rank = idx % size gather_size [ cur_rank ] += self . prior_size subject_map [ idx ] = ( cur_rank , num_local_subjs [ cur_rank ] ) num_local_subjs [ cur_rank ] += 1 for idx in np . arange ( size - 1 ) + 1 : gather_offset [ idx ] = gather_offset [ idx - 1 ] + gather_size [ idx - 1 ] tuple_size = tuple ( gather_size ) tuple_offset = tuple ( gather_offset ) return tuple_size , tuple_offset , subject_map | Calculate the offset for gather result from this process |
15,578 | def _get_weight_size ( self , data , n_local_subj ) : weight_size = np . zeros ( 1 ) . astype ( int ) local_weight_offset = np . zeros ( n_local_subj ) . astype ( int ) for idx , subj_data in enumerate ( data ) : if idx > 0 : local_weight_offset [ idx ] = weight_size [ 0 ] weight_size [ 0 ] += self . K * subj_data . shape [ 1 ] return weight_size , local_weight_offset | Calculate the size of weight for this process |
15,579 | def _get_subject_info ( self , n_local_subj , data ) : max_sample_tr = np . zeros ( n_local_subj ) . astype ( int ) max_sample_voxel = np . zeros ( n_local_subj ) . astype ( int ) for idx in np . arange ( n_local_subj ) : nvoxel = data [ idx ] . shape [ 0 ] ntr = data [ idx ] . shape [ 1 ] max_sample_voxel [ idx ] = min ( self . max_voxel , int ( self . voxel_ratio * nvoxel ) ) max_sample_tr [ idx ] = min ( self . max_tr , int ( self . tr_ratio * ntr ) ) return max_sample_tr , max_sample_voxel | Calculate metadata for subjects allocated to this process |
15,580 | def _get_mpi_info ( self ) : rank = self . comm . Get_rank ( ) size = self . comm . Get_size ( ) return rank , size | get basic MPI info |
15,581 | def _init_prior_posterior ( self , rank , R , n_local_subj ) : if rank == 0 : idx = np . random . choice ( n_local_subj , 1 ) self . global_prior_ , self . global_centers_cov , self . global_widths_var = self . get_template ( R [ idx [ 0 ] ] ) self . global_centers_cov_scaled = self . global_centers_cov / float ( self . n_subj ) self . global_widths_var_scaled = self . global_widths_var / float ( self . n_subj ) self . gather_posterior = np . zeros ( self . n_subj * self . prior_size ) self . global_posterior_ = np . zeros ( self . prior_size ) else : self . global_prior_ = np . zeros ( self . prior_bcast_size ) self . global_posterior_ = None self . gather_posterior = None return self | set prior for this subject |
15,582 | def _assign_posterior ( self ) : prior_centers = self . get_centers ( self . global_prior_ ) posterior_centers = self . get_centers ( self . global_posterior_ ) posterior_widths = self . get_widths ( self . global_posterior_ ) posterior_centers_mean_cov = self . get_centers_mean_cov ( self . global_posterior_ ) posterior_widths_mean_var = self . get_widths_mean_var ( self . global_posterior_ ) cost = distance . cdist ( prior_centers , posterior_centers , 'euclidean' ) _ , col_ind = linear_sum_assignment ( cost ) self . set_centers ( self . global_posterior_ , posterior_centers ) self . set_widths ( self . global_posterior_ , posterior_widths ) self . set_centers_mean_cov ( self . global_posterior_ , posterior_centers_mean_cov [ col_ind ] ) self . set_widths_mean_var ( self . global_posterior_ , posterior_widths_mean_var [ col_ind ] ) return self | assign posterior to the right prior based on Hungarian algorithm |
15,583 | def _update_global_posterior ( self , rank , m , outer_converged ) : if rank == 0 : self . _map_update_posterior ( ) self . _assign_posterior ( ) is_converged , _ = self . _converged ( ) if is_converged : logger . info ( "converged at %d outer iter" % ( m ) ) outer_converged [ 0 ] = 1 else : self . global_prior_ = self . global_posterior_ return outer_converged | Update global posterior and then check convergence |
15,584 | def _update_weight ( self , data , R , n_local_subj , local_weight_offset ) : for s , subj_data in enumerate ( data ) : base = s * self . prior_size centers = self . local_posterior_ [ base : base + self . K * self . n_dim ] . reshape ( ( self . K , self . n_dim ) ) start_idx = base + self . K * self . n_dim end_idx = base + self . prior_size widths = self . local_posterior_ [ start_idx : end_idx ] . reshape ( ( self . K , 1 ) ) unique_R , inds = self . get_unique_R ( R [ s ] ) F = self . get_factors ( unique_R , inds , centers , widths ) start_idx = local_weight_offset [ s ] if s == n_local_subj - 1 : self . local_weights_ [ start_idx : ] = self . get_weights ( subj_data , F ) . ravel ( ) else : end_idx = local_weight_offset [ s + 1 ] self . local_weights_ [ start_idx : end_idx ] = self . get_weights ( subj_data , F ) . ravel ( ) return self | update local weight |
15,585 | def _fit_htfa ( self , data , R ) : rank , size = self . _get_mpi_info ( ) use_gather = True if self . n_subj % size == 0 else False n_local_subj = len ( R ) max_sample_tr , max_sample_voxel = self . _get_subject_info ( n_local_subj , data ) tfa = [ ] for s , subj_data in enumerate ( data ) : tfa . append ( TFA ( max_iter = self . max_local_iter , threshold = self . threshold , K = self . K , nlss_method = self . nlss_method , nlss_loss = self . nlss_loss , x_scale = self . x_scale , tr_solver = self . tr_solver , weight_method = self . weight_method , upper_ratio = self . upper_ratio , lower_ratio = self . lower_ratio , verbose = self . verbose , max_num_tr = max_sample_tr [ s ] , max_num_voxel = max_sample_voxel [ s ] ) ) gather_size , gather_offset , subject_map = self . _get_gather_offset ( size ) self . local_posterior_ = np . zeros ( n_local_subj * self . prior_size ) self . _init_prior_posterior ( rank , R , n_local_subj ) node_weight_size , local_weight_offset = self . _get_weight_size ( data , n_local_subj ) self . local_weights_ = np . zeros ( node_weight_size [ 0 ] ) m = 0 outer_converged = np . array ( [ 0 ] ) while m < self . max_global_iter and not outer_converged [ 0 ] : if ( self . verbose ) : logger . info ( "HTFA global iter %d " % ( m ) ) self . comm . Bcast ( self . global_prior_ , root = 0 ) for s , subj_data in enumerate ( data ) : tfa [ s ] . set_prior ( self . global_prior_ [ 0 : self . prior_size ] . copy ( ) ) tfa [ s ] . set_seed ( m * self . max_local_iter ) tfa [ s ] . fit ( subj_data , R = R [ s ] , template_prior = self . global_prior_ . copy ( ) ) tfa [ s ] . _assign_posterior ( ) start_idx = s * self . prior_size end_idx = ( s + 1 ) * self . prior_size self . local_posterior_ [ start_idx : end_idx ] = tfa [ s ] . local_posterior_ self . _gather_local_posterior ( use_gather , gather_size , gather_offset ) outer_converged = self . _update_global_posterior ( rank , m , outer_converged ) self . comm . 
Bcast ( outer_converged , root = 0 ) m += 1 self . _update_weight ( data , R , n_local_subj , local_weight_offset ) return self | HTFA main algorithm |
15,586 | def _check_input ( self , X , R ) : if not isinstance ( X , list ) : raise TypeError ( "Input data should be a list" ) if not isinstance ( R , list ) : raise TypeError ( "Coordinates should be a list" ) if len ( X ) < 1 : raise ValueError ( "Need at leat one subject to train the model.\ Got {0:d}" . format ( len ( X ) ) ) for idx , x in enumerate ( X ) : if not isinstance ( x , np . ndarray ) : raise TypeError ( "Each subject data should be an array" ) if x . ndim != 2 : raise TypeError ( "Each subject data should be 2D array" ) if not isinstance ( R [ idx ] , np . ndarray ) : raise TypeError ( "Each scanner coordinate matrix should be an array" ) if R [ idx ] . ndim != 2 : raise TypeError ( "Each scanner coordinate matrix should be 2D array" ) if x . shape [ 0 ] != R [ idx ] . shape [ 0 ] : raise TypeError ( "n_voxel should be the same in X[idx] and R[idx]" ) return self | Check whether input data and coordinates in right type |
15,587 | def get_sigma ( x , min_limit = - np . inf , max_limit = np . inf ) : z = np . append ( x , [ min_limit , max_limit ] ) sigma = np . ones ( x . shape ) for i in range ( x . size ) : xleft = z [ np . argmin ( [ ( x [ i ] - k ) if k < x [ i ] else np . inf for k in z ] ) ] xright = z [ np . argmin ( [ ( k - x [ i ] ) if k > x [ i ] else np . inf for k in z ] ) ] sigma [ i ] = max ( x [ i ] - xleft , xright - x [ i ] ) if sigma [ i ] == np . inf : sigma [ i ] = min ( x [ i ] - xleft , xright - x [ i ] ) if ( sigma [ i ] == - np . inf ) : sigma [ i ] = 1.0 return sigma | Compute the standard deviations around the points for a 1D GMM . |
15,588 | def get_next_sample ( x , y , min_limit = - np . inf , max_limit = np . inf ) : z = np . array ( list ( zip ( x , y ) ) , dtype = np . dtype ( [ ( 'x' , float ) , ( 'y' , float ) ] ) ) z = np . sort ( z , order = 'y' ) n = y . shape [ 0 ] g = int ( np . round ( np . ceil ( 0.15 * n ) ) ) ldata = z [ 0 : g ] gdata = z [ g : n ] lymin = ldata [ 'y' ] . min ( ) lymax = ldata [ 'y' ] . max ( ) weights = ( lymax - ldata [ 'y' ] ) / ( lymax - lymin ) lx = gmm_1d_distribution ( ldata [ 'x' ] , min_limit = min_limit , max_limit = max_limit , weights = weights ) gx = gmm_1d_distribution ( gdata [ 'x' ] , min_limit = min_limit , max_limit = max_limit ) samples = lx . get_samples ( n = 1000 ) ei = lx ( samples ) / gx ( samples ) h = ( x . max ( ) - x . min ( ) ) / ( 10 * x . size ) s = 0 while ( np . abs ( x - samples [ ei . argmax ( ) ] ) . min ( ) < h ) : ei [ ei . argmax ( ) ] = 0 s = s + 1 if ( s == samples . size ) : break xnext = samples [ ei . argmax ( ) ] return xnext | Get the next point to try given the previous samples . |
15,589 | def fmin ( loss_fn , space , max_evals , trials , init_random_evals = 30 , explore_prob = 0.2 ) : for s in space : if not hasattr ( space [ s ] [ 'dist' ] , 'rvs' ) : raise ValueError ( 'Unknown distribution type for variable' ) if 'lo' not in space [ s ] : space [ s ] [ 'lo' ] = - np . inf if 'hi' not in space [ s ] : space [ s ] [ 'hi' ] = np . inf if len ( trials ) > init_random_evals : init_random_evals = 0 for t in range ( max_evals ) : sdict = { } if t >= init_random_evals and np . random . random ( ) > explore_prob : use_random_sampling = False else : use_random_sampling = True yarray = np . array ( [ tr [ 'loss' ] for tr in trials ] ) for s in space : sarray = np . array ( [ tr [ s ] for tr in trials ] ) if use_random_sampling : sdict [ s ] = space [ s ] [ 'dist' ] . rvs ( ) else : sdict [ s ] = get_next_sample ( sarray , yarray , min_limit = space [ s ] [ 'lo' ] , max_limit = space [ s ] [ 'hi' ] ) logger . debug ( 'Explore' if use_random_sampling else 'Exploit' ) logger . info ( 'Next point ' , t , ' = ' , sdict ) y = loss_fn ( sdict ) sdict [ 'loss' ] = y trials . append ( sdict ) yarray = np . array ( [ tr [ 'loss' ] for tr in trials ] ) yargmin = yarray . argmin ( ) logger . info ( 'Best point so far = ' , trials [ yargmin ] ) return trials [ yargmin ] | Find the minimum of function through hyper parameter optimization . |
15,590 | def get_gmm_pdf ( self , x ) : def my_norm_pdf ( xt , mu , sigma ) : z = ( xt - mu ) / sigma return ( math . exp ( - 0.5 * z * z ) / ( math . sqrt ( 2. * np . pi ) * sigma ) ) y = 0 if ( x < self . min_limit ) : return 0 if ( x > self . max_limit ) : return 0 for _x in range ( self . points . size ) : y += ( my_norm_pdf ( x , self . points [ _x ] , self . sigma [ _x ] ) * self . weights [ _x ] ) / self . W_sum return y | Calculate the GMM likelihood for a single point . |
def get_samples(self, n):
    """Sample the GMM distribution.

    Draws ``n`` samples by (1) picking a mixture component according to the
    normalized weights and (2) drawing from that component's Gaussian,
    rejecting draws outside ``[self.min_limit, self.max_limit]``.

    NOTE: the exact sequence of RNG calls (component index batches, then a
    normal draw per attempt) is part of the observable behavior for seeded
    runs — do not reorder these calls.

    Parameters
    ----------
    n : int
        Number of samples to draw.

    Returns
    -------
    np.ndarray
        Array of ``n`` accepted samples.
    """
    normalized_w = self.weights / np.sum(self.weights)
    # Pre-draw a batch of n component indices according to the weights;
    # rejection may consume more than n, so the batch is refreshed below.
    get_rand_index = st.rv_discrete(
        values=(range(self.N), normalized_w)).rvs(size=n)
    samples = np.zeros(n)
    k = 0  # number of accepted samples so far
    j = 0  # position in the pre-drawn index batch
    while (k < n):
        i = get_rand_index[j]
        j = j + 1
        if (j == n):
            # Index batch exhausted (due to rejections): draw a fresh one.
            get_rand_index = st.rv_discrete(
                values=(range(self.N), normalized_w)).rvs(size=n)
            j = 0
        # Draw from the selected Gaussian component.
        v = np.random.normal(loc=self.points[i], scale=self.sigma[i])
        if (v > self.max_limit or v < self.min_limit):
            # Reject out-of-bounds draws and try again.
            continue
        else:
            samples[k] = v
            k = k + 1
        if (k == n):
            break
    return samples
15,592 | def _separate_epochs ( activity_data , epoch_list ) : time1 = time . time ( ) raw_data = [ ] labels = [ ] for sid in range ( len ( epoch_list ) ) : epoch = epoch_list [ sid ] for cond in range ( epoch . shape [ 0 ] ) : sub_epoch = epoch [ cond , : , : ] for eid in range ( epoch . shape [ 1 ] ) : r = np . sum ( sub_epoch [ eid , : ] ) if r > 0 : mat = activity_data [ sid ] [ : , sub_epoch [ eid , : ] == 1 ] mat = np . ascontiguousarray ( mat . T ) mat = zscore ( mat , axis = 0 , ddof = 0 ) mat = np . nan_to_num ( mat ) mat = mat / math . sqrt ( r ) raw_data . append ( mat ) labels . append ( cond ) time2 = time . time ( ) logger . debug ( 'epoch separation done, takes %.2f s' % ( time2 - time1 ) ) return raw_data , labels | create data epoch by epoch |
def _randomize_single_subject(data, seed=None):
    """Randomly permute the voxels of the subject.

    Shuffles ``data`` in place along its first axis using numpy's global
    RNG; when ``seed`` is given, the RNG is seeded first so the permutation
    is reproducible.

    Parameters
    ----------
    data : np.ndarray
        Subject data; its rows are permuted in place.
    seed : int, optional
        Seed for the numpy global random generator.

    Returns
    -------
    None
    """
    if seed is not None:
        np.random.seed(seed)
    np.random.shuffle(data)
def _randomize_subject_list(data_list, random):
    """Randomly permute the voxels of every subject in the list.

    ``RandomType.REPRODUCIBLE`` seeds each subject's shuffle with its list
    index, ``RandomType.UNREPRODUCIBLE`` shuffles without seeding, and any
    other value (e.g. ``RandomType.NORANDOM``) leaves the data untouched.

    Parameters
    ----------
    data_list : list of np.ndarray
        Subject data, each shuffled in place.
    random : RandomType
        Randomization mode.

    Returns
    -------
    None
    """
    if random == RandomType.REPRODUCIBLE:
        # The subject index doubles as the seed for reproducibility.
        for seed, data in enumerate(data_list):
            _randomize_single_subject(data, seed=seed)
    elif random == RandomType.UNREPRODUCIBLE:
        for data in data_list:
            _randomize_single_subject(data)
def prepare_fcma_data(images, conditions, mask1, mask2=None,
                      random=RandomType.NORANDOM, comm=MPI.COMM_WORLD):
    """Prepare data for correlation-based computation and analysis.

    Rank 0 applies the mask(s), optionally randomizes voxels, and separates
    the data into epochs; the per-epoch matrices and labels are then
    broadcast to every rank, one subject at a time.

    Parameters
    ----------
    images : iterable
        Input images (consumed by ``mask_images``/``multimask_images`` —
        presumably nibabel-like objects; verify against callers).
    conditions : list of np.ndarray
        Epoch definitions, one (conditions, epochs, TRs) mask per subject.
    mask1 : np.ndarray
        Primary mask.
    mask2 : np.ndarray, optional
        Secondary mask; when given, a second masked dataset is produced.
    random : RandomType
        Voxel randomization mode (applied on rank 0 before epoch separation).
    comm : MPI communicator
        Communicator used for the broadcasts.

    Returns
    -------
    raw_data1 : list of np.ndarray
        Epoch matrices under ``mask1`` (identical on every rank).
    raw_data2 : list of np.ndarray or None
        Epoch matrices under ``mask2``, or None when ``mask2`` is None.
    labels : list of int
        Condition label per epoch.
    """
    rank = comm.Get_rank()
    labels = []
    raw_data1 = []
    raw_data2 = []
    if rank == 0:
        logger.info('start to apply masks and separate epochs')
        if mask2 is not None:
            masks = (mask1, mask2)
            # multimask_images yields per-subject tuples, one entry per mask.
            activity_data1, activity_data2 = zip(*multimask_images(
                images, masks, np.float32))
            _randomize_subject_list(activity_data2, random)
            raw_data2, _ = _separate_epochs(activity_data2, conditions)
        else:
            activity_data1 = list(mask_images(images, mask1, np.float32))
        _randomize_subject_list(activity_data1, random)
        raw_data1, labels = _separate_epochs(activity_data1, conditions)
    time1 = time.time()
    # Broadcast the epoch count first so non-root ranks know how many
    # per-epoch broadcasts to expect.
    raw_data_length = len(raw_data1)
    raw_data_length = comm.bcast(raw_data_length)
    # Broadcast epoch by epoch to keep individual message sizes small.
    for i in range(raw_data_length):
        if rank != 0:
            # Non-root ranks grow placeholder slots to receive into.
            raw_data1.append(None)
            if mask2 is not None:
                raw_data2.append(None)
        raw_data1[i] = comm.bcast(raw_data1[i], root=0)
        if mask2 is not None:
            raw_data2[i] = comm.bcast(raw_data2[i], root=0)
    if comm.Get_size() > 1:
        labels = comm.bcast(labels, root=0)
    if rank == 0:
        time2 = time.time()
        logger.info('data broadcasting done, takes %.2f s' % (time2 - time1))
    if mask2 is None:
        raw_data2 = None
    return raw_data1, raw_data2, labels
def generate_epochs_info(epoch_list):
    """Flatten per-subject epoch masks into a list of epoch descriptors.

    Parameters
    ----------
    epoch_list : list of np.ndarray
        One (conditions, epochs, TRs) binary mask per subject.

    Returns
    -------
    list of tuple
        One ``(condition, subject_id, start_TR, end_TR)`` tuple per epoch
        that covers at least one TR; ``end_TR`` is exclusive.
    """
    start_time = time.time()
    epoch_info = []
    for sid, epoch in enumerate(epoch_list):
        # Walk every (condition, epoch) cell of the mask.
        for cond, eid in np.ndindex(epoch.shape[0], epoch.shape[1]):
            row = epoch[cond, eid, :]
            r = np.sum(row)
            if r > 0:
                # Epochs are assumed contiguous: first nonzero TR + length.
                start = np.nonzero(row)[0][0]
                epoch_info.append((cond, sid, start, start + r))
    logger.debug('epoch separation done, takes %.2f s' %
                 (time.time() - start_time))
    return epoch_info
def prepare_mvpa_data(images, conditions, mask):
    """Prepare data for activity-based model training and prediction.

    Masks the images, averages each epoch's TRs into one feature vector,
    and z-scores the epochs within each subject.

    Parameters
    ----------
    images : iterable
        Input images (consumed by ``mask_images`` — presumably
        nibabel-like objects; verify against callers).
    conditions : list of np.ndarray
        Epoch definitions, one (conditions, epochs, TRs) mask per subject.
    mask : np.ndarray
        Mask applied to every image.

    Returns
    -------
    processed_data : np.ndarray, shape (voxels, epochs)
        Per-epoch averaged, per-subject z-scored activity.
    labels : np.ndarray, shape (epochs,)
        Condition label per epoch.
    """
    activity_data = list(mask_images(images, mask, np.float32))
    epoch_info = generate_epochs_info(conditions)
    num_epochs = len(epoch_info)
    (d1, _) = activity_data[0].shape
    processed_data = np.empty([d1, num_epochs])
    labels = np.empty(num_epochs)
    # subject_count tracks how many consecutive epochs belong to each
    # subject; the initial [0] is a sentinel (never incremented, because
    # the first epoch's sid never equals the initial cur_sid of -1), and
    # it is harmless in the z-scoring loop below (i > 1 check).
    subject_count = [0]
    cur_sid = -1
    # Epochs are assumed grouped by subject in epoch_info order.
    for idx, epoch in enumerate(epoch_info):
        labels[idx] = epoch[0]
        if cur_sid != epoch[1]:
            subject_count.append(0)
            cur_sid = epoch[1]
        subject_count[-1] += 1
        # Average this epoch's TRs into a single feature vector.
        processed_data[:, idx] = \
            np.mean(activity_data[cur_sid][:, epoch[2]:epoch[3]],
                    axis=1)
    # Z-score across each subject's epochs (only when a subject has more
    # than one epoch, otherwise the std would be zero).
    cur_epoch = 0
    for i in subject_count:
        if i > 1:
            processed_data[:, cur_epoch:cur_epoch + i] = \
                zscore(processed_data[:, cur_epoch:cur_epoch + i],
                       axis=1, ddof=0)
        cur_epoch += i
    # Constant voxels yield NaNs after z-scoring; zero them out.
    processed_data = np.nan_to_num(processed_data)
    return processed_data, labels
def prepare_searchlight_mvpa_data(images, conditions, data_type=np.float32,
                                  random=RandomType.NORANDOM):
    """Obtain the data for activity-based voxel selection using Searchlight.

    Loads each subject's 4D volume, optionally randomizes voxels, averages
    each epoch's TRs into one 3D volume, and z-scores the epochs within
    each subject.

    Parameters
    ----------
    images : iterable
        Image objects exposing ``get_data()`` and ``get_filename()``
        (presumably nibabel images; verify against callers).
    conditions : list of np.ndarray
        Epoch definitions, one (conditions, epochs, TRs) mask per subject.
    data_type : np.dtype, optional
        dtype of the processed data.
    random : RandomType, optional
        Voxel randomization mode applied per subject before averaging.

    Returns
    -------
    processed_data : np.ndarray, shape (d1, d2, d3, epochs)
        Per-epoch averaged, per-subject z-scored activity.
    labels : np.ndarray, shape (epochs,)
        Condition label per epoch.
    """
    time1 = time.time()
    epoch_info = generate_epochs_info(conditions)
    num_epochs = len(epoch_info)
    processed_data = None
    logger.info('there are %d subjects, and in total %d epochs' %
                (len(conditions), num_epochs))
    labels = np.empty(num_epochs)
    for idx, epoch in enumerate(epoch_info):
        labels[idx] = epoch[0]
    subject_count = np.zeros(len(conditions), dtype=np.int32)

    logger.info('start to apply masks and separate epochs')
    for sid, f in enumerate(images):
        data = f.get_data().astype(data_type)
        [d1, d2, d3, d4] = data.shape
        # Randomization shuffles voxels, so the volume is temporarily
        # flattened to (voxels, TRs) and restored afterwards.
        if random == RandomType.REPRODUCIBLE:
            data = data.reshape((d1 * d2 * d3, d4))
            _randomize_single_subject(data, seed=sid)
            data = data.reshape((d1, d2, d3, d4))
        elif random == RandomType.UNREPRODUCIBLE:
            data = data.reshape((d1 * d2 * d3, d4))
            _randomize_single_subject(data)
            data = data.reshape((d1, d2, d3, d4))
        if processed_data is None:
            # Allocate once, based on the first subject's spatial shape.
            processed_data = np.empty([d1, d2, d3, num_epochs],
                                      dtype=data_type)
        # Average every epoch belonging to this subject over its TRs.
        for idx, epoch in enumerate(epoch_info):
            if sid == epoch[1]:
                subject_count[sid] += 1
                processed_data[:, :, :, idx] = \
                    np.mean(data[:, :, :, epoch[2]:epoch[3]], axis=3)
        logger.debug('file %s is loaded and processed, with data shape %s',
                     f.get_filename(), data.shape)

    # Z-score across each subject's epochs (epochs are assumed grouped by
    # subject along the last axis; skip subjects with a single epoch).
    cur_epoch = 0
    for i in subject_count:
        if i > 1:
            processed_data[:, :, :, cur_epoch:cur_epoch + i] = \
                zscore(processed_data[:, :, :, cur_epoch:cur_epoch + i],
                       axis=3, ddof=0)
        cur_epoch += i
    # Constant voxels yield NaNs after z-scoring; zero them out.
    processed_data = np.nan_to_num(processed_data)
    time2 = time.time()
    logger.info('data processed for activity-based voxel selection, takes %.2f s' % (time2 - time1))
    return processed_data, labels
def from_tri_2_sym(tri, dim):
    """Convert an upper-triangular matrix in 1D format to a 2D symmetric matrix.

    Parameters
    ----------
    tri : np.ndarray, 1D, length dim * (dim + 1) / 2
        Upper-triangular entries in ``np.triu_indices`` (row-major) order.
    dim : int
        Number of rows/columns of the output matrix.

    Returns
    -------
    symm : np.ndarray, shape (dim, dim)
        The full symmetric matrix.

    Notes
    -----
    The previous implementation filled only the upper triangle and left the
    lower triangle zero, so the result was not actually symmetric as the
    name and documentation promise; the strict upper triangle is now
    mirrored below the diagonal (the diagonal itself is added only once).
    """
    symm = np.zeros((dim, dim))
    symm[np.triu_indices(dim)] = tri
    # Mirror the strict upper triangle into the lower triangle.
    symm += np.triu(symm, 1).T
    return symm
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.