repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
kontron/python-ipmi | pyipmi/interfaces/rmcp.py | Rmcp.send_and_receive | def send_and_receive(self, req):
"""Interface function to send and receive an IPMI message.
target: IPMI target
req: IPMI message request
Returns the IPMI message response.
"""
rx_data = self._send_and_receive(target=req.target,
lun=req.lun,
netfn=req.netfn,
cmdid=req.cmdid,
payload=encode_message(req))
rsp = create_message(req.netfn + 1, req.cmdid, req.group_extension)
decode_message(rsp, rx_data)
return rsp | python | def send_and_receive(self, req):
"""Interface function to send and receive an IPMI message.
target: IPMI target
req: IPMI message request
Returns the IPMI message response.
"""
rx_data = self._send_and_receive(target=req.target,
lun=req.lun,
netfn=req.netfn,
cmdid=req.cmdid,
payload=encode_message(req))
rsp = create_message(req.netfn + 1, req.cmdid, req.group_extension)
decode_message(rsp, rx_data)
return rsp | [
"def",
"send_and_receive",
"(",
"self",
",",
"req",
")",
":",
"rx_data",
"=",
"self",
".",
"_send_and_receive",
"(",
"target",
"=",
"req",
".",
"target",
",",
"lun",
"=",
"req",
".",
"lun",
",",
"netfn",
"=",
"req",
".",
"netfn",
",",
"cmdid",
"=",
... | Interface function to send and receive an IPMI message.
target: IPMI target
req: IPMI message request
Returns the IPMI message response. | [
"Interface",
"function",
"to",
"send",
"and",
"receive",
"an",
"IPMI",
"message",
"."
] | ce46da47a37dd683615f32d04a10eda069aa569a | https://github.com/kontron/python-ipmi/blob/ce46da47a37dd683615f32d04a10eda069aa569a/pyipmi/interfaces/rmcp.py#L557-L572 | train | 204,400 |
kontron/python-ipmi | pyipmi/sdr.py | Sdr.delete_sdr | def delete_sdr(self, record_id):
"""
Deletes the sensor record specified by 'record_id'.
"""
reservation_id = self.reserve_device_sdr_repository()
rsp = self.send_message_with_name('DeleteSdr',
reservation_id=reservation_id,
record_id=record_id)
return rsp.record_id | python | def delete_sdr(self, record_id):
"""
Deletes the sensor record specified by 'record_id'.
"""
reservation_id = self.reserve_device_sdr_repository()
rsp = self.send_message_with_name('DeleteSdr',
reservation_id=reservation_id,
record_id=record_id)
return rsp.record_id | [
"def",
"delete_sdr",
"(",
"self",
",",
"record_id",
")",
":",
"reservation_id",
"=",
"self",
".",
"reserve_device_sdr_repository",
"(",
")",
"rsp",
"=",
"self",
".",
"send_message_with_name",
"(",
"'DeleteSdr'",
",",
"reservation_id",
"=",
"reservation_id",
",",
... | Deletes the sensor record specified by 'record_id'. | [
"Deletes",
"the",
"sensor",
"record",
"specified",
"by",
"record_id",
"."
] | ce46da47a37dd683615f32d04a10eda069aa569a | https://github.com/kontron/python-ipmi/blob/ce46da47a37dd683615f32d04a10eda069aa569a/pyipmi/sdr.py#L124-L133 | train | 204,401 |
kontron/python-ipmi | pyipmi/helper.py | get_sdr_data_helper | def get_sdr_data_helper(reserve_fn, get_fn, record_id, reservation_id=None):
"""Helper function to retrieve the sdr data using the specified
functions.
This can be used for SDRs from the Sensor Device or form the SDR
repository.
"""
if reservation_id is None:
reservation_id = reserve_fn()
(next_id, data) = get_fn(reservation_id, record_id, 0, 5)
header = ByteBuffer(data)
record_id = header.pop_unsigned_int(2)
record_version = header.pop_unsigned_int(1)
record_type = header.pop_unsigned_int(1)
record_payload_length = header.pop_unsigned_int(1)
record_length = record_payload_length + 5
record_data = ByteBuffer(data)
offset = len(record_data)
max_req_len = 20
retry = 20
# now get the other record data
while True:
retry -= 1
if retry == 0:
raise RetryError()
length = max_req_len
if (offset + length) > record_length:
length = record_length - offset
try:
(next_id, data) = get_fn(reservation_id, record_id, offset, length)
except CompletionCodeError as e:
if e.cc == constants.CC_CANT_RET_NUM_REQ_BYTES:
# reduce max lenght
max_req_len -= 4
if max_req_len <= 0:
retry = 0
else:
raise CompletionCodeError(e.cc)
record_data.extend(data[:])
offset = len(record_data)
if len(record_data) >= record_length:
break
return (next_id, record_data) | python | def get_sdr_data_helper(reserve_fn, get_fn, record_id, reservation_id=None):
"""Helper function to retrieve the sdr data using the specified
functions.
This can be used for SDRs from the Sensor Device or form the SDR
repository.
"""
if reservation_id is None:
reservation_id = reserve_fn()
(next_id, data) = get_fn(reservation_id, record_id, 0, 5)
header = ByteBuffer(data)
record_id = header.pop_unsigned_int(2)
record_version = header.pop_unsigned_int(1)
record_type = header.pop_unsigned_int(1)
record_payload_length = header.pop_unsigned_int(1)
record_length = record_payload_length + 5
record_data = ByteBuffer(data)
offset = len(record_data)
max_req_len = 20
retry = 20
# now get the other record data
while True:
retry -= 1
if retry == 0:
raise RetryError()
length = max_req_len
if (offset + length) > record_length:
length = record_length - offset
try:
(next_id, data) = get_fn(reservation_id, record_id, offset, length)
except CompletionCodeError as e:
if e.cc == constants.CC_CANT_RET_NUM_REQ_BYTES:
# reduce max lenght
max_req_len -= 4
if max_req_len <= 0:
retry = 0
else:
raise CompletionCodeError(e.cc)
record_data.extend(data[:])
offset = len(record_data)
if len(record_data) >= record_length:
break
return (next_id, record_data) | [
"def",
"get_sdr_data_helper",
"(",
"reserve_fn",
",",
"get_fn",
",",
"record_id",
",",
"reservation_id",
"=",
"None",
")",
":",
"if",
"reservation_id",
"is",
"None",
":",
"reservation_id",
"=",
"reserve_fn",
"(",
")",
"(",
"next_id",
",",
"data",
")",
"=",
... | Helper function to retrieve the sdr data using the specified
functions.
This can be used for SDRs from the Sensor Device or form the SDR
repository. | [
"Helper",
"function",
"to",
"retrieve",
"the",
"sdr",
"data",
"using",
"the",
"specified",
"functions",
"."
] | ce46da47a37dd683615f32d04a10eda069aa569a | https://github.com/kontron/python-ipmi/blob/ce46da47a37dd683615f32d04a10eda069aa569a/pyipmi/helper.py#L49-L100 | train | 204,402 |
kontron/python-ipmi | pyipmi/helper.py | clear_repository_helper | def clear_repository_helper(reserve_fn, clear_fn, retry=5, reservation=None):
"""Helper function to start repository erasure and wait until finish.
This helper is used by clear_sel and clear_sdr_repository.
"""
if reservation is None:
reservation = reserve_fn()
# start erasure
reservation = _clear_repository(reserve_fn, clear_fn,
INITIATE_ERASE, retry, reservation)
# give some time to clear
time.sleep(0.5)
# wait until finish
reservation = _clear_repository(reserve_fn, clear_fn,
GET_ERASE_STATUS, retry, reservation) | python | def clear_repository_helper(reserve_fn, clear_fn, retry=5, reservation=None):
"""Helper function to start repository erasure and wait until finish.
This helper is used by clear_sel and clear_sdr_repository.
"""
if reservation is None:
reservation = reserve_fn()
# start erasure
reservation = _clear_repository(reserve_fn, clear_fn,
INITIATE_ERASE, retry, reservation)
# give some time to clear
time.sleep(0.5)
# wait until finish
reservation = _clear_repository(reserve_fn, clear_fn,
GET_ERASE_STATUS, retry, reservation) | [
"def",
"clear_repository_helper",
"(",
"reserve_fn",
",",
"clear_fn",
",",
"retry",
"=",
"5",
",",
"reservation",
"=",
"None",
")",
":",
"if",
"reservation",
"is",
"None",
":",
"reservation",
"=",
"reserve_fn",
"(",
")",
"# start erasure",
"reservation",
"=",
... | Helper function to start repository erasure and wait until finish.
This helper is used by clear_sel and clear_sdr_repository. | [
"Helper",
"function",
"to",
"start",
"repository",
"erasure",
"and",
"wait",
"until",
"finish",
".",
"This",
"helper",
"is",
"used",
"by",
"clear_sel",
"and",
"clear_sdr_repository",
"."
] | ce46da47a37dd683615f32d04a10eda069aa569a | https://github.com/kontron/python-ipmi/blob/ce46da47a37dd683615f32d04a10eda069aa569a/pyipmi/helper.py#L134-L151 | train | 204,403 |
kontron/python-ipmi | pyipmi/interfaces/ipmb.py | encode_ipmb_msg | def encode_ipmb_msg(header, data):
"""Encode an IPMB message.
header: IPMB header object
data: IPMI message data as bytestring
Returns the message as bytestring.
"""
msg = array('B')
msg.fromstring(header.encode())
if data is not None:
a = array('B')
a.fromstring(data)
msg.extend(a)
msg.append(checksum(msg[3:]))
return msg.tostring() | python | def encode_ipmb_msg(header, data):
"""Encode an IPMB message.
header: IPMB header object
data: IPMI message data as bytestring
Returns the message as bytestring.
"""
msg = array('B')
msg.fromstring(header.encode())
if data is not None:
a = array('B')
a.fromstring(data)
msg.extend(a)
msg.append(checksum(msg[3:]))
return msg.tostring() | [
"def",
"encode_ipmb_msg",
"(",
"header",
",",
"data",
")",
":",
"msg",
"=",
"array",
"(",
"'B'",
")",
"msg",
".",
"fromstring",
"(",
"header",
".",
"encode",
"(",
")",
")",
"if",
"data",
"is",
"not",
"None",
":",
"a",
"=",
"array",
"(",
"'B'",
")... | Encode an IPMB message.
header: IPMB header object
data: IPMI message data as bytestring
Returns the message as bytestring. | [
"Encode",
"an",
"IPMB",
"message",
"."
] | ce46da47a37dd683615f32d04a10eda069aa569a | https://github.com/kontron/python-ipmi/blob/ce46da47a37dd683615f32d04a10eda069aa569a/pyipmi/interfaces/ipmb.py#L87-L103 | train | 204,404 |
kontron/python-ipmi | pyipmi/interfaces/ipmb.py | encode_send_message | def encode_send_message(payload, rq_sa, rs_sa, channel, seq, tracking=1):
"""Encode a send message command and embedd the message to be send.
payload: the message to be send as bytestring
rq_sa: the requester source address
rs_sa: the responder source address
channel: the channel
seq: the sequence number
tracking: tracking
Returns an encode send message as bytestring
"""
req = create_request_by_name('SendMessage')
req.channel.number = channel
req.channel.tracking = tracking
data = encode_message(req)
header = IpmbHeaderReq()
header.netfn = req.__netfn__
header.rs_lun = 0
header.rs_sa = rs_sa
header.rq_seq = seq
header.rq_lun = 0
header.rq_sa = rq_sa
header.cmd_id = req.__cmdid__
return encode_ipmb_msg(header, data + payload) | python | def encode_send_message(payload, rq_sa, rs_sa, channel, seq, tracking=1):
"""Encode a send message command and embedd the message to be send.
payload: the message to be send as bytestring
rq_sa: the requester source address
rs_sa: the responder source address
channel: the channel
seq: the sequence number
tracking: tracking
Returns an encode send message as bytestring
"""
req = create_request_by_name('SendMessage')
req.channel.number = channel
req.channel.tracking = tracking
data = encode_message(req)
header = IpmbHeaderReq()
header.netfn = req.__netfn__
header.rs_lun = 0
header.rs_sa = rs_sa
header.rq_seq = seq
header.rq_lun = 0
header.rq_sa = rq_sa
header.cmd_id = req.__cmdid__
return encode_ipmb_msg(header, data + payload) | [
"def",
"encode_send_message",
"(",
"payload",
",",
"rq_sa",
",",
"rs_sa",
",",
"channel",
",",
"seq",
",",
"tracking",
"=",
"1",
")",
":",
"req",
"=",
"create_request_by_name",
"(",
"'SendMessage'",
")",
"req",
".",
"channel",
".",
"number",
"=",
"channel"... | Encode a send message command and embedd the message to be send.
payload: the message to be send as bytestring
rq_sa: the requester source address
rs_sa: the responder source address
channel: the channel
seq: the sequence number
tracking: tracking
Returns an encode send message as bytestring | [
"Encode",
"a",
"send",
"message",
"command",
"and",
"embedd",
"the",
"message",
"to",
"be",
"send",
"."
] | ce46da47a37dd683615f32d04a10eda069aa569a | https://github.com/kontron/python-ipmi/blob/ce46da47a37dd683615f32d04a10eda069aa569a/pyipmi/interfaces/ipmb.py#L106-L132 | train | 204,405 |
kontron/python-ipmi | pyipmi/interfaces/ipmb.py | rx_filter | def rx_filter(header, data):
"""Check if the message in rx_data matches to the information in header.
The following checks are done:
- Header checksum
- Payload checksum
- NetFn matching
- LUN matching
- Command Id matching
header: the header to compare with
data: the received message as bytestring
"""
rsp_header = IpmbHeaderRsp()
rsp_header.decode(data)
data = array('B', data)
checks = [
(checksum(data[0:3]), 0, 'Header checksum failed'),
(checksum(data[3:]), 0, 'payload checksum failed'),
# rsp_header.rq_sa, header.rq_sa, 'slave address mismatch'),
(rsp_header.netfn, header.netfn | 1, 'NetFn mismatch'),
# rsp_header.rs_sa, header.rs_sa, 'target address mismatch'),
# rsp_header.rq_lun, header.rq_lun, 'request LUN mismatch'),
(rsp_header.rs_lun, header.rs_lun, 'responder LUN mismatch'),
(rsp_header.rq_seq, header.rq_seq, 'sequence number mismatch'),
(rsp_header.cmd_id, header.cmd_id, 'command id mismatch'),
]
match = True
for left, right, msg in checks:
if left != right:
log().debug('{:s}: {:d} {:d}'.format(msg, left, right))
match = False
return match | python | def rx_filter(header, data):
"""Check if the message in rx_data matches to the information in header.
The following checks are done:
- Header checksum
- Payload checksum
- NetFn matching
- LUN matching
- Command Id matching
header: the header to compare with
data: the received message as bytestring
"""
rsp_header = IpmbHeaderRsp()
rsp_header.decode(data)
data = array('B', data)
checks = [
(checksum(data[0:3]), 0, 'Header checksum failed'),
(checksum(data[3:]), 0, 'payload checksum failed'),
# rsp_header.rq_sa, header.rq_sa, 'slave address mismatch'),
(rsp_header.netfn, header.netfn | 1, 'NetFn mismatch'),
# rsp_header.rs_sa, header.rs_sa, 'target address mismatch'),
# rsp_header.rq_lun, header.rq_lun, 'request LUN mismatch'),
(rsp_header.rs_lun, header.rs_lun, 'responder LUN mismatch'),
(rsp_header.rq_seq, header.rq_seq, 'sequence number mismatch'),
(rsp_header.cmd_id, header.cmd_id, 'command id mismatch'),
]
match = True
for left, right, msg in checks:
if left != right:
log().debug('{:s}: {:d} {:d}'.format(msg, left, right))
match = False
return match | [
"def",
"rx_filter",
"(",
"header",
",",
"data",
")",
":",
"rsp_header",
"=",
"IpmbHeaderRsp",
"(",
")",
"rsp_header",
".",
"decode",
"(",
"data",
")",
"data",
"=",
"array",
"(",
"'B'",
",",
"data",
")",
"checks",
"=",
"[",
"(",
"checksum",
"(",
"data... | Check if the message in rx_data matches to the information in header.
The following checks are done:
- Header checksum
- Payload checksum
- NetFn matching
- LUN matching
- Command Id matching
header: the header to compare with
data: the received message as bytestring | [
"Check",
"if",
"the",
"message",
"in",
"rx_data",
"matches",
"to",
"the",
"information",
"in",
"header",
"."
] | ce46da47a37dd683615f32d04a10eda069aa569a | https://github.com/kontron/python-ipmi/blob/ce46da47a37dd683615f32d04a10eda069aa569a/pyipmi/interfaces/ipmb.py#L181-L219 | train | 204,406 |
kontron/python-ipmi | pyipmi/msgs/message.py | Message._pack | def _pack(self):
"""Pack the message and return an array."""
data = ByteBuffer()
if not hasattr(self, '__fields__'):
return data.array
for field in self.__fields__:
field.encode(self, data)
return data.array | python | def _pack(self):
"""Pack the message and return an array."""
data = ByteBuffer()
if not hasattr(self, '__fields__'):
return data.array
for field in self.__fields__:
field.encode(self, data)
return data.array | [
"def",
"_pack",
"(",
"self",
")",
":",
"data",
"=",
"ByteBuffer",
"(",
")",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'__fields__'",
")",
":",
"return",
"data",
".",
"array",
"for",
"field",
"in",
"self",
".",
"__fields__",
":",
"field",
".",
"encod... | Pack the message and return an array. | [
"Pack",
"the",
"message",
"and",
"return",
"an",
"array",
"."
] | ce46da47a37dd683615f32d04a10eda069aa569a | https://github.com/kontron/python-ipmi/blob/ce46da47a37dd683615f32d04a10eda069aa569a/pyipmi/msgs/message.py#L353-L361 | train | 204,407 |
kontron/python-ipmi | pyipmi/msgs/message.py | Message._encode | def _encode(self):
"""Encode the message and return a bytestring."""
data = ByteBuffer()
if not hasattr(self, '__fields__'):
return data.tostring()
for field in self.__fields__:
field.encode(self, data)
return data.tostring() | python | def _encode(self):
"""Encode the message and return a bytestring."""
data = ByteBuffer()
if not hasattr(self, '__fields__'):
return data.tostring()
for field in self.__fields__:
field.encode(self, data)
return data.tostring() | [
"def",
"_encode",
"(",
"self",
")",
":",
"data",
"=",
"ByteBuffer",
"(",
")",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'__fields__'",
")",
":",
"return",
"data",
".",
"tostring",
"(",
")",
"for",
"field",
"in",
"self",
".",
"__fields__",
":",
"fiel... | Encode the message and return a bytestring. | [
"Encode",
"the",
"message",
"and",
"return",
"a",
"bytestring",
"."
] | ce46da47a37dd683615f32d04a10eda069aa569a | https://github.com/kontron/python-ipmi/blob/ce46da47a37dd683615f32d04a10eda069aa569a/pyipmi/msgs/message.py#L363-L371 | train | 204,408 |
kontron/python-ipmi | pyipmi/msgs/message.py | Message._decode | def _decode(self, data):
"""Decode the bytestring message."""
if not hasattr(self, '__fields__'):
raise NotImplementedError('You have to overwrite this method')
data = ByteBuffer(data)
cc = None
for field in self.__fields__:
try:
field.decode(self, data)
except CompletionCodeError as e:
# stop decoding on completion code != 0
cc = e.cc
break
if (cc is None or cc == 0) and len(data) > 0:
raise DecodingError('Data has extra bytes') | python | def _decode(self, data):
"""Decode the bytestring message."""
if not hasattr(self, '__fields__'):
raise NotImplementedError('You have to overwrite this method')
data = ByteBuffer(data)
cc = None
for field in self.__fields__:
try:
field.decode(self, data)
except CompletionCodeError as e:
# stop decoding on completion code != 0
cc = e.cc
break
if (cc is None or cc == 0) and len(data) > 0:
raise DecodingError('Data has extra bytes') | [
"def",
"_decode",
"(",
"self",
",",
"data",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'__fields__'",
")",
":",
"raise",
"NotImplementedError",
"(",
"'You have to overwrite this method'",
")",
"data",
"=",
"ByteBuffer",
"(",
"data",
")",
"cc",
"=",... | Decode the bytestring message. | [
"Decode",
"the",
"bytestring",
"message",
"."
] | ce46da47a37dd683615f32d04a10eda069aa569a | https://github.com/kontron/python-ipmi/blob/ce46da47a37dd683615f32d04a10eda069aa569a/pyipmi/msgs/message.py#L373-L389 | train | 204,409 |
kontron/python-ipmi | pyipmi/interfaces/aardvark.py | Aardvark._send_and_receive | def _send_and_receive(self, target, lun, netfn, cmdid, payload):
"""Send and receive data using aardvark interface.
target:
lun:
netfn:
cmdid:
payload: IPMI message payload as bytestring
Returns the received data as bytestring
"""
self._inc_sequence_number()
# assemble IPMB header
header = IpmbHeaderReq()
header.netfn = netfn
header.rs_lun = lun
header.rs_sa = target.ipmb_address
header.rq_seq = self.next_sequence_number
header.rq_lun = 0
header.rq_sa = self.slave_address
header.cmd_id = cmdid
retries = 0
while retries < self.max_retries:
try:
self._send_raw(header, payload)
rx_data = self._receive_raw(header)
break
except IpmiTimeoutError:
log().warning('I2C transaction timed out'),
retries += 1
else:
raise IpmiTimeoutError()
return rx_data.tostring()[5:-1] | python | def _send_and_receive(self, target, lun, netfn, cmdid, payload):
"""Send and receive data using aardvark interface.
target:
lun:
netfn:
cmdid:
payload: IPMI message payload as bytestring
Returns the received data as bytestring
"""
self._inc_sequence_number()
# assemble IPMB header
header = IpmbHeaderReq()
header.netfn = netfn
header.rs_lun = lun
header.rs_sa = target.ipmb_address
header.rq_seq = self.next_sequence_number
header.rq_lun = 0
header.rq_sa = self.slave_address
header.cmd_id = cmdid
retries = 0
while retries < self.max_retries:
try:
self._send_raw(header, payload)
rx_data = self._receive_raw(header)
break
except IpmiTimeoutError:
log().warning('I2C transaction timed out'),
retries += 1
else:
raise IpmiTimeoutError()
return rx_data.tostring()[5:-1] | [
"def",
"_send_and_receive",
"(",
"self",
",",
"target",
",",
"lun",
",",
"netfn",
",",
"cmdid",
",",
"payload",
")",
":",
"self",
".",
"_inc_sequence_number",
"(",
")",
"# assemble IPMB header",
"header",
"=",
"IpmbHeaderReq",
"(",
")",
"header",
".",
"netfn... | Send and receive data using aardvark interface.
target:
lun:
netfn:
cmdid:
payload: IPMI message payload as bytestring
Returns the received data as bytestring | [
"Send",
"and",
"receive",
"data",
"using",
"aardvark",
"interface",
"."
] | ce46da47a37dd683615f32d04a10eda069aa569a | https://github.com/kontron/python-ipmi/blob/ce46da47a37dd683615f32d04a10eda069aa569a/pyipmi/interfaces/aardvark.py#L131-L168 | train | 204,410 |
kontron/python-ipmi | pyipmi/sensor.py | Sensor.get_device_sdr | def get_device_sdr(self, record_id, reservation_id=None):
"""Collects all data from the sensor device to get the SDR
specified by record id.
`record_id` the Record ID.
`reservation_id=None` can be set. if None the reservation ID will
be determined.
"""
(next_id, record_data) = \
get_sdr_data_helper(self.reserve_device_sdr_repository,
self._get_device_sdr_chunk,
record_id, reservation_id)
return sdr.SdrCommon.from_data(record_data, next_id) | python | def get_device_sdr(self, record_id, reservation_id=None):
"""Collects all data from the sensor device to get the SDR
specified by record id.
`record_id` the Record ID.
`reservation_id=None` can be set. if None the reservation ID will
be determined.
"""
(next_id, record_data) = \
get_sdr_data_helper(self.reserve_device_sdr_repository,
self._get_device_sdr_chunk,
record_id, reservation_id)
return sdr.SdrCommon.from_data(record_data, next_id) | [
"def",
"get_device_sdr",
"(",
"self",
",",
"record_id",
",",
"reservation_id",
"=",
"None",
")",
":",
"(",
"next_id",
",",
"record_data",
")",
"=",
"get_sdr_data_helper",
"(",
"self",
".",
"reserve_device_sdr_repository",
",",
"self",
".",
"_get_device_sdr_chunk",... | Collects all data from the sensor device to get the SDR
specified by record id.
`record_id` the Record ID.
`reservation_id=None` can be set. if None the reservation ID will
be determined. | [
"Collects",
"all",
"data",
"from",
"the",
"sensor",
"device",
"to",
"get",
"the",
"SDR",
"specified",
"by",
"record",
"id",
"."
] | ce46da47a37dd683615f32d04a10eda069aa569a | https://github.com/kontron/python-ipmi/blob/ce46da47a37dd683615f32d04a10eda069aa569a/pyipmi/sensor.py#L114-L127 | train | 204,411 |
kontron/python-ipmi | pyipmi/sensor.py | Sensor.get_sensor_reading | def get_sensor_reading(self, sensor_number, lun=0):
"""Returns the sensor reading at the assertion states for the given
sensor number.
`sensor_number`
Returns a tuple with `raw reading`and `assertion states`.
"""
rsp = self.send_message_with_name('GetSensorReading',
sensor_number=sensor_number,
lun=lun)
reading = rsp.sensor_reading
if rsp.config.initial_update_in_progress:
reading = None
states = None
if rsp.states1 is not None:
states = rsp.states1
if rsp.states2 is not None:
states |= (rsp.states2 << 8)
return (reading, states) | python | def get_sensor_reading(self, sensor_number, lun=0):
"""Returns the sensor reading at the assertion states for the given
sensor number.
`sensor_number`
Returns a tuple with `raw reading`and `assertion states`.
"""
rsp = self.send_message_with_name('GetSensorReading',
sensor_number=sensor_number,
lun=lun)
reading = rsp.sensor_reading
if rsp.config.initial_update_in_progress:
reading = None
states = None
if rsp.states1 is not None:
states = rsp.states1
if rsp.states2 is not None:
states |= (rsp.states2 << 8)
return (reading, states) | [
"def",
"get_sensor_reading",
"(",
"self",
",",
"sensor_number",
",",
"lun",
"=",
"0",
")",
":",
"rsp",
"=",
"self",
".",
"send_message_with_name",
"(",
"'GetSensorReading'",
",",
"sensor_number",
"=",
"sensor_number",
",",
"lun",
"=",
"lun",
")",
"reading",
... | Returns the sensor reading at the assertion states for the given
sensor number.
`sensor_number`
Returns a tuple with `raw reading`and `assertion states`. | [
"Returns",
"the",
"sensor",
"reading",
"at",
"the",
"assertion",
"states",
"for",
"the",
"given",
"sensor",
"number",
"."
] | ce46da47a37dd683615f32d04a10eda069aa569a | https://github.com/kontron/python-ipmi/blob/ce46da47a37dd683615f32d04a10eda069aa569a/pyipmi/sensor.py#L154-L175 | train | 204,412 |
kontron/python-ipmi | pyipmi/sensor.py | Sensor.set_sensor_thresholds | def set_sensor_thresholds(self, sensor_number, lun=0,
unr=None, ucr=None, unc=None,
lnc=None, lcr=None, lnr=None):
"""Set the sensor thresholds that are not 'None'
`sensor_number`
`unr` for upper non-recoverable
`ucr` for upper critical
`unc` for upper non-critical
`lnc` for lower non-critical
`lcr` for lower critical
`lnr` for lower non-recoverable
"""
req = create_request_by_name('SetSensorThresholds')
req.sensor_number = sensor_number
req.lun = lun
thresholds = dict(unr=unr, ucr=ucr, unc=unc, lnc=lnc, lcr=lcr, lnr=lnr)
for key, value in thresholds.items():
if value is not None:
setattr(req.set_mask, key, 1)
setattr(req.threshold, key, value)
rsp = self.send_message(req)
check_completion_code(rsp.completion_code) | python | def set_sensor_thresholds(self, sensor_number, lun=0,
unr=None, ucr=None, unc=None,
lnc=None, lcr=None, lnr=None):
"""Set the sensor thresholds that are not 'None'
`sensor_number`
`unr` for upper non-recoverable
`ucr` for upper critical
`unc` for upper non-critical
`lnc` for lower non-critical
`lcr` for lower critical
`lnr` for lower non-recoverable
"""
req = create_request_by_name('SetSensorThresholds')
req.sensor_number = sensor_number
req.lun = lun
thresholds = dict(unr=unr, ucr=ucr, unc=unc, lnc=lnc, lcr=lcr, lnr=lnr)
for key, value in thresholds.items():
if value is not None:
setattr(req.set_mask, key, 1)
setattr(req.threshold, key, value)
rsp = self.send_message(req)
check_completion_code(rsp.completion_code) | [
"def",
"set_sensor_thresholds",
"(",
"self",
",",
"sensor_number",
",",
"lun",
"=",
"0",
",",
"unr",
"=",
"None",
",",
"ucr",
"=",
"None",
",",
"unc",
"=",
"None",
",",
"lnc",
"=",
"None",
",",
"lcr",
"=",
"None",
",",
"lnr",
"=",
"None",
")",
":... | Set the sensor thresholds that are not 'None'
`sensor_number`
`unr` for upper non-recoverable
`ucr` for upper critical
`unc` for upper non-critical
`lnc` for lower non-critical
`lcr` for lower critical
`lnr` for lower non-recoverable | [
"Set",
"the",
"sensor",
"thresholds",
"that",
"are",
"not",
"None"
] | ce46da47a37dd683615f32d04a10eda069aa569a | https://github.com/kontron/python-ipmi/blob/ce46da47a37dd683615f32d04a10eda069aa569a/pyipmi/sensor.py#L177-L202 | train | 204,413 |
brainiak/brainiak | brainiak/utils/fmrisim.py | _generate_feature | def _generate_feature(feature_type,
feature_size,
signal_magnitude,
thickness=1):
"""Generate features corresponding to signal
Generate a single feature, that can be inserted into the signal volume.
A feature is a region of activation with a specific shape such as cube
or ring
Parameters
----------
feature_type : str
What shape signal is being inserted? Options are 'cube',
'loop' (aka ring), 'cavity' (aka hollow sphere), 'sphere'.
feature_size : int
How big is the signal in diameter?
signal_magnitude : float
Set the signal size, a value of 1 means the signal is one standard
deviation of the noise
thickness : int
How thick is the surface of the loop/cavity
Returns
----------
signal : 3 dimensional array
The volume representing the signal
"""
# If the size is equal to or less than 2 then all features are the same
if feature_size <= 2:
feature_type = 'cube'
# What kind of signal is it?
if feature_type == 'cube':
# Preset the size of the signal
signal = np.ones((feature_size, feature_size, feature_size))
elif feature_type == 'loop':
# First make a cube of zeros
signal = np.zeros((feature_size, feature_size, feature_size))
# Make a mesh grid of the space
seq = np.linspace(0, feature_size - 1,
feature_size)
xx, yy = np.meshgrid(seq, seq)
# Make a disk corresponding to the whole mesh grid
xxmesh = (xx - ((feature_size - 1) / 2)) ** 2
yymesh = (yy - ((feature_size - 1) / 2)) ** 2
disk = xxmesh + yymesh
# What are the limits of the rings being made
outer_lim = disk[int((feature_size - 1) / 2), 0]
inner_lim = disk[int((feature_size - 1) / 2), thickness]
# What is the outer disk
outer = disk <= outer_lim
# What is the inner disk
inner = disk <= inner_lim
# Subtract the two disks to get a loop
loop = outer != inner
# Check if the loop is a disk
if np.all(inner is False):
logger.warning('Loop feature reduces to a disk because the loop '
'is too thick')
# If there is complete overlap then make the signal just the
# outer one
if np.all(loop is False):
loop = outer
# store the loop
signal[0:feature_size, 0:feature_size, int(np.round(feature_size /
2))] = loop
elif feature_type == 'sphere' or feature_type == 'cavity':
# Make a mesh grid of the space
seq = np.linspace(0, feature_size - 1,
feature_size)
xx, yy, zz = np.meshgrid(seq, seq, seq)
# Make a disk corresponding to the whole mesh grid
signal = ((xx - ((feature_size - 1) / 2)) ** 2 +
(yy - ((feature_size - 1) / 2)) ** 2 +
(zz - ((feature_size - 1) / 2)) ** 2)
# What are the limits of the rings being made
outer_lim = signal[int((feature_size - 1) / 2), int((feature_size -
1) / 2), 0]
inner_lim = signal[int((feature_size - 1) / 2), int((feature_size -
1) / 2),
thickness]
# Is the signal a sphere or a cavity?
if feature_type == 'sphere':
signal = signal <= outer_lim
else:
# Get the inner and outer sphere
outer = signal <= outer_lim
inner = signal <= inner_lim
# Subtract the two disks to get a loop
signal = outer != inner
# Check if the cavity is a sphere
if np.all(inner is False):
logger.warning('Cavity feature reduces to a sphere because '
'the cavity is too thick')
# If there is complete overlap then make the signal just the
# outer one
if np.all(signal is False):
signal = outer
# Assign the signal magnitude
signal = signal * signal_magnitude
# Return the signal
return signal | python | def _generate_feature(feature_type,
feature_size,
signal_magnitude,
thickness=1):
"""Generate features corresponding to signal
Generate a single feature, that can be inserted into the signal volume.
A feature is a region of activation with a specific shape such as cube
or ring
Parameters
----------
feature_type : str
What shape signal is being inserted? Options are 'cube',
'loop' (aka ring), 'cavity' (aka hollow sphere), 'sphere'.
feature_size : int
How big is the signal in diameter?
signal_magnitude : float
Set the signal size, a value of 1 means the signal is one standard
deviation of the noise
thickness : int
How thick is the surface of the loop/cavity
Returns
----------
signal : 3 dimensional array
The volume representing the signal
"""
# If the size is equal to or less than 2 then all features are the same
if feature_size <= 2:
feature_type = 'cube'
# What kind of signal is it?
if feature_type == 'cube':
# Preset the size of the signal
signal = np.ones((feature_size, feature_size, feature_size))
elif feature_type == 'loop':
# First make a cube of zeros
signal = np.zeros((feature_size, feature_size, feature_size))
# Make a mesh grid of the space
seq = np.linspace(0, feature_size - 1,
feature_size)
xx, yy = np.meshgrid(seq, seq)
# Make a disk corresponding to the whole mesh grid
xxmesh = (xx - ((feature_size - 1) / 2)) ** 2
yymesh = (yy - ((feature_size - 1) / 2)) ** 2
disk = xxmesh + yymesh
# What are the limits of the rings being made
outer_lim = disk[int((feature_size - 1) / 2), 0]
inner_lim = disk[int((feature_size - 1) / 2), thickness]
# What is the outer disk
outer = disk <= outer_lim
# What is the inner disk
inner = disk <= inner_lim
# Subtract the two disks to get a loop
loop = outer != inner
# Check if the loop is a disk
if np.all(inner is False):
logger.warning('Loop feature reduces to a disk because the loop '
'is too thick')
# If there is complete overlap then make the signal just the
# outer one
if np.all(loop is False):
loop = outer
# store the loop
signal[0:feature_size, 0:feature_size, int(np.round(feature_size /
2))] = loop
elif feature_type == 'sphere' or feature_type == 'cavity':
# Make a mesh grid of the space
seq = np.linspace(0, feature_size - 1,
feature_size)
xx, yy, zz = np.meshgrid(seq, seq, seq)
# Make a disk corresponding to the whole mesh grid
signal = ((xx - ((feature_size - 1) / 2)) ** 2 +
(yy - ((feature_size - 1) / 2)) ** 2 +
(zz - ((feature_size - 1) / 2)) ** 2)
# What are the limits of the rings being made
outer_lim = signal[int((feature_size - 1) / 2), int((feature_size -
1) / 2), 0]
inner_lim = signal[int((feature_size - 1) / 2), int((feature_size -
1) / 2),
thickness]
# Is the signal a sphere or a cavity?
if feature_type == 'sphere':
signal = signal <= outer_lim
else:
# Get the inner and outer sphere
outer = signal <= outer_lim
inner = signal <= inner_lim
# Subtract the two disks to get a loop
signal = outer != inner
# Check if the cavity is a sphere
if np.all(inner is False):
logger.warning('Cavity feature reduces to a sphere because '
'the cavity is too thick')
# If there is complete overlap then make the signal just the
# outer one
if np.all(signal is False):
signal = outer
# Assign the signal magnitude
signal = signal * signal_magnitude
# Return the signal
return signal | [
"def",
"_generate_feature",
"(",
"feature_type",
",",
"feature_size",
",",
"signal_magnitude",
",",
"thickness",
"=",
"1",
")",
":",
"# If the size is equal to or less than 2 then all features are the same",
"if",
"feature_size",
"<=",
"2",
":",
"feature_type",
"=",
"'cub... | Generate features corresponding to signal
Generate a single feature, that can be inserted into the signal volume.
A feature is a region of activation with a specific shape such as cube
or ring
Parameters
----------
feature_type : str
What shape signal is being inserted? Options are 'cube',
'loop' (aka ring), 'cavity' (aka hollow sphere), 'sphere'.
feature_size : int
How big is the signal in diameter?
signal_magnitude : float
Set the signal size, a value of 1 means the signal is one standard
deviation of the noise
thickness : int
How thick is the surface of the loop/cavity
Returns
----------
signal : 3 dimensional array
The volume representing the signal | [
"Generate",
"features",
"corresponding",
"to",
"signal"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/fmrisim.py#L110-L242 | train | 204,414 |
brainiak/brainiak | brainiak/utils/fmrisim.py | _insert_idxs | def _insert_idxs(feature_centre, feature_size, dimensions):
"""Returns the indices of where to put the signal into the signal volume
Parameters
----------
feature_centre : list, int
List of coordinates for the centre location of the signal
feature_size : list, int
How big is the signal's diameter.
dimensions : 3 length array, int
What are the dimensions of the volume you wish to create
Returns
----------
x_idxs : tuple
The x coordinates of where the signal is to be inserted
y_idxs : tuple
The y coordinates of where the signal is to be inserted
z_idxs : tuple
The z coordinates of where the signal is to be inserted
"""
# Set up the indexes within which to insert the signal
x_idx = [int(feature_centre[0] - (feature_size / 2)) + 1,
int(feature_centre[0] - (feature_size / 2) +
feature_size) + 1]
y_idx = [int(feature_centre[1] - (feature_size / 2)) + 1,
int(feature_centre[1] - (feature_size / 2) +
feature_size) + 1]
z_idx = [int(feature_centre[2] - (feature_size / 2)) + 1,
int(feature_centre[2] - (feature_size / 2) +
feature_size) + 1]
# Check for out of bounds
# Min Boundary
if 0 > x_idx[0]:
x_idx[0] = 0
if 0 > y_idx[0]:
y_idx[0] = 0
if 0 > z_idx[0]:
z_idx[0] = 0
# Max Boundary
if dimensions[0] < x_idx[1]:
x_idx[1] = dimensions[0]
if dimensions[1] < y_idx[1]:
y_idx[1] = dimensions[1]
if dimensions[2] < z_idx[1]:
z_idx[1] = dimensions[2]
# Return the idxs for data
return x_idx, y_idx, z_idx | python | def _insert_idxs(feature_centre, feature_size, dimensions):
"""Returns the indices of where to put the signal into the signal volume
Parameters
----------
feature_centre : list, int
List of coordinates for the centre location of the signal
feature_size : list, int
How big is the signal's diameter.
dimensions : 3 length array, int
What are the dimensions of the volume you wish to create
Returns
----------
x_idxs : tuple
The x coordinates of where the signal is to be inserted
y_idxs : tuple
The y coordinates of where the signal is to be inserted
z_idxs : tuple
The z coordinates of where the signal is to be inserted
"""
# Set up the indexes within which to insert the signal
x_idx = [int(feature_centre[0] - (feature_size / 2)) + 1,
int(feature_centre[0] - (feature_size / 2) +
feature_size) + 1]
y_idx = [int(feature_centre[1] - (feature_size / 2)) + 1,
int(feature_centre[1] - (feature_size / 2) +
feature_size) + 1]
z_idx = [int(feature_centre[2] - (feature_size / 2)) + 1,
int(feature_centre[2] - (feature_size / 2) +
feature_size) + 1]
# Check for out of bounds
# Min Boundary
if 0 > x_idx[0]:
x_idx[0] = 0
if 0 > y_idx[0]:
y_idx[0] = 0
if 0 > z_idx[0]:
z_idx[0] = 0
# Max Boundary
if dimensions[0] < x_idx[1]:
x_idx[1] = dimensions[0]
if dimensions[1] < y_idx[1]:
y_idx[1] = dimensions[1]
if dimensions[2] < z_idx[1]:
z_idx[1] = dimensions[2]
# Return the idxs for data
return x_idx, y_idx, z_idx | [
"def",
"_insert_idxs",
"(",
"feature_centre",
",",
"feature_size",
",",
"dimensions",
")",
":",
"# Set up the indexes within which to insert the signal",
"x_idx",
"=",
"[",
"int",
"(",
"feature_centre",
"[",
"0",
"]",
"-",
"(",
"feature_size",
"/",
"2",
")",
")",
... | Returns the indices of where to put the signal into the signal volume
Parameters
----------
feature_centre : list, int
List of coordinates for the centre location of the signal
feature_size : list, int
How big is the signal's diameter.
dimensions : 3 length array, int
What are the dimensions of the volume you wish to create
Returns
----------
x_idxs : tuple
The x coordinates of where the signal is to be inserted
y_idxs : tuple
The y coordinates of where the signal is to be inserted
z_idxs : tuple
The z coordinates of where the signal is to be inserted | [
"Returns",
"the",
"indices",
"of",
"where",
"to",
"put",
"the",
"signal",
"into",
"the",
"signal",
"volume"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/fmrisim.py#L245-L303 | train | 204,415 |
brainiak/brainiak | brainiak/utils/fmrisim.py | generate_signal | def generate_signal(dimensions,
feature_coordinates,
feature_size,
feature_type,
signal_magnitude=[1],
signal_constant=1,
):
"""Generate volume containing signal
Generate signal, of a specific shape in specific regions, for a single
volume. This will then be convolved with the HRF across time
Parameters
----------
dimensions : 1d array, ndarray
What are the dimensions of the volume you wish to create
feature_coordinates : multidimensional array
What are the feature_coordinates of the signal being created.
Be aware of clipping: features far from the centre of the
brain will be clipped. If you wish to have multiple features
then list these as a features x 3 array. To create a feature of
a unique shape then supply all the individual
feature_coordinates of the shape and set the feature_size to 1.
feature_size : list, int
How big is the signal. If feature_coordinates=1 then only one value is
accepted, if feature_coordinates>1 then either one value must be
supplied or m values
feature_type : list, string
What feature_type of signal is being inserted? Options are cube,
loop, cavity, sphere. If feature_coordinates = 1 then
only one value is accepted, if feature_coordinates > 1 then either
one value must be supplied or m values
signal_magnitude : list, float
What is the (average) magnitude of the signal being generated? A
value of 1 means that the signal is one standard deviation from the
noise
signal_constant : list, bool
Is the signal constant across the feature (for univariate activity)
or is it a random pattern of a given magnitude across the feature (for
multivariate activity)
Returns
----------
volume_signal : 3 dimensional array, float
Creates a single volume containing the signal
"""
# Preset the volume
volume_signal = np.zeros(dimensions)
feature_quantity = round(feature_coordinates.shape[0])
# If there is only one feature_size value then make sure to duplicate it
# for all signals
if len(feature_size) == 1:
feature_size = feature_size * feature_quantity
# Do the same for feature_type
if len(feature_type) == 1:
feature_type = feature_type * feature_quantity
if len(signal_magnitude) == 1:
signal_magnitude = signal_magnitude * feature_quantity
# Iterate through the signals and insert in the data
for signal_counter in range(feature_quantity):
# What is the centre of this signal
if len(feature_size) > 1:
feature_centre = np.asarray(feature_coordinates[signal_counter, ])
else:
feature_centre = np.asarray(feature_coordinates)[0]
# Generate the feature to be inserted in the volume
signal = _generate_feature(feature_type[signal_counter],
feature_size[signal_counter],
signal_magnitude[signal_counter],
)
# If the signal is a random noise pattern then multiply these ones by
# a noise mask
if signal_constant == 0:
signal = signal * np.random.random([feature_size[signal_counter],
feature_size[signal_counter],
feature_size[signal_counter]])
# Pull out the idxs for where to insert the data
x_idx, y_idx, z_idx = _insert_idxs(feature_centre,
feature_size[signal_counter],
dimensions)
# Insert the signal into the Volume
volume_signal[x_idx[0]:x_idx[1], y_idx[0]:y_idx[1], z_idx[0]:z_idx[
1]] = signal
return volume_signal | python | def generate_signal(dimensions,
feature_coordinates,
feature_size,
feature_type,
signal_magnitude=[1],
signal_constant=1,
):
"""Generate volume containing signal
Generate signal, of a specific shape in specific regions, for a single
volume. This will then be convolved with the HRF across time
Parameters
----------
dimensions : 1d array, ndarray
What are the dimensions of the volume you wish to create
feature_coordinates : multidimensional array
What are the feature_coordinates of the signal being created.
Be aware of clipping: features far from the centre of the
brain will be clipped. If you wish to have multiple features
then list these as a features x 3 array. To create a feature of
a unique shape then supply all the individual
feature_coordinates of the shape and set the feature_size to 1.
feature_size : list, int
How big is the signal. If feature_coordinates=1 then only one value is
accepted, if feature_coordinates>1 then either one value must be
supplied or m values
feature_type : list, string
What feature_type of signal is being inserted? Options are cube,
loop, cavity, sphere. If feature_coordinates = 1 then
only one value is accepted, if feature_coordinates > 1 then either
one value must be supplied or m values
signal_magnitude : list, float
What is the (average) magnitude of the signal being generated? A
value of 1 means that the signal is one standard deviation from the
noise
signal_constant : list, bool
Is the signal constant across the feature (for univariate activity)
or is it a random pattern of a given magnitude across the feature (for
multivariate activity)
Returns
----------
volume_signal : 3 dimensional array, float
Creates a single volume containing the signal
"""
# Preset the volume
volume_signal = np.zeros(dimensions)
feature_quantity = round(feature_coordinates.shape[0])
# If there is only one feature_size value then make sure to duplicate it
# for all signals
if len(feature_size) == 1:
feature_size = feature_size * feature_quantity
# Do the same for feature_type
if len(feature_type) == 1:
feature_type = feature_type * feature_quantity
if len(signal_magnitude) == 1:
signal_magnitude = signal_magnitude * feature_quantity
# Iterate through the signals and insert in the data
for signal_counter in range(feature_quantity):
# What is the centre of this signal
if len(feature_size) > 1:
feature_centre = np.asarray(feature_coordinates[signal_counter, ])
else:
feature_centre = np.asarray(feature_coordinates)[0]
# Generate the feature to be inserted in the volume
signal = _generate_feature(feature_type[signal_counter],
feature_size[signal_counter],
signal_magnitude[signal_counter],
)
# If the signal is a random noise pattern then multiply these ones by
# a noise mask
if signal_constant == 0:
signal = signal * np.random.random([feature_size[signal_counter],
feature_size[signal_counter],
feature_size[signal_counter]])
# Pull out the idxs for where to insert the data
x_idx, y_idx, z_idx = _insert_idxs(feature_centre,
feature_size[signal_counter],
dimensions)
# Insert the signal into the Volume
volume_signal[x_idx[0]:x_idx[1], y_idx[0]:y_idx[1], z_idx[0]:z_idx[
1]] = signal
return volume_signal | [
"def",
"generate_signal",
"(",
"dimensions",
",",
"feature_coordinates",
",",
"feature_size",
",",
"feature_type",
",",
"signal_magnitude",
"=",
"[",
"1",
"]",
",",
"signal_constant",
"=",
"1",
",",
")",
":",
"# Preset the volume",
"volume_signal",
"=",
"np",
".... | Generate volume containing signal
Generate signal, of a specific shape in specific regions, for a single
volume. This will then be convolved with the HRF across time
Parameters
----------
dimensions : 1d array, ndarray
What are the dimensions of the volume you wish to create
feature_coordinates : multidimensional array
What are the feature_coordinates of the signal being created.
Be aware of clipping: features far from the centre of the
brain will be clipped. If you wish to have multiple features
then list these as a features x 3 array. To create a feature of
a unique shape then supply all the individual
feature_coordinates of the shape and set the feature_size to 1.
feature_size : list, int
How big is the signal. If feature_coordinates=1 then only one value is
accepted, if feature_coordinates>1 then either one value must be
supplied or m values
feature_type : list, string
What feature_type of signal is being inserted? Options are cube,
loop, cavity, sphere. If feature_coordinates = 1 then
only one value is accepted, if feature_coordinates > 1 then either
one value must be supplied or m values
signal_magnitude : list, float
What is the (average) magnitude of the signal being generated? A
value of 1 means that the signal is one standard deviation from the
noise
signal_constant : list, bool
Is the signal constant across the feature (for univariate activity)
or is it a random pattern of a given magnitude across the feature (for
multivariate activity)
Returns
----------
volume_signal : 3 dimensional array, float
Creates a single volume containing the signal | [
"Generate",
"volume",
"containing",
"signal"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/fmrisim.py#L306-L408 | train | 204,416 |
brainiak/brainiak | brainiak/utils/fmrisim.py | generate_stimfunction | def generate_stimfunction(onsets,
event_durations,
total_time,
weights=[1],
timing_file=None,
temporal_resolution=100.0,
):
"""Return the function for the timecourse events
When do stimuli onset, how long for and to what extent should you
resolve the fMRI time course. There are two ways to create this, either
by supplying onset, duration and weight information or by supplying a
timing file (in the three column format used by FSL).
Parameters
----------
onsets : list, int
What are the timestamps (in s) for when an event you want to
generate onsets?
event_durations : list, int
What are the durations (in s) of the events you want to
generate? If there is only one value then this will be assigned
to all onsets
total_time : int
How long (in s) is the experiment in total.
weights : list, float
What is the weight for each event (how high is the box car)? If
there is only one value then this will be assigned to all onsets
timing_file : string
The filename (with path) to a three column timing file (FSL) to
make the events. Still requires total_time to work
temporal_resolution : float
How many elements per second are you modeling for the
timecourse. This is useful when you want to model the HRF at an
arbitrarily high resolution (and then downsample to your TR later).
Returns
----------
stim_function : 1 by timepoint array, float
The time course of stimulus evoked activation. This has a temporal
resolution of temporal resolution / 1.0 elements per second
"""
# If the timing file is supplied then use this to acquire the
if timing_file is not None:
# Read in text file line by line
with open(timing_file) as f:
text = f.readlines() # Pull out file as a an array
# Preset
onsets = list()
event_durations = list()
weights = list()
# Pull out the onsets, weights and durations, set as a float
for line in text:
onset, duration, weight = line.strip().split()
# Check if the onset is more precise than the temporal resolution
upsampled_onset = float(onset) * temporal_resolution
# Because of float precision, the upsampled values might
# not round as expected .
# E.g. float('1.001') * 1000 = 1000.99
if np.allclose(upsampled_onset, np.round(upsampled_onset)) == 0:
warning = 'Your onset: ' + str(onset) + ' has more decimal ' \
'points than the ' \
'specified temporal ' \
'resolution can ' \
'resolve. This means' \
' that events might' \
' be missed. ' \
'Consider increasing' \
' the temporal ' \
'resolution.'
logger.warning(warning)
onsets.append(float(onset))
event_durations.append(float(duration))
weights.append(float(weight))
# If only one duration is supplied then duplicate it for the length of
# the onset variable
if len(event_durations) == 1:
event_durations = event_durations * len(onsets)
if len(weights) == 1:
weights = weights * len(onsets)
# Check files
if np.max(onsets) > total_time:
raise ValueError('Onsets outside of range of total time.')
# Generate the time course as empty, each element is a millisecond by
# default
stimfunction = np.zeros((int(round(total_time * temporal_resolution)), 1))
# Cycle through the onsets
for onset_counter in list(range(len(onsets))):
# Adjust for the resolution
onset_idx = int(np.floor(onsets[onset_counter] * temporal_resolution))
# Adjust for the resolution
offset_idx = int(np.floor((onsets[onset_counter] + event_durations[
onset_counter]) * temporal_resolution))
# Store the weights
stimfunction[onset_idx:offset_idx, 0] = [weights[onset_counter]]
return stimfunction | python | def generate_stimfunction(onsets,
event_durations,
total_time,
weights=[1],
timing_file=None,
temporal_resolution=100.0,
):
"""Return the function for the timecourse events
When do stimuli onset, how long for and to what extent should you
resolve the fMRI time course. There are two ways to create this, either
by supplying onset, duration and weight information or by supplying a
timing file (in the three column format used by FSL).
Parameters
----------
onsets : list, int
What are the timestamps (in s) for when an event you want to
generate onsets?
event_durations : list, int
What are the durations (in s) of the events you want to
generate? If there is only one value then this will be assigned
to all onsets
total_time : int
How long (in s) is the experiment in total.
weights : list, float
What is the weight for each event (how high is the box car)? If
there is only one value then this will be assigned to all onsets
timing_file : string
The filename (with path) to a three column timing file (FSL) to
make the events. Still requires total_time to work
temporal_resolution : float
How many elements per second are you modeling for the
timecourse. This is useful when you want to model the HRF at an
arbitrarily high resolution (and then downsample to your TR later).
Returns
----------
stim_function : 1 by timepoint array, float
The time course of stimulus evoked activation. This has a temporal
resolution of temporal resolution / 1.0 elements per second
"""
# If the timing file is supplied then use this to acquire the
if timing_file is not None:
# Read in text file line by line
with open(timing_file) as f:
text = f.readlines() # Pull out file as a an array
# Preset
onsets = list()
event_durations = list()
weights = list()
# Pull out the onsets, weights and durations, set as a float
for line in text:
onset, duration, weight = line.strip().split()
# Check if the onset is more precise than the temporal resolution
upsampled_onset = float(onset) * temporal_resolution
# Because of float precision, the upsampled values might
# not round as expected .
# E.g. float('1.001') * 1000 = 1000.99
if np.allclose(upsampled_onset, np.round(upsampled_onset)) == 0:
warning = 'Your onset: ' + str(onset) + ' has more decimal ' \
'points than the ' \
'specified temporal ' \
'resolution can ' \
'resolve. This means' \
' that events might' \
' be missed. ' \
'Consider increasing' \
' the temporal ' \
'resolution.'
logger.warning(warning)
onsets.append(float(onset))
event_durations.append(float(duration))
weights.append(float(weight))
# If only one duration is supplied then duplicate it for the length of
# the onset variable
if len(event_durations) == 1:
event_durations = event_durations * len(onsets)
if len(weights) == 1:
weights = weights * len(onsets)
# Check files
if np.max(onsets) > total_time:
raise ValueError('Onsets outside of range of total time.')
# Generate the time course as empty, each element is a millisecond by
# default
stimfunction = np.zeros((int(round(total_time * temporal_resolution)), 1))
# Cycle through the onsets
for onset_counter in list(range(len(onsets))):
# Adjust for the resolution
onset_idx = int(np.floor(onsets[onset_counter] * temporal_resolution))
# Adjust for the resolution
offset_idx = int(np.floor((onsets[onset_counter] + event_durations[
onset_counter]) * temporal_resolution))
# Store the weights
stimfunction[onset_idx:offset_idx, 0] = [weights[onset_counter]]
return stimfunction | [
"def",
"generate_stimfunction",
"(",
"onsets",
",",
"event_durations",
",",
"total_time",
",",
"weights",
"=",
"[",
"1",
"]",
",",
"timing_file",
"=",
"None",
",",
"temporal_resolution",
"=",
"100.0",
",",
")",
":",
"# If the timing file is supplied then use this to... | Return the function for the timecourse events
When do stimuli onset, how long for and to what extent should you
resolve the fMRI time course. There are two ways to create this, either
by supplying onset, duration and weight information or by supplying a
timing file (in the three column format used by FSL).
Parameters
----------
onsets : list, int
What are the timestamps (in s) for when an event you want to
generate onsets?
event_durations : list, int
What are the durations (in s) of the events you want to
generate? If there is only one value then this will be assigned
to all onsets
total_time : int
How long (in s) is the experiment in total.
weights : list, float
What is the weight for each event (how high is the box car)? If
there is only one value then this will be assigned to all onsets
timing_file : string
The filename (with path) to a three column timing file (FSL) to
make the events. Still requires total_time to work
temporal_resolution : float
How many elements per second are you modeling for the
timecourse. This is useful when you want to model the HRF at an
arbitrarily high resolution (and then downsample to your TR later).
Returns
----------
stim_function : 1 by timepoint array, float
The time course of stimulus evoked activation. This has a temporal
resolution of temporal resolution / 1.0 elements per second | [
"Return",
"the",
"function",
"for",
"the",
"timecourse",
"events"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/fmrisim.py#L411-L529 | train | 204,417 |
brainiak/brainiak | brainiak/utils/fmrisim.py | export_3_column | def export_3_column(stimfunction,
filename,
temporal_resolution=100.0
):
""" Output a tab separated three column timing file
This produces a three column tab separated text file, with the three
columns representing onset time (s), event duration (s) and weight,
respectively. Useful if you want to run the simulated data through FEAT
analyses. In a way, this is the reverse of generate_stimfunction
Parameters
----------
stimfunction : timepoint by 1 array
The stimulus function describing the time course of events. For
instance output from generate_stimfunction.
filename : str
The name of the three column text file to be output
temporal_resolution : float
How many elements per second are you modeling with the
stimfunction?
"""
# Iterate through the stim function
stim_counter = 0
event_counter = 0
while stim_counter < stimfunction.shape[0]:
# Is it an event?
if stimfunction[stim_counter, 0] != 0:
# When did the event start?
event_onset = str(stim_counter / temporal_resolution)
# The weight of the stimulus
weight = str(stimfunction[stim_counter, 0])
# Reset
event_duration = 0
# Is the event still ongoing?
while stimfunction[stim_counter, 0] != 0 & stim_counter <= \
stimfunction.shape[0]:
# Add one millisecond to each duration
event_duration = event_duration + 1
# Increment
stim_counter = stim_counter + 1
# How long was the event in seconds
event_duration = str(event_duration / temporal_resolution)
# Append this row to the data file
with open(filename, "a") as file:
file.write(event_onset + '\t' + event_duration + '\t' +
weight + '\n')
# Increment the number of events
event_counter = event_counter + 1
# Increment
stim_counter = stim_counter + 1 | python | def export_3_column(stimfunction,
filename,
temporal_resolution=100.0
):
""" Output a tab separated three column timing file
This produces a three column tab separated text file, with the three
columns representing onset time (s), event duration (s) and weight,
respectively. Useful if you want to run the simulated data through FEAT
analyses. In a way, this is the reverse of generate_stimfunction
Parameters
----------
stimfunction : timepoint by 1 array
The stimulus function describing the time course of events. For
instance output from generate_stimfunction.
filename : str
The name of the three column text file to be output
temporal_resolution : float
How many elements per second are you modeling with the
stimfunction?
"""
# Iterate through the stim function
stim_counter = 0
event_counter = 0
while stim_counter < stimfunction.shape[0]:
# Is it an event?
if stimfunction[stim_counter, 0] != 0:
# When did the event start?
event_onset = str(stim_counter / temporal_resolution)
# The weight of the stimulus
weight = str(stimfunction[stim_counter, 0])
# Reset
event_duration = 0
# Is the event still ongoing?
while stimfunction[stim_counter, 0] != 0 & stim_counter <= \
stimfunction.shape[0]:
# Add one millisecond to each duration
event_duration = event_duration + 1
# Increment
stim_counter = stim_counter + 1
# How long was the event in seconds
event_duration = str(event_duration / temporal_resolution)
# Append this row to the data file
with open(filename, "a") as file:
file.write(event_onset + '\t' + event_duration + '\t' +
weight + '\n')
# Increment the number of events
event_counter = event_counter + 1
# Increment
stim_counter = stim_counter + 1 | [
"def",
"export_3_column",
"(",
"stimfunction",
",",
"filename",
",",
"temporal_resolution",
"=",
"100.0",
")",
":",
"# Iterate through the stim function",
"stim_counter",
"=",
"0",
"event_counter",
"=",
"0",
"while",
"stim_counter",
"<",
"stimfunction",
".",
"shape",
... | Output a tab separated three column timing file
This produces a three column tab separated text file, with the three
columns representing onset time (s), event duration (s) and weight,
respectively. Useful if you want to run the simulated data through FEAT
analyses. In a way, this is the reverse of generate_stimfunction
Parameters
----------
stimfunction : timepoint by 1 array
The stimulus function describing the time course of events. For
instance output from generate_stimfunction.
filename : str
The name of the three column text file to be output
temporal_resolution : float
How many elements per second are you modeling with the
stimfunction? | [
"Output",
"a",
"tab",
"separated",
"three",
"column",
"timing",
"file"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/fmrisim.py#L532-L598 | train | 204,418 |
brainiak/brainiak | brainiak/utils/fmrisim.py | export_epoch_file | def export_epoch_file(stimfunction,
filename,
tr_duration,
temporal_resolution=100.0
):
""" Output an epoch file, necessary for some inputs into brainiak
This takes in the time course of stimulus events and outputs the epoch
file used in Brainiak. The epoch file is a way to structure the timing
information in fMRI that allows you to flexibly input different stimulus
sequences. This is a list with each entry a 3d matrix corresponding to a
participant. The dimensions of the 3d matrix are condition by epoch by
time. For the i-th condition, if its k-th epoch spans time points t_m to
t_n-1, then [i, k, t_m:t_n] are 1 in the epoch file.
Parameters
----------
stimfunction : list of timepoint by condition arrays
The stimulus function describing the time course of events. Each
list entry is from a different participant, each row is a different
timepoint (with the given temporal precision), each column is a
different condition. export_epoch_file is looking for differences in
the value of stimfunction to identify the start and end of an
epoch. If epochs in stimfunction are coded with the same weight and
there is no time between blocks then export_epoch_file won't be able to
label them as different epochs
filename : str
The name of the epoch file to be output
tr_duration : float
How long is each TR in seconds
temporal_resolution : float
How many elements per second are you modeling with the
stimfunction?
"""
# Cycle through the participants, different entries in the list
epoch_file = [0] * len(stimfunction)
for ppt_counter in range(len(stimfunction)):
# What is the time course for the participant (binarized)
stimfunction_ppt = np.abs(stimfunction[ppt_counter]) > 0
# Down sample the stim function
stride = tr_duration * temporal_resolution
stimfunction_downsampled = stimfunction_ppt[::int(stride), :]
# Calculates the number of event onsets. This uses changes in value
# to reflect different epochs. This might be false in some cases (the
# weight is non-uniform over an epoch or there is no break between
# identically weighted epochs).
epochs = 0 # Preset
conditions = stimfunction_ppt.shape[1]
for condition_counter in range(conditions):
weight_change = (np.diff(stimfunction_downsampled[:,
condition_counter], 1, 0) != 0)
# If the first or last events are 'on' then make these
# represent a epoch change
if stimfunction_downsampled[0, condition_counter] == 1:
weight_change[0] = True
if stimfunction_downsampled[-1, condition_counter] == 1:
weight_change[-1] = True
epochs += int(np.max(np.sum(weight_change, 0)) / 2)
# Get other information
trs = stimfunction_downsampled.shape[0]
# Make a timing file for this participant
epoch_file[ppt_counter] = np.zeros((conditions, epochs, trs))
# Cycle through conditions
epoch_counter = 0 # Reset and count across conditions
tr_counter = 0
while tr_counter < stimfunction_downsampled.shape[0]:
for condition_counter in range(conditions):
# Is it an event?
if tr_counter < stimfunction_downsampled.shape[0] and \
stimfunction_downsampled[
tr_counter, condition_counter] == 1:
# Add a one for this TR
epoch_file[ppt_counter][condition_counter,
epoch_counter, tr_counter] = 1
# Find the next non event value
end_idx = np.where(stimfunction_downsampled[tr_counter:,
condition_counter] == 0)[
0][0]
tr_idxs = list(range(tr_counter, tr_counter + end_idx))
# Add ones to all the trs within this event time frame
epoch_file[ppt_counter][condition_counter,
epoch_counter, tr_idxs] = 1
# Start from this index
tr_counter += end_idx
# Increment
epoch_counter += 1
# Increment the counter
tr_counter += 1
# Convert to boolean
epoch_file[ppt_counter] = epoch_file[ppt_counter].astype('bool')
# Save the file
np.save(filename, epoch_file) | python | def export_epoch_file(stimfunction,
filename,
tr_duration,
temporal_resolution=100.0
):
""" Output an epoch file, necessary for some inputs into brainiak
This takes in the time course of stimulus events and outputs the epoch
file used in Brainiak. The epoch file is a way to structure the timing
information in fMRI that allows you to flexibly input different stimulus
sequences. This is a list with each entry a 3d matrix corresponding to a
participant. The dimensions of the 3d matrix are condition by epoch by
time. For the i-th condition, if its k-th epoch spans time points t_m to
t_n-1, then [i, k, t_m:t_n] are 1 in the epoch file.
Parameters
----------
stimfunction : list of timepoint by condition arrays
The stimulus function describing the time course of events. Each
list entry is from a different participant, each row is a different
timepoint (with the given temporal precision), each column is a
different condition. export_epoch_file is looking for differences in
the value of stimfunction to identify the start and end of an
epoch. If epochs in stimfunction are coded with the same weight and
there is no time between blocks then export_epoch_file won't be able to
label them as different epochs
filename : str
The name of the epoch file to be output
tr_duration : float
How long is each TR in seconds
temporal_resolution : float
How many elements per second are you modeling with the
stimfunction?
"""
# Cycle through the participants, different entries in the list
epoch_file = [0] * len(stimfunction)
for ppt_counter in range(len(stimfunction)):
# What is the time course for the participant (binarized)
stimfunction_ppt = np.abs(stimfunction[ppt_counter]) > 0
# Down sample the stim function
stride = tr_duration * temporal_resolution
stimfunction_downsampled = stimfunction_ppt[::int(stride), :]
# Calculates the number of event onsets. This uses changes in value
# to reflect different epochs. This might be false in some cases (the
# weight is non-uniform over an epoch or there is no break between
# identically weighted epochs).
epochs = 0 # Preset
conditions = stimfunction_ppt.shape[1]
for condition_counter in range(conditions):
weight_change = (np.diff(stimfunction_downsampled[:,
condition_counter], 1, 0) != 0)
# If the first or last events are 'on' then make these
# represent a epoch change
if stimfunction_downsampled[0, condition_counter] == 1:
weight_change[0] = True
if stimfunction_downsampled[-1, condition_counter] == 1:
weight_change[-1] = True
epochs += int(np.max(np.sum(weight_change, 0)) / 2)
# Get other information
trs = stimfunction_downsampled.shape[0]
# Make a timing file for this participant
epoch_file[ppt_counter] = np.zeros((conditions, epochs, trs))
# Cycle through conditions
epoch_counter = 0 # Reset and count across conditions
tr_counter = 0
while tr_counter < stimfunction_downsampled.shape[0]:
for condition_counter in range(conditions):
# Is it an event?
if tr_counter < stimfunction_downsampled.shape[0] and \
stimfunction_downsampled[
tr_counter, condition_counter] == 1:
# Add a one for this TR
epoch_file[ppt_counter][condition_counter,
epoch_counter, tr_counter] = 1
# Find the next non event value
end_idx = np.where(stimfunction_downsampled[tr_counter:,
condition_counter] == 0)[
0][0]
tr_idxs = list(range(tr_counter, tr_counter + end_idx))
# Add ones to all the trs within this event time frame
epoch_file[ppt_counter][condition_counter,
epoch_counter, tr_idxs] = 1
# Start from this index
tr_counter += end_idx
# Increment
epoch_counter += 1
# Increment the counter
tr_counter += 1
# Convert to boolean
epoch_file[ppt_counter] = epoch_file[ppt_counter].astype('bool')
# Save the file
np.save(filename, epoch_file) | [
"def",
"export_epoch_file",
"(",
"stimfunction",
",",
"filename",
",",
"tr_duration",
",",
"temporal_resolution",
"=",
"100.0",
")",
":",
"# Cycle through the participants, different entries in the list",
"epoch_file",
"=",
"[",
"0",
"]",
"*",
"len",
"(",
"stimfunction"... | Output an epoch file, necessary for some inputs into brainiak
This takes in the time course of stimulus events and outputs the epoch
file used in Brainiak. The epoch file is a way to structure the timing
information in fMRI that allows you to flexibly input different stimulus
sequences. This is a list with each entry a 3d matrix corresponding to a
participant. The dimensions of the 3d matrix are condition by epoch by
time. For the i-th condition, if its k-th epoch spans time points t_m to
t_n-1, then [i, k, t_m:t_n] are 1 in the epoch file.
Parameters
----------
stimfunction : list of timepoint by condition arrays
The stimulus function describing the time course of events. Each
list entry is from a different participant, each row is a different
timepoint (with the given temporal precision), each column is a
different condition. export_epoch_file is looking for differences in
the value of stimfunction to identify the start and end of an
epoch. If epochs in stimfunction are coded with the same weight and
there is no time between blocks then export_epoch_file won't be able to
label them as different epochs
filename : str
The name of the epoch file to be output
tr_duration : float
How long is each TR in seconds
temporal_resolution : float
How many elements per second are you modeling with the
stimfunction? | [
"Output",
"an",
"epoch",
"file",
"necessary",
"for",
"some",
"inputs",
"into",
"brainiak"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/fmrisim.py#L601-L716 | train | 204,419 |
brainiak/brainiak | brainiak/utils/fmrisim.py | _double_gamma_hrf | def _double_gamma_hrf(response_delay=6,
undershoot_delay=12,
response_dispersion=0.9,
undershoot_dispersion=0.9,
response_scale=1,
undershoot_scale=0.035,
temporal_resolution=100.0,
):
"""Create the double gamma HRF with the timecourse evoked activity.
Default values are based on Glover, 1999 and Walvaert, Durnez,
Moerkerke, Verdoolaege and Rosseel, 2011
Parameters
----------
response_delay : float
How many seconds until the peak of the HRF
undershoot_delay : float
How many seconds until the trough of the HRF
response_dispersion : float
How wide is the rising peak dispersion
undershoot_dispersion : float
How wide is the undershoot dispersion
response_scale : float
How big is the response relative to the peak
undershoot_scale :float
How big is the undershoot relative to the trough
scale_function : bool
Do you want to scale the function to a range of 1
temporal_resolution : float
How many elements per second are you modeling for the stimfunction
Returns
----------
hrf : multi dimensional array
A double gamma HRF to be used for convolution.
"""
hrf_length = 30 # How long is the HRF being created
# How many seconds of the HRF will you model?
hrf = [0] * int(hrf_length * temporal_resolution)
# When is the peak of the two aspects of the HRF
response_peak = response_delay * response_dispersion
undershoot_peak = undershoot_delay * undershoot_dispersion
for hrf_counter in list(range(len(hrf) - 1)):
# Specify the elements of the HRF for both the response and undershoot
resp_pow = math.pow((hrf_counter / temporal_resolution) /
response_peak, response_delay)
resp_exp = math.exp(-((hrf_counter / temporal_resolution) -
response_peak) /
response_dispersion)
response_model = response_scale * resp_pow * resp_exp
undershoot_pow = math.pow((hrf_counter / temporal_resolution) /
undershoot_peak,
undershoot_delay)
undershoot_exp = math.exp(-((hrf_counter / temporal_resolution) -
undershoot_peak /
undershoot_dispersion))
undershoot_model = undershoot_scale * undershoot_pow * undershoot_exp
# For this time point find the value of the HRF
hrf[hrf_counter] = response_model - undershoot_model
return hrf | python | def _double_gamma_hrf(response_delay=6,
undershoot_delay=12,
response_dispersion=0.9,
undershoot_dispersion=0.9,
response_scale=1,
undershoot_scale=0.035,
temporal_resolution=100.0,
):
"""Create the double gamma HRF with the timecourse evoked activity.
Default values are based on Glover, 1999 and Walvaert, Durnez,
Moerkerke, Verdoolaege and Rosseel, 2011
Parameters
----------
response_delay : float
How many seconds until the peak of the HRF
undershoot_delay : float
How many seconds until the trough of the HRF
response_dispersion : float
How wide is the rising peak dispersion
undershoot_dispersion : float
How wide is the undershoot dispersion
response_scale : float
How big is the response relative to the peak
undershoot_scale :float
How big is the undershoot relative to the trough
scale_function : bool
Do you want to scale the function to a range of 1
temporal_resolution : float
How many elements per second are you modeling for the stimfunction
Returns
----------
hrf : multi dimensional array
A double gamma HRF to be used for convolution.
"""
hrf_length = 30 # How long is the HRF being created
# How many seconds of the HRF will you model?
hrf = [0] * int(hrf_length * temporal_resolution)
# When is the peak of the two aspects of the HRF
response_peak = response_delay * response_dispersion
undershoot_peak = undershoot_delay * undershoot_dispersion
for hrf_counter in list(range(len(hrf) - 1)):
# Specify the elements of the HRF for both the response and undershoot
resp_pow = math.pow((hrf_counter / temporal_resolution) /
response_peak, response_delay)
resp_exp = math.exp(-((hrf_counter / temporal_resolution) -
response_peak) /
response_dispersion)
response_model = response_scale * resp_pow * resp_exp
undershoot_pow = math.pow((hrf_counter / temporal_resolution) /
undershoot_peak,
undershoot_delay)
undershoot_exp = math.exp(-((hrf_counter / temporal_resolution) -
undershoot_peak /
undershoot_dispersion))
undershoot_model = undershoot_scale * undershoot_pow * undershoot_exp
# For this time point find the value of the HRF
hrf[hrf_counter] = response_model - undershoot_model
return hrf | [
"def",
"_double_gamma_hrf",
"(",
"response_delay",
"=",
"6",
",",
"undershoot_delay",
"=",
"12",
",",
"response_dispersion",
"=",
"0.9",
",",
"undershoot_dispersion",
"=",
"0.9",
",",
"response_scale",
"=",
"1",
",",
"undershoot_scale",
"=",
"0.035",
",",
"tempo... | Create the double gamma HRF with the timecourse evoked activity.
Default values are based on Glover, 1999 and Walvaert, Durnez,
Moerkerke, Verdoolaege and Rosseel, 2011
Parameters
----------
response_delay : float
How many seconds until the peak of the HRF
undershoot_delay : float
How many seconds until the trough of the HRF
response_dispersion : float
How wide is the rising peak dispersion
undershoot_dispersion : float
How wide is the undershoot dispersion
response_scale : float
How big is the response relative to the peak
undershoot_scale :float
How big is the undershoot relative to the trough
scale_function : bool
Do you want to scale the function to a range of 1
temporal_resolution : float
How many elements per second are you modeling for the stimfunction
Returns
----------
hrf : multi dimensional array
A double gamma HRF to be used for convolution. | [
"Create",
"the",
"double",
"gamma",
"HRF",
"with",
"the",
"timecourse",
"evoked",
"activity",
".",
"Default",
"values",
"are",
"based",
"on",
"Glover",
"1999",
"and",
"Walvaert",
"Durnez",
"Moerkerke",
"Verdoolaege",
"and",
"Rosseel",
"2011"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/fmrisim.py#L719-L797 | train | 204,420 |
brainiak/brainiak | brainiak/utils/fmrisim.py | apply_signal | def apply_signal(signal_function,
volume_signal,
):
"""Combine the signal volume with its timecourse
Apply the convolution of the HRF and stimulus time course to the
volume.
Parameters
----------
signal_function : timepoint by timecourse array, float
The timecourse of the signal over time. If there is only one column
then the same timecourse is applied to all non-zero voxels in
volume_signal. If there is more than one column then each column is
paired with a non-zero voxel in the volume_signal (a 3d numpy array
generated in generate_signal).
volume_signal : multi dimensional array, float
The volume containing the signal to be convolved with the same
dimensions as the output volume. The elements in volume_signal
indicate how strong each signal in signal_function are modulated by
in the output volume
Returns
----------
signal : multidimensional array, float
The convolved signal volume with the same 3d as volume signal and
the same 4th dimension as signal_function
"""
# How many timecourses are there within the signal_function
timepoints = signal_function.shape[0]
timecourses = signal_function.shape[1]
# Preset volume
signal = np.zeros([volume_signal.shape[0], volume_signal.shape[
1], volume_signal.shape[2], timepoints])
# Find all the non-zero voxels in the brain
idxs = np.where(volume_signal != 0)
if timecourses == 1:
# If there is only one time course supplied then duplicate it for
# every voxel
signal_function = np.matlib.repmat(signal_function, 1, len(idxs[0]))
elif len(idxs[0]) != timecourses:
raise IndexError('The number of non-zero voxels in the volume and '
'the number of timecourses does not match. Aborting')
# For each coordinate with a non zero voxel, fill in the timecourse for
# that voxel
for idx_counter in range(len(idxs[0])):
x = idxs[0][idx_counter]
y = idxs[1][idx_counter]
z = idxs[2][idx_counter]
# Pull out the function for this voxel
signal_function_temp = signal_function[:, idx_counter]
# Multiply the voxel value by the function timecourse
signal[x, y, z, :] = volume_signal[x, y, z] * signal_function_temp
return signal | python | def apply_signal(signal_function,
volume_signal,
):
"""Combine the signal volume with its timecourse
Apply the convolution of the HRF and stimulus time course to the
volume.
Parameters
----------
signal_function : timepoint by timecourse array, float
The timecourse of the signal over time. If there is only one column
then the same timecourse is applied to all non-zero voxels in
volume_signal. If there is more than one column then each column is
paired with a non-zero voxel in the volume_signal (a 3d numpy array
generated in generate_signal).
volume_signal : multi dimensional array, float
The volume containing the signal to be convolved with the same
dimensions as the output volume. The elements in volume_signal
indicate how strong each signal in signal_function are modulated by
in the output volume
Returns
----------
signal : multidimensional array, float
The convolved signal volume with the same 3d as volume signal and
the same 4th dimension as signal_function
"""
# How many timecourses are there within the signal_function
timepoints = signal_function.shape[0]
timecourses = signal_function.shape[1]
# Preset volume
signal = np.zeros([volume_signal.shape[0], volume_signal.shape[
1], volume_signal.shape[2], timepoints])
# Find all the non-zero voxels in the brain
idxs = np.where(volume_signal != 0)
if timecourses == 1:
# If there is only one time course supplied then duplicate it for
# every voxel
signal_function = np.matlib.repmat(signal_function, 1, len(idxs[0]))
elif len(idxs[0]) != timecourses:
raise IndexError('The number of non-zero voxels in the volume and '
'the number of timecourses does not match. Aborting')
# For each coordinate with a non zero voxel, fill in the timecourse for
# that voxel
for idx_counter in range(len(idxs[0])):
x = idxs[0][idx_counter]
y = idxs[1][idx_counter]
z = idxs[2][idx_counter]
# Pull out the function for this voxel
signal_function_temp = signal_function[:, idx_counter]
# Multiply the voxel value by the function timecourse
signal[x, y, z, :] = volume_signal[x, y, z] * signal_function_temp
return signal | [
"def",
"apply_signal",
"(",
"signal_function",
",",
"volume_signal",
",",
")",
":",
"# How many timecourses are there within the signal_function",
"timepoints",
"=",
"signal_function",
".",
"shape",
"[",
"0",
"]",
"timecourses",
"=",
"signal_function",
".",
"shape",
"["... | Combine the signal volume with its timecourse
Apply the convolution of the HRF and stimulus time course to the
volume.
Parameters
----------
signal_function : timepoint by timecourse array, float
The timecourse of the signal over time. If there is only one column
then the same timecourse is applied to all non-zero voxels in
volume_signal. If there is more than one column then each column is
paired with a non-zero voxel in the volume_signal (a 3d numpy array
generated in generate_signal).
volume_signal : multi dimensional array, float
The volume containing the signal to be convolved with the same
dimensions as the output volume. The elements in volume_signal
indicate how strong each signal in signal_function are modulated by
in the output volume
Returns
----------
signal : multidimensional array, float
The convolved signal volume with the same 3d as volume signal and
the same 4th dimension as signal_function | [
"Combine",
"the",
"signal",
"volume",
"with",
"its",
"timecourse"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/fmrisim.py#L896-L961 | train | 204,421 |
brainiak/brainiak | brainiak/utils/fmrisim.py | _calc_sfnr | def _calc_sfnr(volume,
mask,
):
""" Calculate the the SFNR of a volume
Calculates the Signal to Fluctuation Noise Ratio, the mean divided
by the detrended standard deviation of each brain voxel. Based on
Friedman and Glover, 2006
Parameters
----------
volume : 4d array, float
Take a volume time series
mask : 3d array, binary
A binary mask the same size as the volume
Returns
-------
snr : float
The SFNR of the volume
"""
# Make a matrix of brain voxels by time
brain_voxels = volume[mask > 0]
# Take the means of each voxel over time
mean_voxels = np.nanmean(brain_voxels, 1)
# Detrend (second order polynomial) the voxels over time and then
# calculate the standard deviation.
order = 2
seq = np.linspace(1, brain_voxels.shape[1], brain_voxels.shape[1])
detrend_poly = np.polyfit(seq, brain_voxels.transpose(), order)
# Detrend for each voxel
detrend_voxels = np.zeros(brain_voxels.shape)
for voxel in range(brain_voxels.shape[0]):
trend = detrend_poly[0, voxel] * seq ** 2 + detrend_poly[1, voxel] * \
seq + detrend_poly[2, voxel]
detrend_voxels[voxel, :] = brain_voxels[voxel, :] - trend
std_voxels = np.nanstd(detrend_voxels, 1)
# Calculate the sfnr of all voxels across the brain
sfnr_voxels = mean_voxels / std_voxels
# Return the average sfnr
return np.mean(sfnr_voxels) | python | def _calc_sfnr(volume,
mask,
):
""" Calculate the the SFNR of a volume
Calculates the Signal to Fluctuation Noise Ratio, the mean divided
by the detrended standard deviation of each brain voxel. Based on
Friedman and Glover, 2006
Parameters
----------
volume : 4d array, float
Take a volume time series
mask : 3d array, binary
A binary mask the same size as the volume
Returns
-------
snr : float
The SFNR of the volume
"""
# Make a matrix of brain voxels by time
brain_voxels = volume[mask > 0]
# Take the means of each voxel over time
mean_voxels = np.nanmean(brain_voxels, 1)
# Detrend (second order polynomial) the voxels over time and then
# calculate the standard deviation.
order = 2
seq = np.linspace(1, brain_voxels.shape[1], brain_voxels.shape[1])
detrend_poly = np.polyfit(seq, brain_voxels.transpose(), order)
# Detrend for each voxel
detrend_voxels = np.zeros(brain_voxels.shape)
for voxel in range(brain_voxels.shape[0]):
trend = detrend_poly[0, voxel] * seq ** 2 + detrend_poly[1, voxel] * \
seq + detrend_poly[2, voxel]
detrend_voxels[voxel, :] = brain_voxels[voxel, :] - trend
std_voxels = np.nanstd(detrend_voxels, 1)
# Calculate the sfnr of all voxels across the brain
sfnr_voxels = mean_voxels / std_voxels
# Return the average sfnr
return np.mean(sfnr_voxels) | [
"def",
"_calc_sfnr",
"(",
"volume",
",",
"mask",
",",
")",
":",
"# Make a matrix of brain voxels by time",
"brain_voxels",
"=",
"volume",
"[",
"mask",
">",
"0",
"]",
"# Take the means of each voxel over time",
"mean_voxels",
"=",
"np",
".",
"nanmean",
"(",
"brain_vo... | Calculate the the SFNR of a volume
Calculates the Signal to Fluctuation Noise Ratio, the mean divided
by the detrended standard deviation of each brain voxel. Based on
Friedman and Glover, 2006
Parameters
----------
volume : 4d array, float
Take a volume time series
mask : 3d array, binary
A binary mask the same size as the volume
Returns
-------
snr : float
The SFNR of the volume | [
"Calculate",
"the",
"the",
"SFNR",
"of",
"a",
"volume",
"Calculates",
"the",
"Signal",
"to",
"Fluctuation",
"Noise",
"Ratio",
"the",
"mean",
"divided",
"by",
"the",
"detrended",
"standard",
"deviation",
"of",
"each",
"brain",
"voxel",
".",
"Based",
"on",
"Fr... | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/fmrisim.py#L1072-L1122 | train | 204,422 |
brainiak/brainiak | brainiak/utils/fmrisim.py | _calc_snr | def _calc_snr(volume,
mask,
dilation=5,
reference_tr=None,
):
""" Calculate the the SNR of a volume
Calculates the Signal to Noise Ratio, the mean of brain voxels
divided by the standard deviation across non-brain voxels. Specify a TR
value to calculate the mean and standard deviation for that TR. To
calculate the standard deviation of non-brain voxels we can subtract
any baseline structure away first, hence getting at deviations due to the
system noise and not something like high baseline values in non-brain
parts of the body.
Parameters
----------
volume : 4d array, float
Take a volume time series
mask : 3d array, binary
A binary mask the same size as the volume
dilation : int
How many binary dilations do you want to perform on the mask to
determine the non-brain voxels. If you increase this the SNR
increases and the non-brain voxels (after baseline subtraction) more
closely resemble a gaussian
reference_tr : int or list
Specifies the TR to calculate the SNR for. If multiple are supplied
then it will use the average of them.
Returns
-------
snr : float
The SNR of the volume
"""
# If no TR is specified then take all of them
if reference_tr is None:
reference_tr = list(range(volume.shape[3]))
# Dilate the mask in order to ensure that non-brain voxels are far from
# the brain
if dilation > 0:
mask_dilated = ndimage.morphology.binary_dilation(mask,
iterations=dilation)
else:
mask_dilated = mask
# Make a matrix of brain and non_brain voxels, selecting the timepoint/s
brain_voxels = volume[mask > 0][:, reference_tr]
nonbrain_voxels = (volume[:, :, :, reference_tr]).astype('float64')
# If you have multiple TRs
if len(brain_voxels.shape) > 1:
brain_voxels = np.mean(brain_voxels, 1)
nonbrain_voxels = np.mean(nonbrain_voxels, 3)
nonbrain_voxels = nonbrain_voxels[mask_dilated == 0]
# Take the means of each voxel over time
mean_voxels = np.nanmean(brain_voxels)
# Find the standard deviation of the voxels
std_voxels = np.nanstd(nonbrain_voxels)
# Return the snr
return mean_voxels / std_voxels | python | def _calc_snr(volume,
mask,
dilation=5,
reference_tr=None,
):
""" Calculate the the SNR of a volume
Calculates the Signal to Noise Ratio, the mean of brain voxels
divided by the standard deviation across non-brain voxels. Specify a TR
value to calculate the mean and standard deviation for that TR. To
calculate the standard deviation of non-brain voxels we can subtract
any baseline structure away first, hence getting at deviations due to the
system noise and not something like high baseline values in non-brain
parts of the body.
Parameters
----------
volume : 4d array, float
Take a volume time series
mask : 3d array, binary
A binary mask the same size as the volume
dilation : int
How many binary dilations do you want to perform on the mask to
determine the non-brain voxels. If you increase this the SNR
increases and the non-brain voxels (after baseline subtraction) more
closely resemble a gaussian
reference_tr : int or list
Specifies the TR to calculate the SNR for. If multiple are supplied
then it will use the average of them.
Returns
-------
snr : float
The SNR of the volume
"""
# If no TR is specified then take all of them
if reference_tr is None:
reference_tr = list(range(volume.shape[3]))
# Dilate the mask in order to ensure that non-brain voxels are far from
# the brain
if dilation > 0:
mask_dilated = ndimage.morphology.binary_dilation(mask,
iterations=dilation)
else:
mask_dilated = mask
# Make a matrix of brain and non_brain voxels, selecting the timepoint/s
brain_voxels = volume[mask > 0][:, reference_tr]
nonbrain_voxels = (volume[:, :, :, reference_tr]).astype('float64')
# If you have multiple TRs
if len(brain_voxels.shape) > 1:
brain_voxels = np.mean(brain_voxels, 1)
nonbrain_voxels = np.mean(nonbrain_voxels, 3)
nonbrain_voxels = nonbrain_voxels[mask_dilated == 0]
# Take the means of each voxel over time
mean_voxels = np.nanmean(brain_voxels)
# Find the standard deviation of the voxels
std_voxels = np.nanstd(nonbrain_voxels)
# Return the snr
return mean_voxels / std_voxels | [
"def",
"_calc_snr",
"(",
"volume",
",",
"mask",
",",
"dilation",
"=",
"5",
",",
"reference_tr",
"=",
"None",
",",
")",
":",
"# If no TR is specified then take all of them",
"if",
"reference_tr",
"is",
"None",
":",
"reference_tr",
"=",
"list",
"(",
"range",
"("... | Calculate the the SNR of a volume
Calculates the Signal to Noise Ratio, the mean of brain voxels
divided by the standard deviation across non-brain voxels. Specify a TR
value to calculate the mean and standard deviation for that TR. To
calculate the standard deviation of non-brain voxels we can subtract
any baseline structure away first, hence getting at deviations due to the
system noise and not something like high baseline values in non-brain
parts of the body.
Parameters
----------
volume : 4d array, float
Take a volume time series
mask : 3d array, binary
A binary mask the same size as the volume
dilation : int
How many binary dilations do you want to perform on the mask to
determine the non-brain voxels. If you increase this the SNR
increases and the non-brain voxels (after baseline subtraction) more
closely resemble a gaussian
reference_tr : int or list
Specifies the TR to calculate the SNR for. If multiple are supplied
then it will use the average of them.
Returns
-------
snr : float
The SNR of the volume | [
"Calculate",
"the",
"the",
"SNR",
"of",
"a",
"volume",
"Calculates",
"the",
"Signal",
"to",
"Noise",
"Ratio",
"the",
"mean",
"of",
"brain",
"voxels",
"divided",
"by",
"the",
"standard",
"deviation",
"across",
"non",
"-",
"brain",
"voxels",
".",
"Specify",
... | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/fmrisim.py#L1125-L1196 | train | 204,423 |
brainiak/brainiak | brainiak/utils/fmrisim.py | _calc_ARMA_noise | def _calc_ARMA_noise(volume,
mask,
auto_reg_order=1,
ma_order=1,
sample_num=100,
):
""" Calculate the the ARMA noise of a volume
This calculates the autoregressive and moving average noise of the volume
over time by sampling brain voxels and averaging them.
Parameters
----------
volume : 4d array or 1d array, float
Take a volume time series to extract the middle slice from the
middle TR. Can also accept a one dimensional time course (mask input
is then ignored).
mask : 3d array, binary
A binary mask the same size as the volume
auto_reg_order : int
What order of the autoregression do you want to estimate
sample_num : int
How many voxels would you like to sample to calculate the AR values.
The AR distribution of real data is approximately exponential maxing
at 1. From analyses across a number of participants, to get less
than 3% standard deviation of error from the true mean it is
necessary to sample at least 100 voxels.
Returns
-------
auto_reg_rho : list of floats
Rho of a specific order for the autoregression noise in the data
na_rho : list of floats
Moving average of a specific order for the data
"""
# Pull out the non masked voxels
if len(volume.shape) > 1:
brain_timecourse = volume[mask > 0]
else:
# If a 1 dimensional input is supplied then reshape it to make the
# timecourse
brain_timecourse = volume.reshape(1, len(volume))
# Identify some brain voxels to assess
voxel_idxs = list(range(brain_timecourse.shape[0]))
np.random.shuffle(voxel_idxs)
# If there are more samples than voxels, take all of the voxels
if len(voxel_idxs) < sample_num:
sample_num = len(voxel_idxs)
auto_reg_rho_all = np.zeros((sample_num, auto_reg_order))
ma_all = np.zeros((sample_num, ma_order))
for voxel_counter in range(sample_num):
# Get the timecourse and demean it
timecourse = brain_timecourse[voxel_idxs[voxel_counter], :]
demeaned_timecourse = timecourse - timecourse.mean()
# Pull out the ARMA values (depends on order)
try:
model = ARMA(demeaned_timecourse, [auto_reg_order, ma_order])
model_fit = model.fit(disp=False)
params = model_fit.params
except (ValueError, LinAlgError):
params = np.ones(auto_reg_order + ma_order + 1) * np.nan
# Add to the list
auto_reg_rho_all[voxel_counter, :] = params[1:auto_reg_order + 1]
ma_all[voxel_counter, :] = params[auto_reg_order + 1:]
# Average all of the values and then convert them to a list
auto_reg_rho = np.nanmean(auto_reg_rho_all, 0).tolist()
ma_rho = np.nanmean(ma_all, 0).tolist()
# Return the coefficients
return auto_reg_rho, ma_rho | python | def _calc_ARMA_noise(volume,
mask,
auto_reg_order=1,
ma_order=1,
sample_num=100,
):
""" Calculate the the ARMA noise of a volume
This calculates the autoregressive and moving average noise of the volume
over time by sampling brain voxels and averaging them.
Parameters
----------
volume : 4d array or 1d array, float
Take a volume time series to extract the middle slice from the
middle TR. Can also accept a one dimensional time course (mask input
is then ignored).
mask : 3d array, binary
A binary mask the same size as the volume
auto_reg_order : int
What order of the autoregression do you want to estimate
sample_num : int
How many voxels would you like to sample to calculate the AR values.
The AR distribution of real data is approximately exponential maxing
at 1. From analyses across a number of participants, to get less
than 3% standard deviation of error from the true mean it is
necessary to sample at least 100 voxels.
Returns
-------
auto_reg_rho : list of floats
Rho of a specific order for the autoregression noise in the data
na_rho : list of floats
Moving average of a specific order for the data
"""
# Pull out the non masked voxels
if len(volume.shape) > 1:
brain_timecourse = volume[mask > 0]
else:
# If a 1 dimensional input is supplied then reshape it to make the
# timecourse
brain_timecourse = volume.reshape(1, len(volume))
# Identify some brain voxels to assess
voxel_idxs = list(range(brain_timecourse.shape[0]))
np.random.shuffle(voxel_idxs)
# If there are more samples than voxels, take all of the voxels
if len(voxel_idxs) < sample_num:
sample_num = len(voxel_idxs)
auto_reg_rho_all = np.zeros((sample_num, auto_reg_order))
ma_all = np.zeros((sample_num, ma_order))
for voxel_counter in range(sample_num):
# Get the timecourse and demean it
timecourse = brain_timecourse[voxel_idxs[voxel_counter], :]
demeaned_timecourse = timecourse - timecourse.mean()
# Pull out the ARMA values (depends on order)
try:
model = ARMA(demeaned_timecourse, [auto_reg_order, ma_order])
model_fit = model.fit(disp=False)
params = model_fit.params
except (ValueError, LinAlgError):
params = np.ones(auto_reg_order + ma_order + 1) * np.nan
# Add to the list
auto_reg_rho_all[voxel_counter, :] = params[1:auto_reg_order + 1]
ma_all[voxel_counter, :] = params[auto_reg_order + 1:]
# Average all of the values and then convert them to a list
auto_reg_rho = np.nanmean(auto_reg_rho_all, 0).tolist()
ma_rho = np.nanmean(ma_all, 0).tolist()
# Return the coefficients
return auto_reg_rho, ma_rho | [
"def",
"_calc_ARMA_noise",
"(",
"volume",
",",
"mask",
",",
"auto_reg_order",
"=",
"1",
",",
"ma_order",
"=",
"1",
",",
"sample_num",
"=",
"100",
",",
")",
":",
"# Pull out the non masked voxels",
"if",
"len",
"(",
"volume",
".",
"shape",
")",
">",
"1",
... | Calculate the the ARMA noise of a volume
This calculates the autoregressive and moving average noise of the volume
over time by sampling brain voxels and averaging them.
Parameters
----------
volume : 4d array or 1d array, float
Take a volume time series to extract the middle slice from the
middle TR. Can also accept a one dimensional time course (mask input
is then ignored).
mask : 3d array, binary
A binary mask the same size as the volume
auto_reg_order : int
What order of the autoregression do you want to estimate
sample_num : int
How many voxels would you like to sample to calculate the AR values.
The AR distribution of real data is approximately exponential maxing
at 1. From analyses across a number of participants, to get less
than 3% standard deviation of error from the true mean it is
necessary to sample at least 100 voxels.
Returns
-------
auto_reg_rho : list of floats
Rho of a specific order for the autoregression noise in the data
na_rho : list of floats
Moving average of a specific order for the data | [
"Calculate",
"the",
"the",
"ARMA",
"noise",
"of",
"a",
"volume",
"This",
"calculates",
"the",
"autoregressive",
"and",
"moving",
"average",
"noise",
"of",
"the",
"volume",
"over",
"time",
"by",
"sampling",
"brain",
"voxels",
"and",
"averaging",
"them",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/fmrisim.py#L1199-L1282 | train | 204,424 |
brainiak/brainiak | brainiak/utils/fmrisim.py | calc_noise | def calc_noise(volume,
mask,
template,
noise_dict=None,
):
""" Calculates the noise properties of the volume supplied.
This estimates what noise properties the volume has. For instance it
determines the spatial smoothness, the autoregressive noise, system
noise etc. Read the doc string for generate_noise to understand how
these different types of noise interact.
Parameters
----------
volume : 4d numpy array, float
Take in a functional volume (either the file name or the numpy
array) to be used to estimate the noise properties of this
mask : 3d numpy array, binary
A binary mask of the brain, the same size as the volume
template : 3d array, float
A continuous (0 -> 1) volume describing the likelihood a voxel is in
the brain. This can be used to contrast the brain and non brain.
noise_dict : dict
The initialized dictionary of the calculated noise parameters of the
provided dataset (usually it is only the voxel size)
Returns
-------
noise_dict : dict
Return a dictionary of the calculated noise parameters of the provided
dataset
"""
# Check the inputs
if template.max() > 1.1:
raise ValueError('Template out of range')
# Create the mask if not supplied and set the mask size
if mask is None:
raise ValueError('Mask not supplied')
# Update noise dict if it is not yet created
if noise_dict is None:
noise_dict = {'voxel_size': [1.0, 1.0, 1.0]}
elif 'voxel_size' not in noise_dict:
noise_dict['voxel_size'] = [1.0, 1.0, 1.0]
# What is the max activation of the mean of this voxel (allows you to
# convert between the mask and the mean of the brain volume)
noise_dict['max_activity'] = np.nanmax(np.mean(volume, 3))
# Calculate the temporal variability of the volume
noise_dict['auto_reg_rho'], noise_dict['ma_rho'] = _calc_ARMA_noise(
volume, mask)
# Set it such that all of the temporal variability will be accounted for
# by the AR component
noise_dict['auto_reg_sigma'] = 1
# Preset these values to be zero, as in you are not attempting to
# simulate them
noise_dict['physiological_sigma'] = 0
noise_dict['task_sigma'] = 0
noise_dict['drift_sigma'] = 0
# Calculate the sfnr
noise_dict['sfnr'] = _calc_sfnr(volume,
mask,
)
# Calculate the fwhm on a subset of volumes
if volume.shape[3] > 100:
# Take only 100 shuffled TRs
trs = np.random.choice(volume.shape[3], size=100, replace=False)
else:
trs = list(range(0, volume.shape[3]))
# Go through the trs and pull out the fwhm
fwhm = [0] * len(trs)
for tr in range(len(trs)):
fwhm[tr] = _calc_fwhm(volume[:, :, :, trs[tr]],
mask,
noise_dict['voxel_size'],
)
# Keep only the mean
noise_dict['fwhm'] = np.mean(fwhm)
noise_dict['snr'] = _calc_snr(volume,
mask,
)
# Return the noise dictionary
return noise_dict | python | def calc_noise(volume,
mask,
template,
noise_dict=None,
):
""" Calculates the noise properties of the volume supplied.
This estimates what noise properties the volume has. For instance it
determines the spatial smoothness, the autoregressive noise, system
noise etc. Read the doc string for generate_noise to understand how
these different types of noise interact.
Parameters
----------
volume : 4d numpy array, float
Take in a functional volume (either the file name or the numpy
array) to be used to estimate the noise properties of this
mask : 3d numpy array, binary
A binary mask of the brain, the same size as the volume
template : 3d array, float
A continuous (0 -> 1) volume describing the likelihood a voxel is in
the brain. This can be used to contrast the brain and non brain.
noise_dict : dict
The initialized dictionary of the calculated noise parameters of the
provided dataset (usually it is only the voxel size)
Returns
-------
noise_dict : dict
Return a dictionary of the calculated noise parameters of the provided
dataset
"""
# Check the inputs
if template.max() > 1.1:
raise ValueError('Template out of range')
# Create the mask if not supplied and set the mask size
if mask is None:
raise ValueError('Mask not supplied')
# Update noise dict if it is not yet created
if noise_dict is None:
noise_dict = {'voxel_size': [1.0, 1.0, 1.0]}
elif 'voxel_size' not in noise_dict:
noise_dict['voxel_size'] = [1.0, 1.0, 1.0]
# What is the max activation of the mean of this voxel (allows you to
# convert between the mask and the mean of the brain volume)
noise_dict['max_activity'] = np.nanmax(np.mean(volume, 3))
# Calculate the temporal variability of the volume
noise_dict['auto_reg_rho'], noise_dict['ma_rho'] = _calc_ARMA_noise(
volume, mask)
# Set it such that all of the temporal variability will be accounted for
# by the AR component
noise_dict['auto_reg_sigma'] = 1
# Preset these values to be zero, as in you are not attempting to
# simulate them
noise_dict['physiological_sigma'] = 0
noise_dict['task_sigma'] = 0
noise_dict['drift_sigma'] = 0
# Calculate the sfnr
noise_dict['sfnr'] = _calc_sfnr(volume,
mask,
)
# Calculate the fwhm on a subset of volumes
if volume.shape[3] > 100:
# Take only 100 shuffled TRs
trs = np.random.choice(volume.shape[3], size=100, replace=False)
else:
trs = list(range(0, volume.shape[3]))
# Go through the trs and pull out the fwhm
fwhm = [0] * len(trs)
for tr in range(len(trs)):
fwhm[tr] = _calc_fwhm(volume[:, :, :, trs[tr]],
mask,
noise_dict['voxel_size'],
)
# Keep only the mean
noise_dict['fwhm'] = np.mean(fwhm)
noise_dict['snr'] = _calc_snr(volume,
mask,
)
# Return the noise dictionary
return noise_dict | [
"def",
"calc_noise",
"(",
"volume",
",",
"mask",
",",
"template",
",",
"noise_dict",
"=",
"None",
",",
")",
":",
"# Check the inputs",
"if",
"template",
".",
"max",
"(",
")",
">",
"1.1",
":",
"raise",
"ValueError",
"(",
"'Template out of range'",
")",
"# C... | Calculates the noise properties of the volume supplied.
This estimates what noise properties the volume has. For instance it
determines the spatial smoothness, the autoregressive noise, system
noise etc. Read the doc string for generate_noise to understand how
these different types of noise interact.
Parameters
----------
volume : 4d numpy array, float
Take in a functional volume (either the file name or the numpy
array) to be used to estimate the noise properties of this
mask : 3d numpy array, binary
A binary mask of the brain, the same size as the volume
template : 3d array, float
A continuous (0 -> 1) volume describing the likelihood a voxel is in
the brain. This can be used to contrast the brain and non brain.
noise_dict : dict
The initialized dictionary of the calculated noise parameters of the
provided dataset (usually it is only the voxel size)
Returns
-------
noise_dict : dict
Return a dictionary of the calculated noise parameters of the provided
dataset | [
"Calculates",
"the",
"noise",
"properties",
"of",
"the",
"volume",
"supplied",
".",
"This",
"estimates",
"what",
"noise",
"properties",
"the",
"volume",
"has",
".",
"For",
"instance",
"it",
"determines",
"the",
"spatial",
"smoothness",
"the",
"autoregressive",
"... | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/fmrisim.py#L1285-L1380 | train | 204,425 |
brainiak/brainiak | brainiak/utils/fmrisim.py | _generate_noise_system | def _generate_noise_system(dimensions_tr,
spatial_sd,
temporal_sd,
spatial_noise_type='gaussian',
temporal_noise_type='gaussian',
):
"""Generate the scanner noise
Generate system noise, either rician, gaussian or exponential, for the
scanner. Generates a distribution with a SD of 1. If you look at the
distribution of non-brain voxel intensity in modern scans you will see
it is rician. However, depending on how you have calculated the SNR and
whether the template is being used you will want to use this function
differently: the voxels outside the brain tend to be stable over time and
usually reflect structure in the MR signal (e.g. the
baseline MR of the head coil or skull). Hence the template captures this
rician noise structure. If you are adding the machine noise to the
template, as is done in generate_noise, then you are likely doubling up
on the addition of machine noise. In such cases, machine noise seems to
be better modelled by gaussian noise on top of this rician structure.
Parameters
----------
dimensions_tr : n length array, int
What are the dimensions of the volume you wish to insert
noise into. This can be a volume of any size
spatial_sd : float
What is the standard deviation in space of the noise volume to be
generated
temporal_sd : float
What is the standard deviation in time of the noise volume to be
generated
noise_type : str
String specifying the noise type. If you aren't specifying the noise
template then Rician is the appropriate model of noise. However,
if you are subtracting the template, as is default, then you should
use gaussian. (If the dilation parameter of _calc_snr is <10 then
gaussian is only an approximation)
Returns
----------
system_noise : multidimensional array, float
Create a volume with system noise
"""
def noise_volume(dimensions,
noise_type,
):
if noise_type == 'rician':
# Generate the Rician noise (has an SD of 1)
noise = stats.rice.rvs(b=0, loc=0, scale=1.527, size=dimensions)
elif noise_type == 'exponential':
# Make an exponential distribution (has an SD of 1)
noise = stats.expon.rvs(0, scale=1, size=dimensions)
elif noise_type == 'gaussian':
noise = np.random.randn(np.prod(dimensions)).reshape(dimensions)
# Return the noise
return noise
# Get just the xyz coordinates
dimensions = np.asarray([dimensions_tr[0],
dimensions_tr[1],
dimensions_tr[2],
1])
# Generate noise
spatial_noise = noise_volume(dimensions, spatial_noise_type)
temporal_noise = noise_volume(dimensions_tr, temporal_noise_type)
# Make the system noise have a specific spatial variability
spatial_noise *= spatial_sd
# Set the size of the noise
temporal_noise *= temporal_sd
# The mean in time of system noise needs to be zero, so subtract the
# means of the temporal noise in time
temporal_noise_mean = np.mean(temporal_noise, 3).reshape(dimensions[0],
dimensions[1],
dimensions[2],
1)
temporal_noise = temporal_noise - temporal_noise_mean
# Save the combination
system_noise = spatial_noise + temporal_noise
return system_noise | python | def _generate_noise_system(dimensions_tr,
spatial_sd,
temporal_sd,
spatial_noise_type='gaussian',
temporal_noise_type='gaussian',
):
"""Generate the scanner noise
Generate system noise, either rician, gaussian or exponential, for the
scanner. Generates a distribution with a SD of 1. If you look at the
distribution of non-brain voxel intensity in modern scans you will see
it is rician. However, depending on how you have calculated the SNR and
whether the template is being used you will want to use this function
differently: the voxels outside the brain tend to be stable over time and
usually reflect structure in the MR signal (e.g. the
baseline MR of the head coil or skull). Hence the template captures this
rician noise structure. If you are adding the machine noise to the
template, as is done in generate_noise, then you are likely doubling up
on the addition of machine noise. In such cases, machine noise seems to
be better modelled by gaussian noise on top of this rician structure.
Parameters
----------
dimensions_tr : n length array, int
What are the dimensions of the volume you wish to insert
noise into. This can be a volume of any size
spatial_sd : float
What is the standard deviation in space of the noise volume to be
generated
temporal_sd : float
What is the standard deviation in time of the noise volume to be
generated
noise_type : str
String specifying the noise type. If you aren't specifying the noise
template then Rician is the appropriate model of noise. However,
if you are subtracting the template, as is default, then you should
use gaussian. (If the dilation parameter of _calc_snr is <10 then
gaussian is only an approximation)
Returns
----------
system_noise : multidimensional array, float
Create a volume with system noise
"""
def noise_volume(dimensions,
noise_type,
):
if noise_type == 'rician':
# Generate the Rician noise (has an SD of 1)
noise = stats.rice.rvs(b=0, loc=0, scale=1.527, size=dimensions)
elif noise_type == 'exponential':
# Make an exponential distribution (has an SD of 1)
noise = stats.expon.rvs(0, scale=1, size=dimensions)
elif noise_type == 'gaussian':
noise = np.random.randn(np.prod(dimensions)).reshape(dimensions)
# Return the noise
return noise
# Get just the xyz coordinates
dimensions = np.asarray([dimensions_tr[0],
dimensions_tr[1],
dimensions_tr[2],
1])
# Generate noise
spatial_noise = noise_volume(dimensions, spatial_noise_type)
temporal_noise = noise_volume(dimensions_tr, temporal_noise_type)
# Make the system noise have a specific spatial variability
spatial_noise *= spatial_sd
# Set the size of the noise
temporal_noise *= temporal_sd
# The mean in time of system noise needs to be zero, so subtract the
# means of the temporal noise in time
temporal_noise_mean = np.mean(temporal_noise, 3).reshape(dimensions[0],
dimensions[1],
dimensions[2],
1)
temporal_noise = temporal_noise - temporal_noise_mean
# Save the combination
system_noise = spatial_noise + temporal_noise
return system_noise | [
"def",
"_generate_noise_system",
"(",
"dimensions_tr",
",",
"spatial_sd",
",",
"temporal_sd",
",",
"spatial_noise_type",
"=",
"'gaussian'",
",",
"temporal_noise_type",
"=",
"'gaussian'",
",",
")",
":",
"def",
"noise_volume",
"(",
"dimensions",
",",
"noise_type",
","... | Generate the scanner noise
Generate system noise, either rician, gaussian or exponential, for the
scanner. Generates a distribution with a SD of 1. If you look at the
distribution of non-brain voxel intensity in modern scans you will see
it is rician. However, depending on how you have calculated the SNR and
whether the template is being used you will want to use this function
differently: the voxels outside the brain tend to be stable over time and
usually reflect structure in the MR signal (e.g. the
baseline MR of the head coil or skull). Hence the template captures this
rician noise structure. If you are adding the machine noise to the
template, as is done in generate_noise, then you are likely doubling up
on the addition of machine noise. In such cases, machine noise seems to
be better modelled by gaussian noise on top of this rician structure.
Parameters
----------
dimensions_tr : n length array, int
What are the dimensions of the volume you wish to insert
noise into. This can be a volume of any size
spatial_sd : float
What is the standard deviation in space of the noise volume to be
generated
temporal_sd : float
What is the standard deviation in time of the noise volume to be
generated
noise_type : str
String specifying the noise type. If you aren't specifying the noise
template then Rician is the appropriate model of noise. However,
if you are subtracting the template, as is default, then you should
use gaussian. (If the dilation parameter of _calc_snr is <10 then
gaussian is only an approximation)
Returns
----------
system_noise : multidimensional array, float
Create a volume with system noise | [
"Generate",
"the",
"scanner",
"noise"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/fmrisim.py#L1383-L1476 | train | 204,426 |
brainiak/brainiak | brainiak/utils/fmrisim.py | _generate_noise_temporal_task | def _generate_noise_temporal_task(stimfunction_tr,
motion_noise='gaussian',
):
"""Generate the signal dependent noise
Create noise specific to the signal, for instance there is variability
in how the signal manifests on each event
Parameters
----------
stimfunction_tr : 1 Dimensional array
This is the timecourse of the stimuli in this experiment,
each element represents a TR
motion_noise : str
What type of noise will you generate? Can be gaussian or rician
Returns
----------
noise_task : one dimensional array, float
Generates the temporal task noise timecourse
"""
# Make the noise to be added
stimfunction_tr = stimfunction_tr != 0
if motion_noise == 'gaussian':
noise = stimfunction_tr * np.random.normal(0, 1,
size=stimfunction_tr.shape)
elif motion_noise == 'rician':
noise = stimfunction_tr * stats.rice.rvs(0, 1,
size=stimfunction_tr.shape)
noise_task = stimfunction_tr + noise
# Normalize
noise_task = stats.zscore(noise_task).flatten()
return noise_task | python | def _generate_noise_temporal_task(stimfunction_tr,
motion_noise='gaussian',
):
"""Generate the signal dependent noise
Create noise specific to the signal, for instance there is variability
in how the signal manifests on each event
Parameters
----------
stimfunction_tr : 1 Dimensional array
This is the timecourse of the stimuli in this experiment,
each element represents a TR
motion_noise : str
What type of noise will you generate? Can be gaussian or rician
Returns
----------
noise_task : one dimensional array, float
Generates the temporal task noise timecourse
"""
# Make the noise to be added
stimfunction_tr = stimfunction_tr != 0
if motion_noise == 'gaussian':
noise = stimfunction_tr * np.random.normal(0, 1,
size=stimfunction_tr.shape)
elif motion_noise == 'rician':
noise = stimfunction_tr * stats.rice.rvs(0, 1,
size=stimfunction_tr.shape)
noise_task = stimfunction_tr + noise
# Normalize
noise_task = stats.zscore(noise_task).flatten()
return noise_task | [
"def",
"_generate_noise_temporal_task",
"(",
"stimfunction_tr",
",",
"motion_noise",
"=",
"'gaussian'",
",",
")",
":",
"# Make the noise to be added",
"stimfunction_tr",
"=",
"stimfunction_tr",
"!=",
"0",
"if",
"motion_noise",
"==",
"'gaussian'",
":",
"noise",
"=",
"s... | Generate the signal dependent noise
Create noise specific to the signal, for instance there is variability
in how the signal manifests on each event
Parameters
----------
stimfunction_tr : 1 Dimensional array
This is the timecourse of the stimuli in this experiment,
each element represents a TR
motion_noise : str
What type of noise will you generate? Can be gaussian or rician
Returns
----------
noise_task : one dimensional array, float
Generates the temporal task noise timecourse | [
"Generate",
"the",
"signal",
"dependent",
"noise"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/fmrisim.py#L1479-L1519 | train | 204,427 |
brainiak/brainiak | brainiak/utils/fmrisim.py | _generate_noise_temporal_drift | def _generate_noise_temporal_drift(trs,
tr_duration,
basis="discrete_cos",
period=150,
):
"""Generate the drift noise
Create a trend (either sine or discrete_cos), of a given period and random
phase, to represent the drift of the signal over time
Parameters
----------
trs : int
How many volumes (aka TRs) are there
tr_duration : float
How long in seconds is each volume acqusition
basis : str
What is the basis function for the drift. Could be made of discrete
cosines (for longer run durations, more basis functions are
created) or a sine wave.
period : int
How many seconds is the period of oscillation of the drift
Returns
----------
noise_drift : one dimensional array, float
The drift timecourse of activity
"""
# Calculate drift differently depending on the basis function
if basis == 'discrete_cos':
# Specify each tr in terms of its phase with the given period
timepoints = np.linspace(0, trs - 1, trs)
timepoints = ((timepoints * tr_duration) / period) * 2 * np.pi
# Specify the other timing information
duration = trs * tr_duration
basis_funcs = int(np.floor(duration / period)) # How bases do you have
if basis_funcs == 0:
err_msg = 'Too few timepoints (' + str(trs) + ') to accurately ' \
'model drift'
logger.warning(err_msg)
basis_funcs = 1
noise_drift = np.zeros((timepoints.shape[0], basis_funcs))
for basis_counter in list(range(1, basis_funcs + 1)):
# What steps do you want to take for this basis function
timepoints_basis = (timepoints/basis_counter) + (np.random.rand()
* np.pi * 2)
# Store the drift from this basis func
noise_drift[:, basis_counter - 1] = np.cos(timepoints_basis)
# Average the drift
noise_drift = np.mean(noise_drift, 1)
elif basis == 'sine':
# Calculate the cycles of the drift for a given function.
cycles = trs * tr_duration / period
# Create a sine wave with a given number of cycles and random phase
timepoints = np.linspace(0, trs - 1, trs)
phaseshift = np.pi * 2 * np.random.random()
phase = (timepoints / (trs - 1) * cycles * 2 * np.pi) + phaseshift
noise_drift = np.sin(phase)
# Normalize so the sigma is 1
noise_drift = stats.zscore(noise_drift)
# Return noise
return noise_drift | python | def _generate_noise_temporal_drift(trs,
tr_duration,
basis="discrete_cos",
period=150,
):
"""Generate the drift noise
Create a trend (either sine or discrete_cos), of a given period and random
phase, to represent the drift of the signal over time
Parameters
----------
trs : int
How many volumes (aka TRs) are there
tr_duration : float
How long in seconds is each volume acqusition
basis : str
What is the basis function for the drift. Could be made of discrete
cosines (for longer run durations, more basis functions are
created) or a sine wave.
period : int
How many seconds is the period of oscillation of the drift
Returns
----------
noise_drift : one dimensional array, float
The drift timecourse of activity
"""
# Calculate drift differently depending on the basis function
if basis == 'discrete_cos':
# Specify each tr in terms of its phase with the given period
timepoints = np.linspace(0, trs - 1, trs)
timepoints = ((timepoints * tr_duration) / period) * 2 * np.pi
# Specify the other timing information
duration = trs * tr_duration
basis_funcs = int(np.floor(duration / period)) # How bases do you have
if basis_funcs == 0:
err_msg = 'Too few timepoints (' + str(trs) + ') to accurately ' \
'model drift'
logger.warning(err_msg)
basis_funcs = 1
noise_drift = np.zeros((timepoints.shape[0], basis_funcs))
for basis_counter in list(range(1, basis_funcs + 1)):
# What steps do you want to take for this basis function
timepoints_basis = (timepoints/basis_counter) + (np.random.rand()
* np.pi * 2)
# Store the drift from this basis func
noise_drift[:, basis_counter - 1] = np.cos(timepoints_basis)
# Average the drift
noise_drift = np.mean(noise_drift, 1)
elif basis == 'sine':
# Calculate the cycles of the drift for a given function.
cycles = trs * tr_duration / period
# Create a sine wave with a given number of cycles and random phase
timepoints = np.linspace(0, trs - 1, trs)
phaseshift = np.pi * 2 * np.random.random()
phase = (timepoints / (trs - 1) * cycles * 2 * np.pi) + phaseshift
noise_drift = np.sin(phase)
# Normalize so the sigma is 1
noise_drift = stats.zscore(noise_drift)
# Return noise
return noise_drift | [
"def",
"_generate_noise_temporal_drift",
"(",
"trs",
",",
"tr_duration",
",",
"basis",
"=",
"\"discrete_cos\"",
",",
"period",
"=",
"150",
",",
")",
":",
"# Calculate drift differently depending on the basis function",
"if",
"basis",
"==",
"'discrete_cos'",
":",
"# Spec... | Generate the drift noise
Create a trend (either sine or discrete_cos), of a given period and random
phase, to represent the drift of the signal over time
Parameters
----------
trs : int
How many volumes (aka TRs) are there
tr_duration : float
How long in seconds is each volume acqusition
basis : str
What is the basis function for the drift. Could be made of discrete
cosines (for longer run durations, more basis functions are
created) or a sine wave.
period : int
How many seconds is the period of oscillation of the drift
Returns
----------
noise_drift : one dimensional array, float
The drift timecourse of activity | [
"Generate",
"the",
"drift",
"noise"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/fmrisim.py#L1522-L1602 | train | 204,428 |
brainiak/brainiak | brainiak/utils/fmrisim.py | _generate_noise_temporal_autoregression | def _generate_noise_temporal_autoregression(timepoints,
noise_dict,
dimensions,
mask,
):
"""Generate the autoregression noise
Make a slowly drifting timecourse with the given autoregression
parameters. This can take in both AR and MA components
Parameters
----------
timepoints : 1 Dimensional array
What time points are sampled by a TR
noise_dict : dict
A dictionary specifying the types of noise in this experiment. The
noise types interact in important ways. First, all noise types
ending with sigma (e.g. motion sigma) are mixed together in
_generate_temporal_noise. The sigma values describe the proportion of
mixing of these elements. However critically, SFNR is the
parameter that describes how much noise these components contribute
to the brain. If you set the noise dict to matched then it will fit
the parameters to match the participant as best as possible.
dimensions : 3 length array, int
What is the shape of the volume to be generated
mask : 3 dimensional array, binary
The masked brain, thresholded to distinguish brain and non-brain
Returns
----------
noise_autoregression : one dimensional array, float
Generates the autoregression noise timecourse
"""
# Pull out the relevant noise parameters
auto_reg_rho = noise_dict['auto_reg_rho']
ma_rho = noise_dict['ma_rho']
# Specify the order based on the number of rho supplied
auto_reg_order = len(auto_reg_rho)
ma_order = len(ma_rho)
# This code assumes that the AR order is higher than the MA order
if ma_order > auto_reg_order:
msg = 'MA order (%d) is greater than AR order (%d). Cannot run.' % (
ma_order, auto_reg_order)
raise ValueError(msg)
# Generate a random variable at each time point that is a decayed value
# of the previous time points
noise_autoregression = np.zeros((dimensions[0], dimensions[1],
dimensions[2], len(timepoints)))
err_vols = np.zeros((dimensions[0], dimensions[1], dimensions[2],
len(timepoints)))
for tr_counter in range(len(timepoints)):
# Create a brain shaped volume with appropriate smoothing properties
noise = _generate_noise_spatial(dimensions=dimensions,
mask=mask,
fwhm=noise_dict['fwhm'],
)
# Store all of the noise volumes
err_vols[:, :, :, tr_counter] = noise
if tr_counter == 0:
noise_autoregression[:, :, :, tr_counter] = noise
else:
# Preset the volume to collect the AR estimated process
AR_vol = np.zeros((dimensions[0], dimensions[1], dimensions[2]))
# Iterate through both the AR and MA values
for pCounter in list(range(1, auto_reg_order + 1)):
past_TR = int(tr_counter - pCounter)
if tr_counter - pCounter >= 0:
# Pull out a previous TR
past_vols = noise_autoregression[:, :, :, past_TR]
# Add the discounted previous volume
AR_vol += past_vols * auto_reg_rho[pCounter - 1]
# If the MA order has at least this many coefficients
# then consider the error terms
if ma_order >= pCounter:
# Pull out a previous TR
past_noise = err_vols[:, :, :, past_TR]
# Add the discounted previous noise
AR_vol += past_noise * ma_rho[pCounter - 1]
noise_autoregression[:, :, :, tr_counter] = AR_vol + noise
# Z score the data so that all of the standard deviations of the voxels
# are one (but the ARMA coefs are unchanged)
noise_autoregression = stats.zscore(noise_autoregression, 3)
return noise_autoregression | python | def _generate_noise_temporal_autoregression(timepoints,
noise_dict,
dimensions,
mask,
):
"""Generate the autoregression noise
Make a slowly drifting timecourse with the given autoregression
parameters. This can take in both AR and MA components
Parameters
----------
timepoints : 1 Dimensional array
What time points are sampled by a TR
noise_dict : dict
A dictionary specifying the types of noise in this experiment. The
noise types interact in important ways. First, all noise types
ending with sigma (e.g. motion sigma) are mixed together in
_generate_temporal_noise. The sigma values describe the proportion of
mixing of these elements. However critically, SFNR is the
parameter that describes how much noise these components contribute
to the brain. If you set the noise dict to matched then it will fit
the parameters to match the participant as best as possible.
dimensions : 3 length array, int
What is the shape of the volume to be generated
mask : 3 dimensional array, binary
The masked brain, thresholded to distinguish brain and non-brain
Returns
----------
noise_autoregression : one dimensional array, float
Generates the autoregression noise timecourse
"""
# Pull out the relevant noise parameters
auto_reg_rho = noise_dict['auto_reg_rho']
ma_rho = noise_dict['ma_rho']
# Specify the order based on the number of rho supplied
auto_reg_order = len(auto_reg_rho)
ma_order = len(ma_rho)
# This code assumes that the AR order is higher than the MA order
if ma_order > auto_reg_order:
msg = 'MA order (%d) is greater than AR order (%d). Cannot run.' % (
ma_order, auto_reg_order)
raise ValueError(msg)
# Generate a random variable at each time point that is a decayed value
# of the previous time points
noise_autoregression = np.zeros((dimensions[0], dimensions[1],
dimensions[2], len(timepoints)))
err_vols = np.zeros((dimensions[0], dimensions[1], dimensions[2],
len(timepoints)))
for tr_counter in range(len(timepoints)):
# Create a brain shaped volume with appropriate smoothing properties
noise = _generate_noise_spatial(dimensions=dimensions,
mask=mask,
fwhm=noise_dict['fwhm'],
)
# Store all of the noise volumes
err_vols[:, :, :, tr_counter] = noise
if tr_counter == 0:
noise_autoregression[:, :, :, tr_counter] = noise
else:
# Preset the volume to collect the AR estimated process
AR_vol = np.zeros((dimensions[0], dimensions[1], dimensions[2]))
# Iterate through both the AR and MA values
for pCounter in list(range(1, auto_reg_order + 1)):
past_TR = int(tr_counter - pCounter)
if tr_counter - pCounter >= 0:
# Pull out a previous TR
past_vols = noise_autoregression[:, :, :, past_TR]
# Add the discounted previous volume
AR_vol += past_vols * auto_reg_rho[pCounter - 1]
# If the MA order has at least this many coefficients
# then consider the error terms
if ma_order >= pCounter:
# Pull out a previous TR
past_noise = err_vols[:, :, :, past_TR]
# Add the discounted previous noise
AR_vol += past_noise * ma_rho[pCounter - 1]
noise_autoregression[:, :, :, tr_counter] = AR_vol + noise
# Z score the data so that all of the standard deviations of the voxels
# are one (but the ARMA coefs are unchanged)
noise_autoregression = stats.zscore(noise_autoregression, 3)
return noise_autoregression | [
"def",
"_generate_noise_temporal_autoregression",
"(",
"timepoints",
",",
"noise_dict",
",",
"dimensions",
",",
"mask",
",",
")",
":",
"# Pull out the relevant noise parameters",
"auto_reg_rho",
"=",
"noise_dict",
"[",
"'auto_reg_rho'",
"]",
"ma_rho",
"=",
"noise_dict",
... | Generate the autoregression noise
Make a slowly drifting timecourse with the given autoregression
parameters. This can take in both AR and MA components
Parameters
----------
timepoints : 1 Dimensional array
What time points are sampled by a TR
noise_dict : dict
A dictionary specifying the types of noise in this experiment. The
noise types interact in important ways. First, all noise types
ending with sigma (e.g. motion sigma) are mixed together in
_generate_temporal_noise. The sigma values describe the proportion of
mixing of these elements. However critically, SFNR is the
parameter that describes how much noise these components contribute
to the brain. If you set the noise dict to matched then it will fit
the parameters to match the participant as best as possible.
dimensions : 3 length array, int
What is the shape of the volume to be generated
mask : 3 dimensional array, binary
The masked brain, thresholded to distinguish brain and non-brain
Returns
----------
noise_autoregression : one dimensional array, float
Generates the autoregression noise timecourse | [
"Generate",
"the",
"autoregression",
"noise",
"Make",
"a",
"slowly",
"drifting",
"timecourse",
"with",
"the",
"given",
"autoregression",
"parameters",
".",
"This",
"can",
"take",
"in",
"both",
"AR",
"and",
"MA",
"components"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/fmrisim.py#L1605-L1711 | train | 204,429 |
brainiak/brainiak | brainiak/utils/fmrisim.py | _generate_noise_temporal_phys | def _generate_noise_temporal_phys(timepoints,
resp_freq=0.2,
heart_freq=1.17,
):
"""Generate the physiological noise.
Create noise representing the heart rate and respiration of the data.
Default values based on Walvaert, Durnez, Moerkerke, Verdoolaege and
Rosseel, 2011
Parameters
----------
timepoints : 1 Dimensional array
What time points, in seconds, are sampled by a TR
resp_freq : float
What is the frequency of respiration (in Hz)
heart_freq : float
What is the frequency of heart beat (in Hz)
Returns
----------
noise_phys : one dimensional array, float
Generates the physiological temporal noise timecourse
"""
resp_phase = (np.random.rand(1) * 2 * np.pi)[0]
heart_phase = (np.random.rand(1) * 2 * np.pi)[0]
# Find the rate for each timepoint
resp_rate = (resp_freq * 2 * np.pi)
heart_rate = (heart_freq * 2 * np.pi)
# Calculate the radians for each variable at this
# given TR
resp_radians = np.multiply(timepoints, resp_rate) + resp_phase
heart_radians = np.multiply(timepoints, heart_rate) + heart_phase
# Combine the two types of noise and append
noise_phys = np.cos(resp_radians) + np.sin(heart_radians)
# Normalize
noise_phys = stats.zscore(noise_phys)
return noise_phys | python | def _generate_noise_temporal_phys(timepoints,
resp_freq=0.2,
heart_freq=1.17,
):
"""Generate the physiological noise.
Create noise representing the heart rate and respiration of the data.
Default values based on Walvaert, Durnez, Moerkerke, Verdoolaege and
Rosseel, 2011
Parameters
----------
timepoints : 1 Dimensional array
What time points, in seconds, are sampled by a TR
resp_freq : float
What is the frequency of respiration (in Hz)
heart_freq : float
What is the frequency of heart beat (in Hz)
Returns
----------
noise_phys : one dimensional array, float
Generates the physiological temporal noise timecourse
"""
resp_phase = (np.random.rand(1) * 2 * np.pi)[0]
heart_phase = (np.random.rand(1) * 2 * np.pi)[0]
# Find the rate for each timepoint
resp_rate = (resp_freq * 2 * np.pi)
heart_rate = (heart_freq * 2 * np.pi)
# Calculate the radians for each variable at this
# given TR
resp_radians = np.multiply(timepoints, resp_rate) + resp_phase
heart_radians = np.multiply(timepoints, heart_rate) + heart_phase
# Combine the two types of noise and append
noise_phys = np.cos(resp_radians) + np.sin(heart_radians)
# Normalize
noise_phys = stats.zscore(noise_phys)
return noise_phys | [
"def",
"_generate_noise_temporal_phys",
"(",
"timepoints",
",",
"resp_freq",
"=",
"0.2",
",",
"heart_freq",
"=",
"1.17",
",",
")",
":",
"resp_phase",
"=",
"(",
"np",
".",
"random",
".",
"rand",
"(",
"1",
")",
"*",
"2",
"*",
"np",
".",
"pi",
")",
"[",... | Generate the physiological noise.
Create noise representing the heart rate and respiration of the data.
Default values based on Walvaert, Durnez, Moerkerke, Verdoolaege and
Rosseel, 2011
Parameters
----------
timepoints : 1 Dimensional array
What time points, in seconds, are sampled by a TR
resp_freq : float
What is the frequency of respiration (in Hz)
heart_freq : float
What is the frequency of heart beat (in Hz)
Returns
----------
noise_phys : one dimensional array, float
Generates the physiological temporal noise timecourse | [
"Generate",
"the",
"physiological",
"noise",
".",
"Create",
"noise",
"representing",
"the",
"heart",
"rate",
"and",
"respiration",
"of",
"the",
"data",
".",
"Default",
"values",
"based",
"on",
"Walvaert",
"Durnez",
"Moerkerke",
"Verdoolaege",
"and",
"Rosseel",
"... | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/fmrisim.py#L1714-L1760 | train | 204,430 |
brainiak/brainiak | brainiak/utils/fmrisim.py | _generate_noise_spatial | def _generate_noise_spatial(dimensions,
mask=None,
fwhm=4.0,
):
"""Generate code for Gaussian Random Fields.
Adapted from code found here:
http://andrewwalker.github.io/statefultransitions/post/gaussian-fields/
with permission from the author:
https://twitter.com/walkera101/status/785578499440377858. Original code
comes from http://mathematica.stackexchange.com/questions/4829
/efficiently-generating-n-d-gaussian-random-fields with a WTFPL (
http://www.wtfpl.net).
Parameters
----------
dimensions : 3 length array, int
What is the shape of the volume to be generated. This code
compresesses the range if the x and y dimensions are not equivalent.
This fixes this by upsampling and then downsampling the volume.
template : 3d array, float
A continuous (0 -> 1) volume describing the likelihood a voxel is in
the brain. This can be used to contrast the brain and non brain.
mask : 3 dimensional array, binary
The masked brain, thresholded to distinguish brain and non-brain
fwhm : float
What is the full width half max of the gaussian fields being created.
This is converted into a sigma which is used in this function.
However, this conversion was found empirically by testing values of
sigma and how it relates to fwhm values. The relationship that would be
found in such a test depends on the size of the brain (bigger brains
can have bigger fwhm).
However, small errors shouldn't matter too much since the fwhm
generated here can only be approximate anyway: firstly, although the
distribution that is being drawn from is set to this value,
this will manifest differently on every draw. Secondly, because of
the masking and dimensions of the generated volume, this does not
behave simply- wrapping effects matter (the outputs are
closer to the input value if you have no mask).
Use _calc_fwhm on this volume alone if you have concerns about the
accuracy of the fwhm.
Returns
----------
noise_spatial : 3d array, float
Generates the spatial noise volume for these parameters
"""
# Check the input is correct
if len(dimensions) == 4:
logger.warning('4 dimensions have been supplied, only using 3')
dimensions = dimensions[0:3]
# If the dimensions are wrong then upsample now
if dimensions[0] != dimensions[1] or dimensions[1] != dimensions[2]:
max_dim = np.max(dimensions)
new_dim = (max_dim, max_dim, max_dim)
else:
new_dim = dimensions
def _logfunc(x, a, b, c):
"""Solve for y given x for log function.
Parameters
----------
x : float
x value of log function
a : float
x shift of function
b : float
rate of change
c : float
y shift of function
Returns
----------
float
y value of log function
"""
return (np.log(x + a) / np.log(b)) + c
def _fftIndgen(n):
"""# Specify the fft coefficents
Parameters
----------
n : int
Dim size to estimate over
Returns
----------
array of ints
fft indexes
"""
# Pull out the ascending and descending indexes
ascending = np.linspace(0, int(n / 2), int(n / 2 + 1))
elements = int(np.ceil(n / 2 - 1)) # Round up so that len(output)==n
descending = np.linspace(-elements, -1, elements)
return np.concatenate((ascending, descending))
def _Pk2(idxs, sigma):
"""# Specify the amplitude given the fft coefficents
Parameters
----------
idxs : 3 by voxel array int
fft indexes
sigma : float
spatial sigma
Returns
----------
amplitude : 3 by voxel array
amplitude of the fft coefficients
"""
# The first set of idxs ought to be zero so make the first value
# zero to avoid a divide by zero error
amp_start = np.array((0))
# Compute the amplitude of the function for a series of indices
amp_end = np.sqrt(np.sqrt(np.sum(idxs[:, 1:] ** 2, 0)) ** (-1 * sigma))
amplitude = np.append(amp_start, amp_end)
# Return the output
return amplitude
# Convert from fwhm to sigma (relationship discovered empirical, only an
# approximation up to sigma = 0 -> 5 which corresponds to fwhm = 0 -> 8,
# relies on an assumption of brain size).
spatial_sigma = _logfunc(fwhm, -0.36778719, 2.10601011, 2.15439247)
noise = np.fft.fftn(np.random.normal(size=new_dim))
# Create a meshgrid of the object
fft_vol = np.meshgrid(_fftIndgen(new_dim[0]), _fftIndgen(new_dim[1]),
_fftIndgen(new_dim[2]))
# Reshape the data into a vector
fft_vec = np.asarray((fft_vol[0].flatten(), fft_vol[1].flatten(), fft_vol[
2].flatten()))
# Compute the amplitude for each element in the grid
amp_vec = _Pk2(fft_vec, spatial_sigma)
# Reshape to be a brain volume
amplitude = amp_vec.reshape(new_dim)
# The output
noise_fft = (np.fft.ifftn(noise * amplitude)).real
# Fix the dimensionality of the data (if necessary)
noise_spatial = noise_fft[:dimensions[0], :dimensions[1], :dimensions[2]]
# Mask or not, then z score
if mask is not None:
# Mask the output
noise_spatial *= mask
# Z score the specific to the brain
noise_spatial[mask > 0] = stats.zscore(noise_spatial[mask > 0])
else:
# Take the grand mean/std and use for z scoring
grand_mean = (noise_spatial).mean()
grand_std = (noise_spatial).std()
noise_spatial = (noise_spatial - grand_mean) / grand_std
return noise_spatial | python | def _generate_noise_spatial(dimensions,
mask=None,
fwhm=4.0,
):
"""Generate code for Gaussian Random Fields.
Adapted from code found here:
http://andrewwalker.github.io/statefultransitions/post/gaussian-fields/
with permission from the author:
https://twitter.com/walkera101/status/785578499440377858. Original code
comes from http://mathematica.stackexchange.com/questions/4829
/efficiently-generating-n-d-gaussian-random-fields with a WTFPL (
http://www.wtfpl.net).
Parameters
----------
dimensions : 3 length array, int
What is the shape of the volume to be generated. This code
compresesses the range if the x and y dimensions are not equivalent.
This fixes this by upsampling and then downsampling the volume.
template : 3d array, float
A continuous (0 -> 1) volume describing the likelihood a voxel is in
the brain. This can be used to contrast the brain and non brain.
mask : 3 dimensional array, binary
The masked brain, thresholded to distinguish brain and non-brain
fwhm : float
What is the full width half max of the gaussian fields being created.
This is converted into a sigma which is used in this function.
However, this conversion was found empirically by testing values of
sigma and how it relates to fwhm values. The relationship that would be
found in such a test depends on the size of the brain (bigger brains
can have bigger fwhm).
However, small errors shouldn't matter too much since the fwhm
generated here can only be approximate anyway: firstly, although the
distribution that is being drawn from is set to this value,
this will manifest differently on every draw. Secondly, because of
the masking and dimensions of the generated volume, this does not
behave simply- wrapping effects matter (the outputs are
closer to the input value if you have no mask).
Use _calc_fwhm on this volume alone if you have concerns about the
accuracy of the fwhm.
Returns
----------
noise_spatial : 3d array, float
Generates the spatial noise volume for these parameters
"""
# Check the input is correct
if len(dimensions) == 4:
logger.warning('4 dimensions have been supplied, only using 3')
dimensions = dimensions[0:3]
# If the dimensions are wrong then upsample now
if dimensions[0] != dimensions[1] or dimensions[1] != dimensions[2]:
max_dim = np.max(dimensions)
new_dim = (max_dim, max_dim, max_dim)
else:
new_dim = dimensions
def _logfunc(x, a, b, c):
"""Solve for y given x for log function.
Parameters
----------
x : float
x value of log function
a : float
x shift of function
b : float
rate of change
c : float
y shift of function
Returns
----------
float
y value of log function
"""
return (np.log(x + a) / np.log(b)) + c
def _fftIndgen(n):
"""# Specify the fft coefficents
Parameters
----------
n : int
Dim size to estimate over
Returns
----------
array of ints
fft indexes
"""
# Pull out the ascending and descending indexes
ascending = np.linspace(0, int(n / 2), int(n / 2 + 1))
elements = int(np.ceil(n / 2 - 1)) # Round up so that len(output)==n
descending = np.linspace(-elements, -1, elements)
return np.concatenate((ascending, descending))
def _Pk2(idxs, sigma):
"""# Specify the amplitude given the fft coefficents
Parameters
----------
idxs : 3 by voxel array int
fft indexes
sigma : float
spatial sigma
Returns
----------
amplitude : 3 by voxel array
amplitude of the fft coefficients
"""
# The first set of idxs ought to be zero so make the first value
# zero to avoid a divide by zero error
amp_start = np.array((0))
# Compute the amplitude of the function for a series of indices
amp_end = np.sqrt(np.sqrt(np.sum(idxs[:, 1:] ** 2, 0)) ** (-1 * sigma))
amplitude = np.append(amp_start, amp_end)
# Return the output
return amplitude
# Convert from fwhm to sigma (relationship discovered empirical, only an
# approximation up to sigma = 0 -> 5 which corresponds to fwhm = 0 -> 8,
# relies on an assumption of brain size).
spatial_sigma = _logfunc(fwhm, -0.36778719, 2.10601011, 2.15439247)
noise = np.fft.fftn(np.random.normal(size=new_dim))
# Create a meshgrid of the object
fft_vol = np.meshgrid(_fftIndgen(new_dim[0]), _fftIndgen(new_dim[1]),
_fftIndgen(new_dim[2]))
# Reshape the data into a vector
fft_vec = np.asarray((fft_vol[0].flatten(), fft_vol[1].flatten(), fft_vol[
2].flatten()))
# Compute the amplitude for each element in the grid
amp_vec = _Pk2(fft_vec, spatial_sigma)
# Reshape to be a brain volume
amplitude = amp_vec.reshape(new_dim)
# The output
noise_fft = (np.fft.ifftn(noise * amplitude)).real
# Fix the dimensionality of the data (if necessary)
noise_spatial = noise_fft[:dimensions[0], :dimensions[1], :dimensions[2]]
# Mask or not, then z score
if mask is not None:
# Mask the output
noise_spatial *= mask
# Z score the specific to the brain
noise_spatial[mask > 0] = stats.zscore(noise_spatial[mask > 0])
else:
# Take the grand mean/std and use for z scoring
grand_mean = (noise_spatial).mean()
grand_std = (noise_spatial).std()
noise_spatial = (noise_spatial - grand_mean) / grand_std
return noise_spatial | [
"def",
"_generate_noise_spatial",
"(",
"dimensions",
",",
"mask",
"=",
"None",
",",
"fwhm",
"=",
"4.0",
",",
")",
":",
"# Check the input is correct",
"if",
"len",
"(",
"dimensions",
")",
"==",
"4",
":",
"logger",
".",
"warning",
"(",
"'4 dimensions have been ... | Generate code for Gaussian Random Fields.
Adapted from code found here:
http://andrewwalker.github.io/statefultransitions/post/gaussian-fields/
with permission from the author:
https://twitter.com/walkera101/status/785578499440377858. Original code
comes from http://mathematica.stackexchange.com/questions/4829
/efficiently-generating-n-d-gaussian-random-fields with a WTFPL (
http://www.wtfpl.net).
Parameters
----------
dimensions : 3 length array, int
What is the shape of the volume to be generated. This code
compresesses the range if the x and y dimensions are not equivalent.
This fixes this by upsampling and then downsampling the volume.
template : 3d array, float
A continuous (0 -> 1) volume describing the likelihood a voxel is in
the brain. This can be used to contrast the brain and non brain.
mask : 3 dimensional array, binary
The masked brain, thresholded to distinguish brain and non-brain
fwhm : float
What is the full width half max of the gaussian fields being created.
This is converted into a sigma which is used in this function.
However, this conversion was found empirically by testing values of
sigma and how it relates to fwhm values. The relationship that would be
found in such a test depends on the size of the brain (bigger brains
can have bigger fwhm).
However, small errors shouldn't matter too much since the fwhm
generated here can only be approximate anyway: firstly, although the
distribution that is being drawn from is set to this value,
this will manifest differently on every draw. Secondly, because of
the masking and dimensions of the generated volume, this does not
behave simply- wrapping effects matter (the outputs are
closer to the input value if you have no mask).
Use _calc_fwhm on this volume alone if you have concerns about the
accuracy of the fwhm.
Returns
----------
noise_spatial : 3d array, float
Generates the spatial noise volume for these parameters | [
"Generate",
"code",
"for",
"Gaussian",
"Random",
"Fields",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/fmrisim.py#L1763-L1950 | train | 204,431 |
brainiak/brainiak | brainiak/utils/fmrisim.py | _generate_noise_temporal | def _generate_noise_temporal(stimfunction_tr,
tr_duration,
dimensions,
template,
mask,
noise_dict
):
"""Generate the temporal noise
Generate the time course of the average brain voxel. To change the
relative mixing of the noise components, change the sigma's specified
below.
Parameters
----------
stimfunction_tr : 1 Dimensional array
This is the timecourse of the stimuli in this experiment,
each element represents a TR
tr_duration : int
How long is a TR, in seconds
dimensions : 3 length array, int
What is the shape of the volume to be generated
template : 3d array, float
A continuous (0 -> 1) volume describing the likelihood a voxel is in
the brain. This can be used to contrast the brain and non brain.
mask : 3 dimensional array, binary
The masked brain, thresholded to distinguish brain and non-brain
noise_dict : dict
A dictionary specifying the types of noise in this experiment. The
noise types interact in important ways. First, all noise types
ending with sigma (e.g. motion sigma) are mixed together in
_generate_temporal_noise. The sigma values describe the proportion of
mixing of these elements. However critically, SFNR is the
parameter that describes how much noise these components contribute
to the brain. If you set the noise dict to matched then it will fit
the parameters to match the participant as best as possible.
Returns
----------
noise_temporal : one dimensional array, float
Generates the temporal noise timecourse for these parameters
"""
# Set up common parameters
# How many TRs are there
trs = len(stimfunction_tr)
# What time points are sampled by a TR?
timepoints = list(np.linspace(0, (trs - 1) * tr_duration, trs))
# Preset the volume
noise_volume = np.zeros((dimensions[0], dimensions[1], dimensions[2], trs))
# Generate the drift noise
if noise_dict['drift_sigma'] != 0:
# Calculate the drift time course
noise = _generate_noise_temporal_drift(trs,
tr_duration,
)
# Create a volume with the drift properties
volume = np.ones(dimensions)
# Combine the volume and noise
noise_volume += np.multiply.outer(volume, noise) * noise_dict[
'drift_sigma']
# Generate the physiological noise
if noise_dict['physiological_sigma'] != 0:
# Calculate the physiological time course
noise = _generate_noise_temporal_phys(timepoints,
)
# Create a brain shaped volume with similar smoothing properties
volume = _generate_noise_spatial(dimensions=dimensions,
mask=mask,
fwhm=noise_dict['fwhm'],
)
# Combine the volume and noise
noise_volume += np.multiply.outer(volume, noise) * noise_dict[
'physiological_sigma']
# Generate the AR noise
if noise_dict['auto_reg_sigma'] != 0:
# Calculate the AR time course volume
noise = _generate_noise_temporal_autoregression(timepoints,
noise_dict,
dimensions,
mask,
)
# Combine the volume and noise
noise_volume += noise * noise_dict['auto_reg_sigma']
# Generate the task related noise
if noise_dict['task_sigma'] != 0 and np.sum(stimfunction_tr) > 0:
# Calculate the task based noise time course
noise = _generate_noise_temporal_task(stimfunction_tr,
)
# Create a brain shaped volume with similar smoothing properties
volume = _generate_noise_spatial(dimensions=dimensions,
mask=mask,
fwhm=noise_dict['fwhm'],
)
# Combine the volume and noise
noise_volume += np.multiply.outer(volume, noise) * noise_dict[
'task_sigma']
# Finally, z score each voxel so things mix nicely
noise_volume = stats.zscore(noise_volume, 3)
# If it is a nan it is because you just divided by zero (since some
# voxels are zeros in the template)
noise_volume[np.isnan(noise_volume)] = 0
return noise_volume | python | def _generate_noise_temporal(stimfunction_tr,
tr_duration,
dimensions,
template,
mask,
noise_dict
):
"""Generate the temporal noise
Generate the time course of the average brain voxel. To change the
relative mixing of the noise components, change the sigma's specified
below.
Parameters
----------
stimfunction_tr : 1 Dimensional array
This is the timecourse of the stimuli in this experiment,
each element represents a TR
tr_duration : int
How long is a TR, in seconds
dimensions : 3 length array, int
What is the shape of the volume to be generated
template : 3d array, float
A continuous (0 -> 1) volume describing the likelihood a voxel is in
the brain. This can be used to contrast the brain and non brain.
mask : 3 dimensional array, binary
The masked brain, thresholded to distinguish brain and non-brain
noise_dict : dict
A dictionary specifying the types of noise in this experiment. The
noise types interact in important ways. First, all noise types
ending with sigma (e.g. motion sigma) are mixed together in
_generate_temporal_noise. The sigma values describe the proportion of
mixing of these elements. However critically, SFNR is the
parameter that describes how much noise these components contribute
to the brain. If you set the noise dict to matched then it will fit
the parameters to match the participant as best as possible.
Returns
----------
noise_temporal : one dimensional array, float
Generates the temporal noise timecourse for these parameters
"""
# Set up common parameters
# How many TRs are there
trs = len(stimfunction_tr)
# What time points are sampled by a TR?
timepoints = list(np.linspace(0, (trs - 1) * tr_duration, trs))
# Preset the volume
noise_volume = np.zeros((dimensions[0], dimensions[1], dimensions[2], trs))
# Generate the drift noise
if noise_dict['drift_sigma'] != 0:
# Calculate the drift time course
noise = _generate_noise_temporal_drift(trs,
tr_duration,
)
# Create a volume with the drift properties
volume = np.ones(dimensions)
# Combine the volume and noise
noise_volume += np.multiply.outer(volume, noise) * noise_dict[
'drift_sigma']
# Generate the physiological noise
if noise_dict['physiological_sigma'] != 0:
# Calculate the physiological time course
noise = _generate_noise_temporal_phys(timepoints,
)
# Create a brain shaped volume with similar smoothing properties
volume = _generate_noise_spatial(dimensions=dimensions,
mask=mask,
fwhm=noise_dict['fwhm'],
)
# Combine the volume and noise
noise_volume += np.multiply.outer(volume, noise) * noise_dict[
'physiological_sigma']
# Generate the AR noise
if noise_dict['auto_reg_sigma'] != 0:
# Calculate the AR time course volume
noise = _generate_noise_temporal_autoregression(timepoints,
noise_dict,
dimensions,
mask,
)
# Combine the volume and noise
noise_volume += noise * noise_dict['auto_reg_sigma']
# Generate the task related noise
if noise_dict['task_sigma'] != 0 and np.sum(stimfunction_tr) > 0:
# Calculate the task based noise time course
noise = _generate_noise_temporal_task(stimfunction_tr,
)
# Create a brain shaped volume with similar smoothing properties
volume = _generate_noise_spatial(dimensions=dimensions,
mask=mask,
fwhm=noise_dict['fwhm'],
)
# Combine the volume and noise
noise_volume += np.multiply.outer(volume, noise) * noise_dict[
'task_sigma']
# Finally, z score each voxel so things mix nicely
noise_volume = stats.zscore(noise_volume, 3)
# If it is a nan it is because you just divided by zero (since some
# voxels are zeros in the template)
noise_volume[np.isnan(noise_volume)] = 0
return noise_volume | [
"def",
"_generate_noise_temporal",
"(",
"stimfunction_tr",
",",
"tr_duration",
",",
"dimensions",
",",
"template",
",",
"mask",
",",
"noise_dict",
")",
":",
"# Set up common parameters",
"# How many TRs are there",
"trs",
"=",
"len",
"(",
"stimfunction_tr",
")",
"# Wh... | Generate the temporal noise
Generate the time course of the average brain voxel. To change the
relative mixing of the noise components, change the sigma's specified
below.
Parameters
----------
stimfunction_tr : 1 Dimensional array
This is the timecourse of the stimuli in this experiment,
each element represents a TR
tr_duration : int
How long is a TR, in seconds
dimensions : 3 length array, int
What is the shape of the volume to be generated
template : 3d array, float
A continuous (0 -> 1) volume describing the likelihood a voxel is in
the brain. This can be used to contrast the brain and non brain.
mask : 3 dimensional array, binary
The masked brain, thresholded to distinguish brain and non-brain
noise_dict : dict
A dictionary specifying the types of noise in this experiment. The
noise types interact in important ways. First, all noise types
ending with sigma (e.g. motion sigma) are mixed together in
_generate_temporal_noise. The sigma values describe the proportion of
mixing of these elements. However critically, SFNR is the
parameter that describes how much noise these components contribute
to the brain. If you set the noise dict to matched then it will fit
the parameters to match the participant as best as possible.
Returns
----------
noise_temporal : one dimensional array, float
Generates the temporal noise timecourse for these parameters | [
"Generate",
"the",
"temporal",
"noise",
"Generate",
"the",
"time",
"course",
"of",
"the",
"average",
"brain",
"voxel",
".",
"To",
"change",
"the",
"relative",
"mixing",
"of",
"the",
"noise",
"components",
"change",
"the",
"sigma",
"s",
"specified",
"below",
... | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/fmrisim.py#L1953-L2079 | train | 204,432 |
brainiak/brainiak | brainiak/utils/fmrisim.py | _noise_dict_update | def _noise_dict_update(noise_dict):
"""
Update the noise dictionary parameters with default values, in case any
were missing
Parameters
----------
noise_dict : dict
A dictionary specifying the types of noise in this experiment. The
noise types interact in important ways. First, all noise types
ending with sigma (e.g. motion sigma) are mixed together in
_generate_temporal_noise. These values describe the proportion of
mixing of these elements. However critically, SFNR is the
parameter that describes how much noise these components contribute
to the brain. If you set the noise dict to matched then it will fit
the parameters to match the participant as best as possible.
The noise variables are as follows:
snr [float]: Ratio of MR signal to the spatial noise
sfnr [float]: Ratio of the MR signal to the temporal noise. This is the
total variability that the following sigmas 'sum' to:
task_sigma [float]: Size of the variance of task specific noise
drift_sigma [float]: Size of the variance of drift noise
auto_reg_sigma [float]: Size of the variance of autoregressive
noise. This is an ARMA process where the AR and MA components can be
separately specified
physiological_sigma [float]: Size of the variance of physiological
noise
auto_reg_rho [list]: The coefficients of the autoregressive
components you are modeling
ma_rho [list]:The coefficients of the moving average components you
are modeling
max_activity [float]: The max value of the averaged brain in order
to reference the template
voxel_size [list]: The mm size of the voxels
fwhm [float]: The gaussian smoothing kernel size (mm)
matched [bool]: Specify whether you are fitting the noise parameters
The volumes of brain noise that are generated have smoothness
specified by 'fwhm'
Returns
-------
noise_dict : dict
Updated dictionary
"""
# Create the default dictionary
default_dict = {'task_sigma': 0, 'drift_sigma': 0, 'auto_reg_sigma': 1,
'auto_reg_rho': [0.5], 'ma_rho': [0.0],
'physiological_sigma': 0, 'sfnr': 90, 'snr': 50,
'max_activity': 1000, 'voxel_size': [1.0, 1.0, 1.0],
'fwhm': 4, 'matched': 1}
# Check what noise is in the dictionary and add if necessary. Numbers
# determine relative proportion of noise
for default_key in default_dict:
if default_key not in noise_dict:
noise_dict[default_key] = default_dict[default_key]
return noise_dict | python | def _noise_dict_update(noise_dict):
"""
Update the noise dictionary parameters with default values, in case any
were missing
Parameters
----------
noise_dict : dict
A dictionary specifying the types of noise in this experiment. The
noise types interact in important ways. First, all noise types
ending with sigma (e.g. motion sigma) are mixed together in
_generate_temporal_noise. These values describe the proportion of
mixing of these elements. However critically, SFNR is the
parameter that describes how much noise these components contribute
to the brain. If you set the noise dict to matched then it will fit
the parameters to match the participant as best as possible.
The noise variables are as follows:
snr [float]: Ratio of MR signal to the spatial noise
sfnr [float]: Ratio of the MR signal to the temporal noise. This is the
total variability that the following sigmas 'sum' to:
task_sigma [float]: Size of the variance of task specific noise
drift_sigma [float]: Size of the variance of drift noise
auto_reg_sigma [float]: Size of the variance of autoregressive
noise. This is an ARMA process where the AR and MA components can be
separately specified
physiological_sigma [float]: Size of the variance of physiological
noise
auto_reg_rho [list]: The coefficients of the autoregressive
components you are modeling
ma_rho [list]:The coefficients of the moving average components you
are modeling
max_activity [float]: The max value of the averaged brain in order
to reference the template
voxel_size [list]: The mm size of the voxels
fwhm [float]: The gaussian smoothing kernel size (mm)
matched [bool]: Specify whether you are fitting the noise parameters
The volumes of brain noise that are generated have smoothness
specified by 'fwhm'
Returns
-------
noise_dict : dict
Updated dictionary
"""
# Create the default dictionary
default_dict = {'task_sigma': 0, 'drift_sigma': 0, 'auto_reg_sigma': 1,
'auto_reg_rho': [0.5], 'ma_rho': [0.0],
'physiological_sigma': 0, 'sfnr': 90, 'snr': 50,
'max_activity': 1000, 'voxel_size': [1.0, 1.0, 1.0],
'fwhm': 4, 'matched': 1}
# Check what noise is in the dictionary and add if necessary. Numbers
# determine relative proportion of noise
for default_key in default_dict:
if default_key not in noise_dict:
noise_dict[default_key] = default_dict[default_key]
return noise_dict | [
"def",
"_noise_dict_update",
"(",
"noise_dict",
")",
":",
"# Create the default dictionary",
"default_dict",
"=",
"{",
"'task_sigma'",
":",
"0",
",",
"'drift_sigma'",
":",
"0",
",",
"'auto_reg_sigma'",
":",
"1",
",",
"'auto_reg_rho'",
":",
"[",
"0.5",
"]",
",",
... | Update the noise dictionary parameters with default values, in case any
were missing
Parameters
----------
noise_dict : dict
A dictionary specifying the types of noise in this experiment. The
noise types interact in important ways. First, all noise types
ending with sigma (e.g. motion sigma) are mixed together in
_generate_temporal_noise. These values describe the proportion of
mixing of these elements. However critically, SFNR is the
parameter that describes how much noise these components contribute
to the brain. If you set the noise dict to matched then it will fit
the parameters to match the participant as best as possible.
The noise variables are as follows:
snr [float]: Ratio of MR signal to the spatial noise
sfnr [float]: Ratio of the MR signal to the temporal noise. This is the
total variability that the following sigmas 'sum' to:
task_sigma [float]: Size of the variance of task specific noise
drift_sigma [float]: Size of the variance of drift noise
auto_reg_sigma [float]: Size of the variance of autoregressive
noise. This is an ARMA process where the AR and MA components can be
separately specified
physiological_sigma [float]: Size of the variance of physiological
noise
auto_reg_rho [list]: The coefficients of the autoregressive
components you are modeling
ma_rho [list]:The coefficients of the moving average components you
are modeling
max_activity [float]: The max value of the averaged brain in order
to reference the template
voxel_size [list]: The mm size of the voxels
fwhm [float]: The gaussian smoothing kernel size (mm)
matched [bool]: Specify whether you are fitting the noise parameters
The volumes of brain noise that are generated have smoothness
specified by 'fwhm'
Returns
-------
noise_dict : dict
Updated dictionary | [
"Update",
"the",
"noise",
"dictionary",
"parameters",
"with",
"default",
"values",
"in",
"case",
"any",
"were",
"missing"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/fmrisim.py#L2217-L2281 | train | 204,433 |
brainiak/brainiak | brainiak/utils/fmrisim.py | _fit_spatial | def _fit_spatial(noise,
noise_temporal,
mask,
template,
spatial_sd,
temporal_sd,
noise_dict,
fit_thresh,
fit_delta,
iterations,
):
"""
Fit the noise model to match the SNR of the data
Parameters
----------
noise : multidimensional array, float
Initial estimate of the noise
noise_temporal : multidimensional array, float
The temporal noise that was generated by _generate_temporal_noise
tr_duration : float
What is the duration, in seconds, of each TR?
template : 3d array, float
A continuous (0 -> 1) volume describing the likelihood a voxel
is in the brain. This can be used to contrast the brain and non
brain.
mask : 3d array, binary
The mask of the brain volume, distinguishing brain from non-brain
spatial_sd : float
What is the standard deviation in space of the noise volume to be
generated
temporal_sd : float
What is the standard deviation in time of the noise volume to be
generated
noise_dict : dict
A dictionary specifying the types of noise in this experiment. The
noise types interact in important ways. First, all noise types
ending with sigma (e.g. motion sigma) are mixed together in
_generate_temporal_noise. These values describe the proportion of
mixing of these elements. However critically, SFNR is the
parameter that describes how much noise these components contribute
to the brain. If you set the noise dict to matched then it will
fit the parameters to match the participant as best as possible.
fit_thresh : float
What proportion of the target parameter value is sufficient
error to warrant finishing fit search.
fit_delta : float
How much are the parameters attenuated during the fitting process,
in terms of the proportion of difference between the target
parameter and the actual parameter
iterations : int
The first element is how many steps of fitting the SFNR and SNR
values will be performed. Usually converges after < 5. The
second element is the number of iterations for the AR fitting.
This is much more time consuming (has to make a new timecourse
on each iteration) so be careful about setting this appropriately.
Returns
-------
noise : multidimensional array, float
Generates the noise volume given these parameters
"""
# Pull out information that is needed
dim_tr = noise.shape
base = template * noise_dict['max_activity']
base = base.reshape(dim_tr[0], dim_tr[1], dim_tr[2], 1)
mean_signal = (base[mask > 0]).mean()
target_snr = noise_dict['snr']
# Iterate through different parameters to fit SNR and SFNR
spat_sd_orig = np.copy(spatial_sd)
iteration = 0
for iteration in list(range(iterations)):
# Calculate the new metrics
new_snr = _calc_snr(noise, mask)
# Calculate the difference between the real and simulated data
diff_snr = abs(new_snr - target_snr) / target_snr
# If the AR is sufficiently close then break the loop
if diff_snr < fit_thresh:
logger.info('Terminated SNR fit after ' + str(
iteration) + ' iterations.')
break
# Convert the SFNR and SNR
spat_sd_new = mean_signal / new_snr
# Update the variable
spatial_sd -= ((spat_sd_new - spat_sd_orig) * fit_delta)
# Prevent these going out of range
if spatial_sd < 0 or np.isnan(spatial_sd):
spatial_sd = 10e-3
# Set up the machine noise
noise_system = _generate_noise_system(dimensions_tr=dim_tr,
spatial_sd=spatial_sd,
temporal_sd=temporal_sd,
)
# Sum up the noise of the brain
noise = base + (noise_temporal * temporal_sd) + noise_system
# Reject negative values (only happens outside of the brain)
noise[noise < 0] = 0
# Failed to converge
if iterations == 0:
logger.info('No fitting iterations were run')
elif iteration == iterations:
logger.warning('SNR failed to converge.')
# Return the updated noise
return noise, spatial_sd | python | def _fit_spatial(noise,
noise_temporal,
mask,
template,
spatial_sd,
temporal_sd,
noise_dict,
fit_thresh,
fit_delta,
iterations,
):
"""
Fit the noise model to match the SNR of the data
Parameters
----------
noise : multidimensional array, float
Initial estimate of the noise
noise_temporal : multidimensional array, float
The temporal noise that was generated by _generate_temporal_noise
tr_duration : float
What is the duration, in seconds, of each TR?
template : 3d array, float
A continuous (0 -> 1) volume describing the likelihood a voxel
is in the brain. This can be used to contrast the brain and non
brain.
mask : 3d array, binary
The mask of the brain volume, distinguishing brain from non-brain
spatial_sd : float
What is the standard deviation in space of the noise volume to be
generated
temporal_sd : float
What is the standard deviation in time of the noise volume to be
generated
noise_dict : dict
A dictionary specifying the types of noise in this experiment. The
noise types interact in important ways. First, all noise types
ending with sigma (e.g. motion sigma) are mixed together in
_generate_temporal_noise. These values describe the proportion of
mixing of these elements. However critically, SFNR is the
parameter that describes how much noise these components contribute
to the brain. If you set the noise dict to matched then it will
fit the parameters to match the participant as best as possible.
fit_thresh : float
What proportion of the target parameter value is sufficient
error to warrant finishing fit search.
fit_delta : float
How much are the parameters attenuated during the fitting process,
in terms of the proportion of difference between the target
parameter and the actual parameter
iterations : int
The first element is how many steps of fitting the SFNR and SNR
values will be performed. Usually converges after < 5. The
second element is the number of iterations for the AR fitting.
This is much more time consuming (has to make a new timecourse
on each iteration) so be careful about setting this appropriately.
Returns
-------
noise : multidimensional array, float
Generates the noise volume given these parameters
"""
# Pull out information that is needed
dim_tr = noise.shape
base = template * noise_dict['max_activity']
base = base.reshape(dim_tr[0], dim_tr[1], dim_tr[2], 1)
mean_signal = (base[mask > 0]).mean()
target_snr = noise_dict['snr']
# Iterate through different parameters to fit SNR and SFNR
spat_sd_orig = np.copy(spatial_sd)
iteration = 0
for iteration in list(range(iterations)):
# Calculate the new metrics
new_snr = _calc_snr(noise, mask)
# Calculate the difference between the real and simulated data
diff_snr = abs(new_snr - target_snr) / target_snr
# If the AR is sufficiently close then break the loop
if diff_snr < fit_thresh:
logger.info('Terminated SNR fit after ' + str(
iteration) + ' iterations.')
break
# Convert the SFNR and SNR
spat_sd_new = mean_signal / new_snr
# Update the variable
spatial_sd -= ((spat_sd_new - spat_sd_orig) * fit_delta)
# Prevent these going out of range
if spatial_sd < 0 or np.isnan(spatial_sd):
spatial_sd = 10e-3
# Set up the machine noise
noise_system = _generate_noise_system(dimensions_tr=dim_tr,
spatial_sd=spatial_sd,
temporal_sd=temporal_sd,
)
# Sum up the noise of the brain
noise = base + (noise_temporal * temporal_sd) + noise_system
# Reject negative values (only happens outside of the brain)
noise[noise < 0] = 0
# Failed to converge
if iterations == 0:
logger.info('No fitting iterations were run')
elif iteration == iterations:
logger.warning('SNR failed to converge.')
# Return the updated noise
return noise, spatial_sd | [
"def",
"_fit_spatial",
"(",
"noise",
",",
"noise_temporal",
",",
"mask",
",",
"template",
",",
"spatial_sd",
",",
"temporal_sd",
",",
"noise_dict",
",",
"fit_thresh",
",",
"fit_delta",
",",
"iterations",
",",
")",
":",
"# Pull out information that is needed",
"dim... | Fit the noise model to match the SNR of the data
Parameters
----------
noise : multidimensional array, float
Initial estimate of the noise
noise_temporal : multidimensional array, float
The temporal noise that was generated by _generate_temporal_noise
tr_duration : float
What is the duration, in seconds, of each TR?
template : 3d array, float
A continuous (0 -> 1) volume describing the likelihood a voxel
is in the brain. This can be used to contrast the brain and non
brain.
mask : 3d array, binary
The mask of the brain volume, distinguishing brain from non-brain
spatial_sd : float
What is the standard deviation in space of the noise volume to be
generated
temporal_sd : float
What is the standard deviation in time of the noise volume to be
generated
noise_dict : dict
A dictionary specifying the types of noise in this experiment. The
noise types interact in important ways. First, all noise types
ending with sigma (e.g. motion sigma) are mixed together in
_generate_temporal_noise. These values describe the proportion of
mixing of these elements. However critically, SFNR is the
parameter that describes how much noise these components contribute
to the brain. If you set the noise dict to matched then it will
fit the parameters to match the participant as best as possible.
fit_thresh : float
What proportion of the target parameter value is sufficient
error to warrant finishing fit search.
fit_delta : float
How much are the parameters attenuated during the fitting process,
in terms of the proportion of difference between the target
parameter and the actual parameter
iterations : int
The first element is how many steps of fitting the SFNR and SNR
values will be performed. Usually converges after < 5. The
second element is the number of iterations for the AR fitting.
This is much more time consuming (has to make a new timecourse
on each iteration) so be careful about setting this appropriately.
Returns
-------
noise : multidimensional array, float
Generates the noise volume given these parameters | [
"Fit",
"the",
"noise",
"model",
"to",
"match",
"the",
"SNR",
"of",
"the",
"data"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/fmrisim.py#L2284-L2413 | train | 204,434 |
brainiak/brainiak | brainiak/utils/fmrisim.py | _fit_temporal | def _fit_temporal(noise,
mask,
template,
stimfunction_tr,
tr_duration,
spatial_sd,
temporal_proportion,
temporal_sd,
noise_dict,
fit_thresh,
fit_delta,
iterations,
):
"""
Fit the noise model to match the SFNR and AR of the data
Parameters
----------
noise : multidimensional array, float
Initial estimate of the noise
mask : 3d array, binary
The mask of the brain volume, distinguishing brain from non-brain
template : 3d array, float
A continuous (0 -> 1) volume describing the likelihood a voxel
is in the brain. This can be used to contrast the brain and non
brain.
stimfunction_tr : Iterable, list
When do the stimuli events occur. Each element is a TR
tr_duration : float
What is the duration, in seconds, of each TR?
spatial_sd : float
What is the standard deviation in space of the noise volume to be
generated
temporal_proportion, float
What is the proportion of the temporal variance (as specified by
the SFNR noise parameter) that is accounted for by the system
noise. If this number is high then all of the temporal
variability is due to system noise, if it is low then all of the
temporal variability is due to brain variability.
temporal_sd : float
What is the standard deviation in time of the noise volume to be
generated
noise_dict : dict
A dictionary specifying the types of noise in this experiment. The
noise types interact in important ways. First, all noise types
ending with sigma (e.g. motion sigma) are mixed together in
_generate_temporal_noise. These values describe the proportion of
mixing of these elements. However critically, SFNR is the
parameter that describes how much noise these components contribute
to the brain. If you set the noise dict to matched then it will
fit the parameters to match the participant as best as possible.
fit_thresh : float
What proportion of the target parameter value is sufficient
error to warrant finishing fit search.
fit_delta : float
How much are the parameters attenuated during the fitting process,
in terms of the proportion of difference between the target
parameter and the actual parameter
iterations : list, int
The first element is how many steps of fitting the SFNR and SNR
values will be performed. Usually converges after < 5. The
second element is the number of iterations for the AR fitting.
This is much more time consuming (has to make a new timecourse
on each iteration) so be careful about setting this appropriately.
Returns
-------
noise : multidimensional array, float
Generates the noise volume given these parameters
"""
# Pull out the
dim_tr = noise.shape
dim = dim_tr[0:3]
base = template * noise_dict['max_activity']
base = base.reshape(dim[0], dim[1], dim[2], 1)
mean_signal = (base[mask > 0]).mean()
# Iterate through different parameters to fit SNR and SFNR
temp_sd_orig = np.copy(temporal_sd)
# Make a copy of the dictionary so it can be modified
new_nd = copy.deepcopy(noise_dict)
# What SFNR do you want
target_sfnr = noise_dict['sfnr']
# What AR do you want?
target_ar = noise_dict['auto_reg_rho'][0]
# Iterate through different MA parameters to fit AR
for iteration in list(range(iterations)):
# If there are iterations left to perform then recalculate the
# metrics and try again
# Calculate the new SFNR
new_sfnr = _calc_sfnr(noise, mask)
# Calculate the AR
new_ar, _ = _calc_ARMA_noise(noise,
mask,
len(noise_dict['auto_reg_rho']),
len(noise_dict['ma_rho']),
)
# Calculate the difference between the real and simulated data
sfnr_diff = abs(new_sfnr - target_sfnr) / target_sfnr
# Calculate the difference in the first AR component
ar_diff = new_ar[0] - target_ar
# If the SFNR and AR is sufficiently close then break the loop
if (abs(ar_diff) / target_ar) < fit_thresh and sfnr_diff < fit_thresh:
msg = 'Terminated AR fit after ' + str(iteration) + ' iterations.'
logger.info(msg)
break
# Otherwise update the noise metrics. Get the new temporal noise value
temp_sd_new = mean_signal / new_sfnr
temporal_sd -= ((temp_sd_new - temp_sd_orig) * fit_delta)
# Prevent these going out of range
if temporal_sd < 0 or np.isnan(temporal_sd):
temporal_sd = 10e-3
# Set the new system noise
temp_sd_system_new = np.sqrt((temporal_sd ** 2) * temporal_proportion)
# Get the new AR value
new_nd['auto_reg_rho'][0] -= (ar_diff * fit_delta)
# Don't let the AR coefficient exceed 1
if new_nd['auto_reg_rho'][0] >= 1:
new_nd['auto_reg_rho'][0] = 0.99
# Generate the noise. The appropriate
noise_temporal = _generate_noise_temporal(stimfunction_tr,
tr_duration,
dim,
template,
mask,
new_nd,
)
# Set up the machine noise
noise_system = _generate_noise_system(dimensions_tr=dim_tr,
spatial_sd=spatial_sd,
temporal_sd=temp_sd_system_new,
)
# Sum up the noise of the brain
noise = base + (noise_temporal * temporal_sd) + noise_system
# Reject negative values (only happens outside of the brain)
noise[noise < 0] = 0
# Failed to converge
if iterations == 0:
logger.info('No fitting iterations were run')
elif iteration == iterations:
logger.warning('AR failed to converge.')
# Return the updated noise
return noise | python | def _fit_temporal(noise,
mask,
template,
stimfunction_tr,
tr_duration,
spatial_sd,
temporal_proportion,
temporal_sd,
noise_dict,
fit_thresh,
fit_delta,
iterations,
):
"""
Fit the noise model to match the SFNR and AR of the data
Parameters
----------
noise : multidimensional array, float
Initial estimate of the noise
mask : 3d array, binary
The mask of the brain volume, distinguishing brain from non-brain
template : 3d array, float
A continuous (0 -> 1) volume describing the likelihood a voxel
is in the brain. This can be used to contrast the brain and non
brain.
stimfunction_tr : Iterable, list
When do the stimuli events occur. Each element is a TR
tr_duration : float
What is the duration, in seconds, of each TR?
spatial_sd : float
What is the standard deviation in space of the noise volume to be
generated
temporal_proportion, float
What is the proportion of the temporal variance (as specified by
the SFNR noise parameter) that is accounted for by the system
noise. If this number is high then all of the temporal
variability is due to system noise, if it is low then all of the
temporal variability is due to brain variability.
temporal_sd : float
What is the standard deviation in time of the noise volume to be
generated
noise_dict : dict
A dictionary specifying the types of noise in this experiment. The
noise types interact in important ways. First, all noise types
ending with sigma (e.g. motion sigma) are mixed together in
_generate_temporal_noise. These values describe the proportion of
mixing of these elements. However critically, SFNR is the
parameter that describes how much noise these components contribute
to the brain. If you set the noise dict to matched then it will
fit the parameters to match the participant as best as possible.
fit_thresh : float
What proportion of the target parameter value is sufficient
error to warrant finishing fit search.
fit_delta : float
How much are the parameters attenuated during the fitting process,
in terms of the proportion of difference between the target
parameter and the actual parameter
iterations : list, int
The first element is how many steps of fitting the SFNR and SNR
values will be performed. Usually converges after < 5. The
second element is the number of iterations for the AR fitting.
This is much more time consuming (has to make a new timecourse
on each iteration) so be careful about setting this appropriately.
Returns
-------
noise : multidimensional array, float
Generates the noise volume given these parameters
"""
# Pull out the
dim_tr = noise.shape
dim = dim_tr[0:3]
base = template * noise_dict['max_activity']
base = base.reshape(dim[0], dim[1], dim[2], 1)
mean_signal = (base[mask > 0]).mean()
# Iterate through different parameters to fit SNR and SFNR
temp_sd_orig = np.copy(temporal_sd)
# Make a copy of the dictionary so it can be modified
new_nd = copy.deepcopy(noise_dict)
# What SFNR do you want
target_sfnr = noise_dict['sfnr']
# What AR do you want?
target_ar = noise_dict['auto_reg_rho'][0]
# Iterate through different MA parameters to fit AR
for iteration in list(range(iterations)):
# If there are iterations left to perform then recalculate the
# metrics and try again
# Calculate the new SFNR
new_sfnr = _calc_sfnr(noise, mask)
# Calculate the AR
new_ar, _ = _calc_ARMA_noise(noise,
mask,
len(noise_dict['auto_reg_rho']),
len(noise_dict['ma_rho']),
)
# Calculate the difference between the real and simulated data
sfnr_diff = abs(new_sfnr - target_sfnr) / target_sfnr
# Calculate the difference in the first AR component
ar_diff = new_ar[0] - target_ar
# If the SFNR and AR is sufficiently close then break the loop
if (abs(ar_diff) / target_ar) < fit_thresh and sfnr_diff < fit_thresh:
msg = 'Terminated AR fit after ' + str(iteration) + ' iterations.'
logger.info(msg)
break
# Otherwise update the noise metrics. Get the new temporal noise value
temp_sd_new = mean_signal / new_sfnr
temporal_sd -= ((temp_sd_new - temp_sd_orig) * fit_delta)
# Prevent these going out of range
if temporal_sd < 0 or np.isnan(temporal_sd):
temporal_sd = 10e-3
# Set the new system noise
temp_sd_system_new = np.sqrt((temporal_sd ** 2) * temporal_proportion)
# Get the new AR value
new_nd['auto_reg_rho'][0] -= (ar_diff * fit_delta)
# Don't let the AR coefficient exceed 1
if new_nd['auto_reg_rho'][0] >= 1:
new_nd['auto_reg_rho'][0] = 0.99
# Generate the noise. The appropriate
noise_temporal = _generate_noise_temporal(stimfunction_tr,
tr_duration,
dim,
template,
mask,
new_nd,
)
# Set up the machine noise
noise_system = _generate_noise_system(dimensions_tr=dim_tr,
spatial_sd=spatial_sd,
temporal_sd=temp_sd_system_new,
)
# Sum up the noise of the brain
noise = base + (noise_temporal * temporal_sd) + noise_system
# Reject negative values (only happens outside of the brain)
noise[noise < 0] = 0
# Failed to converge
if iterations == 0:
logger.info('No fitting iterations were run')
elif iteration == iterations:
logger.warning('AR failed to converge.')
# Return the updated noise
return noise | [
"def",
"_fit_temporal",
"(",
"noise",
",",
"mask",
",",
"template",
",",
"stimfunction_tr",
",",
"tr_duration",
",",
"spatial_sd",
",",
"temporal_proportion",
",",
"temporal_sd",
",",
"noise_dict",
",",
"fit_thresh",
",",
"fit_delta",
",",
"iterations",
",",
")"... | Fit the noise model to match the SFNR and AR of the data
Parameters
----------
noise : multidimensional array, float
Initial estimate of the noise
mask : 3d array, binary
The mask of the brain volume, distinguishing brain from non-brain
template : 3d array, float
A continuous (0 -> 1) volume describing the likelihood a voxel
is in the brain. This can be used to contrast the brain and non
brain.
stimfunction_tr : Iterable, list
When do the stimuli events occur. Each element is a TR
tr_duration : float
What is the duration, in seconds, of each TR?
spatial_sd : float
What is the standard deviation in space of the noise volume to be
generated
temporal_proportion, float
What is the proportion of the temporal variance (as specified by
the SFNR noise parameter) that is accounted for by the system
noise. If this number is high then all of the temporal
variability is due to system noise, if it is low then all of the
temporal variability is due to brain variability.
temporal_sd : float
What is the standard deviation in time of the noise volume to be
generated
noise_dict : dict
A dictionary specifying the types of noise in this experiment. The
noise types interact in important ways. First, all noise types
ending with sigma (e.g. motion sigma) are mixed together in
_generate_temporal_noise. These values describe the proportion of
mixing of these elements. However critically, SFNR is the
parameter that describes how much noise these components contribute
to the brain. If you set the noise dict to matched then it will
fit the parameters to match the participant as best as possible.
fit_thresh : float
What proportion of the target parameter value is sufficient
error to warrant finishing fit search.
fit_delta : float
How much are the parameters attenuated during the fitting process,
in terms of the proportion of difference between the target
parameter and the actual parameter
iterations : list, int
The first element is how many steps of fitting the SFNR and SNR
values will be performed. Usually converges after < 5. The
second element is the number of iterations for the AR fitting.
This is much more time consuming (has to make a new timecourse
on each iteration) so be careful about setting this appropriately.
Returns
-------
noise : multidimensional array, float
Generates the noise volume given these parameters | [
"Fit",
"the",
"noise",
"model",
"to",
"match",
"the",
"SFNR",
"and",
"AR",
"of",
"the",
"data"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/fmrisim.py#L2416-L2594 | train | 204,435 |
brainiak/brainiak | brainiak/io.py | load_images_from_dir | def load_images_from_dir(in_dir: Union[str, Path], suffix: str = "nii.gz",
) -> Iterable[SpatialImage]:
"""Load images from directory.
For efficiency, returns an iterator, not a sequence, so the results cannot
be accessed by indexing.
For every new iteration through the images, load_images_from_dir must be
called again.
Parameters
----------
in_dir:
Path to directory.
suffix:
Only load images with names that end like this.
Yields
------
SpatialImage
Image.
"""
if isinstance(in_dir, str):
in_dir = Path(in_dir)
files = sorted(in_dir.glob("*" + suffix))
for f in files:
logger.debug(
'Starting to read file %s', f
)
yield nib.load(str(f)) | python | def load_images_from_dir(in_dir: Union[str, Path], suffix: str = "nii.gz",
) -> Iterable[SpatialImage]:
"""Load images from directory.
For efficiency, returns an iterator, not a sequence, so the results cannot
be accessed by indexing.
For every new iteration through the images, load_images_from_dir must be
called again.
Parameters
----------
in_dir:
Path to directory.
suffix:
Only load images with names that end like this.
Yields
------
SpatialImage
Image.
"""
if isinstance(in_dir, str):
in_dir = Path(in_dir)
files = sorted(in_dir.glob("*" + suffix))
for f in files:
logger.debug(
'Starting to read file %s', f
)
yield nib.load(str(f)) | [
"def",
"load_images_from_dir",
"(",
"in_dir",
":",
"Union",
"[",
"str",
",",
"Path",
"]",
",",
"suffix",
":",
"str",
"=",
"\"nii.gz\"",
",",
")",
"->",
"Iterable",
"[",
"SpatialImage",
"]",
":",
"if",
"isinstance",
"(",
"in_dir",
",",
"str",
")",
":",
... | Load images from directory.
For efficiency, returns an iterator, not a sequence, so the results cannot
be accessed by indexing.
For every new iteration through the images, load_images_from_dir must be
called again.
Parameters
----------
in_dir:
Path to directory.
suffix:
Only load images with names that end like this.
Yields
------
SpatialImage
Image. | [
"Load",
"images",
"from",
"directory",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/io.py#L39-L68 | train | 204,436 |
brainiak/brainiak | brainiak/io.py | load_images | def load_images(image_paths: Iterable[Union[str, Path]]
) -> Iterable[SpatialImage]:
"""Load images from paths.
For efficiency, returns an iterator, not a sequence, so the results cannot
be accessed by indexing.
For every new iteration through the images, load_images must be called
again.
Parameters
----------
image_paths:
Paths to images.
Yields
------
SpatialImage
Image.
"""
for image_path in image_paths:
if isinstance(image_path, Path):
string_path = str(image_path)
else:
string_path = image_path
logger.debug(
'Starting to read file %s', string_path
)
yield nib.load(string_path) | python | def load_images(image_paths: Iterable[Union[str, Path]]
) -> Iterable[SpatialImage]:
"""Load images from paths.
For efficiency, returns an iterator, not a sequence, so the results cannot
be accessed by indexing.
For every new iteration through the images, load_images must be called
again.
Parameters
----------
image_paths:
Paths to images.
Yields
------
SpatialImage
Image.
"""
for image_path in image_paths:
if isinstance(image_path, Path):
string_path = str(image_path)
else:
string_path = image_path
logger.debug(
'Starting to read file %s', string_path
)
yield nib.load(string_path) | [
"def",
"load_images",
"(",
"image_paths",
":",
"Iterable",
"[",
"Union",
"[",
"str",
",",
"Path",
"]",
"]",
")",
"->",
"Iterable",
"[",
"SpatialImage",
"]",
":",
"for",
"image_path",
"in",
"image_paths",
":",
"if",
"isinstance",
"(",
"image_path",
",",
"... | Load images from paths.
For efficiency, returns an iterator, not a sequence, so the results cannot
be accessed by indexing.
For every new iteration through the images, load_images must be called
again.
Parameters
----------
image_paths:
Paths to images.
Yields
------
SpatialImage
Image. | [
"Load",
"images",
"from",
"paths",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/io.py#L71-L99 | train | 204,437 |
brainiak/brainiak | brainiak/io.py | load_boolean_mask | def load_boolean_mask(path: Union[str, Path],
predicate: Callable[[np.ndarray], np.ndarray] = None
) -> np.ndarray:
"""Load boolean nibabel.SpatialImage mask.
Parameters
----------
path
Mask path.
predicate
Callable used to create boolean values, e.g. a threshold function
``lambda x: x > 50``.
Returns
-------
np.ndarray
Boolean array corresponding to mask.
"""
if not isinstance(path, str):
path = str(path)
data = nib.load(path).get_data()
if predicate is not None:
mask = predicate(data)
else:
mask = data.astype(np.bool)
return mask | python | def load_boolean_mask(path: Union[str, Path],
predicate: Callable[[np.ndarray], np.ndarray] = None
) -> np.ndarray:
"""Load boolean nibabel.SpatialImage mask.
Parameters
----------
path
Mask path.
predicate
Callable used to create boolean values, e.g. a threshold function
``lambda x: x > 50``.
Returns
-------
np.ndarray
Boolean array corresponding to mask.
"""
if not isinstance(path, str):
path = str(path)
data = nib.load(path).get_data()
if predicate is not None:
mask = predicate(data)
else:
mask = data.astype(np.bool)
return mask | [
"def",
"load_boolean_mask",
"(",
"path",
":",
"Union",
"[",
"str",
",",
"Path",
"]",
",",
"predicate",
":",
"Callable",
"[",
"[",
"np",
".",
"ndarray",
"]",
",",
"np",
".",
"ndarray",
"]",
"=",
"None",
")",
"->",
"np",
".",
"ndarray",
":",
"if",
... | Load boolean nibabel.SpatialImage mask.
Parameters
----------
path
Mask path.
predicate
Callable used to create boolean values, e.g. a threshold function
``lambda x: x > 50``.
Returns
-------
np.ndarray
Boolean array corresponding to mask. | [
"Load",
"boolean",
"nibabel",
".",
"SpatialImage",
"mask",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/io.py#L102-L127 | train | 204,438 |
brainiak/brainiak | brainiak/io.py | load_labels | def load_labels(path: Union[str, Path]) -> List[SingleConditionSpec]:
"""Load labels files.
Parameters
----------
path
Path of labels file.
Returns
-------
List[SingleConditionSpec]
List of SingleConditionSpec stored in labels file.
"""
condition_specs = np.load(str(path))
return [c.view(SingleConditionSpec) for c in condition_specs] | python | def load_labels(path: Union[str, Path]) -> List[SingleConditionSpec]:
"""Load labels files.
Parameters
----------
path
Path of labels file.
Returns
-------
List[SingleConditionSpec]
List of SingleConditionSpec stored in labels file.
"""
condition_specs = np.load(str(path))
return [c.view(SingleConditionSpec) for c in condition_specs] | [
"def",
"load_labels",
"(",
"path",
":",
"Union",
"[",
"str",
",",
"Path",
"]",
")",
"->",
"List",
"[",
"SingleConditionSpec",
"]",
":",
"condition_specs",
"=",
"np",
".",
"load",
"(",
"str",
"(",
"path",
")",
")",
"return",
"[",
"c",
".",
"view",
"... | Load labels files.
Parameters
----------
path
Path of labels file.
Returns
-------
List[SingleConditionSpec]
List of SingleConditionSpec stored in labels file. | [
"Load",
"labels",
"files",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/io.py#L130-L144 | train | 204,439 |
brainiak/brainiak | brainiak/io.py | save_as_nifti_file | def save_as_nifti_file(data: np.ndarray, affine: np.ndarray,
path: Union[str, Path]) -> None:
"""Create a Nifti file and save it.
Parameters
----------
data
Brain data.
affine
Affine of the image, usually inherited from an existing image.
path
Output filename.
"""
if not isinstance(path, str):
path = str(path)
img = Nifti1Pair(data, affine)
nib.nifti1.save(img, path) | python | def save_as_nifti_file(data: np.ndarray, affine: np.ndarray,
path: Union[str, Path]) -> None:
"""Create a Nifti file and save it.
Parameters
----------
data
Brain data.
affine
Affine of the image, usually inherited from an existing image.
path
Output filename.
"""
if not isinstance(path, str):
path = str(path)
img = Nifti1Pair(data, affine)
nib.nifti1.save(img, path) | [
"def",
"save_as_nifti_file",
"(",
"data",
":",
"np",
".",
"ndarray",
",",
"affine",
":",
"np",
".",
"ndarray",
",",
"path",
":",
"Union",
"[",
"str",
",",
"Path",
"]",
")",
"->",
"None",
":",
"if",
"not",
"isinstance",
"(",
"path",
",",
"str",
")",... | Create a Nifti file and save it.
Parameters
----------
data
Brain data.
affine
Affine of the image, usually inherited from an existing image.
path
Output filename. | [
"Create",
"a",
"Nifti",
"file",
"and",
"save",
"it",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/io.py#L147-L163 | train | 204,440 |
brainiak/brainiak | brainiak/factoranalysis/htfa.py | HTFA._mse_converged | def _mse_converged(self):
"""Check convergence based on mean squared difference between
prior and posterior
Returns
-------
converged : boolean
Whether the parameter estimation converged.
mse : float
Mean squared error between prior and posterior.
"""
prior = self.global_prior_[0:self.prior_size]
posterior = self.global_posterior_[0:self.prior_size]
mse = mean_squared_error(prior, posterior,
multioutput='uniform_average')
if mse > self.threshold:
return False, mse
else:
return True, mse | python | def _mse_converged(self):
"""Check convergence based on mean squared difference between
prior and posterior
Returns
-------
converged : boolean
Whether the parameter estimation converged.
mse : float
Mean squared error between prior and posterior.
"""
prior = self.global_prior_[0:self.prior_size]
posterior = self.global_posterior_[0:self.prior_size]
mse = mean_squared_error(prior, posterior,
multioutput='uniform_average')
if mse > self.threshold:
return False, mse
else:
return True, mse | [
"def",
"_mse_converged",
"(",
"self",
")",
":",
"prior",
"=",
"self",
".",
"global_prior_",
"[",
"0",
":",
"self",
".",
"prior_size",
"]",
"posterior",
"=",
"self",
".",
"global_posterior_",
"[",
"0",
":",
"self",
".",
"prior_size",
"]",
"mse",
"=",
"m... | Check convergence based on mean squared difference between
prior and posterior
Returns
-------
converged : boolean
Whether the parameter estimation converged.
mse : float
Mean squared error between prior and posterior. | [
"Check",
"convergence",
"based",
"on",
"mean",
"squared",
"difference",
"between",
"prior",
"and",
"posterior"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/htfa.py#L222-L244 | train | 204,441 |
brainiak/brainiak | brainiak/factoranalysis/htfa.py | HTFA._get_gather_offset | def _get_gather_offset(self, size):
"""Calculate the offset for gather result from this process
Parameters
----------
size : int
The total number of process.
Returns
-------
tuple_size : tuple_int
Number of elements to send from each process
(one integer for each process)
tuple_offset : tuple_int
Number of elements away from the first element
in the array at which to begin the new, segmented
array for a process
(one integer for each process)
subject_map : dictionary
Mapping between global subject id to local id
"""
gather_size = np.zeros(size).astype(int)
gather_offset = np.zeros(size).astype(int)
num_local_subjs = np.zeros(size).astype(int)
subject_map = {}
for idx, s in enumerate(np.arange(self.n_subj)):
cur_rank = idx % size
gather_size[cur_rank] += self.prior_size
subject_map[idx] = (cur_rank, num_local_subjs[cur_rank])
num_local_subjs[cur_rank] += 1
for idx in np.arange(size - 1) + 1:
gather_offset[idx] = gather_offset[idx - 1] + gather_size[idx - 1]
tuple_size = tuple(gather_size)
tuple_offset = tuple(gather_offset)
return tuple_size, tuple_offset, subject_map | python | def _get_gather_offset(self, size):
"""Calculate the offset for gather result from this process
Parameters
----------
size : int
The total number of process.
Returns
-------
tuple_size : tuple_int
Number of elements to send from each process
(one integer for each process)
tuple_offset : tuple_int
Number of elements away from the first element
in the array at which to begin the new, segmented
array for a process
(one integer for each process)
subject_map : dictionary
Mapping between global subject id to local id
"""
gather_size = np.zeros(size).astype(int)
gather_offset = np.zeros(size).astype(int)
num_local_subjs = np.zeros(size).astype(int)
subject_map = {}
for idx, s in enumerate(np.arange(self.n_subj)):
cur_rank = idx % size
gather_size[cur_rank] += self.prior_size
subject_map[idx] = (cur_rank, num_local_subjs[cur_rank])
num_local_subjs[cur_rank] += 1
for idx in np.arange(size - 1) + 1:
gather_offset[idx] = gather_offset[idx - 1] + gather_size[idx - 1]
tuple_size = tuple(gather_size)
tuple_offset = tuple(gather_offset)
return tuple_size, tuple_offset, subject_map | [
"def",
"_get_gather_offset",
"(",
"self",
",",
"size",
")",
":",
"gather_size",
"=",
"np",
".",
"zeros",
"(",
"size",
")",
".",
"astype",
"(",
"int",
")",
"gather_offset",
"=",
"np",
".",
"zeros",
"(",
"size",
")",
".",
"astype",
"(",
"int",
")",
"... | Calculate the offset for gather result from this process
Parameters
----------
size : int
The total number of process.
Returns
-------
tuple_size : tuple_int
Number of elements to send from each process
(one integer for each process)
tuple_offset : tuple_int
Number of elements away from the first element
in the array at which to begin the new, segmented
array for a process
(one integer for each process)
subject_map : dictionary
Mapping between global subject id to local id | [
"Calculate",
"the",
"offset",
"for",
"gather",
"result",
"from",
"this",
"process"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/htfa.py#L343-L386 | train | 204,442 |
brainiak/brainiak | brainiak/factoranalysis/htfa.py | HTFA._get_weight_size | def _get_weight_size(self, data, n_local_subj):
"""Calculate the size of weight for this process
Parameters
----------
data : a list of 2D array, each in shape [n_voxel, n_tr]
The fMRI data from multi-subject.
n_local_subj : int
Number of subjects allocated to this process.
Returns
-------
weight_size : 1D array
The size of total subject weight on this process.
local_weight_offset : 1D array
Number of elements away from the first element
in the combined weight array at which to begin
the new, segmented array for a subject
"""
weight_size = np.zeros(1).astype(int)
local_weight_offset = np.zeros(n_local_subj).astype(int)
for idx, subj_data in enumerate(data):
if idx > 0:
local_weight_offset[idx] = weight_size[0]
weight_size[0] += self.K * subj_data.shape[1]
return weight_size, local_weight_offset | python | def _get_weight_size(self, data, n_local_subj):
"""Calculate the size of weight for this process
Parameters
----------
data : a list of 2D array, each in shape [n_voxel, n_tr]
The fMRI data from multi-subject.
n_local_subj : int
Number of subjects allocated to this process.
Returns
-------
weight_size : 1D array
The size of total subject weight on this process.
local_weight_offset : 1D array
Number of elements away from the first element
in the combined weight array at which to begin
the new, segmented array for a subject
"""
weight_size = np.zeros(1).astype(int)
local_weight_offset = np.zeros(n_local_subj).astype(int)
for idx, subj_data in enumerate(data):
if idx > 0:
local_weight_offset[idx] = weight_size[0]
weight_size[0] += self.K * subj_data.shape[1]
return weight_size, local_weight_offset | [
"def",
"_get_weight_size",
"(",
"self",
",",
"data",
",",
"n_local_subj",
")",
":",
"weight_size",
"=",
"np",
".",
"zeros",
"(",
"1",
")",
".",
"astype",
"(",
"int",
")",
"local_weight_offset",
"=",
"np",
".",
"zeros",
"(",
"n_local_subj",
")",
".",
"a... | Calculate the size of weight for this process
Parameters
----------
data : a list of 2D array, each in shape [n_voxel, n_tr]
The fMRI data from multi-subject.
n_local_subj : int
Number of subjects allocated to this process.
Returns
-------
weight_size : 1D array
The size of total subject weight on this process.
local_weight_offset : 1D array
Number of elements away from the first element
in the combined weight array at which to begin
the new, segmented array for a subject | [
"Calculate",
"the",
"size",
"of",
"weight",
"for",
"this",
"process"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/htfa.py#L388-L420 | train | 204,443 |
brainiak/brainiak | brainiak/factoranalysis/htfa.py | HTFA._get_subject_info | def _get_subject_info(self, n_local_subj, data):
"""Calculate metadata for subjects allocated to this process
Parameters
----------
n_local_subj : int
Number of subjects allocated to this process.
data : list of 2D array. Each in shape [n_voxel, n_tr]
Total number of MPI process.
Returns
-------
max_sample_tr : 1D array
Maximum number of TR to subsample for each subject
max_sample_voxel : 1D array
Maximum number of voxel to subsample for each subject
"""
max_sample_tr = np.zeros(n_local_subj).astype(int)
max_sample_voxel = np.zeros(n_local_subj).astype(int)
for idx in np.arange(n_local_subj):
nvoxel = data[idx].shape[0]
ntr = data[idx].shape[1]
max_sample_voxel[idx] =\
min(self.max_voxel, int(self.voxel_ratio * nvoxel))
max_sample_tr[idx] = min(self.max_tr, int(self.tr_ratio * ntr))
return max_sample_tr, max_sample_voxel | python | def _get_subject_info(self, n_local_subj, data):
"""Calculate metadata for subjects allocated to this process
Parameters
----------
n_local_subj : int
Number of subjects allocated to this process.
data : list of 2D array. Each in shape [n_voxel, n_tr]
Total number of MPI process.
Returns
-------
max_sample_tr : 1D array
Maximum number of TR to subsample for each subject
max_sample_voxel : 1D array
Maximum number of voxel to subsample for each subject
"""
max_sample_tr = np.zeros(n_local_subj).astype(int)
max_sample_voxel = np.zeros(n_local_subj).astype(int)
for idx in np.arange(n_local_subj):
nvoxel = data[idx].shape[0]
ntr = data[idx].shape[1]
max_sample_voxel[idx] =\
min(self.max_voxel, int(self.voxel_ratio * nvoxel))
max_sample_tr[idx] = min(self.max_tr, int(self.tr_ratio * ntr))
return max_sample_tr, max_sample_voxel | [
"def",
"_get_subject_info",
"(",
"self",
",",
"n_local_subj",
",",
"data",
")",
":",
"max_sample_tr",
"=",
"np",
".",
"zeros",
"(",
"n_local_subj",
")",
".",
"astype",
"(",
"int",
")",
"max_sample_voxel",
"=",
"np",
".",
"zeros",
"(",
"n_local_subj",
")",
... | Calculate metadata for subjects allocated to this process
Parameters
----------
n_local_subj : int
Number of subjects allocated to this process.
data : list of 2D array. Each in shape [n_voxel, n_tr]
Total number of MPI process.
Returns
-------
max_sample_tr : 1D array
Maximum number of TR to subsample for each subject
max_sample_voxel : 1D array
Maximum number of voxel to subsample for each subject | [
"Calculate",
"metadata",
"for",
"subjects",
"allocated",
"to",
"this",
"process"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/htfa.py#L422-L453 | train | 204,444 |
brainiak/brainiak | brainiak/factoranalysis/htfa.py | HTFA._get_mpi_info | def _get_mpi_info(self):
"""get basic MPI info
Returns
-------
comm : Intracomm
Returns MPI communication group
rank : integer
Returns the rank of this process
size : integer
Returns total number of processes
"""
rank = self.comm.Get_rank()
size = self.comm.Get_size()
return rank, size | python | def _get_mpi_info(self):
"""get basic MPI info
Returns
-------
comm : Intracomm
Returns MPI communication group
rank : integer
Returns the rank of this process
size : integer
Returns total number of processes
"""
rank = self.comm.Get_rank()
size = self.comm.Get_size()
return rank, size | [
"def",
"_get_mpi_info",
"(",
"self",
")",
":",
"rank",
"=",
"self",
".",
"comm",
".",
"Get_rank",
"(",
")",
"size",
"=",
"self",
".",
"comm",
".",
"Get_size",
"(",
")",
"return",
"rank",
",",
"size"
] | get basic MPI info
Returns
-------
comm : Intracomm
Returns MPI communication group
rank : integer
Returns the rank of this process
size : integer
Returns total number of processes | [
"get",
"basic",
"MPI",
"info"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/htfa.py#L455-L473 | train | 204,445 |
brainiak/brainiak | brainiak/factoranalysis/htfa.py | HTFA._init_prior_posterior | def _init_prior_posterior(self, rank, R, n_local_subj):
"""set prior for this subject
Parameters
----------
rank : integer
The rank of this process
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
n_local_subj : integer
The number of subjects allocated to this process.
Returns
-------
HTFA
Returns the instance itself.
"""
if rank == 0:
idx = np.random.choice(n_local_subj, 1)
self.global_prior_, self.global_centers_cov,\
self.global_widths_var = self.get_template(R[idx[0]])
self.global_centers_cov_scaled =\
self.global_centers_cov / float(self.n_subj)
self.global_widths_var_scaled =\
self.global_widths_var / float(self.n_subj)
self.gather_posterior = np.zeros(self.n_subj * self.prior_size)
self.global_posterior_ = np.zeros(self.prior_size)
else:
self.global_prior_ = np.zeros(self.prior_bcast_size)
self.global_posterior_ = None
self.gather_posterior = None
return self | python | def _init_prior_posterior(self, rank, R, n_local_subj):
"""set prior for this subject
Parameters
----------
rank : integer
The rank of this process
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
n_local_subj : integer
The number of subjects allocated to this process.
Returns
-------
HTFA
Returns the instance itself.
"""
if rank == 0:
idx = np.random.choice(n_local_subj, 1)
self.global_prior_, self.global_centers_cov,\
self.global_widths_var = self.get_template(R[idx[0]])
self.global_centers_cov_scaled =\
self.global_centers_cov / float(self.n_subj)
self.global_widths_var_scaled =\
self.global_widths_var / float(self.n_subj)
self.gather_posterior = np.zeros(self.n_subj * self.prior_size)
self.global_posterior_ = np.zeros(self.prior_size)
else:
self.global_prior_ = np.zeros(self.prior_bcast_size)
self.global_posterior_ = None
self.gather_posterior = None
return self | [
"def",
"_init_prior_posterior",
"(",
"self",
",",
"rank",
",",
"R",
",",
"n_local_subj",
")",
":",
"if",
"rank",
"==",
"0",
":",
"idx",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"n_local_subj",
",",
"1",
")",
"self",
".",
"global_prior_",
",",
"s... | set prior for this subject
Parameters
----------
rank : integer
The rank of this process
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
n_local_subj : integer
The number of subjects allocated to this process.
Returns
-------
HTFA
Returns the instance itself. | [
"set",
"prior",
"for",
"this",
"subject"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/htfa.py#L475-L513 | train | 204,446 |
brainiak/brainiak | brainiak/factoranalysis/htfa.py | HTFA._assign_posterior | def _assign_posterior(self):
"""assign posterior to the right prior based on
Hungarian algorithm
Returns
-------
HTFA
Returns the instance itself.
"""
prior_centers = self.get_centers(self.global_prior_)
posterior_centers = self.get_centers(self.global_posterior_)
posterior_widths = self.get_widths(self.global_posterior_)
posterior_centers_mean_cov =\
self.get_centers_mean_cov(self.global_posterior_)
posterior_widths_mean_var =\
self.get_widths_mean_var(self.global_posterior_)
# linear assignment on centers
cost = distance.cdist(prior_centers, posterior_centers, 'euclidean')
_, col_ind = linear_sum_assignment(cost)
# reorder centers/widths based on cost assignment
self.set_centers(self.global_posterior_, posterior_centers)
self.set_widths(self.global_posterior_, posterior_widths)
# reorder cov/var based on cost assignment
self.set_centers_mean_cov(
self.global_posterior_,
posterior_centers_mean_cov[col_ind])
self.set_widths_mean_var(
self.global_posterior_,
posterior_widths_mean_var[col_ind])
return self | python | def _assign_posterior(self):
"""assign posterior to the right prior based on
Hungarian algorithm
Returns
-------
HTFA
Returns the instance itself.
"""
prior_centers = self.get_centers(self.global_prior_)
posterior_centers = self.get_centers(self.global_posterior_)
posterior_widths = self.get_widths(self.global_posterior_)
posterior_centers_mean_cov =\
self.get_centers_mean_cov(self.global_posterior_)
posterior_widths_mean_var =\
self.get_widths_mean_var(self.global_posterior_)
# linear assignment on centers
cost = distance.cdist(prior_centers, posterior_centers, 'euclidean')
_, col_ind = linear_sum_assignment(cost)
# reorder centers/widths based on cost assignment
self.set_centers(self.global_posterior_, posterior_centers)
self.set_widths(self.global_posterior_, posterior_widths)
# reorder cov/var based on cost assignment
self.set_centers_mean_cov(
self.global_posterior_,
posterior_centers_mean_cov[col_ind])
self.set_widths_mean_var(
self.global_posterior_,
posterior_widths_mean_var[col_ind])
return self | [
"def",
"_assign_posterior",
"(",
"self",
")",
":",
"prior_centers",
"=",
"self",
".",
"get_centers",
"(",
"self",
".",
"global_prior_",
")",
"posterior_centers",
"=",
"self",
".",
"get_centers",
"(",
"self",
".",
"global_posterior_",
")",
"posterior_widths",
"="... | assign posterior to the right prior based on
Hungarian algorithm
Returns
-------
HTFA
Returns the instance itself. | [
"assign",
"posterior",
"to",
"the",
"right",
"prior",
"based",
"on",
"Hungarian",
"algorithm"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/htfa.py#L560-L590 | train | 204,447 |
brainiak/brainiak | brainiak/factoranalysis/htfa.py | HTFA._update_global_posterior | def _update_global_posterior(
self, rank, m, outer_converged):
"""Update global posterior and then check convergence
Parameters
----------
rank : integer
The rank of current process.
m : integer
The outer iteration number of HTFA.
outer_converged : 1D array
Record whether HTFA loop converged
Returns
-------
1D array, contains only 1 element for MPI
1 means HTFA converged, 0 means not converged.
"""
if rank == 0:
self._map_update_posterior()
self._assign_posterior()
is_converged, _ = self._converged()
if is_converged:
logger.info("converged at %d outer iter" % (m))
outer_converged[0] = 1
else:
self.global_prior_ = self.global_posterior_
return outer_converged | python | def _update_global_posterior(
self, rank, m, outer_converged):
"""Update global posterior and then check convergence
Parameters
----------
rank : integer
The rank of current process.
m : integer
The outer iteration number of HTFA.
outer_converged : 1D array
Record whether HTFA loop converged
Returns
-------
1D array, contains only 1 element for MPI
1 means HTFA converged, 0 means not converged.
"""
if rank == 0:
self._map_update_posterior()
self._assign_posterior()
is_converged, _ = self._converged()
if is_converged:
logger.info("converged at %d outer iter" % (m))
outer_converged[0] = 1
else:
self.global_prior_ = self.global_posterior_
return outer_converged | [
"def",
"_update_global_posterior",
"(",
"self",
",",
"rank",
",",
"m",
",",
"outer_converged",
")",
":",
"if",
"rank",
"==",
"0",
":",
"self",
".",
"_map_update_posterior",
"(",
")",
"self",
".",
"_assign_posterior",
"(",
")",
"is_converged",
",",
"_",
"="... | Update global posterior and then check convergence
Parameters
----------
rank : integer
The rank of current process.
m : integer
The outer iteration number of HTFA.
outer_converged : 1D array
Record whether HTFA loop converged
Returns
-------
1D array, contains only 1 element for MPI
1 means HTFA converged, 0 means not converged. | [
"Update",
"global",
"posterior",
"and",
"then",
"check",
"convergence"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/htfa.py#L592-L624 | train | 204,448 |
brainiak/brainiak | brainiak/factoranalysis/htfa.py | HTFA._update_weight | def _update_weight(self, data, R, n_local_subj, local_weight_offset):
"""update local weight
Parameters
----------
data : list of 2D array, element i has shape=[n_voxel, n_tr]
Subjects' fMRI data.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
n_local_subj : integer
Number of subjects allocated to this process.
local_weight_offset : 1D array
Offset of each subject's weights on this process.
Returns
-------
HTFA
Returns the instance itself.
"""
for s, subj_data in enumerate(data):
base = s * self.prior_size
centers = self.local_posterior_[base:base + self.K * self.n_dim]\
.reshape((self.K, self.n_dim))
start_idx = base + self.K * self.n_dim
end_idx = base + self.prior_size
widths = self.local_posterior_[start_idx:end_idx]\
.reshape((self.K, 1))
unique_R, inds = self.get_unique_R(R[s])
F = self.get_factors(unique_R, inds, centers, widths)
start_idx = local_weight_offset[s]
if s == n_local_subj - 1:
self.local_weights_[start_idx:] =\
self.get_weights(subj_data, F).ravel()
else:
end_idx = local_weight_offset[s + 1]
self.local_weights_[start_idx:end_idx] =\
self.get_weights(subj_data, F).ravel()
return self | python | def _update_weight(self, data, R, n_local_subj, local_weight_offset):
"""update local weight
Parameters
----------
data : list of 2D array, element i has shape=[n_voxel, n_tr]
Subjects' fMRI data.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
n_local_subj : integer
Number of subjects allocated to this process.
local_weight_offset : 1D array
Offset of each subject's weights on this process.
Returns
-------
HTFA
Returns the instance itself.
"""
for s, subj_data in enumerate(data):
base = s * self.prior_size
centers = self.local_posterior_[base:base + self.K * self.n_dim]\
.reshape((self.K, self.n_dim))
start_idx = base + self.K * self.n_dim
end_idx = base + self.prior_size
widths = self.local_posterior_[start_idx:end_idx]\
.reshape((self.K, 1))
unique_R, inds = self.get_unique_R(R[s])
F = self.get_factors(unique_R, inds, centers, widths)
start_idx = local_weight_offset[s]
if s == n_local_subj - 1:
self.local_weights_[start_idx:] =\
self.get_weights(subj_data, F).ravel()
else:
end_idx = local_weight_offset[s + 1]
self.local_weights_[start_idx:end_idx] =\
self.get_weights(subj_data, F).ravel()
return self | [
"def",
"_update_weight",
"(",
"self",
",",
"data",
",",
"R",
",",
"n_local_subj",
",",
"local_weight_offset",
")",
":",
"for",
"s",
",",
"subj_data",
"in",
"enumerate",
"(",
"data",
")",
":",
"base",
"=",
"s",
"*",
"self",
".",
"prior_size",
"centers",
... | update local weight
Parameters
----------
data : list of 2D array, element i has shape=[n_voxel, n_tr]
Subjects' fMRI data.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
n_local_subj : integer
Number of subjects allocated to this process.
local_weight_offset : 1D array
Offset of each subject's weights on this process.
Returns
-------
HTFA
Returns the instance itself. | [
"update",
"local",
"weight"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/htfa.py#L626-L670 | train | 204,449 |
brainiak/brainiak | brainiak/factoranalysis/htfa.py | HTFA._fit_htfa | def _fit_htfa(self, data, R):
"""HTFA main algorithm
Parameters
----------
data : list of 2D array. Each in shape [n_voxel, n_tr]
The fMRI data from multiple subjects.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
Returns
-------
HTFA
Returns the instance itself.
"""
rank, size = self._get_mpi_info()
use_gather = True if self.n_subj % size == 0 else False
n_local_subj = len(R)
max_sample_tr, max_sample_voxel =\
self._get_subject_info(n_local_subj, data)
tfa = []
# init tfa for each subject
for s, subj_data in enumerate(data):
tfa.append(TFA(
max_iter=self.max_local_iter,
threshold=self.threshold,
K=self.K,
nlss_method=self.nlss_method,
nlss_loss=self.nlss_loss,
x_scale=self.x_scale,
tr_solver=self.tr_solver,
weight_method=self.weight_method,
upper_ratio=self.upper_ratio,
lower_ratio=self.lower_ratio,
verbose=self.verbose,
max_num_tr=max_sample_tr[s],
max_num_voxel=max_sample_voxel[s]))
# map data to processes
gather_size, gather_offset, subject_map =\
self._get_gather_offset(size)
self.local_posterior_ = np.zeros(n_local_subj * self.prior_size)
self._init_prior_posterior(rank, R, n_local_subj)
node_weight_size, local_weight_offset =\
self._get_weight_size(data, n_local_subj)
self.local_weights_ = np.zeros(node_weight_size[0])
m = 0
outer_converged = np.array([0])
while m < self.max_global_iter and not outer_converged[0]:
if(self.verbose):
logger.info("HTFA global iter %d " % (m))
# root broadcast first 4 fields of global_prior to all nodes
self.comm.Bcast(self.global_prior_, root=0)
# each node loop over its data
for s, subj_data in enumerate(data):
# update tfa with current local prior
tfa[s].set_prior(self.global_prior_[0:self.prior_size].copy())
tfa[s].set_seed(m * self.max_local_iter)
tfa[s].fit(
subj_data,
R=R[s],
template_prior=self.global_prior_.copy())
tfa[s]._assign_posterior()
start_idx = s * self.prior_size
end_idx = (s + 1) * self.prior_size
self.local_posterior_[start_idx:end_idx] =\
tfa[s].local_posterior_
self._gather_local_posterior(
use_gather,
gather_size,
gather_offset)
# root updates global_posterior
outer_converged =\
self._update_global_posterior(rank, m, outer_converged)
self.comm.Bcast(outer_converged, root=0)
m += 1
# update weight matrix for each subject
self._update_weight(
data,
R,
n_local_subj,
local_weight_offset)
return self | python | def _fit_htfa(self, data, R):
"""HTFA main algorithm
Parameters
----------
data : list of 2D array. Each in shape [n_voxel, n_tr]
The fMRI data from multiple subjects.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
Returns
-------
HTFA
Returns the instance itself.
"""
rank, size = self._get_mpi_info()
use_gather = True if self.n_subj % size == 0 else False
n_local_subj = len(R)
max_sample_tr, max_sample_voxel =\
self._get_subject_info(n_local_subj, data)
tfa = []
# init tfa for each subject
for s, subj_data in enumerate(data):
tfa.append(TFA(
max_iter=self.max_local_iter,
threshold=self.threshold,
K=self.K,
nlss_method=self.nlss_method,
nlss_loss=self.nlss_loss,
x_scale=self.x_scale,
tr_solver=self.tr_solver,
weight_method=self.weight_method,
upper_ratio=self.upper_ratio,
lower_ratio=self.lower_ratio,
verbose=self.verbose,
max_num_tr=max_sample_tr[s],
max_num_voxel=max_sample_voxel[s]))
# map data to processes
gather_size, gather_offset, subject_map =\
self._get_gather_offset(size)
self.local_posterior_ = np.zeros(n_local_subj * self.prior_size)
self._init_prior_posterior(rank, R, n_local_subj)
node_weight_size, local_weight_offset =\
self._get_weight_size(data, n_local_subj)
self.local_weights_ = np.zeros(node_weight_size[0])
m = 0
outer_converged = np.array([0])
while m < self.max_global_iter and not outer_converged[0]:
if(self.verbose):
logger.info("HTFA global iter %d " % (m))
# root broadcast first 4 fields of global_prior to all nodes
self.comm.Bcast(self.global_prior_, root=0)
# each node loop over its data
for s, subj_data in enumerate(data):
# update tfa with current local prior
tfa[s].set_prior(self.global_prior_[0:self.prior_size].copy())
tfa[s].set_seed(m * self.max_local_iter)
tfa[s].fit(
subj_data,
R=R[s],
template_prior=self.global_prior_.copy())
tfa[s]._assign_posterior()
start_idx = s * self.prior_size
end_idx = (s + 1) * self.prior_size
self.local_posterior_[start_idx:end_idx] =\
tfa[s].local_posterior_
self._gather_local_posterior(
use_gather,
gather_size,
gather_offset)
# root updates global_posterior
outer_converged =\
self._update_global_posterior(rank, m, outer_converged)
self.comm.Bcast(outer_converged, root=0)
m += 1
# update weight matrix for each subject
self._update_weight(
data,
R,
n_local_subj,
local_weight_offset)
return self | [
"def",
"_fit_htfa",
"(",
"self",
",",
"data",
",",
"R",
")",
":",
"rank",
",",
"size",
"=",
"self",
".",
"_get_mpi_info",
"(",
")",
"use_gather",
"=",
"True",
"if",
"self",
".",
"n_subj",
"%",
"size",
"==",
"0",
"else",
"False",
"n_local_subj",
"=",
... | HTFA main algorithm
Parameters
----------
data : list of 2D array. Each in shape [n_voxel, n_tr]
The fMRI data from multiple subjects.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
Returns
-------
HTFA
Returns the instance itself. | [
"HTFA",
"main",
"algorithm"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/htfa.py#L672-L764 | train | 204,450 |
brainiak/brainiak | brainiak/factoranalysis/htfa.py | HTFA._check_input | def _check_input(self, X, R):
"""Check whether input data and coordinates in right type
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
Returns
-------
HTFA
Returns the instance itself.
"""
# Check data type
if not isinstance(X, list):
raise TypeError("Input data should be a list")
if not isinstance(R, list):
raise TypeError("Coordinates should be a list")
# Check the number of subjects
if len(X) < 1:
raise ValueError("Need at leat one subject to train the model.\
Got {0:d}".format(len(X)))
for idx, x in enumerate(X):
if not isinstance(x, np.ndarray):
raise TypeError("Each subject data should be an array")
if x.ndim != 2:
raise TypeError("Each subject data should be 2D array")
if not isinstance(R[idx], np.ndarray):
raise TypeError(
"Each scanner coordinate matrix should be an array")
if R[idx].ndim != 2:
raise TypeError(
"Each scanner coordinate matrix should be 2D array")
if x.shape[0] != R[idx].shape[0]:
raise TypeError(
"n_voxel should be the same in X[idx] and R[idx]")
return self | python | def _check_input(self, X, R):
"""Check whether input data and coordinates in right type
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
Returns
-------
HTFA
Returns the instance itself.
"""
# Check data type
if not isinstance(X, list):
raise TypeError("Input data should be a list")
if not isinstance(R, list):
raise TypeError("Coordinates should be a list")
# Check the number of subjects
if len(X) < 1:
raise ValueError("Need at leat one subject to train the model.\
Got {0:d}".format(len(X)))
for idx, x in enumerate(X):
if not isinstance(x, np.ndarray):
raise TypeError("Each subject data should be an array")
if x.ndim != 2:
raise TypeError("Each subject data should be 2D array")
if not isinstance(R[idx], np.ndarray):
raise TypeError(
"Each scanner coordinate matrix should be an array")
if R[idx].ndim != 2:
raise TypeError(
"Each scanner coordinate matrix should be 2D array")
if x.shape[0] != R[idx].shape[0]:
raise TypeError(
"n_voxel should be the same in X[idx] and R[idx]")
return self | [
"def",
"_check_input",
"(",
"self",
",",
"X",
",",
"R",
")",
":",
"# Check data type",
"if",
"not",
"isinstance",
"(",
"X",
",",
"list",
")",
":",
"raise",
"TypeError",
"(",
"\"Input data should be a list\"",
")",
"if",
"not",
"isinstance",
"(",
"R",
",",
... | Check whether input data and coordinates in right type
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
Returns
-------
HTFA
Returns the instance itself. | [
"Check",
"whether",
"input",
"data",
"and",
"coordinates",
"in",
"right",
"type"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/htfa.py#L766-L809 | train | 204,451 |
brainiak/brainiak | brainiak/hyperparamopt/hpo.py | get_sigma | def get_sigma(x, min_limit=-np.inf, max_limit=np.inf):
"""Compute the standard deviations around the points for a 1D GMM.
We take the distance from the nearest left and right neighbors
for each point, then use the max as the estimate of standard
deviation for the gaussian mixture around that point.
Arguments
---------
x : 1D array
Set of points to create the GMM
min_limit : Optional[float], default : -inf
Minimum limit for the distribution
max_limit : Optional[float], default : inf
maximum limit for the distribution
Returns
-------
1D array
Array of standard deviations
"""
z = np.append(x, [min_limit, max_limit])
sigma = np.ones(x.shape)
for i in range(x.size):
# Calculate the nearest left neighbor of x[i]
# Find the minimum of (x[i] - k) for k < x[i]
xleft = z[np.argmin([(x[i] - k) if k < x[i] else np.inf for k in z])]
# Calculate the nearest right neighbor of x[i]
# Find the minimum of (k - x[i]) for k > x[i]
xright = z[np.argmin([(k - x[i]) if k > x[i] else np.inf for k in z])]
sigma[i] = max(x[i] - xleft, xright - x[i])
if sigma[i] == np.inf:
sigma[i] = min(x[i] - xleft, xright - x[i])
if (sigma[i] == -np.inf): # should never happen
sigma[i] = 1.0
return sigma | python | def get_sigma(x, min_limit=-np.inf, max_limit=np.inf):
"""Compute the standard deviations around the points for a 1D GMM.
We take the distance from the nearest left and right neighbors
for each point, then use the max as the estimate of standard
deviation for the gaussian mixture around that point.
Arguments
---------
x : 1D array
Set of points to create the GMM
min_limit : Optional[float], default : -inf
Minimum limit for the distribution
max_limit : Optional[float], default : inf
maximum limit for the distribution
Returns
-------
1D array
Array of standard deviations
"""
z = np.append(x, [min_limit, max_limit])
sigma = np.ones(x.shape)
for i in range(x.size):
# Calculate the nearest left neighbor of x[i]
# Find the minimum of (x[i] - k) for k < x[i]
xleft = z[np.argmin([(x[i] - k) if k < x[i] else np.inf for k in z])]
# Calculate the nearest right neighbor of x[i]
# Find the minimum of (k - x[i]) for k > x[i]
xright = z[np.argmin([(k - x[i]) if k > x[i] else np.inf for k in z])]
sigma[i] = max(x[i] - xleft, xright - x[i])
if sigma[i] == np.inf:
sigma[i] = min(x[i] - xleft, xright - x[i])
if (sigma[i] == -np.inf): # should never happen
sigma[i] = 1.0
return sigma | [
"def",
"get_sigma",
"(",
"x",
",",
"min_limit",
"=",
"-",
"np",
".",
"inf",
",",
"max_limit",
"=",
"np",
".",
"inf",
")",
":",
"z",
"=",
"np",
".",
"append",
"(",
"x",
",",
"[",
"min_limit",
",",
"max_limit",
"]",
")",
"sigma",
"=",
"np",
".",
... | Compute the standard deviations around the points for a 1D GMM.
We take the distance from the nearest left and right neighbors
for each point, then use the max as the estimate of standard
deviation for the gaussian mixture around that point.
Arguments
---------
x : 1D array
Set of points to create the GMM
min_limit : Optional[float], default : -inf
Minimum limit for the distribution
max_limit : Optional[float], default : inf
maximum limit for the distribution
Returns
-------
1D array
Array of standard deviations | [
"Compute",
"the",
"standard",
"deviations",
"around",
"the",
"points",
"for",
"a",
"1D",
"GMM",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/hyperparamopt/hpo.py#L46-L86 | train | 204,452 |
brainiak/brainiak | brainiak/hyperparamopt/hpo.py | get_next_sample | def get_next_sample(x, y, min_limit=-np.inf, max_limit=np.inf):
"""Get the next point to try, given the previous samples.
We use [Bergstra2013]_ to compute the point that gives the largest
Expected improvement (EI) in the optimization function. This model fits 2
different GMMs - one for points that have loss values in the bottom 15%
and another for the rest. Then we sample from the former distribution
and estimate EI as the ratio of the likelihoods of the 2 distributions.
We pick the point with the best EI among the samples that is also not
very close to a point we have sampled earlier.
Arguments
---------
x : 1D array
Samples generated from the distribution so far
y : 1D array
Loss values at the corresponding samples
min_limit : float, default : -inf
Minimum limit for the distribution
max_limit : float, default : +inf
Maximum limit for the distribution
Returns
-------
float
Next value to use for HPO
"""
z = np.array(list(zip(x, y)), dtype=np.dtype([('x', float), ('y', float)]))
z = np.sort(z, order='y')
n = y.shape[0]
g = int(np.round(np.ceil(0.15 * n)))
ldata = z[0:g]
gdata = z[g:n]
lymin = ldata['y'].min()
lymax = ldata['y'].max()
weights = (lymax - ldata['y']) / (lymax - lymin)
lx = gmm_1d_distribution(ldata['x'], min_limit=min_limit,
max_limit=max_limit, weights=weights)
gx = gmm_1d_distribution(gdata['x'], min_limit=min_limit,
max_limit=max_limit)
samples = lx.get_samples(n=1000)
ei = lx(samples) / gx(samples)
h = (x.max() - x.min()) / (10 * x.size)
# TODO
# assumes prior of x is uniform; should ideally change for other priors
# d = np.abs(x - samples[ei.argmax()]).min()
# CDF(x+d/2) - CDF(x-d/2) < 1/(10*x.size) then reject else accept
s = 0
while (np.abs(x - samples[ei.argmax()]).min() < h):
ei[ei.argmax()] = 0
s = s + 1
if (s == samples.size):
break
xnext = samples[ei.argmax()]
return xnext | python | def get_next_sample(x, y, min_limit=-np.inf, max_limit=np.inf):
"""Get the next point to try, given the previous samples.
We use [Bergstra2013]_ to compute the point that gives the largest
Expected improvement (EI) in the optimization function. This model fits 2
different GMMs - one for points that have loss values in the bottom 15%
and another for the rest. Then we sample from the former distribution
and estimate EI as the ratio of the likelihoods of the 2 distributions.
We pick the point with the best EI among the samples that is also not
very close to a point we have sampled earlier.
Arguments
---------
x : 1D array
Samples generated from the distribution so far
y : 1D array
Loss values at the corresponding samples
min_limit : float, default : -inf
Minimum limit for the distribution
max_limit : float, default : +inf
Maximum limit for the distribution
Returns
-------
float
Next value to use for HPO
"""
z = np.array(list(zip(x, y)), dtype=np.dtype([('x', float), ('y', float)]))
z = np.sort(z, order='y')
n = y.shape[0]
g = int(np.round(np.ceil(0.15 * n)))
ldata = z[0:g]
gdata = z[g:n]
lymin = ldata['y'].min()
lymax = ldata['y'].max()
weights = (lymax - ldata['y']) / (lymax - lymin)
lx = gmm_1d_distribution(ldata['x'], min_limit=min_limit,
max_limit=max_limit, weights=weights)
gx = gmm_1d_distribution(gdata['x'], min_limit=min_limit,
max_limit=max_limit)
samples = lx.get_samples(n=1000)
ei = lx(samples) / gx(samples)
h = (x.max() - x.min()) / (10 * x.size)
# TODO
# assumes prior of x is uniform; should ideally change for other priors
# d = np.abs(x - samples[ei.argmax()]).min()
# CDF(x+d/2) - CDF(x-d/2) < 1/(10*x.size) then reject else accept
s = 0
while (np.abs(x - samples[ei.argmax()]).min() < h):
ei[ei.argmax()] = 0
s = s + 1
if (s == samples.size):
break
xnext = samples[ei.argmax()]
return xnext | [
"def",
"get_next_sample",
"(",
"x",
",",
"y",
",",
"min_limit",
"=",
"-",
"np",
".",
"inf",
",",
"max_limit",
"=",
"np",
".",
"inf",
")",
":",
"z",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"zip",
"(",
"x",
",",
"y",
")",
")",
",",
"dtype",
... | Get the next point to try, given the previous samples.
We use [Bergstra2013]_ to compute the point that gives the largest
Expected improvement (EI) in the optimization function. This model fits 2
different GMMs - one for points that have loss values in the bottom 15%
and another for the rest. Then we sample from the former distribution
and estimate EI as the ratio of the likelihoods of the 2 distributions.
We pick the point with the best EI among the samples that is also not
very close to a point we have sampled earlier.
Arguments
---------
x : 1D array
Samples generated from the distribution so far
y : 1D array
Loss values at the corresponding samples
min_limit : float, default : -inf
Minimum limit for the distribution
max_limit : float, default : +inf
Maximum limit for the distribution
Returns
-------
float
Next value to use for HPO | [
"Get",
"the",
"next",
"point",
"to",
"try",
"given",
"the",
"previous",
"samples",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/hyperparamopt/hpo.py#L219-L280 | train | 204,453 |
brainiak/brainiak | brainiak/hyperparamopt/hpo.py | fmin | def fmin(loss_fn,
space,
max_evals,
trials,
init_random_evals=30,
explore_prob=0.2):
"""Find the minimum of function through hyper parameter optimization.
Arguments
---------
loss_fn : ``function(*args) -> float``
Function that takes in a dictionary and returns a real value.
This is the function to be minimized.
space : dictionary
Custom dictionary specifying the range and distribution of
the hyperparamters.
E.g. ``space = {'x': {'dist':scipy.stats.uniform(0,1),
'lo':0, 'hi':1}}``
for a 1-dimensional space with variable x in range [0,1]
max_evals : int
Maximum number of evaluations of loss_fn allowed
trials : list
Holds the output of the optimization trials.
Need not be empty to begin with, new trials are appended
at the end.
init_random_evals : Optional[int], default 30
Number of random trials to initialize the
optimization.
explore_prob : Optional[float], default 0.2
Controls the exploration-vs-exploitation ratio. Value should
be in [0,1]. By default, 20% of trails are random samples.
Returns
-------
trial entry (dictionary of hyperparameters)
Best hyperparameter setting found.
E.g. {'x': 5.6, 'loss' : 0.5} where x is the best hyparameter
value found and loss is the value of the function for the
best hyperparameter value(s).
Raises
------
ValueError
If the distribution specified in space does not support a ``rvs()``
method to generate random numbers, a ValueError is raised.
"""
for s in space:
if not hasattr(space[s]['dist'], 'rvs'):
raise ValueError('Unknown distribution type for variable')
if 'lo' not in space[s]:
space[s]['lo'] = -np.inf
if 'hi' not in space[s]:
space[s]['hi'] = np.inf
if len(trials) > init_random_evals:
init_random_evals = 0
for t in range(max_evals):
sdict = {}
if t >= init_random_evals and np.random.random() > explore_prob:
use_random_sampling = False
else:
use_random_sampling = True
yarray = np.array([tr['loss'] for tr in trials])
for s in space:
sarray = np.array([tr[s] for tr in trials])
if use_random_sampling:
sdict[s] = space[s]['dist'].rvs()
else:
sdict[s] = get_next_sample(sarray, yarray,
min_limit=space[s]['lo'],
max_limit=space[s]['hi'])
logger.debug('Explore' if use_random_sampling else 'Exploit')
logger.info('Next point ', t, ' = ', sdict)
y = loss_fn(sdict)
sdict['loss'] = y
trials.append(sdict)
yarray = np.array([tr['loss'] for tr in trials])
yargmin = yarray.argmin()
logger.info('Best point so far = ', trials[yargmin])
return trials[yargmin] | python | def fmin(loss_fn,
space,
max_evals,
trials,
init_random_evals=30,
explore_prob=0.2):
"""Find the minimum of function through hyper parameter optimization.
Arguments
---------
loss_fn : ``function(*args) -> float``
Function that takes in a dictionary and returns a real value.
This is the function to be minimized.
space : dictionary
Custom dictionary specifying the range and distribution of
the hyperparamters.
E.g. ``space = {'x': {'dist':scipy.stats.uniform(0,1),
'lo':0, 'hi':1}}``
for a 1-dimensional space with variable x in range [0,1]
max_evals : int
Maximum number of evaluations of loss_fn allowed
trials : list
Holds the output of the optimization trials.
Need not be empty to begin with, new trials are appended
at the end.
init_random_evals : Optional[int], default 30
Number of random trials to initialize the
optimization.
explore_prob : Optional[float], default 0.2
Controls the exploration-vs-exploitation ratio. Value should
be in [0,1]. By default, 20% of trails are random samples.
Returns
-------
trial entry (dictionary of hyperparameters)
Best hyperparameter setting found.
E.g. {'x': 5.6, 'loss' : 0.5} where x is the best hyparameter
value found and loss is the value of the function for the
best hyperparameter value(s).
Raises
------
ValueError
If the distribution specified in space does not support a ``rvs()``
method to generate random numbers, a ValueError is raised.
"""
for s in space:
if not hasattr(space[s]['dist'], 'rvs'):
raise ValueError('Unknown distribution type for variable')
if 'lo' not in space[s]:
space[s]['lo'] = -np.inf
if 'hi' not in space[s]:
space[s]['hi'] = np.inf
if len(trials) > init_random_evals:
init_random_evals = 0
for t in range(max_evals):
sdict = {}
if t >= init_random_evals and np.random.random() > explore_prob:
use_random_sampling = False
else:
use_random_sampling = True
yarray = np.array([tr['loss'] for tr in trials])
for s in space:
sarray = np.array([tr[s] for tr in trials])
if use_random_sampling:
sdict[s] = space[s]['dist'].rvs()
else:
sdict[s] = get_next_sample(sarray, yarray,
min_limit=space[s]['lo'],
max_limit=space[s]['hi'])
logger.debug('Explore' if use_random_sampling else 'Exploit')
logger.info('Next point ', t, ' = ', sdict)
y = loss_fn(sdict)
sdict['loss'] = y
trials.append(sdict)
yarray = np.array([tr['loss'] for tr in trials])
yargmin = yarray.argmin()
logger.info('Best point so far = ', trials[yargmin])
return trials[yargmin] | [
"def",
"fmin",
"(",
"loss_fn",
",",
"space",
",",
"max_evals",
",",
"trials",
",",
"init_random_evals",
"=",
"30",
",",
"explore_prob",
"=",
"0.2",
")",
":",
"for",
"s",
"in",
"space",
":",
"if",
"not",
"hasattr",
"(",
"space",
"[",
"s",
"]",
"[",
... | Find the minimum of function through hyper parameter optimization.
Arguments
---------
loss_fn : ``function(*args) -> float``
Function that takes in a dictionary and returns a real value.
This is the function to be minimized.
space : dictionary
Custom dictionary specifying the range and distribution of
the hyperparamters.
E.g. ``space = {'x': {'dist':scipy.stats.uniform(0,1),
'lo':0, 'hi':1}}``
for a 1-dimensional space with variable x in range [0,1]
max_evals : int
Maximum number of evaluations of loss_fn allowed
trials : list
Holds the output of the optimization trials.
Need not be empty to begin with, new trials are appended
at the end.
init_random_evals : Optional[int], default 30
Number of random trials to initialize the
optimization.
explore_prob : Optional[float], default 0.2
Controls the exploration-vs-exploitation ratio. Value should
be in [0,1]. By default, 20% of trails are random samples.
Returns
-------
trial entry (dictionary of hyperparameters)
Best hyperparameter setting found.
E.g. {'x': 5.6, 'loss' : 0.5} where x is the best hyparameter
value found and loss is the value of the function for the
best hyperparameter value(s).
Raises
------
ValueError
If the distribution specified in space does not support a ``rvs()``
method to generate random numbers, a ValueError is raised. | [
"Find",
"the",
"minimum",
"of",
"function",
"through",
"hyper",
"parameter",
"optimization",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/hyperparamopt/hpo.py#L283-L375 | train | 204,454 |
brainiak/brainiak | brainiak/hyperparamopt/hpo.py | gmm_1d_distribution.get_gmm_pdf | def get_gmm_pdf(self, x):
"""Calculate the GMM likelihood for a single point.
.. math::
y = \\sum_{i=1}^{N} w_i
\\times \\text{normpdf}(x, x_i, \\sigma_i)/\\sum_{i=1}^{N} w_i
:label: gmm-likelihood
Arguments
---------
x : float
Point at which likelihood needs to be computed
Returns
-------
float
Likelihood value at x
"""
def my_norm_pdf(xt, mu, sigma):
z = (xt - mu) / sigma
return (math.exp(-0.5 * z * z)
/ (math.sqrt(2. * np.pi) * sigma))
y = 0
if (x < self.min_limit):
return 0
if (x > self.max_limit):
return 0
for _x in range(self.points.size):
y += (my_norm_pdf(x, self.points[_x], self.sigma[_x])
* self.weights[_x]) / self.W_sum
return y | python | def get_gmm_pdf(self, x):
"""Calculate the GMM likelihood for a single point.
.. math::
y = \\sum_{i=1}^{N} w_i
\\times \\text{normpdf}(x, x_i, \\sigma_i)/\\sum_{i=1}^{N} w_i
:label: gmm-likelihood
Arguments
---------
x : float
Point at which likelihood needs to be computed
Returns
-------
float
Likelihood value at x
"""
def my_norm_pdf(xt, mu, sigma):
z = (xt - mu) / sigma
return (math.exp(-0.5 * z * z)
/ (math.sqrt(2. * np.pi) * sigma))
y = 0
if (x < self.min_limit):
return 0
if (x > self.max_limit):
return 0
for _x in range(self.points.size):
y += (my_norm_pdf(x, self.points[_x], self.sigma[_x])
* self.weights[_x]) / self.W_sum
return y | [
"def",
"get_gmm_pdf",
"(",
"self",
",",
"x",
")",
":",
"def",
"my_norm_pdf",
"(",
"xt",
",",
"mu",
",",
"sigma",
")",
":",
"z",
"=",
"(",
"xt",
"-",
"mu",
")",
"/",
"sigma",
"return",
"(",
"math",
".",
"exp",
"(",
"-",
"0.5",
"*",
"z",
"*",
... | Calculate the GMM likelihood for a single point.
.. math::
y = \\sum_{i=1}^{N} w_i
\\times \\text{normpdf}(x, x_i, \\sigma_i)/\\sum_{i=1}^{N} w_i
:label: gmm-likelihood
Arguments
---------
x : float
Point at which likelihood needs to be computed
Returns
-------
float
Likelihood value at x | [
"Calculate",
"the",
"GMM",
"likelihood",
"for",
"a",
"single",
"point",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/hyperparamopt/hpo.py#L126-L158 | train | 204,455 |
brainiak/brainiak | brainiak/hyperparamopt/hpo.py | gmm_1d_distribution.get_samples | def get_samples(self, n):
"""Sample the GMM distribution.
Arguments
---------
n : int
Number of samples needed
Returns
-------
1D array
Samples from the distribution
"""
normalized_w = self.weights / np.sum(self.weights)
get_rand_index = st.rv_discrete(values=(range(self.N),
normalized_w)).rvs(size=n)
samples = np.zeros(n)
k = 0
j = 0
while (k < n):
i = get_rand_index[j]
j = j + 1
if (j == n):
get_rand_index = st.rv_discrete(values=(range(self.N),
normalized_w)).rvs(size=n)
j = 0
v = np.random.normal(loc=self.points[i], scale=self.sigma[i])
if (v > self.max_limit or v < self.min_limit):
continue
else:
samples[k] = v
k = k + 1
if (k == n):
break
return samples | python | def get_samples(self, n):
"""Sample the GMM distribution.
Arguments
---------
n : int
Number of samples needed
Returns
-------
1D array
Samples from the distribution
"""
normalized_w = self.weights / np.sum(self.weights)
get_rand_index = st.rv_discrete(values=(range(self.N),
normalized_w)).rvs(size=n)
samples = np.zeros(n)
k = 0
j = 0
while (k < n):
i = get_rand_index[j]
j = j + 1
if (j == n):
get_rand_index = st.rv_discrete(values=(range(self.N),
normalized_w)).rvs(size=n)
j = 0
v = np.random.normal(loc=self.points[i], scale=self.sigma[i])
if (v > self.max_limit or v < self.min_limit):
continue
else:
samples[k] = v
k = k + 1
if (k == n):
break
return samples | [
"def",
"get_samples",
"(",
"self",
",",
"n",
")",
":",
"normalized_w",
"=",
"self",
".",
"weights",
"/",
"np",
".",
"sum",
"(",
"self",
".",
"weights",
")",
"get_rand_index",
"=",
"st",
".",
"rv_discrete",
"(",
"values",
"=",
"(",
"range",
"(",
"self... | Sample the GMM distribution.
Arguments
---------
n : int
Number of samples needed
Returns
-------
1D array
Samples from the distribution | [
"Sample",
"the",
"GMM",
"distribution",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/hyperparamopt/hpo.py#L181-L216 | train | 204,456 |
brainiak/brainiak | brainiak/fcma/preprocessing.py | _separate_epochs | def _separate_epochs(activity_data, epoch_list):
""" create data epoch by epoch
Separate data into epochs of interest specified in epoch_list
and z-score them for computing correlation
Parameters
----------
activity_data: list of 2D array in shape [nVoxels, nTRs]
the masked activity data organized in voxel*TR formats of all subjects
epoch_list: list of 3D array in shape [condition, nEpochs, nTRs]
specification of epochs and conditions
assuming all subjects have the same number of epochs
len(epoch_list) equals the number of subjects
Returns
-------
raw_data: list of 2D array in shape [epoch length, nVoxels]
the data organized in epochs
and z-scored in preparation of correlation computation
len(raw_data) equals the number of epochs
labels: list of 1D array
the condition labels of the epochs
len(labels) labels equals the number of epochs
"""
time1 = time.time()
raw_data = []
labels = []
for sid in range(len(epoch_list)):
epoch = epoch_list[sid]
for cond in range(epoch.shape[0]):
sub_epoch = epoch[cond, :, :]
for eid in range(epoch.shape[1]):
r = np.sum(sub_epoch[eid, :])
if r > 0: # there is an epoch in this condition
# mat is row-major
# regardless of the order of acitvity_data[sid]
mat = activity_data[sid][:, sub_epoch[eid, :] == 1]
mat = np.ascontiguousarray(mat.T)
mat = zscore(mat, axis=0, ddof=0)
# if zscore fails (standard deviation is zero),
# set all values to be zero
mat = np.nan_to_num(mat)
mat = mat / math.sqrt(r)
raw_data.append(mat)
labels.append(cond)
time2 = time.time()
logger.debug(
'epoch separation done, takes %.2f s' %
(time2 - time1)
)
return raw_data, labels | python | def _separate_epochs(activity_data, epoch_list):
""" create data epoch by epoch
Separate data into epochs of interest specified in epoch_list
and z-score them for computing correlation
Parameters
----------
activity_data: list of 2D array in shape [nVoxels, nTRs]
the masked activity data organized in voxel*TR formats of all subjects
epoch_list: list of 3D array in shape [condition, nEpochs, nTRs]
specification of epochs and conditions
assuming all subjects have the same number of epochs
len(epoch_list) equals the number of subjects
Returns
-------
raw_data: list of 2D array in shape [epoch length, nVoxels]
the data organized in epochs
and z-scored in preparation of correlation computation
len(raw_data) equals the number of epochs
labels: list of 1D array
the condition labels of the epochs
len(labels) labels equals the number of epochs
"""
time1 = time.time()
raw_data = []
labels = []
for sid in range(len(epoch_list)):
epoch = epoch_list[sid]
for cond in range(epoch.shape[0]):
sub_epoch = epoch[cond, :, :]
for eid in range(epoch.shape[1]):
r = np.sum(sub_epoch[eid, :])
if r > 0: # there is an epoch in this condition
# mat is row-major
# regardless of the order of acitvity_data[sid]
mat = activity_data[sid][:, sub_epoch[eid, :] == 1]
mat = np.ascontiguousarray(mat.T)
mat = zscore(mat, axis=0, ddof=0)
# if zscore fails (standard deviation is zero),
# set all values to be zero
mat = np.nan_to_num(mat)
mat = mat / math.sqrt(r)
raw_data.append(mat)
labels.append(cond)
time2 = time.time()
logger.debug(
'epoch separation done, takes %.2f s' %
(time2 - time1)
)
return raw_data, labels | [
"def",
"_separate_epochs",
"(",
"activity_data",
",",
"epoch_list",
")",
":",
"time1",
"=",
"time",
".",
"time",
"(",
")",
"raw_data",
"=",
"[",
"]",
"labels",
"=",
"[",
"]",
"for",
"sid",
"in",
"range",
"(",
"len",
"(",
"epoch_list",
")",
")",
":",
... | create data epoch by epoch
Separate data into epochs of interest specified in epoch_list
and z-score them for computing correlation
Parameters
----------
activity_data: list of 2D array in shape [nVoxels, nTRs]
the masked activity data organized in voxel*TR formats of all subjects
epoch_list: list of 3D array in shape [condition, nEpochs, nTRs]
specification of epochs and conditions
assuming all subjects have the same number of epochs
len(epoch_list) equals the number of subjects
Returns
-------
raw_data: list of 2D array in shape [epoch length, nVoxels]
the data organized in epochs
and z-scored in preparation of correlation computation
len(raw_data) equals the number of epochs
labels: list of 1D array
the condition labels of the epochs
len(labels) labels equals the number of epochs | [
"create",
"data",
"epoch",
"by",
"epoch"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/preprocessing.py#L41-L92 | train | 204,457 |
brainiak/brainiak | brainiak/fcma/preprocessing.py | _randomize_single_subject | def _randomize_single_subject(data, seed=None):
"""Randomly permute the voxels of the subject.
The subject is organized as Voxel x TR,
this method shuffles the voxel dimension in place.
Parameters
----------
data: 2D array in shape [nVoxels, nTRs]
Activity image data to be shuffled.
seed: Optional[int]
Seed for random state used implicitly for shuffling.
Returns
-------
None.
"""
if seed is not None:
np.random.seed(seed)
np.random.shuffle(data) | python | def _randomize_single_subject(data, seed=None):
"""Randomly permute the voxels of the subject.
The subject is organized as Voxel x TR,
this method shuffles the voxel dimension in place.
Parameters
----------
data: 2D array in shape [nVoxels, nTRs]
Activity image data to be shuffled.
seed: Optional[int]
Seed for random state used implicitly for shuffling.
Returns
-------
None.
"""
if seed is not None:
np.random.seed(seed)
np.random.shuffle(data) | [
"def",
"_randomize_single_subject",
"(",
"data",
",",
"seed",
"=",
"None",
")",
":",
"if",
"seed",
"is",
"not",
"None",
":",
"np",
".",
"random",
".",
"seed",
"(",
"seed",
")",
"np",
".",
"random",
".",
"shuffle",
"(",
"data",
")"
] | Randomly permute the voxels of the subject.
The subject is organized as Voxel x TR,
this method shuffles the voxel dimension in place.
Parameters
----------
data: 2D array in shape [nVoxels, nTRs]
Activity image data to be shuffled.
seed: Optional[int]
Seed for random state used implicitly for shuffling.
Returns
-------
None. | [
"Randomly",
"permute",
"the",
"voxels",
"of",
"the",
"subject",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/preprocessing.py#L95-L114 | train | 204,458 |
brainiak/brainiak | brainiak/fcma/preprocessing.py | _randomize_subject_list | def _randomize_subject_list(data_list, random):
"""Randomly permute the voxels of a subject list.
The method shuffles the subject one by one in place according to
the random type. If RandomType.NORANDOM, return the original list.
Parameters
----------
data_list: list of 2D array in shape [nVxels, nTRs]
Activity image data list to be shuffled.
random: RandomType
Randomization type.
Returns
-------
None.
"""
if random == RandomType.REPRODUCIBLE:
for i in range(len(data_list)):
_randomize_single_subject(data_list[i], seed=i)
elif random == RandomType.UNREPRODUCIBLE:
for data in data_list:
_randomize_single_subject(data) | python | def _randomize_subject_list(data_list, random):
"""Randomly permute the voxels of a subject list.
The method shuffles the subject one by one in place according to
the random type. If RandomType.NORANDOM, return the original list.
Parameters
----------
data_list: list of 2D array in shape [nVxels, nTRs]
Activity image data list to be shuffled.
random: RandomType
Randomization type.
Returns
-------
None.
"""
if random == RandomType.REPRODUCIBLE:
for i in range(len(data_list)):
_randomize_single_subject(data_list[i], seed=i)
elif random == RandomType.UNREPRODUCIBLE:
for data in data_list:
_randomize_single_subject(data) | [
"def",
"_randomize_subject_list",
"(",
"data_list",
",",
"random",
")",
":",
"if",
"random",
"==",
"RandomType",
".",
"REPRODUCIBLE",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"data_list",
")",
")",
":",
"_randomize_single_subject",
"(",
"data_list",
... | Randomly permute the voxels of a subject list.
The method shuffles the subject one by one in place according to
the random type. If RandomType.NORANDOM, return the original list.
Parameters
----------
data_list: list of 2D array in shape [nVxels, nTRs]
Activity image data list to be shuffled.
random: RandomType
Randomization type.
Returns
-------
None. | [
"Randomly",
"permute",
"the",
"voxels",
"of",
"a",
"subject",
"list",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/preprocessing.py#L117-L139 | train | 204,459 |
brainiak/brainiak | brainiak/fcma/preprocessing.py | prepare_fcma_data | def prepare_fcma_data(images, conditions, mask1, mask2=None,
random=RandomType.NORANDOM, comm=MPI.COMM_WORLD):
"""Prepare data for correlation-based computation and analysis.
Generate epochs of interests, then broadcast to all workers.
Parameters
----------
images: Iterable[SpatialImage]
Data.
conditions: List[UniqueLabelConditionSpec]
Condition specification.
mask1: np.ndarray
Mask to apply to each image.
mask2: Optional[np.ndarray]
Mask to apply to each image.
If it is not specified, the method will assign None to the returning
variable raw_data2 and the self-correlation on raw_data1 will be
computed
random: Optional[RandomType]
Randomize the image data within subject or not.
comm: MPI.Comm
MPI communicator to use for MPI operations.
Returns
-------
raw_data1: list of 2D array in shape [epoch length, nVoxels]
the data organized in epochs, specified by the first mask.
len(raw_data) equals the number of epochs
raw_data2: Optional, list of 2D array in shape [epoch length, nVoxels]
the data organized in epochs, specified by the second mask if any.
len(raw_data2) equals the number of epochs
labels: list of 1D array
the condition labels of the epochs
len(labels) labels equals the number of epochs
"""
rank = comm.Get_rank()
labels = []
raw_data1 = []
raw_data2 = []
if rank == 0:
logger.info('start to apply masks and separate epochs')
if mask2 is not None:
masks = (mask1, mask2)
activity_data1, activity_data2 = zip(*multimask_images(images,
masks,
np.float32))
_randomize_subject_list(activity_data2, random)
raw_data2, _ = _separate_epochs(activity_data2, conditions)
else:
activity_data1 = list(mask_images(images, mask1, np.float32))
_randomize_subject_list(activity_data1, random)
raw_data1, labels = _separate_epochs(activity_data1, conditions)
time1 = time.time()
raw_data_length = len(raw_data1)
raw_data_length = comm.bcast(raw_data_length)
# broadcast the data subject by subject to prevent size overflow
for i in range(raw_data_length):
if rank != 0:
raw_data1.append(None)
if mask2 is not None:
raw_data2.append(None)
raw_data1[i] = comm.bcast(raw_data1[i], root=0)
if mask2 is not None:
raw_data2[i] = comm.bcast(raw_data2[i], root=0)
if comm.Get_size() > 1:
labels = comm.bcast(labels, root=0)
if rank == 0:
time2 = time.time()
logger.info(
'data broadcasting done, takes %.2f s' %
(time2 - time1)
)
if mask2 is None:
raw_data2 = None
return raw_data1, raw_data2, labels | python | def prepare_fcma_data(images, conditions, mask1, mask2=None,
random=RandomType.NORANDOM, comm=MPI.COMM_WORLD):
"""Prepare data for correlation-based computation and analysis.
Generate epochs of interests, then broadcast to all workers.
Parameters
----------
images: Iterable[SpatialImage]
Data.
conditions: List[UniqueLabelConditionSpec]
Condition specification.
mask1: np.ndarray
Mask to apply to each image.
mask2: Optional[np.ndarray]
Mask to apply to each image.
If it is not specified, the method will assign None to the returning
variable raw_data2 and the self-correlation on raw_data1 will be
computed
random: Optional[RandomType]
Randomize the image data within subject or not.
comm: MPI.Comm
MPI communicator to use for MPI operations.
Returns
-------
raw_data1: list of 2D array in shape [epoch length, nVoxels]
the data organized in epochs, specified by the first mask.
len(raw_data) equals the number of epochs
raw_data2: Optional, list of 2D array in shape [epoch length, nVoxels]
the data organized in epochs, specified by the second mask if any.
len(raw_data2) equals the number of epochs
labels: list of 1D array
the condition labels of the epochs
len(labels) labels equals the number of epochs
"""
rank = comm.Get_rank()
labels = []
raw_data1 = []
raw_data2 = []
if rank == 0:
logger.info('start to apply masks and separate epochs')
if mask2 is not None:
masks = (mask1, mask2)
activity_data1, activity_data2 = zip(*multimask_images(images,
masks,
np.float32))
_randomize_subject_list(activity_data2, random)
raw_data2, _ = _separate_epochs(activity_data2, conditions)
else:
activity_data1 = list(mask_images(images, mask1, np.float32))
_randomize_subject_list(activity_data1, random)
raw_data1, labels = _separate_epochs(activity_data1, conditions)
time1 = time.time()
raw_data_length = len(raw_data1)
raw_data_length = comm.bcast(raw_data_length)
# broadcast the data subject by subject to prevent size overflow
for i in range(raw_data_length):
if rank != 0:
raw_data1.append(None)
if mask2 is not None:
raw_data2.append(None)
raw_data1[i] = comm.bcast(raw_data1[i], root=0)
if mask2 is not None:
raw_data2[i] = comm.bcast(raw_data2[i], root=0)
if comm.Get_size() > 1:
labels = comm.bcast(labels, root=0)
if rank == 0:
time2 = time.time()
logger.info(
'data broadcasting done, takes %.2f s' %
(time2 - time1)
)
if mask2 is None:
raw_data2 = None
return raw_data1, raw_data2, labels | [
"def",
"prepare_fcma_data",
"(",
"images",
",",
"conditions",
",",
"mask1",
",",
"mask2",
"=",
"None",
",",
"random",
"=",
"RandomType",
".",
"NORANDOM",
",",
"comm",
"=",
"MPI",
".",
"COMM_WORLD",
")",
":",
"rank",
"=",
"comm",
".",
"Get_rank",
"(",
"... | Prepare data for correlation-based computation and analysis.
Generate epochs of interests, then broadcast to all workers.
Parameters
----------
images: Iterable[SpatialImage]
Data.
conditions: List[UniqueLabelConditionSpec]
Condition specification.
mask1: np.ndarray
Mask to apply to each image.
mask2: Optional[np.ndarray]
Mask to apply to each image.
If it is not specified, the method will assign None to the returning
variable raw_data2 and the self-correlation on raw_data1 will be
computed
random: Optional[RandomType]
Randomize the image data within subject or not.
comm: MPI.Comm
MPI communicator to use for MPI operations.
Returns
-------
raw_data1: list of 2D array in shape [epoch length, nVoxels]
the data organized in epochs, specified by the first mask.
len(raw_data) equals the number of epochs
raw_data2: Optional, list of 2D array in shape [epoch length, nVoxels]
the data organized in epochs, specified by the second mask if any.
len(raw_data2) equals the number of epochs
labels: list of 1D array
the condition labels of the epochs
len(labels) labels equals the number of epochs | [
"Prepare",
"data",
"for",
"correlation",
"-",
"based",
"computation",
"and",
"analysis",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/preprocessing.py#L156-L232 | train | 204,460 |
brainiak/brainiak | brainiak/fcma/preprocessing.py | generate_epochs_info | def generate_epochs_info(epoch_list):
""" use epoch_list to generate epoch_info defined below
Parameters
----------
epoch_list: list of 3D (binary) array in shape [condition, nEpochs, nTRs]
Contains specification of epochs and conditions, assuming
1. all subjects have the same number of epochs;
2. len(epoch_list) equals the number of subjects;
3. an epoch is always a continuous time course.
Returns
-------
epoch_info: list of tuple (label, sid, start, end).
label is the condition labels of the epochs;
sid is the subject id, corresponding to the index of raw_data;
start is the start TR of an epoch (inclusive);
end is the end TR of an epoch(exclusive).
Assuming len(labels) labels equals the number of epochs and
the epochs of the same sid are adjacent in epoch_info
"""
time1 = time.time()
epoch_info = []
for sid, epoch in enumerate(epoch_list):
for cond in range(epoch.shape[0]):
sub_epoch = epoch[cond, :, :]
for eid in range(epoch.shape[1]):
r = np.sum(sub_epoch[eid, :])
if r > 0: # there is an epoch in this condition
start = np.nonzero(sub_epoch[eid, :])[0][0]
epoch_info.append((cond, sid, start, start + r))
time2 = time.time()
logger.debug(
'epoch separation done, takes %.2f s' %
(time2 - time1)
)
return epoch_info | python | def generate_epochs_info(epoch_list):
""" use epoch_list to generate epoch_info defined below
Parameters
----------
epoch_list: list of 3D (binary) array in shape [condition, nEpochs, nTRs]
Contains specification of epochs and conditions, assuming
1. all subjects have the same number of epochs;
2. len(epoch_list) equals the number of subjects;
3. an epoch is always a continuous time course.
Returns
-------
epoch_info: list of tuple (label, sid, start, end).
label is the condition labels of the epochs;
sid is the subject id, corresponding to the index of raw_data;
start is the start TR of an epoch (inclusive);
end is the end TR of an epoch(exclusive).
Assuming len(labels) labels equals the number of epochs and
the epochs of the same sid are adjacent in epoch_info
"""
time1 = time.time()
epoch_info = []
for sid, epoch in enumerate(epoch_list):
for cond in range(epoch.shape[0]):
sub_epoch = epoch[cond, :, :]
for eid in range(epoch.shape[1]):
r = np.sum(sub_epoch[eid, :])
if r > 0: # there is an epoch in this condition
start = np.nonzero(sub_epoch[eid, :])[0][0]
epoch_info.append((cond, sid, start, start + r))
time2 = time.time()
logger.debug(
'epoch separation done, takes %.2f s' %
(time2 - time1)
)
return epoch_info | [
"def",
"generate_epochs_info",
"(",
"epoch_list",
")",
":",
"time1",
"=",
"time",
".",
"time",
"(",
")",
"epoch_info",
"=",
"[",
"]",
"for",
"sid",
",",
"epoch",
"in",
"enumerate",
"(",
"epoch_list",
")",
":",
"for",
"cond",
"in",
"range",
"(",
"epoch"... | use epoch_list to generate epoch_info defined below
Parameters
----------
epoch_list: list of 3D (binary) array in shape [condition, nEpochs, nTRs]
Contains specification of epochs and conditions, assuming
1. all subjects have the same number of epochs;
2. len(epoch_list) equals the number of subjects;
3. an epoch is always a continuous time course.
Returns
-------
epoch_info: list of tuple (label, sid, start, end).
label is the condition labels of the epochs;
sid is the subject id, corresponding to the index of raw_data;
start is the start TR of an epoch (inclusive);
end is the end TR of an epoch(exclusive).
Assuming len(labels) labels equals the number of epochs and
the epochs of the same sid are adjacent in epoch_info | [
"use",
"epoch_list",
"to",
"generate",
"epoch_info",
"defined",
"below"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/preprocessing.py#L235-L271 | train | 204,461 |
brainiak/brainiak | brainiak/fcma/preprocessing.py | prepare_mvpa_data | def prepare_mvpa_data(images, conditions, mask):
"""Prepare data for activity-based model training and prediction.
Average the activity within epochs and z-scoring within subject.
Parameters
----------
images: Iterable[SpatialImage]
Data.
conditions: List[UniqueLabelConditionSpec]
Condition specification.
mask: np.ndarray
Mask to apply to each image.
Returns
-------
processed_data: 2D array in shape [num_voxels, num_epochs]
averaged epoch by epoch processed data
labels: 1D array
contains labels of the data
"""
activity_data = list(mask_images(images, mask, np.float32))
epoch_info = generate_epochs_info(conditions)
num_epochs = len(epoch_info)
(d1, _) = activity_data[0].shape
processed_data = np.empty([d1, num_epochs])
labels = np.empty(num_epochs)
subject_count = [0] # counting the epochs per subject for z-scoring
cur_sid = -1
# averaging
for idx, epoch in enumerate(epoch_info):
labels[idx] = epoch[0]
if cur_sid != epoch[1]:
subject_count.append(0)
cur_sid = epoch[1]
subject_count[-1] += 1
processed_data[:, idx] = \
np.mean(activity_data[cur_sid][:, epoch[2]:epoch[3]],
axis=1)
# z-scoring
cur_epoch = 0
for i in subject_count:
if i > 1:
processed_data[:, cur_epoch:cur_epoch + i] = \
zscore(processed_data[:, cur_epoch:cur_epoch + i],
axis=1, ddof=0)
cur_epoch += i
# if zscore fails (standard deviation is zero),
# set all values to be zero
processed_data = np.nan_to_num(processed_data)
return processed_data, labels | python | def prepare_mvpa_data(images, conditions, mask):
"""Prepare data for activity-based model training and prediction.
Average the activity within epochs and z-scoring within subject.
Parameters
----------
images: Iterable[SpatialImage]
Data.
conditions: List[UniqueLabelConditionSpec]
Condition specification.
mask: np.ndarray
Mask to apply to each image.
Returns
-------
processed_data: 2D array in shape [num_voxels, num_epochs]
averaged epoch by epoch processed data
labels: 1D array
contains labels of the data
"""
activity_data = list(mask_images(images, mask, np.float32))
epoch_info = generate_epochs_info(conditions)
num_epochs = len(epoch_info)
(d1, _) = activity_data[0].shape
processed_data = np.empty([d1, num_epochs])
labels = np.empty(num_epochs)
subject_count = [0] # counting the epochs per subject for z-scoring
cur_sid = -1
# averaging
for idx, epoch in enumerate(epoch_info):
labels[idx] = epoch[0]
if cur_sid != epoch[1]:
subject_count.append(0)
cur_sid = epoch[1]
subject_count[-1] += 1
processed_data[:, idx] = \
np.mean(activity_data[cur_sid][:, epoch[2]:epoch[3]],
axis=1)
# z-scoring
cur_epoch = 0
for i in subject_count:
if i > 1:
processed_data[:, cur_epoch:cur_epoch + i] = \
zscore(processed_data[:, cur_epoch:cur_epoch + i],
axis=1, ddof=0)
cur_epoch += i
# if zscore fails (standard deviation is zero),
# set all values to be zero
processed_data = np.nan_to_num(processed_data)
return processed_data, labels | [
"def",
"prepare_mvpa_data",
"(",
"images",
",",
"conditions",
",",
"mask",
")",
":",
"activity_data",
"=",
"list",
"(",
"mask_images",
"(",
"images",
",",
"mask",
",",
"np",
".",
"float32",
")",
")",
"epoch_info",
"=",
"generate_epochs_info",
"(",
"condition... | Prepare data for activity-based model training and prediction.
Average the activity within epochs and z-scoring within subject.
Parameters
----------
images: Iterable[SpatialImage]
Data.
conditions: List[UniqueLabelConditionSpec]
Condition specification.
mask: np.ndarray
Mask to apply to each image.
Returns
-------
processed_data: 2D array in shape [num_voxels, num_epochs]
averaged epoch by epoch processed data
labels: 1D array
contains labels of the data | [
"Prepare",
"data",
"for",
"activity",
"-",
"based",
"model",
"training",
"and",
"prediction",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/preprocessing.py#L274-L325 | train | 204,462 |
brainiak/brainiak | brainiak/fcma/preprocessing.py | prepare_searchlight_mvpa_data | def prepare_searchlight_mvpa_data(images, conditions, data_type=np.float32,
random=RandomType.NORANDOM):
""" obtain the data for activity-based voxel selection using Searchlight
Average the activity within epochs and z-scoring within subject,
while maintaining the 3D brain structure. In order to save memory,
the data is processed subject by subject instead of reading all in before
processing. Assuming all subjects live in the identical cube.
Parameters
----------
images: Iterable[SpatialImage]
Data.
conditions: List[UniqueLabelConditionSpec]
Condition specification.
data_type
Type to cast image to.
random: Optional[RandomType]
Randomize the image data within subject or not.
Returns
-------
processed_data: 4D array in shape [brain 3D + epoch]
averaged epoch by epoch processed data
labels: 1D array
contains labels of the data
"""
time1 = time.time()
epoch_info = generate_epochs_info(conditions)
num_epochs = len(epoch_info)
processed_data = None
logger.info(
'there are %d subjects, and in total %d epochs' %
(len(conditions), num_epochs)
)
labels = np.empty(num_epochs)
# assign labels
for idx, epoch in enumerate(epoch_info):
labels[idx] = epoch[0]
# counting the epochs per subject for z-scoring
subject_count = np.zeros(len(conditions), dtype=np.int32)
logger.info('start to apply masks and separate epochs')
for sid, f in enumerate(images):
data = f.get_data().astype(data_type)
[d1, d2, d3, d4] = data.shape
if random == RandomType.REPRODUCIBLE:
data = data.reshape((d1 * d2 * d3, d4))
_randomize_single_subject(data, seed=sid)
data = data.reshape((d1, d2, d3, d4))
elif random == RandomType.UNREPRODUCIBLE:
data = data.reshape((d1 * d2 * d3, d4))
_randomize_single_subject(data)
data = data.reshape((d1, d2, d3, d4))
if processed_data is None:
processed_data = np.empty([d1, d2, d3, num_epochs],
dtype=data_type)
# averaging
for idx, epoch in enumerate(epoch_info):
if sid == epoch[1]:
subject_count[sid] += 1
processed_data[:, :, :, idx] = \
np.mean(data[:, :, :, epoch[2]:epoch[3]], axis=3)
logger.debug(
'file %s is loaded and processed, with data shape %s',
f.get_filename(), data.shape
)
# z-scoring
cur_epoch = 0
for i in subject_count:
if i > 1:
processed_data[:, :, :, cur_epoch:cur_epoch + i] = \
zscore(processed_data[:, :, :, cur_epoch:cur_epoch + i],
axis=3, ddof=0)
cur_epoch += i
# if zscore fails (standard deviation is zero),
# set all values to be zero
processed_data = np.nan_to_num(processed_data)
time2 = time.time()
logger.info(
'data processed for activity-based voxel selection, takes %.2f s' %
(time2 - time1)
)
return processed_data, labels | python | def prepare_searchlight_mvpa_data(images, conditions, data_type=np.float32,
random=RandomType.NORANDOM):
""" obtain the data for activity-based voxel selection using Searchlight
Average the activity within epochs and z-scoring within subject,
while maintaining the 3D brain structure. In order to save memory,
the data is processed subject by subject instead of reading all in before
processing. Assuming all subjects live in the identical cube.
Parameters
----------
images: Iterable[SpatialImage]
Data.
conditions: List[UniqueLabelConditionSpec]
Condition specification.
data_type
Type to cast image to.
random: Optional[RandomType]
Randomize the image data within subject or not.
Returns
-------
processed_data: 4D array in shape [brain 3D + epoch]
averaged epoch by epoch processed data
labels: 1D array
contains labels of the data
"""
time1 = time.time()
epoch_info = generate_epochs_info(conditions)
num_epochs = len(epoch_info)
processed_data = None
logger.info(
'there are %d subjects, and in total %d epochs' %
(len(conditions), num_epochs)
)
labels = np.empty(num_epochs)
# assign labels
for idx, epoch in enumerate(epoch_info):
labels[idx] = epoch[0]
# counting the epochs per subject for z-scoring
subject_count = np.zeros(len(conditions), dtype=np.int32)
logger.info('start to apply masks and separate epochs')
for sid, f in enumerate(images):
data = f.get_data().astype(data_type)
[d1, d2, d3, d4] = data.shape
if random == RandomType.REPRODUCIBLE:
data = data.reshape((d1 * d2 * d3, d4))
_randomize_single_subject(data, seed=sid)
data = data.reshape((d1, d2, d3, d4))
elif random == RandomType.UNREPRODUCIBLE:
data = data.reshape((d1 * d2 * d3, d4))
_randomize_single_subject(data)
data = data.reshape((d1, d2, d3, d4))
if processed_data is None:
processed_data = np.empty([d1, d2, d3, num_epochs],
dtype=data_type)
# averaging
for idx, epoch in enumerate(epoch_info):
if sid == epoch[1]:
subject_count[sid] += 1
processed_data[:, :, :, idx] = \
np.mean(data[:, :, :, epoch[2]:epoch[3]], axis=3)
logger.debug(
'file %s is loaded and processed, with data shape %s',
f.get_filename(), data.shape
)
# z-scoring
cur_epoch = 0
for i in subject_count:
if i > 1:
processed_data[:, :, :, cur_epoch:cur_epoch + i] = \
zscore(processed_data[:, :, :, cur_epoch:cur_epoch + i],
axis=3, ddof=0)
cur_epoch += i
# if zscore fails (standard deviation is zero),
# set all values to be zero
processed_data = np.nan_to_num(processed_data)
time2 = time.time()
logger.info(
'data processed for activity-based voxel selection, takes %.2f s' %
(time2 - time1)
)
return processed_data, labels | [
"def",
"prepare_searchlight_mvpa_data",
"(",
"images",
",",
"conditions",
",",
"data_type",
"=",
"np",
".",
"float32",
",",
"random",
"=",
"RandomType",
".",
"NORANDOM",
")",
":",
"time1",
"=",
"time",
".",
"time",
"(",
")",
"epoch_info",
"=",
"generate_epoc... | obtain the data for activity-based voxel selection using Searchlight
Average the activity within epochs and z-scoring within subject,
while maintaining the 3D brain structure. In order to save memory,
the data is processed subject by subject instead of reading all in before
processing. Assuming all subjects live in the identical cube.
Parameters
----------
images: Iterable[SpatialImage]
Data.
conditions: List[UniqueLabelConditionSpec]
Condition specification.
data_type
Type to cast image to.
random: Optional[RandomType]
Randomize the image data within subject or not.
Returns
-------
processed_data: 4D array in shape [brain 3D + epoch]
averaged epoch by epoch processed data
labels: 1D array
contains labels of the data | [
"obtain",
"the",
"data",
"for",
"activity",
"-",
"based",
"voxel",
"selection",
"using",
"Searchlight"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/preprocessing.py#L328-L414 | train | 204,463 |
brainiak/brainiak | brainiak/utils/utils.py | from_tri_2_sym | def from_tri_2_sym(tri, dim):
"""convert a upper triangular matrix in 1D format
to 2D symmetric matrix
Parameters
----------
tri: 1D array
Contains elements of upper triangular matrix
dim : int
The dimension of target matrix.
Returns
-------
symm : 2D array
Symmetric matrix in shape=[dim, dim]
"""
symm = np.zeros((dim, dim))
symm[np.triu_indices(dim)] = tri
return symm | python | def from_tri_2_sym(tri, dim):
"""convert a upper triangular matrix in 1D format
to 2D symmetric matrix
Parameters
----------
tri: 1D array
Contains elements of upper triangular matrix
dim : int
The dimension of target matrix.
Returns
-------
symm : 2D array
Symmetric matrix in shape=[dim, dim]
"""
symm = np.zeros((dim, dim))
symm[np.triu_indices(dim)] = tri
return symm | [
"def",
"from_tri_2_sym",
"(",
"tri",
",",
"dim",
")",
":",
"symm",
"=",
"np",
".",
"zeros",
"(",
"(",
"dim",
",",
"dim",
")",
")",
"symm",
"[",
"np",
".",
"triu_indices",
"(",
"dim",
")",
"]",
"=",
"tri",
"return",
"symm"
] | convert a upper triangular matrix in 1D format
to 2D symmetric matrix
Parameters
----------
tri: 1D array
Contains elements of upper triangular matrix
dim : int
The dimension of target matrix.
Returns
-------
symm : 2D array
Symmetric matrix in shape=[dim, dim] | [
"convert",
"a",
"upper",
"triangular",
"matrix",
"in",
"1D",
"format",
"to",
"2D",
"symmetric",
"matrix"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/utils.py#L46-L69 | train | 204,464 |
brainiak/brainiak | brainiak/utils/utils.py | from_sym_2_tri | def from_sym_2_tri(symm):
"""convert a 2D symmetric matrix to an upper
triangular matrix in 1D format
Parameters
----------
symm : 2D array
Symmetric matrix
Returns
-------
tri: 1D array
Contains elements of upper triangular matrix
"""
inds = np.triu_indices_from(symm)
tri = symm[inds]
return tri | python | def from_sym_2_tri(symm):
"""convert a 2D symmetric matrix to an upper
triangular matrix in 1D format
Parameters
----------
symm : 2D array
Symmetric matrix
Returns
-------
tri: 1D array
Contains elements of upper triangular matrix
"""
inds = np.triu_indices_from(symm)
tri = symm[inds]
return tri | [
"def",
"from_sym_2_tri",
"(",
"symm",
")",
":",
"inds",
"=",
"np",
".",
"triu_indices_from",
"(",
"symm",
")",
"tri",
"=",
"symm",
"[",
"inds",
"]",
"return",
"tri"
] | convert a 2D symmetric matrix to an upper
triangular matrix in 1D format
Parameters
----------
symm : 2D array
Symmetric matrix
Returns
-------
tri: 1D array
Contains elements of upper triangular matrix | [
"convert",
"a",
"2D",
"symmetric",
"matrix",
"to",
"an",
"upper",
"triangular",
"matrix",
"in",
"1D",
"format"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/utils.py#L72-L92 | train | 204,465 |
brainiak/brainiak | brainiak/utils/utils.py | sumexp_stable | def sumexp_stable(data):
"""Compute the sum of exponents for a list of samples
Parameters
----------
data : array, shape=[features, samples]
A data array containing samples.
Returns
-------
result_sum : array, shape=[samples,]
The sum of exponents for each sample divided by the exponent
of the maximum feature value in the sample.
max_value : array, shape=[samples,]
The maximum feature value for each sample.
result_exp : array, shape=[features, samples]
The exponent of each element in each sample divided by the exponent
of the maximum feature value in the sample.
Note
----
This function is more stable than computing the sum(exp(v)).
It useful for computing the softmax_i(v)=exp(v_i)/sum(exp(v)) function.
"""
max_value = data.max(axis=0)
result_exp = np.exp(data - max_value)
result_sum = np.sum(result_exp, axis=0)
return result_sum, max_value, result_exp | python | def sumexp_stable(data):
"""Compute the sum of exponents for a list of samples
Parameters
----------
data : array, shape=[features, samples]
A data array containing samples.
Returns
-------
result_sum : array, shape=[samples,]
The sum of exponents for each sample divided by the exponent
of the maximum feature value in the sample.
max_value : array, shape=[samples,]
The maximum feature value for each sample.
result_exp : array, shape=[features, samples]
The exponent of each element in each sample divided by the exponent
of the maximum feature value in the sample.
Note
----
This function is more stable than computing the sum(exp(v)).
It useful for computing the softmax_i(v)=exp(v_i)/sum(exp(v)) function.
"""
max_value = data.max(axis=0)
result_exp = np.exp(data - max_value)
result_sum = np.sum(result_exp, axis=0)
return result_sum, max_value, result_exp | [
"def",
"sumexp_stable",
"(",
"data",
")",
":",
"max_value",
"=",
"data",
".",
"max",
"(",
"axis",
"=",
"0",
")",
"result_exp",
"=",
"np",
".",
"exp",
"(",
"data",
"-",
"max_value",
")",
"result_sum",
"=",
"np",
".",
"sum",
"(",
"result_exp",
",",
"... | Compute the sum of exponents for a list of samples
Parameters
----------
data : array, shape=[features, samples]
A data array containing samples.
Returns
-------
result_sum : array, shape=[samples,]
The sum of exponents for each sample divided by the exponent
of the maximum feature value in the sample.
max_value : array, shape=[samples,]
The maximum feature value for each sample.
result_exp : array, shape=[features, samples]
The exponent of each element in each sample divided by the exponent
of the maximum feature value in the sample.
Note
----
This function is more stable than computing the sum(exp(v)).
It useful for computing the softmax_i(v)=exp(v_i)/sum(exp(v)) function. | [
"Compute",
"the",
"sum",
"of",
"exponents",
"for",
"a",
"list",
"of",
"samples"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/utils.py#L95-L128 | train | 204,466 |
brainiak/brainiak | brainiak/utils/utils.py | concatenate_not_none | def concatenate_not_none(l, axis=0):
"""Construct a numpy array by stacking not-None arrays in a list
Parameters
----------
data : list of arrays
The list of arrays to be concatenated. Arrays have same shape in all
but one dimension or are None, in which case they are ignored.
axis : int, default = 0
Axis for the concatenation
Returns
-------
data_stacked : array
The resulting concatenated array.
"""
# Get the indexes of the arrays in the list
mask = []
for i in range(len(l)):
if l[i] is not None:
mask.append(i)
# Concatenate them
l_stacked = np.concatenate([l[i] for i in mask], axis=axis)
return l_stacked | python | def concatenate_not_none(l, axis=0):
"""Construct a numpy array by stacking not-None arrays in a list
Parameters
----------
data : list of arrays
The list of arrays to be concatenated. Arrays have same shape in all
but one dimension or are None, in which case they are ignored.
axis : int, default = 0
Axis for the concatenation
Returns
-------
data_stacked : array
The resulting concatenated array.
"""
# Get the indexes of the arrays in the list
mask = []
for i in range(len(l)):
if l[i] is not None:
mask.append(i)
# Concatenate them
l_stacked = np.concatenate([l[i] for i in mask], axis=axis)
return l_stacked | [
"def",
"concatenate_not_none",
"(",
"l",
",",
"axis",
"=",
"0",
")",
":",
"# Get the indexes of the arrays in the list",
"mask",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"l",
")",
")",
":",
"if",
"l",
"[",
"i",
"]",
"is",
"not",
"No... | Construct a numpy array by stacking not-None arrays in a list
Parameters
----------
data : list of arrays
The list of arrays to be concatenated. Arrays have same shape in all
but one dimension or are None, in which case they are ignored.
axis : int, default = 0
Axis for the concatenation
Returns
-------
data_stacked : array
The resulting concatenated array. | [
"Construct",
"a",
"numpy",
"array",
"by",
"stacking",
"not",
"-",
"None",
"arrays",
"in",
"a",
"list"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/utils.py#L131-L159 | train | 204,467 |
brainiak/brainiak | brainiak/utils/utils.py | cov2corr | def cov2corr(cov):
"""Calculate the correlation matrix based on a
covariance matrix
Parameters
----------
cov: 2D array
Returns
-------
corr: 2D array
correlation converted from the covarince matrix
"""
assert cov.ndim == 2, 'covariance matrix should be 2D array'
inv_sd = 1 / np.sqrt(np.diag(cov))
corr = cov * inv_sd[None, :] * inv_sd[:, None]
return corr | python | def cov2corr(cov):
"""Calculate the correlation matrix based on a
covariance matrix
Parameters
----------
cov: 2D array
Returns
-------
corr: 2D array
correlation converted from the covarince matrix
"""
assert cov.ndim == 2, 'covariance matrix should be 2D array'
inv_sd = 1 / np.sqrt(np.diag(cov))
corr = cov * inv_sd[None, :] * inv_sd[:, None]
return corr | [
"def",
"cov2corr",
"(",
"cov",
")",
":",
"assert",
"cov",
".",
"ndim",
"==",
"2",
",",
"'covariance matrix should be 2D array'",
"inv_sd",
"=",
"1",
"/",
"np",
".",
"sqrt",
"(",
"np",
".",
"diag",
"(",
"cov",
")",
")",
"corr",
"=",
"cov",
"*",
"inv_s... | Calculate the correlation matrix based on a
covariance matrix
Parameters
----------
cov: 2D array
Returns
-------
corr: 2D array
correlation converted from the covarince matrix | [
"Calculate",
"the",
"correlation",
"matrix",
"based",
"on",
"a",
"covariance",
"matrix"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/utils.py#L162-L182 | train | 204,468 |
brainiak/brainiak | brainiak/utils/utils.py | usable_cpu_count | def usable_cpu_count():
"""Get number of CPUs usable by the current process.
Takes into consideration cpusets restrictions.
Returns
-------
int
"""
try:
result = len(os.sched_getaffinity(0))
except AttributeError:
try:
result = len(psutil.Process().cpu_affinity())
except AttributeError:
result = os.cpu_count()
return result | python | def usable_cpu_count():
"""Get number of CPUs usable by the current process.
Takes into consideration cpusets restrictions.
Returns
-------
int
"""
try:
result = len(os.sched_getaffinity(0))
except AttributeError:
try:
result = len(psutil.Process().cpu_affinity())
except AttributeError:
result = os.cpu_count()
return result | [
"def",
"usable_cpu_count",
"(",
")",
":",
"try",
":",
"result",
"=",
"len",
"(",
"os",
".",
"sched_getaffinity",
"(",
"0",
")",
")",
"except",
"AttributeError",
":",
"try",
":",
"result",
"=",
"len",
"(",
"psutil",
".",
"Process",
"(",
")",
".",
"cpu... | Get number of CPUs usable by the current process.
Takes into consideration cpusets restrictions.
Returns
-------
int | [
"Get",
"number",
"of",
"CPUs",
"usable",
"by",
"the",
"current",
"process",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/utils.py#L678-L694 | train | 204,469 |
brainiak/brainiak | brainiak/utils/utils.py | phase_randomize | def phase_randomize(data, voxelwise=False, random_state=None):
"""Randomize phase of time series across subjects
For each subject, apply Fourier transform to voxel time series
and then randomly shift the phase of each frequency before inverting
back into the time domain. This yields time series with the same power
spectrum (and thus the same autocorrelation) as the original time series
but will remove any meaningful temporal relationships among time series
across subjects. By default (voxelwise=False), the same phase shift is
applied across all voxels; however if voxelwise=True, different random
phase shifts are applied to each voxel. The typical input is a time by
voxels by subjects ndarray. The first dimension is assumed to be the
time dimension and will be phase randomized. If a 2-dimensional ndarray
is provided, the last dimension is assumed to be subjects, and different
phase randomizations will be applied to each subject.
The implementation is based on the work in [Lerner2011]_ and
[Simony2016]_.
Parameters
----------
data : ndarray (n_TRs x n_voxels x n_subjects)
Data to be phase randomized (per subject)
voxelwise : bool, default: False
Apply same (False) or different (True) randomizations across voxels
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
Returns
----------
shifted_data : ndarray (n_TRs x n_voxels x n_subjects)
Phase-randomized time series
"""
# Check if input is 2-dimensional
data_ndim = data.ndim
# Get basic shape of data
data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(data)
# Random seed to be deterministically re-randomized at each iteration
if isinstance(random_state, np.random.RandomState):
prng = random_state
else:
prng = np.random.RandomState(random_state)
# Get randomized phase shifts
if n_TRs % 2 == 0:
# Why are we indexing from 1 not zero here? n_TRs / -1 long?
pos_freq = np.arange(1, data.shape[0] // 2)
neg_freq = np.arange(data.shape[0] - 1, data.shape[0] // 2, -1)
else:
pos_freq = np.arange(1, (data.shape[0] - 1) // 2 + 1)
neg_freq = np.arange(data.shape[0] - 1,
(data.shape[0] - 1) // 2, -1)
if not voxelwise:
phase_shifts = (prng.rand(len(pos_freq), 1, n_subjects)
* 2 * np.math.pi)
else:
phase_shifts = (prng.rand(len(pos_freq), n_voxels, n_subjects)
* 2 * np.math.pi)
# Fast Fourier transform along time dimension of data
fft_data = fft(data, axis=0)
# Shift pos and neg frequencies symmetrically, to keep signal real
fft_data[pos_freq, :, :] *= np.exp(1j * phase_shifts)
fft_data[neg_freq, :, :] *= np.exp(-1j * phase_shifts)
# Inverse FFT to put data back in time domain
shifted_data = np.real(ifft(fft_data, axis=0))
# Go back to 2-dimensions if input was 2-dimensional
if data_ndim == 2:
shifted_data = shifted_data[:, 0, :]
return shifted_data | python | def phase_randomize(data, voxelwise=False, random_state=None):
"""Randomize phase of time series across subjects
For each subject, apply Fourier transform to voxel time series
and then randomly shift the phase of each frequency before inverting
back into the time domain. This yields time series with the same power
spectrum (and thus the same autocorrelation) as the original time series
but will remove any meaningful temporal relationships among time series
across subjects. By default (voxelwise=False), the same phase shift is
applied across all voxels; however if voxelwise=True, different random
phase shifts are applied to each voxel. The typical input is a time by
voxels by subjects ndarray. The first dimension is assumed to be the
time dimension and will be phase randomized. If a 2-dimensional ndarray
is provided, the last dimension is assumed to be subjects, and different
phase randomizations will be applied to each subject.
The implementation is based on the work in [Lerner2011]_ and
[Simony2016]_.
Parameters
----------
data : ndarray (n_TRs x n_voxels x n_subjects)
Data to be phase randomized (per subject)
voxelwise : bool, default: False
Apply same (False) or different (True) randomizations across voxels
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
Returns
----------
shifted_data : ndarray (n_TRs x n_voxels x n_subjects)
Phase-randomized time series
"""
# Check if input is 2-dimensional
data_ndim = data.ndim
# Get basic shape of data
data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(data)
# Random seed to be deterministically re-randomized at each iteration
if isinstance(random_state, np.random.RandomState):
prng = random_state
else:
prng = np.random.RandomState(random_state)
# Get randomized phase shifts
if n_TRs % 2 == 0:
# Why are we indexing from 1 not zero here? n_TRs / -1 long?
pos_freq = np.arange(1, data.shape[0] // 2)
neg_freq = np.arange(data.shape[0] - 1, data.shape[0] // 2, -1)
else:
pos_freq = np.arange(1, (data.shape[0] - 1) // 2 + 1)
neg_freq = np.arange(data.shape[0] - 1,
(data.shape[0] - 1) // 2, -1)
if not voxelwise:
phase_shifts = (prng.rand(len(pos_freq), 1, n_subjects)
* 2 * np.math.pi)
else:
phase_shifts = (prng.rand(len(pos_freq), n_voxels, n_subjects)
* 2 * np.math.pi)
# Fast Fourier transform along time dimension of data
fft_data = fft(data, axis=0)
# Shift pos and neg frequencies symmetrically, to keep signal real
fft_data[pos_freq, :, :] *= np.exp(1j * phase_shifts)
fft_data[neg_freq, :, :] *= np.exp(-1j * phase_shifts)
# Inverse FFT to put data back in time domain
shifted_data = np.real(ifft(fft_data, axis=0))
# Go back to 2-dimensions if input was 2-dimensional
if data_ndim == 2:
shifted_data = shifted_data[:, 0, :]
return shifted_data | [
"def",
"phase_randomize",
"(",
"data",
",",
"voxelwise",
"=",
"False",
",",
"random_state",
"=",
"None",
")",
":",
"# Check if input is 2-dimensional",
"data_ndim",
"=",
"data",
".",
"ndim",
"# Get basic shape of data",
"data",
",",
"n_TRs",
",",
"n_voxels",
",",
... | Randomize phase of time series across subjects
For each subject, apply Fourier transform to voxel time series
and then randomly shift the phase of each frequency before inverting
back into the time domain. This yields time series with the same power
spectrum (and thus the same autocorrelation) as the original time series
but will remove any meaningful temporal relationships among time series
across subjects. By default (voxelwise=False), the same phase shift is
applied across all voxels; however if voxelwise=True, different random
phase shifts are applied to each voxel. The typical input is a time by
voxels by subjects ndarray. The first dimension is assumed to be the
time dimension and will be phase randomized. If a 2-dimensional ndarray
is provided, the last dimension is assumed to be subjects, and different
phase randomizations will be applied to each subject.
The implementation is based on the work in [Lerner2011]_ and
[Simony2016]_.
Parameters
----------
data : ndarray (n_TRs x n_voxels x n_subjects)
Data to be phase randomized (per subject)
voxelwise : bool, default: False
Apply same (False) or different (True) randomizations across voxels
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
Returns
----------
shifted_data : ndarray (n_TRs x n_voxels x n_subjects)
Phase-randomized time series | [
"Randomize",
"phase",
"of",
"time",
"series",
"across",
"subjects"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/utils.py#L697-L777 | train | 204,470 |
brainiak/brainiak | brainiak/utils/utils.py | p_from_null | def p_from_null(observed, distribution,
side='two-sided', exact=False,
axis=None):
"""Compute p-value from null distribution
Returns the p-value for an observed test statistic given a null
distribution. Performs either a 'two-sided' (i.e., two-tailed)
test (default) or a one-sided (i.e., one-tailed) test for either the
'left' or 'right' side. For an exact test (exact=True), does not adjust
for the observed test statistic; otherwise, adjusts for observed
test statistic (prevents p-values of zero). If a multidimensional
distribution is provided, use axis argument to specify which axis indexes
resampling iterations.
The implementation is based on the work in [PhipsonSmyth2010]_.
.. [PhipsonSmyth2010] "Permutation p-values should never be zero:
calculating exact p-values when permutations are randomly drawn.",
B. Phipson, G. K., Smyth, 2010, Statistical Applications in Genetics
and Molecular Biology, 9, 1544-6115.
https://doi.org/10.2202/1544-6115.1585
Parameters
----------
observed : float
Observed test statistic
distribution : ndarray
Null distribution of test statistic
side : str, default:'two-sided'
Perform one-sided ('left' or 'right') or 'two-sided' test
axis: None or int, default:None
Axis indicating resampling iterations in input distribution
Returns
-------
p : float
p-value for observed test statistic based on null distribution
"""
if side not in ('two-sided', 'left', 'right'):
raise ValueError("The value for 'side' must be either "
"'two-sided', 'left', or 'right', got {0}".
format(side))
n_samples = len(distribution)
logger.info("Assuming {0} resampling iterations".format(n_samples))
if side == 'two-sided':
# Numerator for two-sided test
numerator = np.sum(np.abs(distribution) >= np.abs(observed), axis=axis)
elif side == 'left':
# Numerator for one-sided test in left tail
numerator = np.sum(distribution <= observed, axis=axis)
elif side == 'right':
# Numerator for one-sided test in right tail
numerator = np.sum(distribution >= observed, axis=axis)
# If exact test all possible permutations and do not adjust
if exact:
p = numerator / n_samples
# If not exact test, adjust number of samples to account for
# observed statistic; prevents p-value from being zero
else:
p = (numerator + 1) / (n_samples + 1)
return p | python | def p_from_null(observed, distribution,
side='two-sided', exact=False,
axis=None):
"""Compute p-value from null distribution
Returns the p-value for an observed test statistic given a null
distribution. Performs either a 'two-sided' (i.e., two-tailed)
test (default) or a one-sided (i.e., one-tailed) test for either the
'left' or 'right' side. For an exact test (exact=True), does not adjust
for the observed test statistic; otherwise, adjusts for observed
test statistic (prevents p-values of zero). If a multidimensional
distribution is provided, use axis argument to specify which axis indexes
resampling iterations.
The implementation is based on the work in [PhipsonSmyth2010]_.
.. [PhipsonSmyth2010] "Permutation p-values should never be zero:
calculating exact p-values when permutations are randomly drawn.",
B. Phipson, G. K., Smyth, 2010, Statistical Applications in Genetics
and Molecular Biology, 9, 1544-6115.
https://doi.org/10.2202/1544-6115.1585
Parameters
----------
observed : float
Observed test statistic
distribution : ndarray
Null distribution of test statistic
side : str, default:'two-sided'
Perform one-sided ('left' or 'right') or 'two-sided' test
axis: None or int, default:None
Axis indicating resampling iterations in input distribution
Returns
-------
p : float
p-value for observed test statistic based on null distribution
"""
if side not in ('two-sided', 'left', 'right'):
raise ValueError("The value for 'side' must be either "
"'two-sided', 'left', or 'right', got {0}".
format(side))
n_samples = len(distribution)
logger.info("Assuming {0} resampling iterations".format(n_samples))
if side == 'two-sided':
# Numerator for two-sided test
numerator = np.sum(np.abs(distribution) >= np.abs(observed), axis=axis)
elif side == 'left':
# Numerator for one-sided test in left tail
numerator = np.sum(distribution <= observed, axis=axis)
elif side == 'right':
# Numerator for one-sided test in right tail
numerator = np.sum(distribution >= observed, axis=axis)
# If exact test all possible permutations and do not adjust
if exact:
p = numerator / n_samples
# If not exact test, adjust number of samples to account for
# observed statistic; prevents p-value from being zero
else:
p = (numerator + 1) / (n_samples + 1)
return p | [
"def",
"p_from_null",
"(",
"observed",
",",
"distribution",
",",
"side",
"=",
"'two-sided'",
",",
"exact",
"=",
"False",
",",
"axis",
"=",
"None",
")",
":",
"if",
"side",
"not",
"in",
"(",
"'two-sided'",
",",
"'left'",
",",
"'right'",
")",
":",
"raise"... | Compute p-value from null distribution
Returns the p-value for an observed test statistic given a null
distribution. Performs either a 'two-sided' (i.e., two-tailed)
test (default) or a one-sided (i.e., one-tailed) test for either the
'left' or 'right' side. For an exact test (exact=True), does not adjust
for the observed test statistic; otherwise, adjusts for observed
test statistic (prevents p-values of zero). If a multidimensional
distribution is provided, use axis argument to specify which axis indexes
resampling iterations.
The implementation is based on the work in [PhipsonSmyth2010]_.
.. [PhipsonSmyth2010] "Permutation p-values should never be zero:
calculating exact p-values when permutations are randomly drawn.",
B. Phipson, G. K., Smyth, 2010, Statistical Applications in Genetics
and Molecular Biology, 9, 1544-6115.
https://doi.org/10.2202/1544-6115.1585
Parameters
----------
observed : float
Observed test statistic
distribution : ndarray
Null distribution of test statistic
side : str, default:'two-sided'
Perform one-sided ('left' or 'right') or 'two-sided' test
axis: None or int, default:None
Axis indicating resampling iterations in input distribution
Returns
-------
p : float
p-value for observed test statistic based on null distribution | [
"Compute",
"p",
"-",
"value",
"from",
"null",
"distribution"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/utils.py#L780-L849 | train | 204,471 |
brainiak/brainiak | brainiak/utils/utils.py | array_correlation | def array_correlation(x, y, axis=0):
"""Column- or row-wise Pearson correlation between two arrays
Computes sample Pearson correlation between two 1D or 2D arrays (e.g.,
two n_TRs by n_voxels arrays). For 2D arrays, computes correlation
between each corresponding column (axis=0) or row (axis=1) where axis
indexes observations. If axis=0 (default), each column is considered to
be a variable and each row is an observation; if axis=1, each row is a
variable and each column is an observation (equivalent to transposing
the input arrays). Input arrays must be the same shape with corresponding
variables and observations. This is intended to be an efficient method
for computing correlations between two corresponding arrays with many
variables (e.g., many voxels).
Parameters
----------
x : 1D or 2D ndarray
Array of observations for one or more variables
y : 1D or 2D ndarray
Array of observations for one or more variables (same shape as x)
axis : int (0 or 1), default: 0
Correlation between columns (axis=0) or rows (axis=1)
Returns
-------
r : float or 1D ndarray
Pearson correlation values for input variables
"""
# Accommodate array-like inputs
if not isinstance(x, np.ndarray):
x = np.asarray(x)
if not isinstance(y, np.ndarray):
y = np.asarray(y)
# Check that inputs are same shape
if x.shape != y.shape:
raise ValueError("Input arrays must be the same shape")
# Transpose if axis=1 requested (to avoid broadcasting
# issues introduced by switching axis in mean and sum)
if axis == 1:
x, y = x.T, y.T
# Center (de-mean) input variables
x_demean = x - np.mean(x, axis=0)
y_demean = y - np.mean(y, axis=0)
# Compute summed product of centered variables
numerator = np.sum(x_demean * y_demean, axis=0)
# Compute sum squared error
denominator = np.sqrt(np.sum(x_demean ** 2, axis=0) *
np.sum(y_demean ** 2, axis=0))
return numerator / denominator | python | def array_correlation(x, y, axis=0):
"""Column- or row-wise Pearson correlation between two arrays
Computes sample Pearson correlation between two 1D or 2D arrays (e.g.,
two n_TRs by n_voxels arrays). For 2D arrays, computes correlation
between each corresponding column (axis=0) or row (axis=1) where axis
indexes observations. If axis=0 (default), each column is considered to
be a variable and each row is an observation; if axis=1, each row is a
variable and each column is an observation (equivalent to transposing
the input arrays). Input arrays must be the same shape with corresponding
variables and observations. This is intended to be an efficient method
for computing correlations between two corresponding arrays with many
variables (e.g., many voxels).
Parameters
----------
x : 1D or 2D ndarray
Array of observations for one or more variables
y : 1D or 2D ndarray
Array of observations for one or more variables (same shape as x)
axis : int (0 or 1), default: 0
Correlation between columns (axis=0) or rows (axis=1)
Returns
-------
r : float or 1D ndarray
Pearson correlation values for input variables
"""
# Accommodate array-like inputs
if not isinstance(x, np.ndarray):
x = np.asarray(x)
if not isinstance(y, np.ndarray):
y = np.asarray(y)
# Check that inputs are same shape
if x.shape != y.shape:
raise ValueError("Input arrays must be the same shape")
# Transpose if axis=1 requested (to avoid broadcasting
# issues introduced by switching axis in mean and sum)
if axis == 1:
x, y = x.T, y.T
# Center (de-mean) input variables
x_demean = x - np.mean(x, axis=0)
y_demean = y - np.mean(y, axis=0)
# Compute summed product of centered variables
numerator = np.sum(x_demean * y_demean, axis=0)
# Compute sum squared error
denominator = np.sqrt(np.sum(x_demean ** 2, axis=0) *
np.sum(y_demean ** 2, axis=0))
return numerator / denominator | [
"def",
"array_correlation",
"(",
"x",
",",
"y",
",",
"axis",
"=",
"0",
")",
":",
"# Accommodate array-like inputs",
"if",
"not",
"isinstance",
"(",
"x",
",",
"np",
".",
"ndarray",
")",
":",
"x",
"=",
"np",
".",
"asarray",
"(",
"x",
")",
"if",
"not",
... | Column- or row-wise Pearson correlation between two arrays
Computes sample Pearson correlation between two 1D or 2D arrays (e.g.,
two n_TRs by n_voxels arrays). For 2D arrays, computes correlation
between each corresponding column (axis=0) or row (axis=1) where axis
indexes observations. If axis=0 (default), each column is considered to
be a variable and each row is an observation; if axis=1, each row is a
variable and each column is an observation (equivalent to transposing
the input arrays). Input arrays must be the same shape with corresponding
variables and observations. This is intended to be an efficient method
for computing correlations between two corresponding arrays with many
variables (e.g., many voxels).
Parameters
----------
x : 1D or 2D ndarray
Array of observations for one or more variables
y : 1D or 2D ndarray
Array of observations for one or more variables (same shape as x)
axis : int (0 or 1), default: 0
Correlation between columns (axis=0) or rows (axis=1)
Returns
-------
r : float or 1D ndarray
Pearson correlation values for input variables | [
"Column",
"-",
"or",
"row",
"-",
"wise",
"Pearson",
"correlation",
"between",
"two",
"arrays"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/utils/utils.py#L915-L973 | train | 204,472 |
brainiak/brainiak | brainiak/fcma/classifier.py | Classifier._prepare_corerelation_data | def _prepare_corerelation_data(self, X1, X2,
start_voxel=0,
num_processed_voxels=None):
"""Compute auto-correlation for the input data X1 and X2.
it will generate the correlation between some voxels and all voxels
Parameters
----------
X1: a list of numpy array in shape [num_TRs, num_voxels1]
X1 contains the activity data filtered by ROIs
and prepared for correlation computation.
All elements of X1 must have the same num_voxels value.
X2: a list of numpy array in shape [num_TRs, num_voxels2]
len(X1) equals len(X2).
All elements of X2 must have the same num_voxels value.
X2 can be identical to X1; if not, X1 must have more voxels
than X2 (guaranteed by self.fit and/or self.predict).
start_voxel: int, default 0
the starting voxel id for correlation computation
num_processed_voxels: int, default None
the number of voxels it computes for correlation computation
if it is None, it is set to self.num_voxels
Returns
-------
corr_data: the correlation data
in shape [len(X), num_processed_voxels, num_voxels2]
"""
num_samples = len(X1)
assert num_samples > 0, \
'at least one sample is needed for correlation computation'
num_voxels1 = X1[0].shape[1]
num_voxels2 = X2[0].shape[1]
assert num_voxels1 * num_voxels2 == self.num_features_, \
'the number of features provided by the input data ' \
'does not match the number of features defined in the model'
assert X1[0].shape[0] == X2[0].shape[0], \
'the numbers of TRs of X1 and X2 are not identical'
if num_processed_voxels is None:
num_processed_voxels = num_voxels1
corr_data = np.zeros((num_samples, num_processed_voxels, num_voxels2),
np.float32, order='C')
# compute correlation
for idx, data in enumerate(X1):
data2 = X2[idx]
num_TRs = data.shape[0]
blas.compute_corr_vectors('N', 'T',
num_voxels2, num_processed_voxels,
num_TRs,
1.0, data2, num_voxels2,
data, num_voxels1,
0.0, corr_data, num_voxels2,
start_voxel, idx)
logger.debug(
'correlation computation done'
)
return corr_data | python | def _prepare_corerelation_data(self, X1, X2,
start_voxel=0,
num_processed_voxels=None):
"""Compute auto-correlation for the input data X1 and X2.
it will generate the correlation between some voxels and all voxels
Parameters
----------
X1: a list of numpy array in shape [num_TRs, num_voxels1]
X1 contains the activity data filtered by ROIs
and prepared for correlation computation.
All elements of X1 must have the same num_voxels value.
X2: a list of numpy array in shape [num_TRs, num_voxels2]
len(X1) equals len(X2).
All elements of X2 must have the same num_voxels value.
X2 can be identical to X1; if not, X1 must have more voxels
than X2 (guaranteed by self.fit and/or self.predict).
start_voxel: int, default 0
the starting voxel id for correlation computation
num_processed_voxels: int, default None
the number of voxels it computes for correlation computation
if it is None, it is set to self.num_voxels
Returns
-------
corr_data: the correlation data
in shape [len(X), num_processed_voxels, num_voxels2]
"""
num_samples = len(X1)
assert num_samples > 0, \
'at least one sample is needed for correlation computation'
num_voxels1 = X1[0].shape[1]
num_voxels2 = X2[0].shape[1]
assert num_voxels1 * num_voxels2 == self.num_features_, \
'the number of features provided by the input data ' \
'does not match the number of features defined in the model'
assert X1[0].shape[0] == X2[0].shape[0], \
'the numbers of TRs of X1 and X2 are not identical'
if num_processed_voxels is None:
num_processed_voxels = num_voxels1
corr_data = np.zeros((num_samples, num_processed_voxels, num_voxels2),
np.float32, order='C')
# compute correlation
for idx, data in enumerate(X1):
data2 = X2[idx]
num_TRs = data.shape[0]
blas.compute_corr_vectors('N', 'T',
num_voxels2, num_processed_voxels,
num_TRs,
1.0, data2, num_voxels2,
data, num_voxels1,
0.0, corr_data, num_voxels2,
start_voxel, idx)
logger.debug(
'correlation computation done'
)
return corr_data | [
"def",
"_prepare_corerelation_data",
"(",
"self",
",",
"X1",
",",
"X2",
",",
"start_voxel",
"=",
"0",
",",
"num_processed_voxels",
"=",
"None",
")",
":",
"num_samples",
"=",
"len",
"(",
"X1",
")",
"assert",
"num_samples",
">",
"0",
",",
"'at least one sample... | Compute auto-correlation for the input data X1 and X2.
it will generate the correlation between some voxels and all voxels
Parameters
----------
X1: a list of numpy array in shape [num_TRs, num_voxels1]
X1 contains the activity data filtered by ROIs
and prepared for correlation computation.
All elements of X1 must have the same num_voxels value.
X2: a list of numpy array in shape [num_TRs, num_voxels2]
len(X1) equals len(X2).
All elements of X2 must have the same num_voxels value.
X2 can be identical to X1; if not, X1 must have more voxels
than X2 (guaranteed by self.fit and/or self.predict).
start_voxel: int, default 0
the starting voxel id for correlation computation
num_processed_voxels: int, default None
the number of voxels it computes for correlation computation
if it is None, it is set to self.num_voxels
Returns
-------
corr_data: the correlation data
in shape [len(X), num_processed_voxels, num_voxels2] | [
"Compute",
"auto",
"-",
"correlation",
"for",
"the",
"input",
"data",
"X1",
"and",
"X2",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/classifier.py#L125-L182 | train | 204,473 |
brainiak/brainiak | brainiak/fcma/classifier.py | Classifier._normalize_correlation_data | def _normalize_correlation_data(self, corr_data, norm_unit):
"""Normalize the correlation data if necessary.
Fisher-transform and then z-score the data for every norm_unit samples
if norm_unit > 1.
Parameters
----------
corr_data: the correlation data
in shape [num_samples, num_processed_voxels, num_voxels]
norm_unit: int
the number of samples on which the normalization
is performed
Returns
-------
normalized_corr_data: the normalized correlation data
in shape [num_samples, num_voxels, num_voxels]
"""
# normalize if necessary
if norm_unit > 1:
num_samples = len(corr_data)
[_, d2, d3] = corr_data.shape
second_dimension = d2 * d3
# this is a shallow copy
normalized_corr_data = corr_data.reshape(1,
num_samples,
second_dimension)
fcma_extension.normalization(normalized_corr_data, norm_unit)
normalized_corr_data = normalized_corr_data.reshape(num_samples,
d2, d3)
logger.debug(
'normalization done'
)
else:
normalized_corr_data = corr_data
return normalized_corr_data | python | def _normalize_correlation_data(self, corr_data, norm_unit):
"""Normalize the correlation data if necessary.
Fisher-transform and then z-score the data for every norm_unit samples
if norm_unit > 1.
Parameters
----------
corr_data: the correlation data
in shape [num_samples, num_processed_voxels, num_voxels]
norm_unit: int
the number of samples on which the normalization
is performed
Returns
-------
normalized_corr_data: the normalized correlation data
in shape [num_samples, num_voxels, num_voxels]
"""
# normalize if necessary
if norm_unit > 1:
num_samples = len(corr_data)
[_, d2, d3] = corr_data.shape
second_dimension = d2 * d3
# this is a shallow copy
normalized_corr_data = corr_data.reshape(1,
num_samples,
second_dimension)
fcma_extension.normalization(normalized_corr_data, norm_unit)
normalized_corr_data = normalized_corr_data.reshape(num_samples,
d2, d3)
logger.debug(
'normalization done'
)
else:
normalized_corr_data = corr_data
return normalized_corr_data | [
"def",
"_normalize_correlation_data",
"(",
"self",
",",
"corr_data",
",",
"norm_unit",
")",
":",
"# normalize if necessary",
"if",
"norm_unit",
">",
"1",
":",
"num_samples",
"=",
"len",
"(",
"corr_data",
")",
"[",
"_",
",",
"d2",
",",
"d3",
"]",
"=",
"corr... | Normalize the correlation data if necessary.
Fisher-transform and then z-score the data for every norm_unit samples
if norm_unit > 1.
Parameters
----------
corr_data: the correlation data
in shape [num_samples, num_processed_voxels, num_voxels]
norm_unit: int
the number of samples on which the normalization
is performed
Returns
-------
normalized_corr_data: the normalized correlation data
in shape [num_samples, num_voxels, num_voxels] | [
"Normalize",
"the",
"correlation",
"data",
"if",
"necessary",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/classifier.py#L184-L220 | train | 204,474 |
brainiak/brainiak | brainiak/fcma/classifier.py | Classifier._compute_kernel_matrix_in_portion | def _compute_kernel_matrix_in_portion(self, X1, X2):
"""Compute kernel matrix for sklearn.svm.SVC with precomputed kernel.
The method generates the kernel matrix (similarity matrix) for
sklearn.svm.SVC with precomputed kernel. It first computes
the correlation from X, then normalizes the correlation if needed,
and finally computes the kernel matrix. It is worth noting that if
the resulting correlation is large, the kernel matrix will be computed
portion by portion to save memory usage (the portion size is specified
in self.num_processed_voxels.
Parameters
----------
X1: a list of numpy array in shape [num_TRs, num_voxels]
X1 contains the activity data filtered by ROIs
and prepared for correlation computation.
All elements of X1 must have the same num_voxels value,
X2: a list of numpy array in shape [num_TRs, num_voxels]
len(X) equals len(X2).
All elements of X2 must have the same num_voxels value.
X2 can be identical to X1; if not, X1 always has more voxels
than X2.
Returns
-------
kernel_matrix: 2D array in shape [num_samples, num_samples]
the kernel matrix to be used in sklearn.svm.SVC
normalized_corr_data: 2D array in shape [num_samples, num_features]
the training data to be used in self.predict() if
the kernel matrix is computed in one portion,
otherwise it will not be used.
"""
kernel_matrix = np.zeros((self.num_samples_, self.num_samples_),
np.float32,
order='C')
sr = 0
row_length = self.num_processed_voxels
num_voxels2 = X2[0].shape[1]
normalized_corr_data = None
while sr < self.num_voxels_:
if row_length >= self.num_voxels_ - sr:
row_length = self.num_voxels_ - sr
# compute sub-correlation
corr_data = self._prepare_corerelation_data(X1, X2,
sr, row_length)
# normalization
normalized_corr_data = self._normalize_correlation_data(
corr_data,
self.epochs_per_subj)
# compute partial kernel matrices
# for using kernel matrix computation from voxel selection
normalized_corr_data = normalized_corr_data.reshape(
1,
self.num_samples_,
row_length * num_voxels2)
blas.compute_kernel_matrix('L', 'T',
self.num_samples_,
row_length * num_voxels2,
1.0, normalized_corr_data,
0, row_length * num_voxels2,
1.0, kernel_matrix, self.num_samples_)
sr += row_length
# shrink the values for getting more stable alpha values
# in SVM training iteration
num_digits = len(str(int(kernel_matrix[0, 0])))
self.num_digits_ = num_digits
if num_digits > 2:
proportion = 10**(2-num_digits)
kernel_matrix *= proportion
return kernel_matrix, normalized_corr_data | python | def _compute_kernel_matrix_in_portion(self, X1, X2):
"""Compute kernel matrix for sklearn.svm.SVC with precomputed kernel.
The method generates the kernel matrix (similarity matrix) for
sklearn.svm.SVC with precomputed kernel. It first computes
the correlation from X, then normalizes the correlation if needed,
and finally computes the kernel matrix. It is worth noting that if
the resulting correlation is large, the kernel matrix will be computed
portion by portion to save memory usage (the portion size is specified
in self.num_processed_voxels.
Parameters
----------
X1: a list of numpy array in shape [num_TRs, num_voxels]
X1 contains the activity data filtered by ROIs
and prepared for correlation computation.
All elements of X1 must have the same num_voxels value,
X2: a list of numpy array in shape [num_TRs, num_voxels]
len(X) equals len(X2).
All elements of X2 must have the same num_voxels value.
X2 can be identical to X1; if not, X1 always has more voxels
than X2.
Returns
-------
kernel_matrix: 2D array in shape [num_samples, num_samples]
the kernel matrix to be used in sklearn.svm.SVC
normalized_corr_data: 2D array in shape [num_samples, num_features]
the training data to be used in self.predict() if
the kernel matrix is computed in one portion,
otherwise it will not be used.
"""
kernel_matrix = np.zeros((self.num_samples_, self.num_samples_),
np.float32,
order='C')
sr = 0
row_length = self.num_processed_voxels
num_voxels2 = X2[0].shape[1]
normalized_corr_data = None
while sr < self.num_voxels_:
if row_length >= self.num_voxels_ - sr:
row_length = self.num_voxels_ - sr
# compute sub-correlation
corr_data = self._prepare_corerelation_data(X1, X2,
sr, row_length)
# normalization
normalized_corr_data = self._normalize_correlation_data(
corr_data,
self.epochs_per_subj)
# compute partial kernel matrices
# for using kernel matrix computation from voxel selection
normalized_corr_data = normalized_corr_data.reshape(
1,
self.num_samples_,
row_length * num_voxels2)
blas.compute_kernel_matrix('L', 'T',
self.num_samples_,
row_length * num_voxels2,
1.0, normalized_corr_data,
0, row_length * num_voxels2,
1.0, kernel_matrix, self.num_samples_)
sr += row_length
# shrink the values for getting more stable alpha values
# in SVM training iteration
num_digits = len(str(int(kernel_matrix[0, 0])))
self.num_digits_ = num_digits
if num_digits > 2:
proportion = 10**(2-num_digits)
kernel_matrix *= proportion
return kernel_matrix, normalized_corr_data | [
"def",
"_compute_kernel_matrix_in_portion",
"(",
"self",
",",
"X1",
",",
"X2",
")",
":",
"kernel_matrix",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"num_samples_",
",",
"self",
".",
"num_samples_",
")",
",",
"np",
".",
"float32",
",",
"order",
"=",... | Compute kernel matrix for sklearn.svm.SVC with precomputed kernel.
The method generates the kernel matrix (similarity matrix) for
sklearn.svm.SVC with precomputed kernel. It first computes
the correlation from X, then normalizes the correlation if needed,
and finally computes the kernel matrix. It is worth noting that if
the resulting correlation is large, the kernel matrix will be computed
portion by portion to save memory usage (the portion size is specified
in self.num_processed_voxels.
Parameters
----------
X1: a list of numpy array in shape [num_TRs, num_voxels]
X1 contains the activity data filtered by ROIs
and prepared for correlation computation.
All elements of X1 must have the same num_voxels value,
X2: a list of numpy array in shape [num_TRs, num_voxels]
len(X) equals len(X2).
All elements of X2 must have the same num_voxels value.
X2 can be identical to X1; if not, X1 always has more voxels
than X2.
Returns
-------
kernel_matrix: 2D array in shape [num_samples, num_samples]
the kernel matrix to be used in sklearn.svm.SVC
normalized_corr_data: 2D array in shape [num_samples, num_features]
the training data to be used in self.predict() if
the kernel matrix is computed in one portion,
otherwise it will not be used. | [
"Compute",
"kernel",
"matrix",
"for",
"sklearn",
".",
"svm",
".",
"SVC",
"with",
"precomputed",
"kernel",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/classifier.py#L279-L348 | train | 204,475 |
brainiak/brainiak | brainiak/fcma/classifier.py | Classifier._generate_training_data | def _generate_training_data(self, X1, X2, num_training_samples):
"""Generate training data for the classifier.
Compute the correlation, do the normalization if necessary,
and compute the kernel matrix if the classifier is
sklearn.svm.SVC with precomputed kernel.
Parameters
----------
X1: a list of numpy array in shape [num_TRs, num_voxels]
X1 contains the activity data filtered by ROIs
and prepared for correlation computation.
All elements of X1 must have the same num_voxels value,
X2: a list of numpy array in shape [num_TRs, num_voxels]
len(X1) equals len(X2).
All elements of X2 must have the same num_voxels value.
X2 can be identical to X1; if not, X1 must have more voxels
than X2 (guaranteed by self.fit).
num_training_samples: Optional[int]
Default None.
The number of samples used in the training,
which is set when the kernel matrix is constructed
portion by portion so the similarity vectors of the
test data have to be computed here.
This is ONLY set when sklearn.svm.SVC with
precomputed kernel is used.
If it is set, only those samples will be used to fit the model.
Returns
-------
data: 2D numpy array
If the classifier is sklearn.svm.SVC with precomputed kernel,
data is the kenrl matrix in shape [num_samples, num_samples];
otherwise, data is in shape [num_samples, num_features] as
the training data.
"""
if not (isinstance(self.clf, sklearn.svm.SVC)
and self.clf.kernel == 'precomputed'):
# correlation computation
corr_data = self._prepare_corerelation_data(X1, X2)
# normalization
normalized_corr_data = self._normalize_correlation_data(
corr_data,
self.epochs_per_subj)
# training data prepare
data = normalized_corr_data.reshape(self.num_samples_,
self.num_features_)
self.training_data_ = None
else: # SVM with precomputed kernel
if self.num_processed_voxels < self.num_voxels_:
if num_training_samples is None:
raise RuntimeError('the kernel matrix will be '
'computed portion by portion, '
'the test samples must be predefined '
'by specifying '
'num_training_samples')
if num_training_samples >= self.num_samples_:
raise ValueError('the number of training samples '
'must be smaller than '
'the number of total samples')
data, normalized_corr_data = \
self._compute_kernel_matrix_in_portion(X1, X2)
if self.num_processed_voxels >= self.num_voxels_:
# training data is in shape
# [num_samples, num_voxels * num_voxels]
self.training_data_ = normalized_corr_data.reshape(
self.num_samples_,
self.num_features_)
else:
# do not store training data because it was partially computed
self.training_data_ = None
logger.debug(
'kernel computation done'
)
return data | python | def _generate_training_data(self, X1, X2, num_training_samples):
"""Generate training data for the classifier.
Compute the correlation, do the normalization if necessary,
and compute the kernel matrix if the classifier is
sklearn.svm.SVC with precomputed kernel.
Parameters
----------
X1: a list of numpy array in shape [num_TRs, num_voxels]
X1 contains the activity data filtered by ROIs
and prepared for correlation computation.
All elements of X1 must have the same num_voxels value,
X2: a list of numpy array in shape [num_TRs, num_voxels]
len(X1) equals len(X2).
All elements of X2 must have the same num_voxels value.
X2 can be identical to X1; if not, X1 must have more voxels
than X2 (guaranteed by self.fit).
num_training_samples: Optional[int]
Default None.
The number of samples used in the training,
which is set when the kernel matrix is constructed
portion by portion so the similarity vectors of the
test data have to be computed here.
This is ONLY set when sklearn.svm.SVC with
precomputed kernel is used.
If it is set, only those samples will be used to fit the model.
Returns
-------
data: 2D numpy array
If the classifier is sklearn.svm.SVC with precomputed kernel,
data is the kenrl matrix in shape [num_samples, num_samples];
otherwise, data is in shape [num_samples, num_features] as
the training data.
"""
if not (isinstance(self.clf, sklearn.svm.SVC)
and self.clf.kernel == 'precomputed'):
# correlation computation
corr_data = self._prepare_corerelation_data(X1, X2)
# normalization
normalized_corr_data = self._normalize_correlation_data(
corr_data,
self.epochs_per_subj)
# training data prepare
data = normalized_corr_data.reshape(self.num_samples_,
self.num_features_)
self.training_data_ = None
else: # SVM with precomputed kernel
if self.num_processed_voxels < self.num_voxels_:
if num_training_samples is None:
raise RuntimeError('the kernel matrix will be '
'computed portion by portion, '
'the test samples must be predefined '
'by specifying '
'num_training_samples')
if num_training_samples >= self.num_samples_:
raise ValueError('the number of training samples '
'must be smaller than '
'the number of total samples')
data, normalized_corr_data = \
self._compute_kernel_matrix_in_portion(X1, X2)
if self.num_processed_voxels >= self.num_voxels_:
# training data is in shape
# [num_samples, num_voxels * num_voxels]
self.training_data_ = normalized_corr_data.reshape(
self.num_samples_,
self.num_features_)
else:
# do not store training data because it was partially computed
self.training_data_ = None
logger.debug(
'kernel computation done'
)
return data | [
"def",
"_generate_training_data",
"(",
"self",
",",
"X1",
",",
"X2",
",",
"num_training_samples",
")",
":",
"if",
"not",
"(",
"isinstance",
"(",
"self",
".",
"clf",
",",
"sklearn",
".",
"svm",
".",
"SVC",
")",
"and",
"self",
".",
"clf",
".",
"kernel",
... | Generate training data for the classifier.
Compute the correlation, do the normalization if necessary,
and compute the kernel matrix if the classifier is
sklearn.svm.SVC with precomputed kernel.
Parameters
----------
X1: a list of numpy array in shape [num_TRs, num_voxels]
X1 contains the activity data filtered by ROIs
and prepared for correlation computation.
All elements of X1 must have the same num_voxels value,
X2: a list of numpy array in shape [num_TRs, num_voxels]
len(X1) equals len(X2).
All elements of X2 must have the same num_voxels value.
X2 can be identical to X1; if not, X1 must have more voxels
than X2 (guaranteed by self.fit).
num_training_samples: Optional[int]
Default None.
The number of samples used in the training,
which is set when the kernel matrix is constructed
portion by portion so the similarity vectors of the
test data have to be computed here.
This is ONLY set when sklearn.svm.SVC with
precomputed kernel is used.
If it is set, only those samples will be used to fit the model.
Returns
-------
data: 2D numpy array
If the classifier is sklearn.svm.SVC with precomputed kernel,
data is the kenrl matrix in shape [num_samples, num_samples];
otherwise, data is in shape [num_samples, num_features] as
the training data. | [
"Generate",
"training",
"data",
"for",
"the",
"classifier",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/classifier.py#L350-L424 | train | 204,476 |
brainiak/brainiak | brainiak/fcma/classifier.py | Classifier.fit | def fit(self, X, y, num_training_samples=None):
"""Use correlation data to train a model.
First compute the correlation of the input data,
and then normalize within subject
if more than one sample in one subject,
and then fit to a model defined by self.clf.
Parameters
----------
X: list of tuple (data1, data2)
data1 and data2 are numpy array in shape [num_TRs, num_voxels]
to be computed for correlation.
They contain the activity data filtered by ROIs
and prepared for correlation computation.
Within list, all data1s must have the same num_voxels value,
all data2s must have the same num_voxels value.
y: 1D numpy array
labels, len(X) equals len(y)
num_training_samples: Optional[int]
The number of samples used in the training.
Set it to construct the kernel matrix
portion by portion so the similarity vectors of the
test data have to be computed here.
Only set num_training_samples when sklearn.svm.SVC with
precomputed kernel is used.
If it is set, only those samples will be used to fit the model.
Returns
-------
Classifier:
self.
"""
time1 = time.time()
assert len(X) == len(y), \
'the number of samples must be equal to the number of labels'
for x in X:
assert len(x) == 2, \
'there must be two parts for each correlation computation'
X1, X2 = zip(*X)
if not (isinstance(self.clf, sklearn.svm.SVC)
and self.clf.kernel == 'precomputed'):
if num_training_samples is not None:
num_training_samples = None
logger.warn(
'num_training_samples should not be set for classifiers '
'other than SVM with precomputed kernels'
)
num_samples = len(X1)
num_voxels1 = X1[0].shape[1]
num_voxels2 = X2[0].shape[1]
# make sure X1 always has more voxels
if num_voxels1 < num_voxels2:
X1, X2 = X2, X1
num_voxels1, num_voxels2 = num_voxels2, num_voxels1
self.num_voxels_ = num_voxels1
self.num_features_ = num_voxels1 * num_voxels2
self.num_samples_ = num_samples
data = self._generate_training_data(X1, X2, num_training_samples)
if num_training_samples is not None:
self.test_raw_data_ = None
self.test_data_ = data[num_training_samples:,
0:num_training_samples]
# limit training to the data specified by num_training_samples
data = data[0:num_training_samples, 0:num_training_samples]
# training
self.clf = self.clf.fit(data, y[0:num_training_samples])
# set the test data
if num_training_samples is None:
self.test_raw_data_ = None
self.test_data_ = None
time2 = time.time()
logger.info(
'training done, takes %.2f s' %
(time2 - time1)
)
return self | python | def fit(self, X, y, num_training_samples=None):
"""Use correlation data to train a model.
First compute the correlation of the input data,
and then normalize within subject
if more than one sample in one subject,
and then fit to a model defined by self.clf.
Parameters
----------
X: list of tuple (data1, data2)
data1 and data2 are numpy array in shape [num_TRs, num_voxels]
to be computed for correlation.
They contain the activity data filtered by ROIs
and prepared for correlation computation.
Within list, all data1s must have the same num_voxels value,
all data2s must have the same num_voxels value.
y: 1D numpy array
labels, len(X) equals len(y)
num_training_samples: Optional[int]
The number of samples used in the training.
Set it to construct the kernel matrix
portion by portion so the similarity vectors of the
test data have to be computed here.
Only set num_training_samples when sklearn.svm.SVC with
precomputed kernel is used.
If it is set, only those samples will be used to fit the model.
Returns
-------
Classifier:
self.
"""
time1 = time.time()
assert len(X) == len(y), \
'the number of samples must be equal to the number of labels'
for x in X:
assert len(x) == 2, \
'there must be two parts for each correlation computation'
X1, X2 = zip(*X)
if not (isinstance(self.clf, sklearn.svm.SVC)
and self.clf.kernel == 'precomputed'):
if num_training_samples is not None:
num_training_samples = None
logger.warn(
'num_training_samples should not be set for classifiers '
'other than SVM with precomputed kernels'
)
num_samples = len(X1)
num_voxels1 = X1[0].shape[1]
num_voxels2 = X2[0].shape[1]
# make sure X1 always has more voxels
if num_voxels1 < num_voxels2:
X1, X2 = X2, X1
num_voxels1, num_voxels2 = num_voxels2, num_voxels1
self.num_voxels_ = num_voxels1
self.num_features_ = num_voxels1 * num_voxels2
self.num_samples_ = num_samples
data = self._generate_training_data(X1, X2, num_training_samples)
if num_training_samples is not None:
self.test_raw_data_ = None
self.test_data_ = data[num_training_samples:,
0:num_training_samples]
# limit training to the data specified by num_training_samples
data = data[0:num_training_samples, 0:num_training_samples]
# training
self.clf = self.clf.fit(data, y[0:num_training_samples])
# set the test data
if num_training_samples is None:
self.test_raw_data_ = None
self.test_data_ = None
time2 = time.time()
logger.info(
'training done, takes %.2f s' %
(time2 - time1)
)
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
",",
"num_training_samples",
"=",
"None",
")",
":",
"time1",
"=",
"time",
".",
"time",
"(",
")",
"assert",
"len",
"(",
"X",
")",
"==",
"len",
"(",
"y",
")",
",",
"'the number of samples must be equal to th... | Use correlation data to train a model.
First compute the correlation of the input data,
and then normalize within subject
if more than one sample in one subject,
and then fit to a model defined by self.clf.
Parameters
----------
X: list of tuple (data1, data2)
data1 and data2 are numpy array in shape [num_TRs, num_voxels]
to be computed for correlation.
They contain the activity data filtered by ROIs
and prepared for correlation computation.
Within list, all data1s must have the same num_voxels value,
all data2s must have the same num_voxels value.
y: 1D numpy array
labels, len(X) equals len(y)
num_training_samples: Optional[int]
The number of samples used in the training.
Set it to construct the kernel matrix
portion by portion so the similarity vectors of the
test data have to be computed here.
Only set num_training_samples when sklearn.svm.SVC with
precomputed kernel is used.
If it is set, only those samples will be used to fit the model.
Returns
-------
Classifier:
self. | [
"Use",
"correlation",
"data",
"to",
"train",
"a",
"model",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/classifier.py#L426-L504 | train | 204,477 |
brainiak/brainiak | brainiak/fcma/classifier.py | Classifier.predict | def predict(self, X=None):
"""Use a trained model to predict correlation data.
first compute the correlation of the input data,
and then normalize across all samples in the list
if there are more than one sample,
and then predict via self.clf.
If X is None, use the similarity vectors produced in fit
to predict
Parameters
----------
X: Optional[list of tuple (data1, data2)]
data1 and data2 are numpy array in shape [num_TRs, num_voxels]
to be computed for correlation.
default None, meaning that the data to be predicted
have been processed in the fit method.
Otherwise, X contains the activity data filtered by ROIs
and prepared for correlation computation.
len(X) is the number of test samples.
if len(X) > 1: normalization is done on all test samples.
Within list, all data1s must have the same num_voxels value,
all data2s must have the same num_voxels value.
Returns
-------
y_pred: the predicted label of X, in shape [len(X),]
"""
time1 = time.time()
if X is not None:
for x in X:
assert len(x) == 2, \
'there must be two parts for each correlation computation'
X1, X2 = zip(*X)
num_voxels1 = X1[0].shape[1]
num_voxels2 = X2[0].shape[1]
# make sure X1 always has more voxels
if num_voxels1 < num_voxels2:
X1, X2 = X2, X1
num_voxels1, num_voxels2 = num_voxels2, num_voxels1
assert self.num_features_ == num_voxels1 * num_voxels2, \
'the number of features does not match the model'
num_test_samples = len(X1)
self.test_raw_data_ = X
# correlation computation
corr_data = self._prepare_corerelation_data(X1, X2)
# normalization
normalized_corr_data = self._normalize_correlation_data(
corr_data,
num_test_samples)
# test data generation
self.test_data_ = self._prepare_test_data(normalized_corr_data)
# prediction
y_pred = self.clf.predict(self.test_data_)
time2 = time.time()
logger.info(
'prediction done, takes %.2f s' %
(time2 - time1)
)
return y_pred | python | def predict(self, X=None):
"""Use a trained model to predict correlation data.
first compute the correlation of the input data,
and then normalize across all samples in the list
if there are more than one sample,
and then predict via self.clf.
If X is None, use the similarity vectors produced in fit
to predict
Parameters
----------
X: Optional[list of tuple (data1, data2)]
data1 and data2 are numpy array in shape [num_TRs, num_voxels]
to be computed for correlation.
default None, meaning that the data to be predicted
have been processed in the fit method.
Otherwise, X contains the activity data filtered by ROIs
and prepared for correlation computation.
len(X) is the number of test samples.
if len(X) > 1: normalization is done on all test samples.
Within list, all data1s must have the same num_voxels value,
all data2s must have the same num_voxels value.
Returns
-------
y_pred: the predicted label of X, in shape [len(X),]
"""
time1 = time.time()
if X is not None:
for x in X:
assert len(x) == 2, \
'there must be two parts for each correlation computation'
X1, X2 = zip(*X)
num_voxels1 = X1[0].shape[1]
num_voxels2 = X2[0].shape[1]
# make sure X1 always has more voxels
if num_voxels1 < num_voxels2:
X1, X2 = X2, X1
num_voxels1, num_voxels2 = num_voxels2, num_voxels1
assert self.num_features_ == num_voxels1 * num_voxels2, \
'the number of features does not match the model'
num_test_samples = len(X1)
self.test_raw_data_ = X
# correlation computation
corr_data = self._prepare_corerelation_data(X1, X2)
# normalization
normalized_corr_data = self._normalize_correlation_data(
corr_data,
num_test_samples)
# test data generation
self.test_data_ = self._prepare_test_data(normalized_corr_data)
# prediction
y_pred = self.clf.predict(self.test_data_)
time2 = time.time()
logger.info(
'prediction done, takes %.2f s' %
(time2 - time1)
)
return y_pred | [
"def",
"predict",
"(",
"self",
",",
"X",
"=",
"None",
")",
":",
"time1",
"=",
"time",
".",
"time",
"(",
")",
"if",
"X",
"is",
"not",
"None",
":",
"for",
"x",
"in",
"X",
":",
"assert",
"len",
"(",
"x",
")",
"==",
"2",
",",
"'there must be two pa... | Use a trained model to predict correlation data.
first compute the correlation of the input data,
and then normalize across all samples in the list
if there are more than one sample,
and then predict via self.clf.
If X is None, use the similarity vectors produced in fit
to predict
Parameters
----------
X: Optional[list of tuple (data1, data2)]
data1 and data2 are numpy array in shape [num_TRs, num_voxels]
to be computed for correlation.
default None, meaning that the data to be predicted
have been processed in the fit method.
Otherwise, X contains the activity data filtered by ROIs
and prepared for correlation computation.
len(X) is the number of test samples.
if len(X) > 1: normalization is done on all test samples.
Within list, all data1s must have the same num_voxels value,
all data2s must have the same num_voxels value.
Returns
-------
y_pred: the predicted label of X, in shape [len(X),] | [
"Use",
"a",
"trained",
"model",
"to",
"predict",
"correlation",
"data",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/classifier.py#L506-L566 | train | 204,478 |
brainiak/brainiak | brainiak/fcma/classifier.py | Classifier.decision_function | def decision_function(self, X=None):
"""Output the decision value of the prediction.
if X is not equal to self.test_raw_data\\_, i.e. predict is not called,
first generate the test_data
after getting the test_data, get the decision value via self.clf.
if X is None, test_data\\_ is ready to be used
Parameters
----------
X: Optional[list of tuple (data1, data2)]
data1 and data2 are numpy array in shape [num_TRs, num_voxels]
to be computed for correlation.
default None, meaning that the data to be predicted
have been processed in the fit method.
Otherwise, X contains the activity data filtered by ROIs
and prepared for correlation computation.
len(X) is the number of test samples.
if len(X) > 1: normalization is done on all test samples.
Within list, all data1s must have the same num_voxels value,
all data2s must have the same num_voxels value.
Returns
-------
confidence: the predictions confidence values of X, in shape [len(X),]
"""
if X is not None and not self._is_equal_to_test_raw_data(X):
for x in X:
assert len(x) == 2, \
'there must be two parts for each correlation computation'
X1, X2 = zip(*X)
num_voxels1 = X1[0].shape[1]
num_voxels2 = X2[0].shape[1]
assert len(X1) == len(X2), \
'the list lengths do not match'
# make sure X1 always has more voxels
if num_voxels1 < num_voxels2:
X1, X2 = X2, X1
num_voxels1, num_voxels2 = num_voxels2, num_voxels1
assert self.num_features_ == num_voxels1 * num_voxels2, \
'the number of features does not match the model'
num_test_samples = len(X1)
self.test_raw_data_ = X
# generate the test_data first
# correlation computation
corr_data = self._prepare_corerelation_data(X1, X2)
# normalization
normalized_corr_data = \
self._normalize_correlation_data(corr_data,
num_test_samples)
# test data generation
self.test_data_ = self._prepare_test_data(normalized_corr_data)
confidence = self.clf.decision_function(self.test_data_)
return confidence | python | def decision_function(self, X=None):
"""Output the decision value of the prediction.
if X is not equal to self.test_raw_data\\_, i.e. predict is not called,
first generate the test_data
after getting the test_data, get the decision value via self.clf.
if X is None, test_data\\_ is ready to be used
Parameters
----------
X: Optional[list of tuple (data1, data2)]
data1 and data2 are numpy array in shape [num_TRs, num_voxels]
to be computed for correlation.
default None, meaning that the data to be predicted
have been processed in the fit method.
Otherwise, X contains the activity data filtered by ROIs
and prepared for correlation computation.
len(X) is the number of test samples.
if len(X) > 1: normalization is done on all test samples.
Within list, all data1s must have the same num_voxels value,
all data2s must have the same num_voxels value.
Returns
-------
confidence: the predictions confidence values of X, in shape [len(X),]
"""
if X is not None and not self._is_equal_to_test_raw_data(X):
for x in X:
assert len(x) == 2, \
'there must be two parts for each correlation computation'
X1, X2 = zip(*X)
num_voxels1 = X1[0].shape[1]
num_voxels2 = X2[0].shape[1]
assert len(X1) == len(X2), \
'the list lengths do not match'
# make sure X1 always has more voxels
if num_voxels1 < num_voxels2:
X1, X2 = X2, X1
num_voxels1, num_voxels2 = num_voxels2, num_voxels1
assert self.num_features_ == num_voxels1 * num_voxels2, \
'the number of features does not match the model'
num_test_samples = len(X1)
self.test_raw_data_ = X
# generate the test_data first
# correlation computation
corr_data = self._prepare_corerelation_data(X1, X2)
# normalization
normalized_corr_data = \
self._normalize_correlation_data(corr_data,
num_test_samples)
# test data generation
self.test_data_ = self._prepare_test_data(normalized_corr_data)
confidence = self.clf.decision_function(self.test_data_)
return confidence | [
"def",
"decision_function",
"(",
"self",
",",
"X",
"=",
"None",
")",
":",
"if",
"X",
"is",
"not",
"None",
"and",
"not",
"self",
".",
"_is_equal_to_test_raw_data",
"(",
"X",
")",
":",
"for",
"x",
"in",
"X",
":",
"assert",
"len",
"(",
"x",
")",
"==",... | Output the decision value of the prediction.
if X is not equal to self.test_raw_data\\_, i.e. predict is not called,
first generate the test_data
after getting the test_data, get the decision value via self.clf.
if X is None, test_data\\_ is ready to be used
Parameters
----------
X: Optional[list of tuple (data1, data2)]
data1 and data2 are numpy array in shape [num_TRs, num_voxels]
to be computed for correlation.
default None, meaning that the data to be predicted
have been processed in the fit method.
Otherwise, X contains the activity data filtered by ROIs
and prepared for correlation computation.
len(X) is the number of test samples.
if len(X) > 1: normalization is done on all test samples.
Within list, all data1s must have the same num_voxels value,
all data2s must have the same num_voxels value.
Returns
-------
confidence: the predictions confidence values of X, in shape [len(X),] | [
"Output",
"the",
"decision",
"value",
"of",
"the",
"prediction",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/classifier.py#L597-L650 | train | 204,479 |
brainiak/brainiak | brainiak/isc.py | _check_isc_input | def _check_isc_input(iscs, pairwise=False):
"""Checks ISC inputs for statistical tests
Input ISCs should be n_subjects (leave-one-out approach) or
n_pairs (pairwise approach) by n_voxels or n_ROIs array or a 1D
array (or list) of ISC values for a single voxel or ROI. This
function is only intended to be used internally by other
functions in this module (e.g., bootstrap_isc, permutation_isc).
Parameters
----------
iscs : ndarray or list
ISC values
Returns
-------
iscs : ndarray
Array of ISC values
n_subjects : int
Number of subjects
n_voxels : int
Number of voxels (or ROIs)
"""
# Standardize structure of input data
if type(iscs) == list:
iscs = np.array(iscs)[:, np.newaxis]
elif isinstance(iscs, np.ndarray):
if iscs.ndim == 1:
iscs = iscs[:, np.newaxis]
# Check if incoming pairwise matrix is vectorized triangle
if pairwise:
try:
test_square = squareform(iscs[:, 0])
n_subjects = test_square.shape[0]
except ValueError:
raise ValueError("For pairwise input, ISCs must be the "
"vectorized triangle of a square matrix.")
elif not pairwise:
n_subjects = iscs.shape[0]
# Infer subjects, voxels and print for user to check
n_voxels = iscs.shape[1]
logger.info("Assuming {0} subjects with and {1} "
"voxel(s) or ROI(s) in bootstrap ISC test.".format(n_subjects,
n_voxels))
return iscs, n_subjects, n_voxels | python | def _check_isc_input(iscs, pairwise=False):
"""Checks ISC inputs for statistical tests
Input ISCs should be n_subjects (leave-one-out approach) or
n_pairs (pairwise approach) by n_voxels or n_ROIs array or a 1D
array (or list) of ISC values for a single voxel or ROI. This
function is only intended to be used internally by other
functions in this module (e.g., bootstrap_isc, permutation_isc).
Parameters
----------
iscs : ndarray or list
ISC values
Returns
-------
iscs : ndarray
Array of ISC values
n_subjects : int
Number of subjects
n_voxels : int
Number of voxels (or ROIs)
"""
# Standardize structure of input data
if type(iscs) == list:
iscs = np.array(iscs)[:, np.newaxis]
elif isinstance(iscs, np.ndarray):
if iscs.ndim == 1:
iscs = iscs[:, np.newaxis]
# Check if incoming pairwise matrix is vectorized triangle
if pairwise:
try:
test_square = squareform(iscs[:, 0])
n_subjects = test_square.shape[0]
except ValueError:
raise ValueError("For pairwise input, ISCs must be the "
"vectorized triangle of a square matrix.")
elif not pairwise:
n_subjects = iscs.shape[0]
# Infer subjects, voxels and print for user to check
n_voxels = iscs.shape[1]
logger.info("Assuming {0} subjects with and {1} "
"voxel(s) or ROI(s) in bootstrap ISC test.".format(n_subjects,
n_voxels))
return iscs, n_subjects, n_voxels | [
"def",
"_check_isc_input",
"(",
"iscs",
",",
"pairwise",
"=",
"False",
")",
":",
"# Standardize structure of input data",
"if",
"type",
"(",
"iscs",
")",
"==",
"list",
":",
"iscs",
"=",
"np",
".",
"array",
"(",
"iscs",
")",
"[",
":",
",",
"np",
".",
"n... | Checks ISC inputs for statistical tests
Input ISCs should be n_subjects (leave-one-out approach) or
n_pairs (pairwise approach) by n_voxels or n_ROIs array or a 1D
array (or list) of ISC values for a single voxel or ROI. This
function is only intended to be used internally by other
functions in this module (e.g., bootstrap_isc, permutation_isc).
Parameters
----------
iscs : ndarray or list
ISC values
Returns
-------
iscs : ndarray
Array of ISC values
n_subjects : int
Number of subjects
n_voxels : int
Number of voxels (or ROIs) | [
"Checks",
"ISC",
"inputs",
"for",
"statistical",
"tests"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/isc.py#L370-L422 | train | 204,480 |
brainiak/brainiak | brainiak/isc.py | _check_targets_input | def _check_targets_input(targets, data):
"""Checks ISFC targets input array
For ISFC analysis, targets input array should either be a list
of n_TRs by n_targets arrays (where each array corresponds to
a subject), or an n_TRs by n_targets by n_subjects ndarray. This
function also checks the shape of the targets array against the
input data array.
Parameters
----------
data : list or ndarray (n_TRs x n_voxels x n_subjects)
fMRI data for which to compute ISFC
targets : list or ndarray (n_TRs x n_voxels x n_subjects)
fMRI data to use as targets for ISFC
Returns
-------
targets : ndarray (n_TRs x n_voxels x n_subjects)
ISFC targets with standadized structure
n_TRs : int
Number of time points (TRs) for targets array
n_voxels : int
Number of voxels (or ROIs) for targets array
n_subjects : int
Number of subjects for targets array
symmetric : bool
Indicator for symmetric vs. asymmetric
"""
if isinstance(targets, np.ndarray) or isinstance(targets, list):
targets, n_TRs, n_voxels, n_subjects = (
_check_timeseries_input(targets))
if data.shape[0] != n_TRs:
raise ValueError("Targets array must have same number of "
"TRs as input data")
if data.shape[2] != n_subjects:
raise ValueError("Targets array must have same number of "
"subjects as input data")
symmetric = False
else:
targets = data
n_TRs, n_voxels, n_subjects = data.shape
symmetric = True
return targets, n_TRs, n_voxels, n_subjects, symmetric | python | def _check_targets_input(targets, data):
"""Checks ISFC targets input array
For ISFC analysis, targets input array should either be a list
of n_TRs by n_targets arrays (where each array corresponds to
a subject), or an n_TRs by n_targets by n_subjects ndarray. This
function also checks the shape of the targets array against the
input data array.
Parameters
----------
data : list or ndarray (n_TRs x n_voxels x n_subjects)
fMRI data for which to compute ISFC
targets : list or ndarray (n_TRs x n_voxels x n_subjects)
fMRI data to use as targets for ISFC
Returns
-------
targets : ndarray (n_TRs x n_voxels x n_subjects)
ISFC targets with standadized structure
n_TRs : int
Number of time points (TRs) for targets array
n_voxels : int
Number of voxels (or ROIs) for targets array
n_subjects : int
Number of subjects for targets array
symmetric : bool
Indicator for symmetric vs. asymmetric
"""
if isinstance(targets, np.ndarray) or isinstance(targets, list):
targets, n_TRs, n_voxels, n_subjects = (
_check_timeseries_input(targets))
if data.shape[0] != n_TRs:
raise ValueError("Targets array must have same number of "
"TRs as input data")
if data.shape[2] != n_subjects:
raise ValueError("Targets array must have same number of "
"subjects as input data")
symmetric = False
else:
targets = data
n_TRs, n_voxels, n_subjects = data.shape
symmetric = True
return targets, n_TRs, n_voxels, n_subjects, symmetric | [
"def",
"_check_targets_input",
"(",
"targets",
",",
"data",
")",
":",
"if",
"isinstance",
"(",
"targets",
",",
"np",
".",
"ndarray",
")",
"or",
"isinstance",
"(",
"targets",
",",
"list",
")",
":",
"targets",
",",
"n_TRs",
",",
"n_voxels",
",",
"n_subject... | Checks ISFC targets input array
For ISFC analysis, targets input array should either be a list
of n_TRs by n_targets arrays (where each array corresponds to
a subject), or an n_TRs by n_targets by n_subjects ndarray. This
function also checks the shape of the targets array against the
input data array.
Parameters
----------
data : list or ndarray (n_TRs x n_voxels x n_subjects)
fMRI data for which to compute ISFC
targets : list or ndarray (n_TRs x n_voxels x n_subjects)
fMRI data to use as targets for ISFC
Returns
-------
targets : ndarray (n_TRs x n_voxels x n_subjects)
ISFC targets with standadized structure
n_TRs : int
Number of time points (TRs) for targets array
n_voxels : int
Number of voxels (or ROIs) for targets array
n_subjects : int
Number of subjects for targets array
symmetric : bool
Indicator for symmetric vs. asymmetric | [
"Checks",
"ISFC",
"targets",
"input",
"array"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/isc.py#L425-L476 | train | 204,481 |
brainiak/brainiak | brainiak/isc.py | compute_summary_statistic | def compute_summary_statistic(iscs, summary_statistic='mean', axis=None):
"""Computes summary statistics for ISCs
Computes either the 'mean' or 'median' across a set of ISCs. In the
case of the mean, ISC values are first Fisher Z transformed (arctanh),
averaged, then inverse Fisher Z transformed (tanh).
The implementation is based on the work in [SilverDunlap1987]_.
.. [SilverDunlap1987] "Averaging corrlelation coefficients: should
Fisher's z transformation be used?", N. C. Silver, W. P. Dunlap, 1987,
Journal of Applied Psychology, 72, 146-148.
https://doi.org/10.1037/0021-9010.72.1.146
Parameters
----------
iscs : list or ndarray
ISC values
summary_statistic : str, default: 'mean'
Summary statistic, 'mean' or 'median'
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to
compute the mean of the flattened array.
Returns
-------
statistic : float or ndarray
Summary statistic of ISC values
"""
if summary_statistic not in ('mean', 'median'):
raise ValueError("Summary statistic must be 'mean' or 'median'")
# Compute summary statistic
if summary_statistic == 'mean':
statistic = np.tanh(np.nanmean(np.arctanh(iscs), axis=axis))
elif summary_statistic == 'median':
statistic = np.nanmedian(iscs, axis=axis)
return statistic | python | def compute_summary_statistic(iscs, summary_statistic='mean', axis=None):
"""Computes summary statistics for ISCs
Computes either the 'mean' or 'median' across a set of ISCs. In the
case of the mean, ISC values are first Fisher Z transformed (arctanh),
averaged, then inverse Fisher Z transformed (tanh).
The implementation is based on the work in [SilverDunlap1987]_.
.. [SilverDunlap1987] "Averaging corrlelation coefficients: should
Fisher's z transformation be used?", N. C. Silver, W. P. Dunlap, 1987,
Journal of Applied Psychology, 72, 146-148.
https://doi.org/10.1037/0021-9010.72.1.146
Parameters
----------
iscs : list or ndarray
ISC values
summary_statistic : str, default: 'mean'
Summary statistic, 'mean' or 'median'
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to
compute the mean of the flattened array.
Returns
-------
statistic : float or ndarray
Summary statistic of ISC values
"""
if summary_statistic not in ('mean', 'median'):
raise ValueError("Summary statistic must be 'mean' or 'median'")
# Compute summary statistic
if summary_statistic == 'mean':
statistic = np.tanh(np.nanmean(np.arctanh(iscs), axis=axis))
elif summary_statistic == 'median':
statistic = np.nanmedian(iscs, axis=axis)
return statistic | [
"def",
"compute_summary_statistic",
"(",
"iscs",
",",
"summary_statistic",
"=",
"'mean'",
",",
"axis",
"=",
"None",
")",
":",
"if",
"summary_statistic",
"not",
"in",
"(",
"'mean'",
",",
"'median'",
")",
":",
"raise",
"ValueError",
"(",
"\"Summary statistic must ... | Computes summary statistics for ISCs
Computes either the 'mean' or 'median' across a set of ISCs. In the
case of the mean, ISC values are first Fisher Z transformed (arctanh),
averaged, then inverse Fisher Z transformed (tanh).
The implementation is based on the work in [SilverDunlap1987]_.
.. [SilverDunlap1987] "Averaging corrlelation coefficients: should
Fisher's z transformation be used?", N. C. Silver, W. P. Dunlap, 1987,
Journal of Applied Psychology, 72, 146-148.
https://doi.org/10.1037/0021-9010.72.1.146
Parameters
----------
iscs : list or ndarray
ISC values
summary_statistic : str, default: 'mean'
Summary statistic, 'mean' or 'median'
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to
compute the mean of the flattened array.
Returns
-------
statistic : float or ndarray
Summary statistic of ISC values | [
"Computes",
"summary",
"statistics",
"for",
"ISCs"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/isc.py#L479-L522 | train | 204,482 |
brainiak/brainiak | brainiak/isc.py | _threshold_nans | def _threshold_nans(data, tolerate_nans):
"""Thresholds data based on proportion of subjects with NaNs
Takes in data and a threshold value (float between 0.0 and 1.0) determining
the permissible proportion of subjects with non-NaN values. For example, if
threshold=.8, any voxel where >= 80% of subjects have non-NaN values will
be left unchanged, while any voxel with < 80% non-NaN values will be
assigned all NaN values and included in the nan_mask output. Note that the
output data has not been masked and will be same shape as the input data,
but may have a different number of NaNs based on the threshold.
Parameters
----------
data : ndarray (n_TRs x n_voxels x n_subjects)
fMRI time series data
tolerate_nans : bool or float (0.0 <= threshold <= 1.0)
Proportion of subjects with non-NaN values required to keep voxel
Returns
-------
data : ndarray (n_TRs x n_voxels x n_subjects)
fMRI time series data with adjusted NaNs
nan_mask : ndarray (n_voxels,)
Boolean mask array of voxels with too many NaNs based on threshold
"""
nans = np.all(np.any(np.isnan(data), axis=0), axis=1)
# Check tolerate_nans input and use either mean/nanmean and exclude voxels
if tolerate_nans is True:
logger.info("ISC computation will tolerate all NaNs when averaging")
elif type(tolerate_nans) is float:
if not 0.0 <= tolerate_nans <= 1.0:
raise ValueError("If threshold to tolerate NaNs is a float, "
"it must be between 0.0 and 1.0; got {0}".format(
tolerate_nans))
nans += ~(np.sum(~np.any(np.isnan(data), axis=0), axis=1) >=
data.shape[-1] * tolerate_nans)
logger.info("ISC computation will tolerate voxels with at least "
"{0} non-NaN values: {1} voxels do not meet "
"threshold".format(tolerate_nans,
np.sum(nans)))
else:
logger.info("ISC computation will not tolerate NaNs when averaging")
mask = ~nans
data = data[:, mask, :]
return data, mask | python | def _threshold_nans(data, tolerate_nans):
"""Thresholds data based on proportion of subjects with NaNs
Takes in data and a threshold value (float between 0.0 and 1.0) determining
the permissible proportion of subjects with non-NaN values. For example, if
threshold=.8, any voxel where >= 80% of subjects have non-NaN values will
be left unchanged, while any voxel with < 80% non-NaN values will be
assigned all NaN values and included in the nan_mask output. Note that the
output data has not been masked and will be same shape as the input data,
but may have a different number of NaNs based on the threshold.
Parameters
----------
data : ndarray (n_TRs x n_voxels x n_subjects)
fMRI time series data
tolerate_nans : bool or float (0.0 <= threshold <= 1.0)
Proportion of subjects with non-NaN values required to keep voxel
Returns
-------
data : ndarray (n_TRs x n_voxels x n_subjects)
fMRI time series data with adjusted NaNs
nan_mask : ndarray (n_voxels,)
Boolean mask array of voxels with too many NaNs based on threshold
"""
nans = np.all(np.any(np.isnan(data), axis=0), axis=1)
# Check tolerate_nans input and use either mean/nanmean and exclude voxels
if tolerate_nans is True:
logger.info("ISC computation will tolerate all NaNs when averaging")
elif type(tolerate_nans) is float:
if not 0.0 <= tolerate_nans <= 1.0:
raise ValueError("If threshold to tolerate NaNs is a float, "
"it must be between 0.0 and 1.0; got {0}".format(
tolerate_nans))
nans += ~(np.sum(~np.any(np.isnan(data), axis=0), axis=1) >=
data.shape[-1] * tolerate_nans)
logger.info("ISC computation will tolerate voxels with at least "
"{0} non-NaN values: {1} voxels do not meet "
"threshold".format(tolerate_nans,
np.sum(nans)))
else:
logger.info("ISC computation will not tolerate NaNs when averaging")
mask = ~nans
data = data[:, mask, :]
return data, mask | [
"def",
"_threshold_nans",
"(",
"data",
",",
"tolerate_nans",
")",
":",
"nans",
"=",
"np",
".",
"all",
"(",
"np",
".",
"any",
"(",
"np",
".",
"isnan",
"(",
"data",
")",
",",
"axis",
"=",
"0",
")",
",",
"axis",
"=",
"1",
")",
"# Check tolerate_nans i... | Thresholds data based on proportion of subjects with NaNs
Takes in data and a threshold value (float between 0.0 and 1.0) determining
the permissible proportion of subjects with non-NaN values. For example, if
threshold=.8, any voxel where >= 80% of subjects have non-NaN values will
be left unchanged, while any voxel with < 80% non-NaN values will be
assigned all NaN values and included in the nan_mask output. Note that the
output data has not been masked and will be same shape as the input data,
but may have a different number of NaNs based on the threshold.
Parameters
----------
data : ndarray (n_TRs x n_voxels x n_subjects)
fMRI time series data
tolerate_nans : bool or float (0.0 <= threshold <= 1.0)
Proportion of subjects with non-NaN values required to keep voxel
Returns
-------
data : ndarray (n_TRs x n_voxels x n_subjects)
fMRI time series data with adjusted NaNs
nan_mask : ndarray (n_voxels,)
Boolean mask array of voxels with too many NaNs based on threshold | [
"Thresholds",
"data",
"based",
"on",
"proportion",
"of",
"subjects",
"with",
"NaNs"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/isc.py#L588-L642 | train | 204,483 |
brainiak/brainiak | brainiak/isc.py | bootstrap_isc | def bootstrap_isc(iscs, pairwise=False, summary_statistic='median',
n_bootstraps=1000, ci_percentile=95, random_state=None):
"""One-sample group-level bootstrap hypothesis test for ISCs
For ISCs from one more voxels or ROIs, resample subjects with replacement
to construct a bootstrap distribution. Input is a list or ndarray of
ISCs for a single voxel/ROI, or an ISCs-by-voxels ndarray. ISC values
should be either N ISC values for N subjects in the leave-one-out appraoch
(pairwise=False), N(N-1)/2 ISC values for N subjects in the pairwise
approach (pairwise=True). In the pairwise approach, ISC values should
correspond to the vectorized upper triangle of a square corrlation matrix
(see scipy.stats.distance.squareform). Shifts bootstrap distribution by
actual summary statistic (effectively to zero) for two-tailed null
hypothesis test (Hall & Wilson, 1991). Uses subject-wise (not pair-wise)
resampling in the pairwise approach. Returns the observed ISC, the
confidence interval, and a p-value for the bootstrap hypothesis test, as
well as the bootstrap distribution of summary statistics. According to
Chen et al., 2016, this is the preferred nonparametric approach for
controlling false positive rates (FPR) for one-sample tests in the pairwise
approach.
The implementation is based on the work in [Chen2016]_ and
[HallWilson1991]_.
.. [HallWilson1991] "Two guidelines for bootstrap hypothesis testing.",
P. Hall, S. R., Wilson, 1991, Biometrics, 757-762.
https://doi.org/10.2307/2532163
Parameters
----------
iscs : list or ndarray, ISCs by voxels array
ISC values for one or more voxels
pairwise : bool, default: False
Indicator of pairwise or leave-one-out, should match ISCs structure
summary_statistic : str, default: 'median'
Summary statistic, either 'median' (default) or 'mean'
n_bootstraps : int, default: 1000
Number of bootstrap samples (subject-level with replacement)
ci_percentile : int, default: 95
Percentile for computing confidence intervals
random_state = int or None, default: None
Initial random seed
Returns
-------
observed : float, median (or mean) ISC value
Summary statistic for actual ISCs
ci : tuple, bootstrap confidence intervals
Confidence intervals generated from bootstrap distribution
p : float, p-value
p-value based on bootstrap hypothesis test
distribution : ndarray, bootstraps by voxels (optional)
Bootstrap distribution if return_bootstrap=True
"""
# Standardize structure of input data
iscs, n_subjects, n_voxels = _check_isc_input(iscs, pairwise=pairwise)
# Check for valid summary statistic
if summary_statistic not in ('mean', 'median'):
raise ValueError("Summary statistic must be 'mean' or 'median'")
# Compute summary statistic for observed ISCs
observed = compute_summary_statistic(iscs,
summary_statistic=summary_statistic,
axis=0)
# Set up an empty list to build our bootstrap distribution
distribution = []
# Loop through n bootstrap iterations and populate distribution
for i in np.arange(n_bootstraps):
# Random seed to be deterministically re-randomized at each iteration
if isinstance(random_state, np.random.RandomState):
prng = random_state
else:
prng = np.random.RandomState(random_state)
# Randomly sample subject IDs with replacement
subject_sample = sorted(prng.choice(np.arange(n_subjects),
size=n_subjects))
# Squareform and shuffle rows/columns of pairwise ISC matrix to
# to retain correlation structure among ISCs, then get triangle
if pairwise:
# Loop through voxels
isc_sample = []
for voxel_iscs in iscs.T:
# Square the triangle and fill diagonal
voxel_iscs = squareform(voxel_iscs)
np.fill_diagonal(voxel_iscs, 1)
# Check that pairwise ISC matrix is square and symmetric
assert voxel_iscs.shape[0] == voxel_iscs.shape[1]
assert np.allclose(voxel_iscs, voxel_iscs.T)
# Shuffle square correlation matrix and get triangle
voxel_sample = voxel_iscs[subject_sample, :][:, subject_sample]
voxel_sample = squareform(voxel_sample, checks=False)
# Censor off-diagonal 1s for same-subject pairs
voxel_sample[voxel_sample == 1.] = np.NaN
isc_sample.append(voxel_sample)
isc_sample = np.column_stack(isc_sample)
# Get simple bootstrap sample if not pairwise
elif not pairwise:
isc_sample = iscs[subject_sample, :]
# Compute summary statistic for bootstrap ISCs per voxel
# (alternatively could construct distribution for all voxels
# then compute statistics, but larger memory footprint)
distribution.append(compute_summary_statistic(
isc_sample,
summary_statistic=summary_statistic,
axis=0))
# Update random state for next iteration
random_state = np.random.RandomState(prng.randint(0, MAX_RANDOM_SEED))
# Convert distribution to numpy array
distribution = np.array(distribution)
# Compute CIs of median from bootstrap distribution (default: 95%)
ci = (np.percentile(distribution, (100 - ci_percentile)/2, axis=0),
np.percentile(distribution, ci_percentile + (100 - ci_percentile)/2,
axis=0))
# Shift bootstrap distribution to 0 for hypothesis test
shifted = distribution - observed
# Get p-value for actual median from shifted distribution
p = p_from_null(observed, shifted,
side='two-sided', exact=False,
axis=0)
return observed, ci, p, distribution | python | def bootstrap_isc(iscs, pairwise=False, summary_statistic='median',
n_bootstraps=1000, ci_percentile=95, random_state=None):
"""One-sample group-level bootstrap hypothesis test for ISCs
For ISCs from one more voxels or ROIs, resample subjects with replacement
to construct a bootstrap distribution. Input is a list or ndarray of
ISCs for a single voxel/ROI, or an ISCs-by-voxels ndarray. ISC values
should be either N ISC values for N subjects in the leave-one-out appraoch
(pairwise=False), N(N-1)/2 ISC values for N subjects in the pairwise
approach (pairwise=True). In the pairwise approach, ISC values should
correspond to the vectorized upper triangle of a square corrlation matrix
(see scipy.stats.distance.squareform). Shifts bootstrap distribution by
actual summary statistic (effectively to zero) for two-tailed null
hypothesis test (Hall & Wilson, 1991). Uses subject-wise (not pair-wise)
resampling in the pairwise approach. Returns the observed ISC, the
confidence interval, and a p-value for the bootstrap hypothesis test, as
well as the bootstrap distribution of summary statistics. According to
Chen et al., 2016, this is the preferred nonparametric approach for
controlling false positive rates (FPR) for one-sample tests in the pairwise
approach.
The implementation is based on the work in [Chen2016]_ and
[HallWilson1991]_.
.. [HallWilson1991] "Two guidelines for bootstrap hypothesis testing.",
P. Hall, S. R., Wilson, 1991, Biometrics, 757-762.
https://doi.org/10.2307/2532163
Parameters
----------
iscs : list or ndarray, ISCs by voxels array
ISC values for one or more voxels
pairwise : bool, default: False
Indicator of pairwise or leave-one-out, should match ISCs structure
summary_statistic : str, default: 'median'
Summary statistic, either 'median' (default) or 'mean'
n_bootstraps : int, default: 1000
Number of bootstrap samples (subject-level with replacement)
ci_percentile : int, default: 95
Percentile for computing confidence intervals
random_state = int or None, default: None
Initial random seed
Returns
-------
observed : float, median (or mean) ISC value
Summary statistic for actual ISCs
ci : tuple, bootstrap confidence intervals
Confidence intervals generated from bootstrap distribution
p : float, p-value
p-value based on bootstrap hypothesis test
distribution : ndarray, bootstraps by voxels (optional)
Bootstrap distribution if return_bootstrap=True
"""
# Standardize structure of input data
iscs, n_subjects, n_voxels = _check_isc_input(iscs, pairwise=pairwise)
# Check for valid summary statistic
if summary_statistic not in ('mean', 'median'):
raise ValueError("Summary statistic must be 'mean' or 'median'")
# Compute summary statistic for observed ISCs
observed = compute_summary_statistic(iscs,
summary_statistic=summary_statistic,
axis=0)
# Set up an empty list to build our bootstrap distribution
distribution = []
# Loop through n bootstrap iterations and populate distribution
for i in np.arange(n_bootstraps):
# Random seed to be deterministically re-randomized at each iteration
if isinstance(random_state, np.random.RandomState):
prng = random_state
else:
prng = np.random.RandomState(random_state)
# Randomly sample subject IDs with replacement
subject_sample = sorted(prng.choice(np.arange(n_subjects),
size=n_subjects))
# Squareform and shuffle rows/columns of pairwise ISC matrix to
# to retain correlation structure among ISCs, then get triangle
if pairwise:
# Loop through voxels
isc_sample = []
for voxel_iscs in iscs.T:
# Square the triangle and fill diagonal
voxel_iscs = squareform(voxel_iscs)
np.fill_diagonal(voxel_iscs, 1)
# Check that pairwise ISC matrix is square and symmetric
assert voxel_iscs.shape[0] == voxel_iscs.shape[1]
assert np.allclose(voxel_iscs, voxel_iscs.T)
# Shuffle square correlation matrix and get triangle
voxel_sample = voxel_iscs[subject_sample, :][:, subject_sample]
voxel_sample = squareform(voxel_sample, checks=False)
# Censor off-diagonal 1s for same-subject pairs
voxel_sample[voxel_sample == 1.] = np.NaN
isc_sample.append(voxel_sample)
isc_sample = np.column_stack(isc_sample)
# Get simple bootstrap sample if not pairwise
elif not pairwise:
isc_sample = iscs[subject_sample, :]
# Compute summary statistic for bootstrap ISCs per voxel
# (alternatively could construct distribution for all voxels
# then compute statistics, but larger memory footprint)
distribution.append(compute_summary_statistic(
isc_sample,
summary_statistic=summary_statistic,
axis=0))
# Update random state for next iteration
random_state = np.random.RandomState(prng.randint(0, MAX_RANDOM_SEED))
# Convert distribution to numpy array
distribution = np.array(distribution)
# Compute CIs of median from bootstrap distribution (default: 95%)
ci = (np.percentile(distribution, (100 - ci_percentile)/2, axis=0),
np.percentile(distribution, ci_percentile + (100 - ci_percentile)/2,
axis=0))
# Shift bootstrap distribution to 0 for hypothesis test
shifted = distribution - observed
# Get p-value for actual median from shifted distribution
p = p_from_null(observed, shifted,
side='two-sided', exact=False,
axis=0)
return observed, ci, p, distribution | [
"def",
"bootstrap_isc",
"(",
"iscs",
",",
"pairwise",
"=",
"False",
",",
"summary_statistic",
"=",
"'median'",
",",
"n_bootstraps",
"=",
"1000",
",",
"ci_percentile",
"=",
"95",
",",
"random_state",
"=",
"None",
")",
":",
"# Standardize structure of input data",
... | One-sample group-level bootstrap hypothesis test for ISCs
For ISCs from one more voxels or ROIs, resample subjects with replacement
to construct a bootstrap distribution. Input is a list or ndarray of
ISCs for a single voxel/ROI, or an ISCs-by-voxels ndarray. ISC values
should be either N ISC values for N subjects in the leave-one-out appraoch
(pairwise=False), N(N-1)/2 ISC values for N subjects in the pairwise
approach (pairwise=True). In the pairwise approach, ISC values should
correspond to the vectorized upper triangle of a square corrlation matrix
(see scipy.stats.distance.squareform). Shifts bootstrap distribution by
actual summary statistic (effectively to zero) for two-tailed null
hypothesis test (Hall & Wilson, 1991). Uses subject-wise (not pair-wise)
resampling in the pairwise approach. Returns the observed ISC, the
confidence interval, and a p-value for the bootstrap hypothesis test, as
well as the bootstrap distribution of summary statistics. According to
Chen et al., 2016, this is the preferred nonparametric approach for
controlling false positive rates (FPR) for one-sample tests in the pairwise
approach.
The implementation is based on the work in [Chen2016]_ and
[HallWilson1991]_.
.. [HallWilson1991] "Two guidelines for bootstrap hypothesis testing.",
P. Hall, S. R., Wilson, 1991, Biometrics, 757-762.
https://doi.org/10.2307/2532163
Parameters
----------
iscs : list or ndarray, ISCs by voxels array
ISC values for one or more voxels
pairwise : bool, default: False
Indicator of pairwise or leave-one-out, should match ISCs structure
summary_statistic : str, default: 'median'
Summary statistic, either 'median' (default) or 'mean'
n_bootstraps : int, default: 1000
Number of bootstrap samples (subject-level with replacement)
ci_percentile : int, default: 95
Percentile for computing confidence intervals
random_state = int or None, default: None
Initial random seed
Returns
-------
observed : float, median (or mean) ISC value
Summary statistic for actual ISCs
ci : tuple, bootstrap confidence intervals
Confidence intervals generated from bootstrap distribution
p : float, p-value
p-value based on bootstrap hypothesis test
distribution : ndarray, bootstraps by voxels (optional)
Bootstrap distribution if return_bootstrap=True | [
"One",
"-",
"sample",
"group",
"-",
"level",
"bootstrap",
"hypothesis",
"test",
"for",
"ISCs"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/isc.py#L645-L796 | train | 204,484 |
brainiak/brainiak | brainiak/isc.py | _permute_one_sample_iscs | def _permute_one_sample_iscs(iscs, group_parameters, i, pairwise=False,
summary_statistic='median', group_matrix=None,
exact_permutations=None, prng=None):
"""Applies one-sample permutations to ISC data
Input ISCs should be n_subjects (leave-one-out approach) or
n_pairs (pairwise approach) by n_voxels or n_ROIs array.
This function is only intended to be used internally by the
permutation_isc function in this module.
Parameters
----------
iscs : ndarray or list
ISC values
group_parameters : dict
Dictionary of group parameters
i : int
Permutation iteration
pairwise : bool, default: False
Indicator of pairwise or leave-one-out, should match ISCs variable
summary_statistic : str, default: 'median'
Summary statistic, either 'median' (default) or 'mean'
exact_permutations : list
List of permutations
prng = None or np.random.RandomState, default: None
Initial random seed
Returns
-------
isc_sample : ndarray
Array of permuted ISC values
"""
# Randomized sign-flips
if exact_permutations:
sign_flipper = np.array(exact_permutations[i])
else:
sign_flipper = prng.choice([-1, 1],
size=group_parameters['n_subjects'],
replace=True)
# If pairwise, apply sign-flips by rows and columns
if pairwise:
matrix_flipped = (group_parameters['group_matrix'] * sign_flipper
* sign_flipper[
:, np.newaxis])
sign_flipper = squareform(matrix_flipped, checks=False)
# Apply flips along ISC axis (same across voxels)
isc_flipped = iscs * sign_flipper[:, np.newaxis]
# Get summary statistics on sign-flipped ISCs
isc_sample = compute_summary_statistic(
isc_flipped,
summary_statistic=summary_statistic,
axis=0)
return isc_sample | python | def _permute_one_sample_iscs(iscs, group_parameters, i, pairwise=False,
summary_statistic='median', group_matrix=None,
exact_permutations=None, prng=None):
"""Applies one-sample permutations to ISC data
Input ISCs should be n_subjects (leave-one-out approach) or
n_pairs (pairwise approach) by n_voxels or n_ROIs array.
This function is only intended to be used internally by the
permutation_isc function in this module.
Parameters
----------
iscs : ndarray or list
ISC values
group_parameters : dict
Dictionary of group parameters
i : int
Permutation iteration
pairwise : bool, default: False
Indicator of pairwise or leave-one-out, should match ISCs variable
summary_statistic : str, default: 'median'
Summary statistic, either 'median' (default) or 'mean'
exact_permutations : list
List of permutations
prng = None or np.random.RandomState, default: None
Initial random seed
Returns
-------
isc_sample : ndarray
Array of permuted ISC values
"""
# Randomized sign-flips
if exact_permutations:
sign_flipper = np.array(exact_permutations[i])
else:
sign_flipper = prng.choice([-1, 1],
size=group_parameters['n_subjects'],
replace=True)
# If pairwise, apply sign-flips by rows and columns
if pairwise:
matrix_flipped = (group_parameters['group_matrix'] * sign_flipper
* sign_flipper[
:, np.newaxis])
sign_flipper = squareform(matrix_flipped, checks=False)
# Apply flips along ISC axis (same across voxels)
isc_flipped = iscs * sign_flipper[:, np.newaxis]
# Get summary statistics on sign-flipped ISCs
isc_sample = compute_summary_statistic(
isc_flipped,
summary_statistic=summary_statistic,
axis=0)
return isc_sample | [
"def",
"_permute_one_sample_iscs",
"(",
"iscs",
",",
"group_parameters",
",",
"i",
",",
"pairwise",
"=",
"False",
",",
"summary_statistic",
"=",
"'median'",
",",
"group_matrix",
"=",
"None",
",",
"exact_permutations",
"=",
"None",
",",
"prng",
"=",
"None",
")"... | Applies one-sample permutations to ISC data
Input ISCs should be n_subjects (leave-one-out approach) or
n_pairs (pairwise approach) by n_voxels or n_ROIs array.
This function is only intended to be used internally by the
permutation_isc function in this module.
Parameters
----------
iscs : ndarray or list
ISC values
group_parameters : dict
Dictionary of group parameters
i : int
Permutation iteration
pairwise : bool, default: False
Indicator of pairwise or leave-one-out, should match ISCs variable
summary_statistic : str, default: 'median'
Summary statistic, either 'median' (default) or 'mean'
exact_permutations : list
List of permutations
prng = None or np.random.RandomState, default: None
Initial random seed
Returns
-------
isc_sample : ndarray
Array of permuted ISC values | [
"Applies",
"one",
"-",
"sample",
"permutations",
"to",
"ISC",
"data"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/isc.py#L893-L958 | train | 204,485 |
brainiak/brainiak | brainiak/isc.py | _permute_two_sample_iscs | def _permute_two_sample_iscs(iscs, group_parameters, i, pairwise=False,
summary_statistic='median',
exact_permutations=None, prng=None):
"""Applies two-sample permutations to ISC data
Input ISCs should be n_subjects (leave-one-out approach) or
n_pairs (pairwise approach) by n_voxels or n_ROIs array.
This function is only intended to be used internally by the
permutation_isc function in this module.
Parameters
----------
iscs : ndarray or list
ISC values
group_parameters : dict
Dictionary of group parameters
i : int
Permutation iteration
pairwise : bool, default: False
Indicator of pairwise or leave-one-out, should match ISCs variable
summary_statistic : str, default: 'median'
Summary statistic, either 'median' (default) or 'mean'
exact_permutations : list
List of permutations
prng = None or np.random.RandomState, default: None
Initial random seed
Indicator of pairwise or leave-one-out, should match ISCs variable
Returns
-------
isc_sample : ndarray
Array of permuted ISC values
"""
# Shuffle the group assignments
if exact_permutations:
group_shuffler = np.array(exact_permutations[i])
elif not exact_permutations and pairwise:
group_shuffler = prng.permutation(np.arange(
len(np.array(group_parameters['group_assignment'])[
group_parameters['sorter']])))
elif not exact_permutations and not pairwise:
group_shuffler = prng.permutation(np.arange(
len(group_parameters['group_assignment'])))
# If pairwise approach, convert group assignments to matrix
if pairwise:
# Apply shuffler to group matrix rows/columns
group_shuffled = group_parameters['group_matrix'][
group_shuffler, :][:, group_shuffler]
# Unsort shuffled matrix and squareform to create selector
group_selector = squareform(group_shuffled[
group_parameters['unsorter'], :]
[:, group_parameters['unsorter']],
checks=False)
# Shuffle group assignments in leave-one-out two sample test
elif not pairwise:
# Apply shuffler to group matrix rows/columns
group_selector = np.array(
group_parameters['group_assignment'])[group_shuffler]
# Get difference of within-group summary statistics
# with group permutation
isc_sample = (compute_summary_statistic(
iscs[group_selector == group_parameters[
'group_labels'][0], :],
summary_statistic=summary_statistic,
axis=0) -
compute_summary_statistic(
iscs[group_selector == group_parameters[
'group_labels'][1], :],
summary_statistic=summary_statistic,
axis=0))
return isc_sample | python | def _permute_two_sample_iscs(iscs, group_parameters, i, pairwise=False,
summary_statistic='median',
exact_permutations=None, prng=None):
"""Applies two-sample permutations to ISC data
Input ISCs should be n_subjects (leave-one-out approach) or
n_pairs (pairwise approach) by n_voxels or n_ROIs array.
This function is only intended to be used internally by the
permutation_isc function in this module.
Parameters
----------
iscs : ndarray or list
ISC values
group_parameters : dict
Dictionary of group parameters
i : int
Permutation iteration
pairwise : bool, default: False
Indicator of pairwise or leave-one-out, should match ISCs variable
summary_statistic : str, default: 'median'
Summary statistic, either 'median' (default) or 'mean'
exact_permutations : list
List of permutations
prng = None or np.random.RandomState, default: None
Initial random seed
Indicator of pairwise or leave-one-out, should match ISCs variable
Returns
-------
isc_sample : ndarray
Array of permuted ISC values
"""
# Shuffle the group assignments
if exact_permutations:
group_shuffler = np.array(exact_permutations[i])
elif not exact_permutations and pairwise:
group_shuffler = prng.permutation(np.arange(
len(np.array(group_parameters['group_assignment'])[
group_parameters['sorter']])))
elif not exact_permutations and not pairwise:
group_shuffler = prng.permutation(np.arange(
len(group_parameters['group_assignment'])))
# If pairwise approach, convert group assignments to matrix
if pairwise:
# Apply shuffler to group matrix rows/columns
group_shuffled = group_parameters['group_matrix'][
group_shuffler, :][:, group_shuffler]
# Unsort shuffled matrix and squareform to create selector
group_selector = squareform(group_shuffled[
group_parameters['unsorter'], :]
[:, group_parameters['unsorter']],
checks=False)
# Shuffle group assignments in leave-one-out two sample test
elif not pairwise:
# Apply shuffler to group matrix rows/columns
group_selector = np.array(
group_parameters['group_assignment'])[group_shuffler]
# Get difference of within-group summary statistics
# with group permutation
isc_sample = (compute_summary_statistic(
iscs[group_selector == group_parameters[
'group_labels'][0], :],
summary_statistic=summary_statistic,
axis=0) -
compute_summary_statistic(
iscs[group_selector == group_parameters[
'group_labels'][1], :],
summary_statistic=summary_statistic,
axis=0))
return isc_sample | [
"def",
"_permute_two_sample_iscs",
"(",
"iscs",
",",
"group_parameters",
",",
"i",
",",
"pairwise",
"=",
"False",
",",
"summary_statistic",
"=",
"'median'",
",",
"exact_permutations",
"=",
"None",
",",
"prng",
"=",
"None",
")",
":",
"# Shuffle the group assignment... | Applies two-sample permutations to ISC data
Input ISCs should be n_subjects (leave-one-out approach) or
n_pairs (pairwise approach) by n_voxels or n_ROIs array.
This function is only intended to be used internally by the
permutation_isc function in this module.
Parameters
----------
iscs : ndarray or list
ISC values
group_parameters : dict
Dictionary of group parameters
i : int
Permutation iteration
pairwise : bool, default: False
Indicator of pairwise or leave-one-out, should match ISCs variable
summary_statistic : str, default: 'median'
Summary statistic, either 'median' (default) or 'mean'
exact_permutations : list
List of permutations
prng = None or np.random.RandomState, default: None
Initial random seed
Indicator of pairwise or leave-one-out, should match ISCs variable
Returns
-------
isc_sample : ndarray
Array of permuted ISC values | [
"Applies",
"two",
"-",
"sample",
"permutations",
"to",
"ISC",
"data"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/isc.py#L961-L1047 | train | 204,486 |
brainiak/brainiak | brainiak/isc.py | timeshift_isc | def timeshift_isc(data, pairwise=False, summary_statistic='median',
n_shifts=1000, tolerate_nans=True, random_state=None):
"""Circular time-shift randomization for one-sample ISC test
For each voxel or ROI, compute the actual ISC and p-values
from a null distribution of ISCs where response time series
are first circularly shifted by random intervals. If pairwise,
apply time-shift randomization to each subjects and compute pairwise
ISCs. If leave-one-out approach is used (pairwise=False), apply
the random time-shift to only the left-out subject in each iteration
of the leave-one-out procedure. Input data should be a list where
each item is a time-points by voxels ndarray for a given subject.
Multiple input ndarrays must be the same shape. If a single ndarray is
supplied, the last dimension is assumed to correspond to subjects.
When using leave-one-out approach, NaNs are ignored when computing mean
time series of N-1 subjects (default: tolerate_nans=True). Alternatively,
you may supply a float between 0 and 1 indicating a threshold proportion
of N subjects with non-NaN values required when computing the average time
series for a given voxel. For example, if tolerate_nans=.8, ISCs will be
computed for any voxel where >= 80% of subjects have non-NaN values,
while voxels with < 80% non-NaN values will be assigned NaNs. If set to
False, NaNs are not tolerated and voxels with one or more NaNs among the
N-1 subjects will be assigned NaN. Setting tolerate_nans to True or False
will not affect the pairwise approach; however, if a threshold float is
provided, voxels that do not reach this threshold will be excluded. Note
that accommodating NaNs may be notably slower than setting tolerate_nans to
False. Returns the observed ISC and p-values (two-tailed test), as well as
the null distribution of ISCs computed on randomly time-shifted data.
The implementation is based on the work in [Kauppi2010]_ and
[Kauppi2014]_.
.. [Kauppi2010] "Inter-subject correlation of brain hemodynamic
responses during watching a movie: localization in space and
frequency.", J. P. Kauppi, I. P. Jääskeläinen, M. Sams, J. Tohka,
2010, Frontiers in Neuroinformatics, 4, 5.
https://doi.org/10.3389/fninf.2010.00005
Parameters
----------
data : list or ndarray (n_TRs x n_voxels x n_subjects)
fMRI data for which to compute ISFC
pairwise : bool, default: False
Whether to use pairwise (True) or leave-one-out (False) approach
summary_statistic : str, default: 'median'
Summary statistic, either 'median' (default) or 'mean'
n_shifts : int, default: 1000
Number of randomly shifted samples
tolerate_nans : bool or float, default: True
Accommodate NaNs (when averaging in leave-one-out approach)
random_state = int, None, or np.random.RandomState, default: None
Initial random seed
Returns
-------
observed : float, observed ISC (without time-shifting)
Actual ISCs
p : float, p-value
p-value based on time-shifting randomization test
distribution : ndarray, time-shifts by voxels (optional)
Time-shifted null distribution if return_bootstrap=True
"""
# Check response time series input format
data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(data)
# Get actual observed ISC
observed = isc(data, pairwise=pairwise,
summary_statistic=summary_statistic,
tolerate_nans=tolerate_nans)
# Roll axis to get subjects in first dimension for loop
if pairwise:
data = np.rollaxis(data, 2, 0)
# Iterate through randomized shifts to create null distribution
distribution = []
for i in np.arange(n_shifts):
# Random seed to be deterministically re-randomized at each iteration
if isinstance(random_state, np.random.RandomState):
prng = random_state
else:
prng = np.random.RandomState(random_state)
# Get a random set of shifts based on number of TRs,
shifts = prng.choice(np.arange(n_TRs), size=n_subjects,
replace=True)
# In pairwise approach, apply all shifts then compute pairwise ISCs
if pairwise:
# Apply circular shift to each subject's time series
shifted_data = []
for subject, shift in zip(data, shifts):
shifted_data.append(np.concatenate(
(subject[-shift:, :],
subject[:-shift, :])))
shifted_data = np.dstack(shifted_data)
# Compute null ISC on shifted data for pairwise approach
shifted_isc = isc(shifted_data, pairwise=pairwise,
summary_statistic=summary_statistic,
tolerate_nans=tolerate_nans)
# In leave-one-out, apply shift only to each left-out participant
elif not pairwise:
shifted_isc = []
for s, shift in enumerate(shifts):
shifted_subject = np.concatenate((data[-shift:, :, s],
data[:-shift, :, s]))
nonshifted_mean = np.mean(np.delete(data, s, 2), axis=2)
loo_isc = isc(np.dstack((shifted_subject, nonshifted_mean)),
pairwise=False,
summary_statistic=None,
tolerate_nans=tolerate_nans)
shifted_isc.append(loo_isc)
# Get summary statistics across left-out subjects
shifted_isc = compute_summary_statistic(
np.dstack(shifted_isc),
summary_statistic=summary_statistic,
axis=2)
distribution.append(shifted_isc)
# Update random state for next iteration
random_state = np.random.RandomState(prng.randint(0, MAX_RANDOM_SEED))
# Convert distribution to numpy array
distribution = np.vstack(distribution)
# Get p-value for actual median from shifted distribution
p = p_from_null(observed, distribution,
side='two-sided', exact=False,
axis=0)
return observed, p, distribution | python | def timeshift_isc(data, pairwise=False, summary_statistic='median',
n_shifts=1000, tolerate_nans=True, random_state=None):
"""Circular time-shift randomization for one-sample ISC test
For each voxel or ROI, compute the actual ISC and p-values
from a null distribution of ISCs where response time series
are first circularly shifted by random intervals. If pairwise,
apply time-shift randomization to each subjects and compute pairwise
ISCs. If leave-one-out approach is used (pairwise=False), apply
the random time-shift to only the left-out subject in each iteration
of the leave-one-out procedure. Input data should be a list where
each item is a time-points by voxels ndarray for a given subject.
Multiple input ndarrays must be the same shape. If a single ndarray is
supplied, the last dimension is assumed to correspond to subjects.
When using leave-one-out approach, NaNs are ignored when computing mean
time series of N-1 subjects (default: tolerate_nans=True). Alternatively,
you may supply a float between 0 and 1 indicating a threshold proportion
of N subjects with non-NaN values required when computing the average time
series for a given voxel. For example, if tolerate_nans=.8, ISCs will be
computed for any voxel where >= 80% of subjects have non-NaN values,
while voxels with < 80% non-NaN values will be assigned NaNs. If set to
False, NaNs are not tolerated and voxels with one or more NaNs among the
N-1 subjects will be assigned NaN. Setting tolerate_nans to True or False
will not affect the pairwise approach; however, if a threshold float is
provided, voxels that do not reach this threshold will be excluded. Note
that accommodating NaNs may be notably slower than setting tolerate_nans to
False. Returns the observed ISC and p-values (two-tailed test), as well as
the null distribution of ISCs computed on randomly time-shifted data.
The implementation is based on the work in [Kauppi2010]_ and
[Kauppi2014]_.
.. [Kauppi2010] "Inter-subject correlation of brain hemodynamic
responses during watching a movie: localization in space and
frequency.", J. P. Kauppi, I. P. Jääskeläinen, M. Sams, J. Tohka,
2010, Frontiers in Neuroinformatics, 4, 5.
https://doi.org/10.3389/fninf.2010.00005
Parameters
----------
data : list or ndarray (n_TRs x n_voxels x n_subjects)
fMRI data for which to compute ISFC
pairwise : bool, default: False
Whether to use pairwise (True) or leave-one-out (False) approach
summary_statistic : str, default: 'median'
Summary statistic, either 'median' (default) or 'mean'
n_shifts : int, default: 1000
Number of randomly shifted samples
tolerate_nans : bool or float, default: True
Accommodate NaNs (when averaging in leave-one-out approach)
random_state = int, None, or np.random.RandomState, default: None
Initial random seed
Returns
-------
observed : float, observed ISC (without time-shifting)
Actual ISCs
p : float, p-value
p-value based on time-shifting randomization test
distribution : ndarray, time-shifts by voxels (optional)
Time-shifted null distribution if return_bootstrap=True
"""
# Check response time series input format
data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(data)
# Get actual observed ISC
observed = isc(data, pairwise=pairwise,
summary_statistic=summary_statistic,
tolerate_nans=tolerate_nans)
# Roll axis to get subjects in first dimension for loop
if pairwise:
data = np.rollaxis(data, 2, 0)
# Iterate through randomized shifts to create null distribution
distribution = []
for i in np.arange(n_shifts):
# Random seed to be deterministically re-randomized at each iteration
if isinstance(random_state, np.random.RandomState):
prng = random_state
else:
prng = np.random.RandomState(random_state)
# Get a random set of shifts based on number of TRs,
shifts = prng.choice(np.arange(n_TRs), size=n_subjects,
replace=True)
# In pairwise approach, apply all shifts then compute pairwise ISCs
if pairwise:
# Apply circular shift to each subject's time series
shifted_data = []
for subject, shift in zip(data, shifts):
shifted_data.append(np.concatenate(
(subject[-shift:, :],
subject[:-shift, :])))
shifted_data = np.dstack(shifted_data)
# Compute null ISC on shifted data for pairwise approach
shifted_isc = isc(shifted_data, pairwise=pairwise,
summary_statistic=summary_statistic,
tolerate_nans=tolerate_nans)
# In leave-one-out, apply shift only to each left-out participant
elif not pairwise:
shifted_isc = []
for s, shift in enumerate(shifts):
shifted_subject = np.concatenate((data[-shift:, :, s],
data[:-shift, :, s]))
nonshifted_mean = np.mean(np.delete(data, s, 2), axis=2)
loo_isc = isc(np.dstack((shifted_subject, nonshifted_mean)),
pairwise=False,
summary_statistic=None,
tolerate_nans=tolerate_nans)
shifted_isc.append(loo_isc)
# Get summary statistics across left-out subjects
shifted_isc = compute_summary_statistic(
np.dstack(shifted_isc),
summary_statistic=summary_statistic,
axis=2)
distribution.append(shifted_isc)
# Update random state for next iteration
random_state = np.random.RandomState(prng.randint(0, MAX_RANDOM_SEED))
# Convert distribution to numpy array
distribution = np.vstack(distribution)
# Get p-value for actual median from shifted distribution
p = p_from_null(observed, distribution,
side='two-sided', exact=False,
axis=0)
return observed, p, distribution | [
"def",
"timeshift_isc",
"(",
"data",
",",
"pairwise",
"=",
"False",
",",
"summary_statistic",
"=",
"'median'",
",",
"n_shifts",
"=",
"1000",
",",
"tolerate_nans",
"=",
"True",
",",
"random_state",
"=",
"None",
")",
":",
"# Check response time series input format",... | Circular time-shift randomization for one-sample ISC test
For each voxel or ROI, compute the actual ISC and p-values
from a null distribution of ISCs where response time series
are first circularly shifted by random intervals. If pairwise,
apply time-shift randomization to each subjects and compute pairwise
ISCs. If leave-one-out approach is used (pairwise=False), apply
the random time-shift to only the left-out subject in each iteration
of the leave-one-out procedure. Input data should be a list where
each item is a time-points by voxels ndarray for a given subject.
Multiple input ndarrays must be the same shape. If a single ndarray is
supplied, the last dimension is assumed to correspond to subjects.
When using leave-one-out approach, NaNs are ignored when computing mean
time series of N-1 subjects (default: tolerate_nans=True). Alternatively,
you may supply a float between 0 and 1 indicating a threshold proportion
of N subjects with non-NaN values required when computing the average time
series for a given voxel. For example, if tolerate_nans=.8, ISCs will be
computed for any voxel where >= 80% of subjects have non-NaN values,
while voxels with < 80% non-NaN values will be assigned NaNs. If set to
False, NaNs are not tolerated and voxels with one or more NaNs among the
N-1 subjects will be assigned NaN. Setting tolerate_nans to True or False
will not affect the pairwise approach; however, if a threshold float is
provided, voxels that do not reach this threshold will be excluded. Note
that accommodating NaNs may be notably slower than setting tolerate_nans to
False. Returns the observed ISC and p-values (two-tailed test), as well as
the null distribution of ISCs computed on randomly time-shifted data.
The implementation is based on the work in [Kauppi2010]_ and
[Kauppi2014]_.
.. [Kauppi2010] "Inter-subject correlation of brain hemodynamic
responses during watching a movie: localization in space and
frequency.", J. P. Kauppi, I. P. Jääskeläinen, M. Sams, J. Tohka,
2010, Frontiers in Neuroinformatics, 4, 5.
https://doi.org/10.3389/fninf.2010.00005
Parameters
----------
data : list or ndarray (n_TRs x n_voxels x n_subjects)
fMRI data for which to compute ISFC
pairwise : bool, default: False
Whether to use pairwise (True) or leave-one-out (False) approach
summary_statistic : str, default: 'median'
Summary statistic, either 'median' (default) or 'mean'
n_shifts : int, default: 1000
Number of randomly shifted samples
tolerate_nans : bool or float, default: True
Accommodate NaNs (when averaging in leave-one-out approach)
random_state = int, None, or np.random.RandomState, default: None
Initial random seed
Returns
-------
observed : float, observed ISC (without time-shifting)
Actual ISCs
p : float, p-value
p-value based on time-shifting randomization test
distribution : ndarray, time-shifts by voxels (optional)
Time-shifted null distribution if return_bootstrap=True | [
"Circular",
"time",
"-",
"shift",
"randomization",
"for",
"one",
"-",
"sample",
"ISC",
"test"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/isc.py#L1237-L1383 | train | 204,487 |
brainiak/brainiak | brainiak/isc.py | phaseshift_isc | def phaseshift_isc(data, pairwise=False, summary_statistic='median',
n_shifts=1000, tolerate_nans=True, random_state=None):
"""Phase randomization for one-sample ISC test
For each voxel or ROI, compute the actual ISC and p-values
from a null distribution of ISCs where response time series
are phase randomized prior to computing ISC. If pairwise,
apply phase randomization to each subject and compute pairwise
ISCs. If leave-one-out approach is used (pairwise=False), only
apply phase randomization to the left-out subject in each iteration
of the leave-one-out procedure. Input data should be a list where
each item is a time-points by voxels ndarray for a given subject.
Multiple input ndarrays must be the same shape. If a single ndarray is
supplied, the last dimension is assumed to correspond to subjects.
When using leave-one-out approach, NaNs are ignored when computing mean
time series of N-1 subjects (default: tolerate_nans=True). Alternatively,
you may supply a float between 0 and 1 indicating a threshold proportion
of N subjects with non-NaN values required when computing the average time
series for a given voxel. For example, if tolerate_nans=.8, ISCs will be
computed for any voxel where >= 80% of subjects have non-NaN values,
while voxels with < 80% non-NaN values will be assigned NaNs. If set to
False, NaNs are not tolerated and voxels with one or more NaNs among the
N-1 subjects will be assigned NaN. Setting tolerate_nans to True or False
will not affect the pairwise approach; however, if a threshold float is
provided, voxels that do not reach this threshold will be excluded. Note
that accommodating NaNs may be notably slower than setting tolerate_nans to
False. Returns the observed ISC and p-values (two-tailed test), as well as
the null distribution of ISCs computed on phase-randomized data.
The implementation is based on the work in [Lerner2011]_ and
[Simony2016]_.
.. [Lerner2011] "Topographic mapping of a hierarchy of temporal
receptive windows using a narrated story.", Y. Lerner, C. J. Honey,
L. J. Silbert, U. Hasson, 2011, Journal of Neuroscience, 31, 2906-2915.
https://doi.org/10.1523/jneurosci.3684-10.2011
Parameters
----------
data : list or ndarray (n_TRs x n_voxels x n_subjects)
fMRI data for which to compute ISFC
pairwise : bool, default: False
Whether to use pairwise (True) or leave-one-out (False) approach
summary_statistic : str, default: 'median'
Summary statistic, either 'median' (default) or 'mean'
n_shifts : int, default: 1000
Number of randomly shifted samples
tolerate_nans : bool or float, default: True
Accommodate NaNs (when averaging in leave-one-out approach)
random_state = int, None, or np.random.RandomState, default: None
Initial random seed
Returns
-------
observed : float, observed ISC (without time-shifting)
Actual ISCs
p : float, p-value
p-value based on time-shifting randomization test
distribution : ndarray, time-shifts by voxels (optional)
Time-shifted null distribution if return_bootstrap=True
"""
# Check response time series input format
data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(data)
# Get actual observed ISC
observed = isc(data, pairwise=pairwise,
summary_statistic=summary_statistic,
tolerate_nans=tolerate_nans)
# Iterate through randomized shifts to create null distribution
distribution = []
for i in np.arange(n_shifts):
# Random seed to be deterministically re-randomized at each iteration
if isinstance(random_state, np.random.RandomState):
prng = random_state
else:
prng = np.random.RandomState(random_state)
# Get shifted version of data
shifted_data = phase_randomize(data, random_state=prng)
# In pairwise approach, apply all shifts then compute pairwise ISCs
if pairwise:
# Compute null ISC on shifted data for pairwise approach
shifted_isc = isc(shifted_data, pairwise=True,
summary_statistic=summary_statistic,
tolerate_nans=tolerate_nans)
# In leave-one-out, apply shift only to each left-out participant
elif not pairwise:
# Roll subject axis of phase-randomized data
shifted_data = np.rollaxis(shifted_data, 2, 0)
shifted_isc = []
for s, shifted_subject in enumerate(shifted_data):
# ISC of shifted left-out subject vs mean of N-1 subjects
nonshifted_mean = np.mean(np.delete(data, s, axis=2),
axis=2)
loo_isc = isc(np.dstack((shifted_subject, nonshifted_mean)),
pairwise=False, summary_statistic=None,
tolerate_nans=tolerate_nans)
shifted_isc.append(loo_isc)
# Get summary statistics across left-out subjects
shifted_isc = compute_summary_statistic(
np.dstack(shifted_isc),
summary_statistic=summary_statistic, axis=2)
distribution.append(shifted_isc)
# Update random state for next iteration
random_state = np.random.RandomState(prng.randint(0, MAX_RANDOM_SEED))
# Convert distribution to numpy array
distribution = np.vstack(distribution)
# Get p-value for actual median from shifted distribution
p = p_from_null(observed, distribution,
side='two-sided', exact=False,
axis=0)
return observed, p, distribution | python | def phaseshift_isc(data, pairwise=False, summary_statistic='median',
n_shifts=1000, tolerate_nans=True, random_state=None):
"""Phase randomization for one-sample ISC test
For each voxel or ROI, compute the actual ISC and p-values
from a null distribution of ISCs where response time series
are phase randomized prior to computing ISC. If pairwise,
apply phase randomization to each subject and compute pairwise
ISCs. If leave-one-out approach is used (pairwise=False), only
apply phase randomization to the left-out subject in each iteration
of the leave-one-out procedure. Input data should be a list where
each item is a time-points by voxels ndarray for a given subject.
Multiple input ndarrays must be the same shape. If a single ndarray is
supplied, the last dimension is assumed to correspond to subjects.
When using leave-one-out approach, NaNs are ignored when computing mean
time series of N-1 subjects (default: tolerate_nans=True). Alternatively,
you may supply a float between 0 and 1 indicating a threshold proportion
of N subjects with non-NaN values required when computing the average time
series for a given voxel. For example, if tolerate_nans=.8, ISCs will be
computed for any voxel where >= 80% of subjects have non-NaN values,
while voxels with < 80% non-NaN values will be assigned NaNs. If set to
False, NaNs are not tolerated and voxels with one or more NaNs among the
N-1 subjects will be assigned NaN. Setting tolerate_nans to True or False
will not affect the pairwise approach; however, if a threshold float is
provided, voxels that do not reach this threshold will be excluded. Note
that accommodating NaNs may be notably slower than setting tolerate_nans to
False. Returns the observed ISC and p-values (two-tailed test), as well as
the null distribution of ISCs computed on phase-randomized data.
The implementation is based on the work in [Lerner2011]_ and
[Simony2016]_.
.. [Lerner2011] "Topographic mapping of a hierarchy of temporal
receptive windows using a narrated story.", Y. Lerner, C. J. Honey,
L. J. Silbert, U. Hasson, 2011, Journal of Neuroscience, 31, 2906-2915.
https://doi.org/10.1523/jneurosci.3684-10.2011
Parameters
----------
data : list or ndarray (n_TRs x n_voxels x n_subjects)
fMRI data for which to compute ISFC
pairwise : bool, default: False
Whether to use pairwise (True) or leave-one-out (False) approach
summary_statistic : str, default: 'median'
Summary statistic, either 'median' (default) or 'mean'
n_shifts : int, default: 1000
Number of randomly shifted samples
tolerate_nans : bool or float, default: True
Accommodate NaNs (when averaging in leave-one-out approach)
random_state = int, None, or np.random.RandomState, default: None
Initial random seed
Returns
-------
observed : float, observed ISC (without time-shifting)
Actual ISCs
p : float, p-value
p-value based on time-shifting randomization test
distribution : ndarray, time-shifts by voxels (optional)
Time-shifted null distribution if return_bootstrap=True
"""
# Check response time series input format
data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(data)
# Get actual observed ISC
observed = isc(data, pairwise=pairwise,
summary_statistic=summary_statistic,
tolerate_nans=tolerate_nans)
# Iterate through randomized shifts to create null distribution
distribution = []
for i in np.arange(n_shifts):
# Random seed to be deterministically re-randomized at each iteration
if isinstance(random_state, np.random.RandomState):
prng = random_state
else:
prng = np.random.RandomState(random_state)
# Get shifted version of data
shifted_data = phase_randomize(data, random_state=prng)
# In pairwise approach, apply all shifts then compute pairwise ISCs
if pairwise:
# Compute null ISC on shifted data for pairwise approach
shifted_isc = isc(shifted_data, pairwise=True,
summary_statistic=summary_statistic,
tolerate_nans=tolerate_nans)
# In leave-one-out, apply shift only to each left-out participant
elif not pairwise:
# Roll subject axis of phase-randomized data
shifted_data = np.rollaxis(shifted_data, 2, 0)
shifted_isc = []
for s, shifted_subject in enumerate(shifted_data):
# ISC of shifted left-out subject vs mean of N-1 subjects
nonshifted_mean = np.mean(np.delete(data, s, axis=2),
axis=2)
loo_isc = isc(np.dstack((shifted_subject, nonshifted_mean)),
pairwise=False, summary_statistic=None,
tolerate_nans=tolerate_nans)
shifted_isc.append(loo_isc)
# Get summary statistics across left-out subjects
shifted_isc = compute_summary_statistic(
np.dstack(shifted_isc),
summary_statistic=summary_statistic, axis=2)
distribution.append(shifted_isc)
# Update random state for next iteration
random_state = np.random.RandomState(prng.randint(0, MAX_RANDOM_SEED))
# Convert distribution to numpy array
distribution = np.vstack(distribution)
# Get p-value for actual median from shifted distribution
p = p_from_null(observed, distribution,
side='two-sided', exact=False,
axis=0)
return observed, p, distribution | [
"def",
"phaseshift_isc",
"(",
"data",
",",
"pairwise",
"=",
"False",
",",
"summary_statistic",
"=",
"'median'",
",",
"n_shifts",
"=",
"1000",
",",
"tolerate_nans",
"=",
"True",
",",
"random_state",
"=",
"None",
")",
":",
"# Check response time series input format"... | Phase randomization for one-sample ISC test
For each voxel or ROI, compute the actual ISC and p-values
from a null distribution of ISCs where response time series
are phase randomized prior to computing ISC. If pairwise,
apply phase randomization to each subject and compute pairwise
ISCs. If leave-one-out approach is used (pairwise=False), only
apply phase randomization to the left-out subject in each iteration
of the leave-one-out procedure. Input data should be a list where
each item is a time-points by voxels ndarray for a given subject.
Multiple input ndarrays must be the same shape. If a single ndarray is
supplied, the last dimension is assumed to correspond to subjects.
When using leave-one-out approach, NaNs are ignored when computing mean
time series of N-1 subjects (default: tolerate_nans=True). Alternatively,
you may supply a float between 0 and 1 indicating a threshold proportion
of N subjects with non-NaN values required when computing the average time
series for a given voxel. For example, if tolerate_nans=.8, ISCs will be
computed for any voxel where >= 80% of subjects have non-NaN values,
while voxels with < 80% non-NaN values will be assigned NaNs. If set to
False, NaNs are not tolerated and voxels with one or more NaNs among the
N-1 subjects will be assigned NaN. Setting tolerate_nans to True or False
will not affect the pairwise approach; however, if a threshold float is
provided, voxels that do not reach this threshold will be excluded. Note
that accommodating NaNs may be notably slower than setting tolerate_nans to
False. Returns the observed ISC and p-values (two-tailed test), as well as
the null distribution of ISCs computed on phase-randomized data.
The implementation is based on the work in [Lerner2011]_ and
[Simony2016]_.
.. [Lerner2011] "Topographic mapping of a hierarchy of temporal
receptive windows using a narrated story.", Y. Lerner, C. J. Honey,
L. J. Silbert, U. Hasson, 2011, Journal of Neuroscience, 31, 2906-2915.
https://doi.org/10.1523/jneurosci.3684-10.2011
Parameters
----------
data : list or ndarray (n_TRs x n_voxels x n_subjects)
fMRI data for which to compute ISFC
pairwise : bool, default: False
Whether to use pairwise (True) or leave-one-out (False) approach
summary_statistic : str, default: 'median'
Summary statistic, either 'median' (default) or 'mean'
n_shifts : int, default: 1000
Number of randomly shifted samples
tolerate_nans : bool or float, default: True
Accommodate NaNs (when averaging in leave-one-out approach)
random_state = int, None, or np.random.RandomState, default: None
Initial random seed
Returns
-------
observed : float, observed ISC (without time-shifting)
Actual ISCs
p : float, p-value
p-value based on time-shifting randomization test
distribution : ndarray, time-shifts by voxels (optional)
Time-shifted null distribution if return_bootstrap=True | [
"Phase",
"randomization",
"for",
"one",
"-",
"sample",
"ISC",
"test"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/isc.py#L1386-L1519 | train | 204,488 |
brainiak/brainiak | brainiak/factoranalysis/tfa.py | TFA.init_prior | def init_prior(self, R):
"""initialize prior for the subject
Returns
-------
TFA
Returns the instance itself.
"""
centers, widths = self.init_centers_widths(R)
# update prior
prior = np.zeros(self.K * (self.n_dim + 1))
self.set_centers(prior, centers)
self.set_widths(prior, widths)
self.set_prior(prior)
return self | python | def init_prior(self, R):
"""initialize prior for the subject
Returns
-------
TFA
Returns the instance itself.
"""
centers, widths = self.init_centers_widths(R)
# update prior
prior = np.zeros(self.K * (self.n_dim + 1))
self.set_centers(prior, centers)
self.set_widths(prior, widths)
self.set_prior(prior)
return self | [
"def",
"init_prior",
"(",
"self",
",",
"R",
")",
":",
"centers",
",",
"widths",
"=",
"self",
".",
"init_centers_widths",
"(",
"R",
")",
"# update prior",
"prior",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"K",
"*",
"(",
"self",
".",
"n_dim",
"+",
... | initialize prior for the subject
Returns
-------
TFA
Returns the instance itself. | [
"initialize",
"prior",
"for",
"the",
"subject"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/tfa.py#L225-L240 | train | 204,489 |
brainiak/brainiak | brainiak/factoranalysis/tfa.py | TFA._assign_posterior | def _assign_posterior(self):
"""assign posterior to prior based on Hungarian algorithm
Returns
-------
TFA
Returns the instance itself.
"""
prior_centers = self.get_centers(self.local_prior)
posterior_centers = self.get_centers(self.local_posterior_)
posterior_widths = self.get_widths(self.local_posterior_)
# linear assignment on centers
cost = distance.cdist(prior_centers, posterior_centers, 'euclidean')
_, col_ind = linear_sum_assignment(cost)
# reorder centers/widths based on cost assignment
self.set_centers(self.local_posterior_, posterior_centers[col_ind])
self.set_widths(self.local_posterior_, posterior_widths[col_ind])
return self | python | def _assign_posterior(self):
"""assign posterior to prior based on Hungarian algorithm
Returns
-------
TFA
Returns the instance itself.
"""
prior_centers = self.get_centers(self.local_prior)
posterior_centers = self.get_centers(self.local_posterior_)
posterior_widths = self.get_widths(self.local_posterior_)
# linear assignment on centers
cost = distance.cdist(prior_centers, posterior_centers, 'euclidean')
_, col_ind = linear_sum_assignment(cost)
# reorder centers/widths based on cost assignment
self.set_centers(self.local_posterior_, posterior_centers[col_ind])
self.set_widths(self.local_posterior_, posterior_widths[col_ind])
return self | [
"def",
"_assign_posterior",
"(",
"self",
")",
":",
"prior_centers",
"=",
"self",
".",
"get_centers",
"(",
"self",
".",
"local_prior",
")",
"posterior_centers",
"=",
"self",
".",
"get_centers",
"(",
"self",
".",
"local_posterior_",
")",
"posterior_widths",
"=",
... | assign posterior to prior based on Hungarian algorithm
Returns
-------
TFA
Returns the instance itself. | [
"assign",
"posterior",
"to",
"prior",
"based",
"on",
"Hungarian",
"algorithm"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/tfa.py#L242-L260 | train | 204,490 |
brainiak/brainiak | brainiak/factoranalysis/tfa.py | TFA._mse_converged | def _mse_converged(self):
"""Check convergence based on mean squared error
Returns
-------
converged : boolean
Whether the parameter estimation converged.
mse : float
Mean squared error between prior and posterior.
"""
mse = mean_squared_error(self.local_prior, self.local_posterior_,
multioutput='uniform_average')
if mse > self.threshold:
return False, mse
else:
return True, mse | python | def _mse_converged(self):
"""Check convergence based on mean squared error
Returns
-------
converged : boolean
Whether the parameter estimation converged.
mse : float
Mean squared error between prior and posterior.
"""
mse = mean_squared_error(self.local_prior, self.local_posterior_,
multioutput='uniform_average')
if mse > self.threshold:
return False, mse
else:
return True, mse | [
"def",
"_mse_converged",
"(",
"self",
")",
":",
"mse",
"=",
"mean_squared_error",
"(",
"self",
".",
"local_prior",
",",
"self",
".",
"local_posterior_",
",",
"multioutput",
"=",
"'uniform_average'",
")",
"if",
"mse",
">",
"self",
".",
"threshold",
":",
"retu... | Check convergence based on mean squared error
Returns
-------
converged : boolean
Whether the parameter estimation converged.
mse : float
Mean squared error between prior and posterior. | [
"Check",
"convergence",
"based",
"on",
"mean",
"squared",
"error"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/tfa.py#L288-L307 | train | 204,491 |
brainiak/brainiak | brainiak/factoranalysis/tfa.py | TFA.init_centers_widths | def init_centers_widths(self, R):
"""Initialize prior of centers and widths
Returns
-------
centers : 2D array, with shape [K, n_dim]
Prior of factors' centers.
widths : 1D array, with shape [K, 1]
Prior of factors' widths.
"""
kmeans = KMeans(
init='k-means++',
n_clusters=self.K,
n_init=10,
random_state=100)
kmeans.fit(R)
centers = kmeans.cluster_centers_
widths = self._get_max_sigma(R) * np.ones((self.K, 1))
return centers, widths | python | def init_centers_widths(self, R):
"""Initialize prior of centers and widths
Returns
-------
centers : 2D array, with shape [K, n_dim]
Prior of factors' centers.
widths : 1D array, with shape [K, 1]
Prior of factors' widths.
"""
kmeans = KMeans(
init='k-means++',
n_clusters=self.K,
n_init=10,
random_state=100)
kmeans.fit(R)
centers = kmeans.cluster_centers_
widths = self._get_max_sigma(R) * np.ones((self.K, 1))
return centers, widths | [
"def",
"init_centers_widths",
"(",
"self",
",",
"R",
")",
":",
"kmeans",
"=",
"KMeans",
"(",
"init",
"=",
"'k-means++'",
",",
"n_clusters",
"=",
"self",
".",
"K",
",",
"n_init",
"=",
"10",
",",
"random_state",
"=",
"100",
")",
"kmeans",
".",
"fit",
"... | Initialize prior of centers and widths
Returns
-------
centers : 2D array, with shape [K, n_dim]
Prior of factors' centers.
widths : 1D array, with shape [K, 1]
Prior of factors' widths. | [
"Initialize",
"prior",
"of",
"centers",
"and",
"widths"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/tfa.py#L328-L350 | train | 204,492 |
brainiak/brainiak | brainiak/factoranalysis/tfa.py | TFA.get_template | def get_template(self, R):
"""Compute a template on latent factors
Parameters
----------
R : 2D array, in format [n_voxel, n_dim]
The scanner coordinate matrix of one subject's fMRI data
Returns
-------
template_prior : 1D array
The template prior.
template_centers_cov: 2D array, in shape [n_dim, n_dim]
The template on centers' covariance.
template_widths_var: float
The template on widths' variance
"""
centers, widths = self.init_centers_widths(R)
template_prior =\
np.zeros(self.K * (self.n_dim + 2 + self.cov_vec_size))
# template centers cov and widths var are const
template_centers_cov = np.cov(R.T) * math.pow(self.K, -2 / 3.0)
template_widths_var = self._get_max_sigma(R)
centers_cov_all = np.tile(from_sym_2_tri(template_centers_cov), self.K)
widths_var_all = np.tile(template_widths_var, self.K)
# initial mean of centers' mean
self.set_centers(template_prior, centers)
self.set_widths(template_prior, widths)
self.set_centers_mean_cov(template_prior, centers_cov_all)
self.set_widths_mean_var(template_prior, widths_var_all)
return template_prior, template_centers_cov, template_widths_var | python | def get_template(self, R):
"""Compute a template on latent factors
Parameters
----------
R : 2D array, in format [n_voxel, n_dim]
The scanner coordinate matrix of one subject's fMRI data
Returns
-------
template_prior : 1D array
The template prior.
template_centers_cov: 2D array, in shape [n_dim, n_dim]
The template on centers' covariance.
template_widths_var: float
The template on widths' variance
"""
centers, widths = self.init_centers_widths(R)
template_prior =\
np.zeros(self.K * (self.n_dim + 2 + self.cov_vec_size))
# template centers cov and widths var are const
template_centers_cov = np.cov(R.T) * math.pow(self.K, -2 / 3.0)
template_widths_var = self._get_max_sigma(R)
centers_cov_all = np.tile(from_sym_2_tri(template_centers_cov), self.K)
widths_var_all = np.tile(template_widths_var, self.K)
# initial mean of centers' mean
self.set_centers(template_prior, centers)
self.set_widths(template_prior, widths)
self.set_centers_mean_cov(template_prior, centers_cov_all)
self.set_widths_mean_var(template_prior, widths_var_all)
return template_prior, template_centers_cov, template_widths_var | [
"def",
"get_template",
"(",
"self",
",",
"R",
")",
":",
"centers",
",",
"widths",
"=",
"self",
".",
"init_centers_widths",
"(",
"R",
")",
"template_prior",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"K",
"*",
"(",
"self",
".",
"n_dim",
"+",
"2",
"+... | Compute a template on latent factors
Parameters
----------
R : 2D array, in format [n_voxel, n_dim]
The scanner coordinate matrix of one subject's fMRI data
Returns
-------
template_prior : 1D array
The template prior.
template_centers_cov: 2D array, in shape [n_dim, n_dim]
The template on centers' covariance.
template_widths_var: float
The template on widths' variance | [
"Compute",
"a",
"template",
"on",
"latent",
"factors"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/tfa.py#L352-L385 | train | 204,493 |
brainiak/brainiak | brainiak/factoranalysis/tfa.py | TFA.set_widths | def set_widths(self, estimation, widths):
"""Set estimation on widths
Parameters
----------
estimation : 1D arrary
Either prior of posterior estimation
widths : 2D array, in shape [K, 1]
Estimation on widths
"""
estimation[self.map_offset[1]:self.map_offset[2]] = widths.ravel() | python | def set_widths(self, estimation, widths):
"""Set estimation on widths
Parameters
----------
estimation : 1D arrary
Either prior of posterior estimation
widths : 2D array, in shape [K, 1]
Estimation on widths
"""
estimation[self.map_offset[1]:self.map_offset[2]] = widths.ravel() | [
"def",
"set_widths",
"(",
"self",
",",
"estimation",
",",
"widths",
")",
":",
"estimation",
"[",
"self",
".",
"map_offset",
"[",
"1",
"]",
":",
"self",
".",
"map_offset",
"[",
"2",
"]",
"]",
"=",
"widths",
".",
"ravel",
"(",
")"
] | Set estimation on widths
Parameters
----------
estimation : 1D arrary
Either prior of posterior estimation
widths : 2D array, in shape [K, 1]
Estimation on widths | [
"Set",
"estimation",
"on",
"widths"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/tfa.py#L401-L413 | train | 204,494 |
brainiak/brainiak | brainiak/factoranalysis/tfa.py | TFA.set_centers_mean_cov | def set_centers_mean_cov(self, estimation, centers_mean_cov):
"""Set estimation on centers
Parameters
----------
estimation : 1D arrary
Either prior of posterior estimation
centers : 2D array, in shape [K, n_dim]
Estimation on centers
"""
estimation[self.map_offset[2]:self.map_offset[3]] =\
centers_mean_cov.ravel() | python | def set_centers_mean_cov(self, estimation, centers_mean_cov):
"""Set estimation on centers
Parameters
----------
estimation : 1D arrary
Either prior of posterior estimation
centers : 2D array, in shape [K, n_dim]
Estimation on centers
"""
estimation[self.map_offset[2]:self.map_offset[3]] =\
centers_mean_cov.ravel() | [
"def",
"set_centers_mean_cov",
"(",
"self",
",",
"estimation",
",",
"centers_mean_cov",
")",
":",
"estimation",
"[",
"self",
".",
"map_offset",
"[",
"2",
"]",
":",
"self",
".",
"map_offset",
"[",
"3",
"]",
"]",
"=",
"centers_mean_cov",
".",
"ravel",
"(",
... | Set estimation on centers
Parameters
----------
estimation : 1D arrary
Either prior of posterior estimation
centers : 2D array, in shape [K, n_dim]
Estimation on centers | [
"Set",
"estimation",
"on",
"centers"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/tfa.py#L415-L428 | train | 204,495 |
brainiak/brainiak | brainiak/factoranalysis/tfa.py | TFA.get_centers | def get_centers(self, estimation):
"""Get estimation on centers
Parameters
----------
estimation : 1D arrary
Either prior of posterior estimation
Returns
-------
centers : 2D array, in shape [K, n_dim]
Estimation on centers
"""
centers = estimation[0:self.map_offset[1]]\
.reshape(self.K, self.n_dim)
return centers | python | def get_centers(self, estimation):
"""Get estimation on centers
Parameters
----------
estimation : 1D arrary
Either prior of posterior estimation
Returns
-------
centers : 2D array, in shape [K, n_dim]
Estimation on centers
"""
centers = estimation[0:self.map_offset[1]]\
.reshape(self.K, self.n_dim)
return centers | [
"def",
"get_centers",
"(",
"self",
",",
"estimation",
")",
":",
"centers",
"=",
"estimation",
"[",
"0",
":",
"self",
".",
"map_offset",
"[",
"1",
"]",
"]",
".",
"reshape",
"(",
"self",
".",
"K",
",",
"self",
".",
"n_dim",
")",
"return",
"centers"
] | Get estimation on centers
Parameters
----------
estimation : 1D arrary
Either prior of posterior estimation
Returns
-------
centers : 2D array, in shape [K, n_dim]
Estimation on centers | [
"Get",
"estimation",
"on",
"centers"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/tfa.py#L444-L461 | train | 204,496 |
brainiak/brainiak | brainiak/factoranalysis/tfa.py | TFA.get_widths | def get_widths(self, estimation):
"""Get estimation on widths
Parameters
----------
estimation : 1D arrary
Either prior of posterior estimation
Returns
-------
fields : 2D array, in shape [K, 1]
Estimation of widths
"""
widths = estimation[self.map_offset[1]:self.map_offset[2]]\
.reshape(self.K, 1)
return widths | python | def get_widths(self, estimation):
"""Get estimation on widths
Parameters
----------
estimation : 1D arrary
Either prior of posterior estimation
Returns
-------
fields : 2D array, in shape [K, 1]
Estimation of widths
"""
widths = estimation[self.map_offset[1]:self.map_offset[2]]\
.reshape(self.K, 1)
return widths | [
"def",
"get_widths",
"(",
"self",
",",
"estimation",
")",
":",
"widths",
"=",
"estimation",
"[",
"self",
".",
"map_offset",
"[",
"1",
"]",
":",
"self",
".",
"map_offset",
"[",
"2",
"]",
"]",
".",
"reshape",
"(",
"self",
".",
"K",
",",
"1",
")",
"... | Get estimation on widths
Parameters
----------
estimation : 1D arrary
Either prior of posterior estimation
Returns
-------
fields : 2D array, in shape [K, 1]
Estimation of widths | [
"Get",
"estimation",
"on",
"widths"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/tfa.py#L463-L481 | train | 204,497 |
brainiak/brainiak | brainiak/factoranalysis/tfa.py | TFA.get_centers_mean_cov | def get_centers_mean_cov(self, estimation):
"""Get estimation on the covariance of centers' mean
Parameters
----------
estimation : 1D arrary
Either prior of posterior estimation
Returns
-------
centers_mean_cov : 2D array, in shape [K, cov_vec_size]
Estimation of the covariance of centers' mean
"""
centers_mean_cov = estimation[self.map_offset[2]:self.map_offset[3]]\
.reshape(self.K, self.cov_vec_size)
return centers_mean_cov | python | def get_centers_mean_cov(self, estimation):
"""Get estimation on the covariance of centers' mean
Parameters
----------
estimation : 1D arrary
Either prior of posterior estimation
Returns
-------
centers_mean_cov : 2D array, in shape [K, cov_vec_size]
Estimation of the covariance of centers' mean
"""
centers_mean_cov = estimation[self.map_offset[2]:self.map_offset[3]]\
.reshape(self.K, self.cov_vec_size)
return centers_mean_cov | [
"def",
"get_centers_mean_cov",
"(",
"self",
",",
"estimation",
")",
":",
"centers_mean_cov",
"=",
"estimation",
"[",
"self",
".",
"map_offset",
"[",
"2",
"]",
":",
"self",
".",
"map_offset",
"[",
"3",
"]",
"]",
".",
"reshape",
"(",
"self",
".",
"K",
",... | Get estimation on the covariance of centers' mean
Parameters
----------
estimation : 1D arrary
Either prior of posterior estimation
Returns
-------
centers_mean_cov : 2D array, in shape [K, cov_vec_size]
Estimation of the covariance of centers' mean | [
"Get",
"estimation",
"on",
"the",
"covariance",
"of",
"centers",
"mean"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/tfa.py#L483-L502 | train | 204,498 |
brainiak/brainiak | brainiak/factoranalysis/tfa.py | TFA.get_widths_mean_var | def get_widths_mean_var(self, estimation):
"""Get estimation on the variance of widths' mean
Parameters
----------
estimation : 1D arrary
Either prior of posterior estimation
Returns
-------
widths_mean_var : 2D array, in shape [K, 1]
Estimation on variance of widths' mean
"""
widths_mean_var = \
estimation[self.map_offset[3]:].reshape(self.K, 1)
return widths_mean_var | python | def get_widths_mean_var(self, estimation):
"""Get estimation on the variance of widths' mean
Parameters
----------
estimation : 1D arrary
Either prior of posterior estimation
Returns
-------
widths_mean_var : 2D array, in shape [K, 1]
Estimation on variance of widths' mean
"""
widths_mean_var = \
estimation[self.map_offset[3]:].reshape(self.K, 1)
return widths_mean_var | [
"def",
"get_widths_mean_var",
"(",
"self",
",",
"estimation",
")",
":",
"widths_mean_var",
"=",
"estimation",
"[",
"self",
".",
"map_offset",
"[",
"3",
"]",
":",
"]",
".",
"reshape",
"(",
"self",
".",
"K",
",",
"1",
")",
"return",
"widths_mean_var"
] | Get estimation on the variance of widths' mean
Parameters
----------
estimation : 1D arrary
Either prior of posterior estimation
Returns
-------
widths_mean_var : 2D array, in shape [K, 1]
Estimation on variance of widths' mean | [
"Get",
"estimation",
"on",
"the",
"variance",
"of",
"widths",
"mean"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/tfa.py#L504-L523 | train | 204,499 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.