repo stringlengths 7 55 | path stringlengths 4 223 | func_name stringlengths 1 134 | original_string stringlengths 75 104k | language stringclasses 1 value | code stringlengths 75 104k | code_tokens listlengths 19 28.4k | docstring stringlengths 1 46.9k | docstring_tokens listlengths 1 1.97k | sha stringlengths 40 40 | url stringlengths 87 315 | partition stringclasses 1 value |
|---|---|---|---|---|---|---|---|---|---|---|---|
mieubrisse/wunderpy2 | wunderpy2/wunderclient.py | WunderClient.update_task_positions_obj | def update_task_positions_obj(self, positions_obj_id, revision, values):
'''
Updates the ordering of tasks in the positions object with the given ID to the ordering in the given values.
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
The updated TaskPositionsObj-mapped object defining the order of list layout
'''
return positions_endpoints.update_task_positions_obj(self, positions_obj_id, revision, values) | python | def update_task_positions_obj(self, positions_obj_id, revision, values):
'''
Updates the ordering of tasks in the positions object with the given ID to the ordering in the given values.
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
The updated TaskPositionsObj-mapped object defining the order of list layout
'''
return positions_endpoints.update_task_positions_obj(self, positions_obj_id, revision, values) | [
"def",
"update_task_positions_obj",
"(",
"self",
",",
"positions_obj_id",
",",
"revision",
",",
"values",
")",
":",
"return",
"positions_endpoints",
".",
"update_task_positions_obj",
"(",
"self",
",",
"positions_obj_id",
",",
"revision",
",",
"values",
")"
] | Updates the ordering of tasks in the positions object with the given ID to the ordering in the given values.
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
The updated TaskPositionsObj-mapped object defining the order of list layout | [
"Updates",
"the",
"ordering",
"of",
"tasks",
"in",
"the",
"positions",
"object",
"with",
"the",
"given",
"ID",
"to",
"the",
"ordering",
"in",
"the",
"given",
"values",
"."
] | 7106b6c13ca45ef4d56f805753c93258d5b822c2 | https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/wunderclient.py#L221-L230 | train |
mieubrisse/wunderpy2 | wunderpy2/wunderclient.py | WunderClient.update_subtask_positions_obj | def update_subtask_positions_obj(self, positions_obj_id, revision, values):
'''
Updates the ordering of subtasks in the positions object with the given ID to the ordering in the given values.
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
The updated SubtaskPositionsObj-mapped object defining the order of list layout
'''
return positions_endpoints.update_subtask_positions_obj(self, positions_obj_id, revision, values) | python | def update_subtask_positions_obj(self, positions_obj_id, revision, values):
'''
Updates the ordering of subtasks in the positions object with the given ID to the ordering in the given values.
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
The updated SubtaskPositionsObj-mapped object defining the order of list layout
'''
return positions_endpoints.update_subtask_positions_obj(self, positions_obj_id, revision, values) | [
"def",
"update_subtask_positions_obj",
"(",
"self",
",",
"positions_obj_id",
",",
"revision",
",",
"values",
")",
":",
"return",
"positions_endpoints",
".",
"update_subtask_positions_obj",
"(",
"self",
",",
"positions_obj_id",
",",
"revision",
",",
"values",
")"
] | Updates the ordering of subtasks in the positions object with the given ID to the ordering in the given values.
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
The updated SubtaskPositionsObj-mapped object defining the order of list layout | [
"Updates",
"the",
"ordering",
"of",
"subtasks",
"in",
"the",
"positions",
"object",
"with",
"the",
"given",
"ID",
"to",
"the",
"ordering",
"in",
"the",
"given",
"values",
"."
] | 7106b6c13ca45ef4d56f805753c93258d5b822c2 | https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/wunderclient.py#L263-L272 | train |
mieubrisse/wunderpy2 | wunderpy2/tasks_endpoint.py | _check_date_format | def _check_date_format(date, api):
''' Checks that the given date string conforms to the given API's date format specification '''
try:
datetime.datetime.strptime(date, api.DATE_FORMAT)
except ValueError:
raise ValueError("Date '{}' does not conform to API format: {}".format(date, api.DATE_FORMAT)) | python | def _check_date_format(date, api):
''' Checks that the given date string conforms to the given API's date format specification '''
try:
datetime.datetime.strptime(date, api.DATE_FORMAT)
except ValueError:
raise ValueError("Date '{}' does not conform to API format: {}".format(date, api.DATE_FORMAT)) | [
"def",
"_check_date_format",
"(",
"date",
",",
"api",
")",
":",
"try",
":",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"date",
",",
"api",
".",
"DATE_FORMAT",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Date '{}' does not conform t... | Checks that the given date string conforms to the given API's date format specification | [
"Checks",
"that",
"the",
"given",
"date",
"string",
"conforms",
"to",
"the",
"given",
"API",
"s",
"date",
"format",
"specification"
] | 7106b6c13ca45ef4d56f805753c93258d5b822c2 | https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/tasks_endpoint.py#L11-L16 | train |
mieubrisse/wunderpy2 | wunderpy2/tasks_endpoint.py | get_tasks | def get_tasks(client, list_id, completed=False):
''' Gets un/completed tasks for the given list ID '''
params = {
'list_id' : str(list_id),
'completed' : completed
}
response = client.authenticated_request(client.api.Endpoints.TASKS, params=params)
return response.json() | python | def get_tasks(client, list_id, completed=False):
''' Gets un/completed tasks for the given list ID '''
params = {
'list_id' : str(list_id),
'completed' : completed
}
response = client.authenticated_request(client.api.Endpoints.TASKS, params=params)
return response.json() | [
"def",
"get_tasks",
"(",
"client",
",",
"list_id",
",",
"completed",
"=",
"False",
")",
":",
"params",
"=",
"{",
"'list_id'",
":",
"str",
"(",
"list_id",
")",
",",
"'completed'",
":",
"completed",
"}",
"response",
"=",
"client",
".",
"authenticated_request... | Gets un/completed tasks for the given list ID | [
"Gets",
"un",
"/",
"completed",
"tasks",
"for",
"the",
"given",
"list",
"ID"
] | 7106b6c13ca45ef4d56f805753c93258d5b822c2 | https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/tasks_endpoint.py#L18-L25 | train |
mieubrisse/wunderpy2 | wunderpy2/tasks_endpoint.py | get_task | def get_task(client, task_id):
''' Gets task information for the given ID '''
endpoint = '/'.join([client.api.Endpoints.TASKS, str(task_id)])
response = client.authenticated_request(endpoint)
return response.json() | python | def get_task(client, task_id):
''' Gets task information for the given ID '''
endpoint = '/'.join([client.api.Endpoints.TASKS, str(task_id)])
response = client.authenticated_request(endpoint)
return response.json() | [
"def",
"get_task",
"(",
"client",
",",
"task_id",
")",
":",
"endpoint",
"=",
"'/'",
".",
"join",
"(",
"[",
"client",
".",
"api",
".",
"Endpoints",
".",
"TASKS",
",",
"str",
"(",
"task_id",
")",
"]",
")",
"response",
"=",
"client",
".",
"authenticated... | Gets task information for the given ID | [
"Gets",
"task",
"information",
"for",
"the",
"given",
"ID"
] | 7106b6c13ca45ef4d56f805753c93258d5b822c2 | https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/tasks_endpoint.py#L27-L31 | train |
mieubrisse/wunderpy2 | wunderpy2/tasks_endpoint.py | create_task | def create_task(client, list_id, title, assignee_id=None, completed=None, recurrence_type=None, recurrence_count=None, due_date=None, starred=None):
'''
Creates a task in the given list
See https://developer.wunderlist.com/documentation/endpoints/task for detailed parameter information
'''
_check_title_length(title, client.api)
if (recurrence_type is None and recurrence_count is not None) or (recurrence_type is not None and recurrence_count is None):
raise ValueError("recurrence_type and recurrence_count are required are required together")
if due_date is not None:
_check_date_format(due_date, client.api)
data = {
'list_id' : int(list_id) if list_id else None,
'title' : title,
'assignee_id' : int(assignee_id) if assignee_id else None,
'completed' : completed,
'recurrence_type' : recurrence_type,
'recurrence_count' : int(recurrence_count) if recurrence_count else None,
'due_date' : due_date,
'starred' : starred,
}
data = { key: value for key, value in data.items() if value is not None }
response = client.authenticated_request(client.api.Endpoints.TASKS, 'POST', data=data)
return response.json() | python | def create_task(client, list_id, title, assignee_id=None, completed=None, recurrence_type=None, recurrence_count=None, due_date=None, starred=None):
'''
Creates a task in the given list
See https://developer.wunderlist.com/documentation/endpoints/task for detailed parameter information
'''
_check_title_length(title, client.api)
if (recurrence_type is None and recurrence_count is not None) or (recurrence_type is not None and recurrence_count is None):
raise ValueError("recurrence_type and recurrence_count are required are required together")
if due_date is not None:
_check_date_format(due_date, client.api)
data = {
'list_id' : int(list_id) if list_id else None,
'title' : title,
'assignee_id' : int(assignee_id) if assignee_id else None,
'completed' : completed,
'recurrence_type' : recurrence_type,
'recurrence_count' : int(recurrence_count) if recurrence_count else None,
'due_date' : due_date,
'starred' : starred,
}
data = { key: value for key, value in data.items() if value is not None }
response = client.authenticated_request(client.api.Endpoints.TASKS, 'POST', data=data)
return response.json() | [
"def",
"create_task",
"(",
"client",
",",
"list_id",
",",
"title",
",",
"assignee_id",
"=",
"None",
",",
"completed",
"=",
"None",
",",
"recurrence_type",
"=",
"None",
",",
"recurrence_count",
"=",
"None",
",",
"due_date",
"=",
"None",
",",
"starred",
"=",... | Creates a task in the given list
See https://developer.wunderlist.com/documentation/endpoints/task for detailed parameter information | [
"Creates",
"a",
"task",
"in",
"the",
"given",
"list"
] | 7106b6c13ca45ef4d56f805753c93258d5b822c2 | https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/tasks_endpoint.py#L33-L56 | train |
mieubrisse/wunderpy2 | wunderpy2/tasks_endpoint.py | update_task | def update_task(client, task_id, revision, title=None, assignee_id=None, completed=None, recurrence_type=None, recurrence_count=None, due_date=None, starred=None, remove=None):
'''
Updates the task with the given ID
See https://developer.wunderlist.com/documentation/endpoints/task for detailed parameter information
'''
if title is not None:
_check_title_length(title, client.api)
if (recurrence_type is None and recurrence_count is not None) or (recurrence_type is not None and recurrence_count is None):
raise ValueError("recurrence_type and recurrence_count are required are required together")
if due_date is not None:
_check_date_format(due_date, client.api)
data = {
'revision' : int(revision),
'title' : title,
'assignee_id' : int(assignee_id) if assignee_id else None,
'completed' : completed,
'recurrence_type' : recurrence_type,
'recurrence_count' : int(recurrence_count) if recurrence_count else None,
'due_date' : due_date,
'starred' : starred,
'remove' : remove,
}
data = { key: value for key, value in data.items() if value is not None }
endpoint = '/'.join([client.api.Endpoints.TASKS, str(task_id)])
response = client.authenticated_request(endpoint, 'PATCH', data=data)
return response.json() | python | def update_task(client, task_id, revision, title=None, assignee_id=None, completed=None, recurrence_type=None, recurrence_count=None, due_date=None, starred=None, remove=None):
'''
Updates the task with the given ID
See https://developer.wunderlist.com/documentation/endpoints/task for detailed parameter information
'''
if title is not None:
_check_title_length(title, client.api)
if (recurrence_type is None and recurrence_count is not None) or (recurrence_type is not None and recurrence_count is None):
raise ValueError("recurrence_type and recurrence_count are required are required together")
if due_date is not None:
_check_date_format(due_date, client.api)
data = {
'revision' : int(revision),
'title' : title,
'assignee_id' : int(assignee_id) if assignee_id else None,
'completed' : completed,
'recurrence_type' : recurrence_type,
'recurrence_count' : int(recurrence_count) if recurrence_count else None,
'due_date' : due_date,
'starred' : starred,
'remove' : remove,
}
data = { key: value for key, value in data.items() if value is not None }
endpoint = '/'.join([client.api.Endpoints.TASKS, str(task_id)])
response = client.authenticated_request(endpoint, 'PATCH', data=data)
return response.json() | [
"def",
"update_task",
"(",
"client",
",",
"task_id",
",",
"revision",
",",
"title",
"=",
"None",
",",
"assignee_id",
"=",
"None",
",",
"completed",
"=",
"None",
",",
"recurrence_type",
"=",
"None",
",",
"recurrence_count",
"=",
"None",
",",
"due_date",
"="... | Updates the task with the given ID
See https://developer.wunderlist.com/documentation/endpoints/task for detailed parameter information | [
"Updates",
"the",
"task",
"with",
"the",
"given",
"ID"
] | 7106b6c13ca45ef4d56f805753c93258d5b822c2 | https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/tasks_endpoint.py#L58-L84 | train |
mieubrisse/wunderpy2 | wunderpy2/lists_endpoint.py | _check_title_length | def _check_title_length(title, api):
''' Checks the given title against the given API specifications to ensure it's short enough '''
if len(title) > api.MAX_LIST_TITLE_LENGTH:
raise ValueError("Title cannot be longer than {} characters".format(api.MAX_TASK_TITLE_LENGTH)) | python | def _check_title_length(title, api):
''' Checks the given title against the given API specifications to ensure it's short enough '''
if len(title) > api.MAX_LIST_TITLE_LENGTH:
raise ValueError("Title cannot be longer than {} characters".format(api.MAX_TASK_TITLE_LENGTH)) | [
"def",
"_check_title_length",
"(",
"title",
",",
"api",
")",
":",
"if",
"len",
"(",
"title",
")",
">",
"api",
".",
"MAX_LIST_TITLE_LENGTH",
":",
"raise",
"ValueError",
"(",
"\"Title cannot be longer than {} characters\"",
".",
"format",
"(",
"api",
".",
"MAX_TAS... | Checks the given title against the given API specifications to ensure it's short enough | [
"Checks",
"the",
"given",
"title",
"against",
"the",
"given",
"API",
"specifications",
"to",
"ensure",
"it",
"s",
"short",
"enough"
] | 7106b6c13ca45ef4d56f805753c93258d5b822c2 | https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/lists_endpoint.py#L4-L7 | train |
mieubrisse/wunderpy2 | wunderpy2/lists_endpoint.py | get_lists | def get_lists(client):
''' Gets all the client's lists '''
response = client.authenticated_request(client.api.Endpoints.LISTS)
return response.json() | python | def get_lists(client):
''' Gets all the client's lists '''
response = client.authenticated_request(client.api.Endpoints.LISTS)
return response.json() | [
"def",
"get_lists",
"(",
"client",
")",
":",
"response",
"=",
"client",
".",
"authenticated_request",
"(",
"client",
".",
"api",
".",
"Endpoints",
".",
"LISTS",
")",
"return",
"response",
".",
"json",
"(",
")"
] | Gets all the client's lists | [
"Gets",
"all",
"the",
"client",
"s",
"lists"
] | 7106b6c13ca45ef4d56f805753c93258d5b822c2 | https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/lists_endpoint.py#L9-L12 | train |
mieubrisse/wunderpy2 | wunderpy2/lists_endpoint.py | get_list | def get_list(client, list_id):
''' Gets the given list '''
endpoint = '/'.join([client.api.Endpoints.LISTS, str(list_id)])
response = client.authenticated_request(endpoint)
return response.json() | python | def get_list(client, list_id):
''' Gets the given list '''
endpoint = '/'.join([client.api.Endpoints.LISTS, str(list_id)])
response = client.authenticated_request(endpoint)
return response.json() | [
"def",
"get_list",
"(",
"client",
",",
"list_id",
")",
":",
"endpoint",
"=",
"'/'",
".",
"join",
"(",
"[",
"client",
".",
"api",
".",
"Endpoints",
".",
"LISTS",
",",
"str",
"(",
"list_id",
")",
"]",
")",
"response",
"=",
"client",
".",
"authenticated... | Gets the given list | [
"Gets",
"the",
"given",
"list"
] | 7106b6c13ca45ef4d56f805753c93258d5b822c2 | https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/lists_endpoint.py#L14-L18 | train |
mieubrisse/wunderpy2 | wunderpy2/lists_endpoint.py | create_list | def create_list(client, title):
''' Creates a new list with the given title '''
_check_title_length(title, client.api)
data = {
'title' : title,
}
response = client.authenticated_request(client.api.Endpoints.LISTS, method='POST', data=data)
return response.json() | python | def create_list(client, title):
''' Creates a new list with the given title '''
_check_title_length(title, client.api)
data = {
'title' : title,
}
response = client.authenticated_request(client.api.Endpoints.LISTS, method='POST', data=data)
return response.json() | [
"def",
"create_list",
"(",
"client",
",",
"title",
")",
":",
"_check_title_length",
"(",
"title",
",",
"client",
".",
"api",
")",
"data",
"=",
"{",
"'title'",
":",
"title",
",",
"}",
"response",
"=",
"client",
".",
"authenticated_request",
"(",
"client",
... | Creates a new list with the given title | [
"Creates",
"a",
"new",
"list",
"with",
"the",
"given",
"title"
] | 7106b6c13ca45ef4d56f805753c93258d5b822c2 | https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/lists_endpoint.py#L20-L27 | train |
mieubrisse/wunderpy2 | wunderpy2/lists_endpoint.py | update_list | def update_list(client, list_id, revision, title=None, public=None):
'''
Updates the list with the given ID to have the given properties
See https://developer.wunderlist.com/documentation/endpoints/list for detailed parameter information
'''
if title is not None:
_check_title_length(title, client.api)
data = {
'revision' : revision,
'title' : title,
'public' : public,
}
data = { key: value for key, value in data.items() if value is not None }
endpoint = '/'.join([client.api.Endpoints.LISTS, str(list_id)])
response = client.authenticated_request(endpoint, 'PATCH', data=data)
return response.json() | python | def update_list(client, list_id, revision, title=None, public=None):
'''
Updates the list with the given ID to have the given properties
See https://developer.wunderlist.com/documentation/endpoints/list for detailed parameter information
'''
if title is not None:
_check_title_length(title, client.api)
data = {
'revision' : revision,
'title' : title,
'public' : public,
}
data = { key: value for key, value in data.items() if value is not None }
endpoint = '/'.join([client.api.Endpoints.LISTS, str(list_id)])
response = client.authenticated_request(endpoint, 'PATCH', data=data)
return response.json() | [
"def",
"update_list",
"(",
"client",
",",
"list_id",
",",
"revision",
",",
"title",
"=",
"None",
",",
"public",
"=",
"None",
")",
":",
"if",
"title",
"is",
"not",
"None",
":",
"_check_title_length",
"(",
"title",
",",
"client",
".",
"api",
")",
"data",... | Updates the list with the given ID to have the given properties
See https://developer.wunderlist.com/documentation/endpoints/list for detailed parameter information | [
"Updates",
"the",
"list",
"with",
"the",
"given",
"ID",
"to",
"have",
"the",
"given",
"properties"
] | 7106b6c13ca45ef4d56f805753c93258d5b822c2 | https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/lists_endpoint.py#L29-L45 | train |
mieubrisse/wunderpy2 | wunderpy2/positions_endpoints.py | get_list_positions_obj | def get_list_positions_obj(client, positions_obj_id):
'''
Gets the object that defines how lists are ordered (there will always be only one of these)
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
A ListPositionsObj-mapped object defining the order of list layout
'''
return endpoint_helpers.get_endpoint_obj(client, client.api.Endpoints.LIST_POSITIONS, positions_obj_id) | python | def get_list_positions_obj(client, positions_obj_id):
'''
Gets the object that defines how lists are ordered (there will always be only one of these)
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
A ListPositionsObj-mapped object defining the order of list layout
'''
return endpoint_helpers.get_endpoint_obj(client, client.api.Endpoints.LIST_POSITIONS, positions_obj_id) | [
"def",
"get_list_positions_obj",
"(",
"client",
",",
"positions_obj_id",
")",
":",
"return",
"endpoint_helpers",
".",
"get_endpoint_obj",
"(",
"client",
",",
"client",
".",
"api",
".",
"Endpoints",
".",
"LIST_POSITIONS",
",",
"positions_obj_id",
")"
] | Gets the object that defines how lists are ordered (there will always be only one of these)
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
A ListPositionsObj-mapped object defining the order of list layout | [
"Gets",
"the",
"object",
"that",
"defines",
"how",
"lists",
"are",
"ordered",
"(",
"there",
"will",
"always",
"be",
"only",
"one",
"of",
"these",
")"
] | 7106b6c13ca45ef4d56f805753c93258d5b822c2 | https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/positions_endpoints.py#L27-L36 | train |
mieubrisse/wunderpy2 | wunderpy2/positions_endpoints.py | update_list_positions_obj | def update_list_positions_obj(client, positions_obj_id, revision, values):
'''
Updates the ordering of lists to have the given value. The given ID and revision should match the singleton object defining how lists are laid out.
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
The updated ListPositionsObj-mapped object defining the order of list layout
'''
return _update_positions_obj(client, client.api.Endpoints.LIST_POSITIONS, positions_obj_id, revision, values) | python | def update_list_positions_obj(client, positions_obj_id, revision, values):
'''
Updates the ordering of lists to have the given value. The given ID and revision should match the singleton object defining how lists are laid out.
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
The updated ListPositionsObj-mapped object defining the order of list layout
'''
return _update_positions_obj(client, client.api.Endpoints.LIST_POSITIONS, positions_obj_id, revision, values) | [
"def",
"update_list_positions_obj",
"(",
"client",
",",
"positions_obj_id",
",",
"revision",
",",
"values",
")",
":",
"return",
"_update_positions_obj",
"(",
"client",
",",
"client",
".",
"api",
".",
"Endpoints",
".",
"LIST_POSITIONS",
",",
"positions_obj_id",
","... | Updates the ordering of lists to have the given value. The given ID and revision should match the singleton object defining how lists are laid out.
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
The updated ListPositionsObj-mapped object defining the order of list layout | [
"Updates",
"the",
"ordering",
"of",
"lists",
"to",
"have",
"the",
"given",
"value",
".",
"The",
"given",
"ID",
"and",
"revision",
"should",
"match",
"the",
"singleton",
"object",
"defining",
"how",
"lists",
"are",
"laid",
"out",
"."
] | 7106b6c13ca45ef4d56f805753c93258d5b822c2 | https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/positions_endpoints.py#L39-L48 | train |
mieubrisse/wunderpy2 | wunderpy2/positions_endpoints.py | get_task_positions_objs | def get_task_positions_objs(client, list_id):
'''
Gets a list containing the object that encapsulates information about the order lists are laid out in. This list will always contain exactly one object.
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
A list containing a single ListPositionsObj-mapped object
'''
params = {
'list_id' : int(list_id)
}
response = client.authenticated_request(client.api.Endpoints.TASK_POSITIONS, params=params)
return response.json() | python | def get_task_positions_objs(client, list_id):
'''
Gets a list containing the object that encapsulates information about the order lists are laid out in. This list will always contain exactly one object.
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
A list containing a single ListPositionsObj-mapped object
'''
params = {
'list_id' : int(list_id)
}
response = client.authenticated_request(client.api.Endpoints.TASK_POSITIONS, params=params)
return response.json() | [
"def",
"get_task_positions_objs",
"(",
"client",
",",
"list_id",
")",
":",
"params",
"=",
"{",
"'list_id'",
":",
"int",
"(",
"list_id",
")",
"}",
"response",
"=",
"client",
".",
"authenticated_request",
"(",
"client",
".",
"api",
".",
"Endpoints",
".",
"TA... | Gets a list containing the object that encapsulates information about the order lists are laid out in. This list will always contain exactly one object.
See https://developer.wunderlist.com/documentation/endpoints/positions for more info
Return:
A list containing a single ListPositionsObj-mapped object | [
"Gets",
"a",
"list",
"containing",
"the",
"object",
"that",
"encapsulates",
"information",
"about",
"the",
"order",
"lists",
"are",
"laid",
"out",
"in",
".",
"This",
"list",
"will",
"always",
"contain",
"exactly",
"one",
"object",
"."
] | 7106b6c13ca45ef4d56f805753c93258d5b822c2 | https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/positions_endpoints.py#L50-L63 | train |
mieubrisse/wunderpy2 | wunderpy2/positions_endpoints.py | get_task_subtask_positions_objs | def get_task_subtask_positions_objs(client, task_id):
'''
Gets a list of the positions of a single task's subtasks
Each task should (will?) only have one positions object defining how its subtasks are laid out
'''
params = {
'task_id' : int(task_id)
}
response = client.authenticated_request(client.api.Endpoints.SUBTASK_POSITIONS, params=params)
return response.json() | python | def get_task_subtask_positions_objs(client, task_id):
'''
Gets a list of the positions of a single task's subtasks
Each task should (will?) only have one positions object defining how its subtasks are laid out
'''
params = {
'task_id' : int(task_id)
}
response = client.authenticated_request(client.api.Endpoints.SUBTASK_POSITIONS, params=params)
return response.json() | [
"def",
"get_task_subtask_positions_objs",
"(",
"client",
",",
"task_id",
")",
":",
"params",
"=",
"{",
"'task_id'",
":",
"int",
"(",
"task_id",
")",
"}",
"response",
"=",
"client",
".",
"authenticated_request",
"(",
"client",
".",
"api",
".",
"Endpoints",
".... | Gets a list of the positions of a single task's subtasks
Each task should (will?) only have one positions object defining how its subtasks are laid out | [
"Gets",
"a",
"list",
"of",
"the",
"positions",
"of",
"a",
"single",
"task",
"s",
"subtasks"
] | 7106b6c13ca45ef4d56f805753c93258d5b822c2 | https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/positions_endpoints.py#L71-L81 | train |
mieubrisse/wunderpy2 | wunderpy2/positions_endpoints.py | get_list_subtask_positions_objs | def get_list_subtask_positions_objs(client, list_id):
'''
Gets all subtask positions objects for the tasks within a given list. This is a convenience method so you don't have to get all the list's tasks before getting subtasks, though I can't fathom how mass subtask reordering is useful.
Returns:
List of SubtaskPositionsObj-mapped objects representing the order of subtasks for the tasks within the given list
'''
params = {
'list_id' : int(list_id)
}
response = client.authenticated_request(client.api.Endpoints.SUBTASK_POSITIONS, params=params)
return response.json() | python | def get_list_subtask_positions_objs(client, list_id):
'''
Gets all subtask positions objects for the tasks within a given list. This is a convenience method so you don't have to get all the list's tasks before getting subtasks, though I can't fathom how mass subtask reordering is useful.
Returns:
List of SubtaskPositionsObj-mapped objects representing the order of subtasks for the tasks within the given list
'''
params = {
'list_id' : int(list_id)
}
response = client.authenticated_request(client.api.Endpoints.SUBTASK_POSITIONS, params=params)
return response.json() | [
"def",
"get_list_subtask_positions_objs",
"(",
"client",
",",
"list_id",
")",
":",
"params",
"=",
"{",
"'list_id'",
":",
"int",
"(",
"list_id",
")",
"}",
"response",
"=",
"client",
".",
"authenticated_request",
"(",
"client",
".",
"api",
".",
"Endpoints",
".... | Gets all subtask positions objects for the tasks within a given list. This is a convenience method so you don't have to get all the list's tasks before getting subtasks, though I can't fathom how mass subtask reordering is useful.
Returns:
List of SubtaskPositionsObj-mapped objects representing the order of subtasks for the tasks within the given list | [
"Gets",
"all",
"subtask",
"positions",
"objects",
"for",
"the",
"tasks",
"within",
"a",
"given",
"list",
".",
"This",
"is",
"a",
"convenience",
"method",
"so",
"you",
"don",
"t",
"have",
"to",
"get",
"all",
"the",
"list",
"s",
"tasks",
"before",
"getting... | 7106b6c13ca45ef4d56f805753c93258d5b822c2 | https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/positions_endpoints.py#L84-L95 | train |
mieubrisse/wunderpy2 | wunderpy2/subtasks_endpoint.py | _check_title_length | def _check_title_length(title, api):
''' Checks the given title against the given API specifications to ensure it's short enough '''
if len(title) > api.MAX_SUBTASK_TITLE_LENGTH:
raise ValueError("Title cannot be longer than {} characters".format(api.MAX_SUBTASK_TITLE_LENGTH)) | python | def _check_title_length(title, api):
''' Checks the given title against the given API specifications to ensure it's short enough '''
if len(title) > api.MAX_SUBTASK_TITLE_LENGTH:
raise ValueError("Title cannot be longer than {} characters".format(api.MAX_SUBTASK_TITLE_LENGTH)) | [
"def",
"_check_title_length",
"(",
"title",
",",
"api",
")",
":",
"if",
"len",
"(",
"title",
")",
">",
"api",
".",
"MAX_SUBTASK_TITLE_LENGTH",
":",
"raise",
"ValueError",
"(",
"\"Title cannot be longer than {} characters\"",
".",
"format",
"(",
"api",
".",
"MAX_... | Checks the given title against the given API specifications to ensure it's short enough | [
"Checks",
"the",
"given",
"title",
"against",
"the",
"given",
"API",
"specifications",
"to",
"ensure",
"it",
"s",
"short",
"enough"
] | 7106b6c13ca45ef4d56f805753c93258d5b822c2 | https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/subtasks_endpoint.py#L5-L8 | train |
mieubrisse/wunderpy2 | wunderpy2/subtasks_endpoint.py | get_task_subtasks | def get_task_subtasks(client, task_id, completed=False):
''' Gets subtasks for task with given ID '''
params = {
'task_id' : int(task_id),
'completed' : completed,
}
response = client.authenticated_request(client.api.Endpoints.SUBTASKS, params=params)
return response.json() | python | def get_task_subtasks(client, task_id, completed=False):
''' Gets subtasks for task with given ID '''
params = {
'task_id' : int(task_id),
'completed' : completed,
}
response = client.authenticated_request(client.api.Endpoints.SUBTASKS, params=params)
return response.json() | [
"def",
"get_task_subtasks",
"(",
"client",
",",
"task_id",
",",
"completed",
"=",
"False",
")",
":",
"params",
"=",
"{",
"'task_id'",
":",
"int",
"(",
"task_id",
")",
",",
"'completed'",
":",
"completed",
",",
"}",
"response",
"=",
"client",
".",
"authen... | Gets subtasks for task with given ID | [
"Gets",
"subtasks",
"for",
"task",
"with",
"given",
"ID"
] | 7106b6c13ca45ef4d56f805753c93258d5b822c2 | https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/subtasks_endpoint.py#L10-L17 | train |
mieubrisse/wunderpy2 | wunderpy2/subtasks_endpoint.py | get_list_subtasks | def get_list_subtasks(client, list_id, completed=False):
''' Gets subtasks for the list with given ID '''
params = {
'list_id' : int(list_id),
'completed' : completed,
}
response = client.authenticated_request(client.api.Endpoints.SUBTASKS, params=params)
return response.json() | python | def get_list_subtasks(client, list_id, completed=False):
''' Gets subtasks for the list with given ID '''
params = {
'list_id' : int(list_id),
'completed' : completed,
}
response = client.authenticated_request(client.api.Endpoints.SUBTASKS, params=params)
return response.json() | [
"def",
"get_list_subtasks",
"(",
"client",
",",
"list_id",
",",
"completed",
"=",
"False",
")",
":",
"params",
"=",
"{",
"'list_id'",
":",
"int",
"(",
"list_id",
")",
",",
"'completed'",
":",
"completed",
",",
"}",
"response",
"=",
"client",
".",
"authen... | Gets subtasks for the list with given ID | [
"Gets",
"subtasks",
"for",
"the",
"list",
"with",
"given",
"ID"
] | 7106b6c13ca45ef4d56f805753c93258d5b822c2 | https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/subtasks_endpoint.py#L19-L26 | train |
mieubrisse/wunderpy2 | wunderpy2/subtasks_endpoint.py | get_subtask | def get_subtask(client, subtask_id):
''' Gets the subtask with the given ID '''
endpoint = '/'.join([client.api.Endpoints.SUBTASKS, str(subtask_id)])
response = client.authenticated_request(endpoint)
return response.json() | python | def get_subtask(client, subtask_id):
''' Gets the subtask with the given ID '''
endpoint = '/'.join([client.api.Endpoints.SUBTASKS, str(subtask_id)])
response = client.authenticated_request(endpoint)
return response.json() | [
"def",
"get_subtask",
"(",
"client",
",",
"subtask_id",
")",
":",
"endpoint",
"=",
"'/'",
".",
"join",
"(",
"[",
"client",
".",
"api",
".",
"Endpoints",
".",
"SUBTASKS",
",",
"str",
"(",
"subtask_id",
")",
"]",
")",
"response",
"=",
"client",
".",
"a... | Gets the subtask with the given ID | [
"Gets",
"the",
"subtask",
"with",
"the",
"given",
"ID"
] | 7106b6c13ca45ef4d56f805753c93258d5b822c2 | https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/subtasks_endpoint.py#L28-L32 | train |
mieubrisse/wunderpy2 | wunderpy2/subtasks_endpoint.py | create_subtask | def create_subtask(client, task_id, title, completed=False):
''' Creates a subtask with the given title under the task with the given ID '''
_check_title_length(title, client.api)
data = {
'task_id' : int(task_id) if task_id else None,
'title' : title,
'completed' : completed,
}
data = { key: value for key, value in data.items() if value is not None }
response = client.authenticated_request(client.api.Endpoints.SUBTASKS, 'POST', data=data)
return response.json() | python | def create_subtask(client, task_id, title, completed=False):
''' Creates a subtask with the given title under the task with the given ID '''
_check_title_length(title, client.api)
data = {
'task_id' : int(task_id) if task_id else None,
'title' : title,
'completed' : completed,
}
data = { key: value for key, value in data.items() if value is not None }
response = client.authenticated_request(client.api.Endpoints.SUBTASKS, 'POST', data=data)
return response.json() | [
"def",
"create_subtask",
"(",
"client",
",",
"task_id",
",",
"title",
",",
"completed",
"=",
"False",
")",
":",
"_check_title_length",
"(",
"title",
",",
"client",
".",
"api",
")",
"data",
"=",
"{",
"'task_id'",
":",
"int",
"(",
"task_id",
")",
"if",
"... | Creates a subtask with the given title under the task with the given ID | [
"Creates",
"a",
"subtask",
"with",
"the",
"given",
"title",
"under",
"the",
"task",
"with",
"the",
"given",
"ID"
] | 7106b6c13ca45ef4d56f805753c93258d5b822c2 | https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/subtasks_endpoint.py#L34-L44 | train |
mieubrisse/wunderpy2 | wunderpy2/subtasks_endpoint.py | update_subtask | def update_subtask(client, subtask_id, revision, title=None, completed=None):
'''
Updates the subtask with the given ID
See https://developer.wunderlist.com/documentation/endpoints/subtask for detailed parameter information
'''
if title is not None:
_check_title_length(title, client.api)
data = {
'revision' : int(revision),
'title' : title,
'completed' : completed,
}
data = { key: value for key, value in data.items() if value is not None }
endpoint = '/'.join([client.api.Endpoints.SUBTASKS, str(subtask_id)])
response = client.authenticated_request(endpoint, 'PATCH', data=data)
return response.json() | python | def update_subtask(client, subtask_id, revision, title=None, completed=None):
'''
Updates the subtask with the given ID
See https://developer.wunderlist.com/documentation/endpoints/subtask for detailed parameter information
'''
if title is not None:
_check_title_length(title, client.api)
data = {
'revision' : int(revision),
'title' : title,
'completed' : completed,
}
data = { key: value for key, value in data.items() if value is not None }
endpoint = '/'.join([client.api.Endpoints.SUBTASKS, str(subtask_id)])
response = client.authenticated_request(endpoint, 'PATCH', data=data)
return response.json() | [
"def",
"update_subtask",
"(",
"client",
",",
"subtask_id",
",",
"revision",
",",
"title",
"=",
"None",
",",
"completed",
"=",
"None",
")",
":",
"if",
"title",
"is",
"not",
"None",
":",
"_check_title_length",
"(",
"title",
",",
"client",
".",
"api",
")",
... | Updates the subtask with the given ID
See https://developer.wunderlist.com/documentation/endpoints/subtask for detailed parameter information | [
"Updates",
"the",
"subtask",
"with",
"the",
"given",
"ID"
] | 7106b6c13ca45ef4d56f805753c93258d5b822c2 | https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/subtasks_endpoint.py#L46-L62 | train |
mieubrisse/wunderpy2 | wunderpy2/subtasks_endpoint.py | delete_subtask | def delete_subtask(client, subtask_id, revision):
''' Deletes the subtask with the given ID provided the given revision equals the revision the server has '''
params = {
'revision' : int(revision),
}
endpoint = '/'.join([client.api.Endpoints.SUBTASKS, str(subtask_id)])
client.authenticated_request(endpoint, 'DELETE', params=params) | python | def delete_subtask(client, subtask_id, revision):
''' Deletes the subtask with the given ID provided the given revision equals the revision the server has '''
params = {
'revision' : int(revision),
}
endpoint = '/'.join([client.api.Endpoints.SUBTASKS, str(subtask_id)])
client.authenticated_request(endpoint, 'DELETE', params=params) | [
"def",
"delete_subtask",
"(",
"client",
",",
"subtask_id",
",",
"revision",
")",
":",
"params",
"=",
"{",
"'revision'",
":",
"int",
"(",
"revision",
")",
",",
"}",
"endpoint",
"=",
"'/'",
".",
"join",
"(",
"[",
"client",
".",
"api",
".",
"Endpoints",
... | Deletes the subtask with the given ID provided the given revision equals the revision the server has | [
"Deletes",
"the",
"subtask",
"with",
"the",
"given",
"ID",
"provided",
"the",
"given",
"revision",
"equals",
"the",
"revision",
"the",
"server",
"has"
] | 7106b6c13ca45ef4d56f805753c93258d5b822c2 | https://github.com/mieubrisse/wunderpy2/blob/7106b6c13ca45ef4d56f805753c93258d5b822c2/wunderpy2/subtasks_endpoint.py#L64-L70 | train |
bprinty/animation | animation/decorators.py | wait | def wait(animation='elipses', text='', speed=0.2):
"""
Decorator for adding wait animation to long running
functions.
Args:
animation (str, tuple): String reference to animation or tuple
with custom animation.
speed (float): Number of seconds each cycle of animation.
Examples:
>>> @animation.wait('bar')
>>> def long_running_function():
>>> ... 5 seconds later ...
>>> return
"""
def decorator(func):
func.animation = animation
func.speed = speed
func.text = text
@wraps(func)
def wrapper(*args, **kwargs):
animation = func.animation
text = func.text
if not isinstance(animation, (list, tuple)) and \
not hasattr(animations, animation):
text = animation if text == '' else text
animation = 'elipses'
wait = Wait(animation=animation, text=text, speed=func.speed)
wait.start()
try:
ret = func(*args, **kwargs)
finally:
wait.stop()
sys.stdout.write('\n')
return ret
return wrapper
return decorator | python | def wait(animation='elipses', text='', speed=0.2):
"""
Decorator for adding wait animation to long running
functions.
Args:
animation (str, tuple): String reference to animation or tuple
with custom animation.
speed (float): Number of seconds each cycle of animation.
Examples:
>>> @animation.wait('bar')
>>> def long_running_function():
>>> ... 5 seconds later ...
>>> return
"""
def decorator(func):
func.animation = animation
func.speed = speed
func.text = text
@wraps(func)
def wrapper(*args, **kwargs):
animation = func.animation
text = func.text
if not isinstance(animation, (list, tuple)) and \
not hasattr(animations, animation):
text = animation if text == '' else text
animation = 'elipses'
wait = Wait(animation=animation, text=text, speed=func.speed)
wait.start()
try:
ret = func(*args, **kwargs)
finally:
wait.stop()
sys.stdout.write('\n')
return ret
return wrapper
return decorator | [
"def",
"wait",
"(",
"animation",
"=",
"'elipses'",
",",
"text",
"=",
"''",
",",
"speed",
"=",
"0.2",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"func",
".",
"animation",
"=",
"animation",
"func",
".",
"speed",
"=",
"speed",
"func",
".",
"... | Decorator for adding wait animation to long running
functions.
Args:
animation (str, tuple): String reference to animation or tuple
with custom animation.
speed (float): Number of seconds each cycle of animation.
Examples:
>>> @animation.wait('bar')
>>> def long_running_function():
>>> ... 5 seconds later ...
>>> return | [
"Decorator",
"for",
"adding",
"wait",
"animation",
"to",
"long",
"running",
"functions",
"."
] | e5c87bf593de1f04e5ce341496b8dff7ce5296ee | https://github.com/bprinty/animation/blob/e5c87bf593de1f04e5ce341496b8dff7ce5296ee/animation/decorators.py#L105-L143 | train |
bprinty/animation | animation/decorators.py | simple_wait | def simple_wait(func):
"""
Decorator for adding simple text wait animation to
long running functions.
Examples:
>>> @animation.simple_wait
>>> def long_running_function():
>>> ... 5 seconds later ...
>>> return
"""
@wraps(func)
def wrapper(*args, **kwargs):
wait = Wait()
wait.start()
try:
ret = func(*args, **kwargs)
finally:
wait.stop()
sys.stdout.write('\n')
return ret
return wrapper | python | def simple_wait(func):
"""
Decorator for adding simple text wait animation to
long running functions.
Examples:
>>> @animation.simple_wait
>>> def long_running_function():
>>> ... 5 seconds later ...
>>> return
"""
@wraps(func)
def wrapper(*args, **kwargs):
wait = Wait()
wait.start()
try:
ret = func(*args, **kwargs)
finally:
wait.stop()
sys.stdout.write('\n')
return ret
return wrapper | [
"def",
"simple_wait",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"wait",
"=",
"Wait",
"(",
")",
"wait",
".",
"start",
"(",
")",
"try",
":",
"ret",
"=",
"func",
... | Decorator for adding simple text wait animation to
long running functions.
Examples:
>>> @animation.simple_wait
>>> def long_running_function():
>>> ... 5 seconds later ...
>>> return | [
"Decorator",
"for",
"adding",
"simple",
"text",
"wait",
"animation",
"to",
"long",
"running",
"functions",
"."
] | e5c87bf593de1f04e5ce341496b8dff7ce5296ee | https://github.com/bprinty/animation/blob/e5c87bf593de1f04e5ce341496b8dff7ce5296ee/animation/decorators.py#L146-L167 | train |
bprinty/animation | animation/decorators.py | Wait.start | def start(self):
"""
Start animation thread.
"""
self.thread = threading.Thread(target=self._animate)
self.thread.start()
return | python | def start(self):
"""
Start animation thread.
"""
self.thread = threading.Thread(target=self._animate)
self.thread.start()
return | [
"def",
"start",
"(",
"self",
")",
":",
"self",
".",
"thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"_animate",
")",
"self",
".",
"thread",
".",
"start",
"(",
")",
"return"
] | Start animation thread. | [
"Start",
"animation",
"thread",
"."
] | e5c87bf593de1f04e5ce341496b8dff7ce5296ee | https://github.com/bprinty/animation/blob/e5c87bf593de1f04e5ce341496b8dff7ce5296ee/animation/decorators.py#L84-L90 | train |
bprinty/animation | animation/decorators.py | Wait.stop | def stop(self):
"""
Stop animation thread.
"""
time.sleep(self.speed)
self._count = -9999
sys.stdout.write(self.reverser + '\r\033[K\033[A')
sys.stdout.flush()
return | python | def stop(self):
"""
Stop animation thread.
"""
time.sleep(self.speed)
self._count = -9999
sys.stdout.write(self.reverser + '\r\033[K\033[A')
sys.stdout.flush()
return | [
"def",
"stop",
"(",
"self",
")",
":",
"time",
".",
"sleep",
"(",
"self",
".",
"speed",
")",
"self",
".",
"_count",
"=",
"-",
"9999",
"sys",
".",
"stdout",
".",
"write",
"(",
"self",
".",
"reverser",
"+",
"'\\r\\033[K\\033[A'",
")",
"sys",
".",
"std... | Stop animation thread. | [
"Stop",
"animation",
"thread",
"."
] | e5c87bf593de1f04e5ce341496b8dff7ce5296ee | https://github.com/bprinty/animation/blob/e5c87bf593de1f04e5ce341496b8dff7ce5296ee/animation/decorators.py#L92-L100 | train |
mattja/nsim | nsim/timeseries.py | merge | def merge(tup):
"""Merge several timeseries
Arguments:
tup: sequence of Timeseries, with the same shape except for axis 0
Returns:
Resulting merged timeseries which can have duplicate time points.
"""
if not all(tuple(ts.shape[1:] == tup[0].shape[1:] for ts in tup[1:])):
raise ValueError('Timeseries to merge must have compatible shapes')
indices = np.vstack(tuple(ts.tspan for ts in tup)).argsort()
return np.vstack((tup))[indices] | python | def merge(tup):
"""Merge several timeseries
Arguments:
tup: sequence of Timeseries, with the same shape except for axis 0
Returns:
Resulting merged timeseries which can have duplicate time points.
"""
if not all(tuple(ts.shape[1:] == tup[0].shape[1:] for ts in tup[1:])):
raise ValueError('Timeseries to merge must have compatible shapes')
indices = np.vstack(tuple(ts.tspan for ts in tup)).argsort()
return np.vstack((tup))[indices] | [
"def",
"merge",
"(",
"tup",
")",
":",
"if",
"not",
"all",
"(",
"tuple",
"(",
"ts",
".",
"shape",
"[",
"1",
":",
"]",
"==",
"tup",
"[",
"0",
"]",
".",
"shape",
"[",
"1",
":",
"]",
"for",
"ts",
"in",
"tup",
"[",
"1",
":",
"]",
")",
")",
"... | Merge several timeseries
Arguments:
tup: sequence of Timeseries, with the same shape except for axis 0
Returns:
Resulting merged timeseries which can have duplicate time points. | [
"Merge",
"several",
"timeseries",
"Arguments",
":",
"tup",
":",
"sequence",
"of",
"Timeseries",
"with",
"the",
"same",
"shape",
"except",
"for",
"axis",
"0",
"Returns",
":",
"Resulting",
"merged",
"timeseries",
"which",
"can",
"have",
"duplicate",
"time",
"poi... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/timeseries.py#L865-L875 | train |
mattja/nsim | nsim/timeseries.py | Timeseries.add_analyses | def add_analyses(cls, source):
"""Dynamically add new analysis methods to the Timeseries class.
Args:
source: Can be a function, module or the filename of a python file.
If a filename or a module is given, then all functions defined
inside not starting with _ will be added as methods.
The only restriction on the functions is that they can accept a
Timeseries as their first argument. So existing functions that
take a ndarray or array or even a list will usually also work.
"""
if isinstance(source, types.FunctionType):
_add_single_method(source.__name__, source)
else:
if isinstance(source, types.ModuleType):
mod = source
elif isinstance(source, types.StringTypes):
import os
import imp
path = os.path.abspath(source)
if os.path.isfile(path) and path[-3:] == '.py':
dir, file = os.path.split(path)
name = file[:-3]
module_info = imp.find_module(name, [dir])
mod = imp.load_module('nsim.' + name, *module_info)
elif (os.path.isdir(path) and
'__init__.py' in os.listdir(path)):
module_info = imp.find_module('__init__', [path])
name = os.path.basename(path)
mod = imp.load_module('nsim.' + name, *module_info)
else:
raise Error('"%s" is not a file or directory' % source)
else:
raise ValueError('`source` argument not a function or module')
for name, obj in mod.__dict__.items():
if name[0] != '_' and isinstance(obj, types.FunctionType):
cls._add_single_method(name, obj) | python | def add_analyses(cls, source):
"""Dynamically add new analysis methods to the Timeseries class.
Args:
source: Can be a function, module or the filename of a python file.
If a filename or a module is given, then all functions defined
inside not starting with _ will be added as methods.
The only restriction on the functions is that they can accept a
Timeseries as their first argument. So existing functions that
take a ndarray or array or even a list will usually also work.
"""
if isinstance(source, types.FunctionType):
_add_single_method(source.__name__, source)
else:
if isinstance(source, types.ModuleType):
mod = source
elif isinstance(source, types.StringTypes):
import os
import imp
path = os.path.abspath(source)
if os.path.isfile(path) and path[-3:] == '.py':
dir, file = os.path.split(path)
name = file[:-3]
module_info = imp.find_module(name, [dir])
mod = imp.load_module('nsim.' + name, *module_info)
elif (os.path.isdir(path) and
'__init__.py' in os.listdir(path)):
module_info = imp.find_module('__init__', [path])
name = os.path.basename(path)
mod = imp.load_module('nsim.' + name, *module_info)
else:
raise Error('"%s" is not a file or directory' % source)
else:
raise ValueError('`source` argument not a function or module')
for name, obj in mod.__dict__.items():
if name[0] != '_' and isinstance(obj, types.FunctionType):
cls._add_single_method(name, obj) | [
"def",
"add_analyses",
"(",
"cls",
",",
"source",
")",
":",
"if",
"isinstance",
"(",
"source",
",",
"types",
".",
"FunctionType",
")",
":",
"_add_single_method",
"(",
"source",
".",
"__name__",
",",
"source",
")",
"else",
":",
"if",
"isinstance",
"(",
"s... | Dynamically add new analysis methods to the Timeseries class.
Args:
source: Can be a function, module or the filename of a python file.
If a filename or a module is given, then all functions defined
inside not starting with _ will be added as methods.
The only restriction on the functions is that they can accept a
Timeseries as their first argument. So existing functions that
take a ndarray or array or even a list will usually also work. | [
"Dynamically",
"add",
"new",
"analysis",
"methods",
"to",
"the",
"Timeseries",
"class",
".",
"Args",
":",
"source",
":",
"Can",
"be",
"a",
"function",
"module",
"or",
"the",
"filename",
"of",
"a",
"python",
"file",
".",
"If",
"a",
"filename",
"or",
"a",
... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/timeseries.py#L119-L155 | train |
mattja/nsim | nsim/timeseries.py | Timeseries.absolute | def absolute(self):
"""Calculate the absolute value element-wise.
Returns:
absolute (Timeseries):
Absolute value. For complex input (a + b*j) gives sqrt(a**a + b**2)
"""
return Timeseries(np.absolute(self), self.tspan, self.labels) | python | def absolute(self):
"""Calculate the absolute value element-wise.
Returns:
absolute (Timeseries):
Absolute value. For complex input (a + b*j) gives sqrt(a**a + b**2)
"""
return Timeseries(np.absolute(self), self.tspan, self.labels) | [
"def",
"absolute",
"(",
"self",
")",
":",
"return",
"Timeseries",
"(",
"np",
".",
"absolute",
"(",
"self",
")",
",",
"self",
".",
"tspan",
",",
"self",
".",
"labels",
")"
] | Calculate the absolute value element-wise.
Returns:
absolute (Timeseries):
Absolute value. For complex input (a + b*j) gives sqrt(a**a + b**2) | [
"Calculate",
"the",
"absolute",
"value",
"element",
"-",
"wise",
"."
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/timeseries.py#L448-L455 | train |
mattja/nsim | nsim/timeseries.py | Timeseries.angle | def angle(self, deg=False):
"""Return the angle of the complex argument.
Args:
deg (bool, optional):
Return angle in degrees if True, radians if False (default).
Returns:
angle (Timeseries):
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
"""
if self.dtype.str[1] != 'c':
warnings.warn('angle() is intended for complex-valued timeseries',
RuntimeWarning, 1)
return Timeseries(np.angle(self, deg=deg), self.tspan, self.labels) | python | def angle(self, deg=False):
"""Return the angle of the complex argument.
Args:
deg (bool, optional):
Return angle in degrees if True, radians if False (default).
Returns:
angle (Timeseries):
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
"""
if self.dtype.str[1] != 'c':
warnings.warn('angle() is intended for complex-valued timeseries',
RuntimeWarning, 1)
return Timeseries(np.angle(self, deg=deg), self.tspan, self.labels) | [
"def",
"angle",
"(",
"self",
",",
"deg",
"=",
"False",
")",
":",
"if",
"self",
".",
"dtype",
".",
"str",
"[",
"1",
"]",
"!=",
"'c'",
":",
"warnings",
".",
"warn",
"(",
"'angle() is intended for complex-valued timeseries'",
",",
"RuntimeWarning",
",",
"1",
... | Return the angle of the complex argument.
Args:
deg (bool, optional):
Return angle in degrees if True, radians if False (default).
Returns:
angle (Timeseries):
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64. | [
"Return",
"the",
"angle",
"of",
"the",
"complex",
"argument",
"."
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/timeseries.py#L461-L476 | train |
mattja/nsim | nsim/timeseries.py | Timeseries.swapaxes | def swapaxes(self, axis1, axis2):
"""Interchange two axes of a Timeseries."""
if self.ndim <=1 or axis1 == axis2:
return self
ar = np.asarray(self).swapaxes(axis1, axis2)
if axis1 != 0 and axis2 != 0:
# then axis 0 is unaffected by the swap
labels = self.labels[:]
labels[axis1], labels[axis2] = labels[axis2], labels[axis1]
return Timeseries(ar, self.tspan, labels)
return ar | python | def swapaxes(self, axis1, axis2):
"""Interchange two axes of a Timeseries."""
if self.ndim <=1 or axis1 == axis2:
return self
ar = np.asarray(self).swapaxes(axis1, axis2)
if axis1 != 0 and axis2 != 0:
# then axis 0 is unaffected by the swap
labels = self.labels[:]
labels[axis1], labels[axis2] = labels[axis2], labels[axis1]
return Timeseries(ar, self.tspan, labels)
return ar | [
"def",
"swapaxes",
"(",
"self",
",",
"axis1",
",",
"axis2",
")",
":",
"if",
"self",
".",
"ndim",
"<=",
"1",
"or",
"axis1",
"==",
"axis2",
":",
"return",
"self",
"ar",
"=",
"np",
".",
"asarray",
"(",
"self",
")",
".",
"swapaxes",
"(",
"axis1",
","... | Interchange two axes of a Timeseries. | [
"Interchange",
"two",
"axes",
"of",
"a",
"Timeseries",
"."
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/timeseries.py#L487-L497 | train |
mattja/nsim | nsim/timeseries.py | Timeseries.transpose | def transpose(self, *axes):
"""Permute the dimensions of a Timeseries."""
if self.ndim <= 1:
return self
ar = np.asarray(self).transpose(*axes)
if axes[0] != 0:
# then axis 0 is unaffected by the transposition
newlabels = [self.labels[ax] for ax in axes]
return Timeseries(ar, self.tspan, newlabels)
else:
return ar | python | def transpose(self, *axes):
"""Permute the dimensions of a Timeseries."""
if self.ndim <= 1:
return self
ar = np.asarray(self).transpose(*axes)
if axes[0] != 0:
# then axis 0 is unaffected by the transposition
newlabels = [self.labels[ax] for ax in axes]
return Timeseries(ar, self.tspan, newlabels)
else:
return ar | [
"def",
"transpose",
"(",
"self",
",",
"*",
"axes",
")",
":",
"if",
"self",
".",
"ndim",
"<=",
"1",
":",
"return",
"self",
"ar",
"=",
"np",
".",
"asarray",
"(",
"self",
")",
".",
"transpose",
"(",
"*",
"axes",
")",
"if",
"axes",
"[",
"0",
"]",
... | Permute the dimensions of a Timeseries. | [
"Permute",
"the",
"dimensions",
"of",
"a",
"Timeseries",
"."
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/timeseries.py#L499-L509 | train |
mattja/nsim | nsim/timeseries.py | Timeseries.reshape | def reshape(self, newshape, order='C'):
"""If axis 0 is unaffected by the reshape, then returns a Timeseries,
otherwise returns an ndarray. Preserves labels of axis j only if all
axes<=j are unaffected by the reshape.
See ``numpy.ndarray.reshape()`` for more information
"""
oldshape = self.shape
ar = np.asarray(self).reshape(newshape, order=order)
if (newshape is -1 and len(oldshape) is 1 or
(isinstance(newshape, numbers.Integral) and
newshape == oldshape[0]) or
(isinstance(newshape, Sequence) and
(newshape[0] == oldshape[0] or
(newshape[0] is -1 and np.array(oldshape[1:]).prod() ==
np.array(newshape[1:]).prod())))):
# then axis 0 is unaffected by the reshape
newlabels = [None] * ar.ndim
i = 1
while i < ar.ndim and i < self.ndim and ar.shape[i] == oldshape[i]:
newlabels[i] = self.labels[i]
i += 1
return Timeseries(ar, self.tspan, newlabels)
else:
return ar | python | def reshape(self, newshape, order='C'):
"""If axis 0 is unaffected by the reshape, then returns a Timeseries,
otherwise returns an ndarray. Preserves labels of axis j only if all
axes<=j are unaffected by the reshape.
See ``numpy.ndarray.reshape()`` for more information
"""
oldshape = self.shape
ar = np.asarray(self).reshape(newshape, order=order)
if (newshape is -1 and len(oldshape) is 1 or
(isinstance(newshape, numbers.Integral) and
newshape == oldshape[0]) or
(isinstance(newshape, Sequence) and
(newshape[0] == oldshape[0] or
(newshape[0] is -1 and np.array(oldshape[1:]).prod() ==
np.array(newshape[1:]).prod())))):
# then axis 0 is unaffected by the reshape
newlabels = [None] * ar.ndim
i = 1
while i < ar.ndim and i < self.ndim and ar.shape[i] == oldshape[i]:
newlabels[i] = self.labels[i]
i += 1
return Timeseries(ar, self.tspan, newlabels)
else:
return ar | [
"def",
"reshape",
"(",
"self",
",",
"newshape",
",",
"order",
"=",
"'C'",
")",
":",
"oldshape",
"=",
"self",
".",
"shape",
"ar",
"=",
"np",
".",
"asarray",
"(",
"self",
")",
".",
"reshape",
"(",
"newshape",
",",
"order",
"=",
"order",
")",
"if",
... | If axis 0 is unaffected by the reshape, then returns a Timeseries,
otherwise returns an ndarray. Preserves labels of axis j only if all
axes<=j are unaffected by the reshape.
See ``numpy.ndarray.reshape()`` for more information | [
"If",
"axis",
"0",
"is",
"unaffected",
"by",
"the",
"reshape",
"then",
"returns",
"a",
"Timeseries",
"otherwise",
"returns",
"an",
"ndarray",
".",
"Preserves",
"labels",
"of",
"axis",
"j",
"only",
"if",
"all",
"axes<",
"=",
"j",
"are",
"unaffected",
"by",
... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/timeseries.py#L517-L540 | train |
mattja/nsim | nsim/timeseries.py | Timeseries.merge | def merge(self, ts):
"""Merge another timeseries with this one
Arguments:
ts (Timeseries): The two timeseries being merged must have the
same shape except for axis 0.
Returns:
Resulting merged timeseries which can have duplicate time points.
"""
if ts.shape[1:] != self.shape[1:]:
raise ValueError('Timeseries to merge must have compatible shapes')
indices = np.vstack((self.tspan, ts.tspan)).argsort()
return np.vstack((self, ts))[indices] | python | def merge(self, ts):
"""Merge another timeseries with this one
Arguments:
ts (Timeseries): The two timeseries being merged must have the
same shape except for axis 0.
Returns:
Resulting merged timeseries which can have duplicate time points.
"""
if ts.shape[1:] != self.shape[1:]:
raise ValueError('Timeseries to merge must have compatible shapes')
indices = np.vstack((self.tspan, ts.tspan)).argsort()
return np.vstack((self, ts))[indices] | [
"def",
"merge",
"(",
"self",
",",
"ts",
")",
":",
"if",
"ts",
".",
"shape",
"[",
"1",
":",
"]",
"!=",
"self",
".",
"shape",
"[",
"1",
":",
"]",
":",
"raise",
"ValueError",
"(",
"'Timeseries to merge must have compatible shapes'",
")",
"indices",
"=",
"... | Merge another timeseries with this one
Arguments:
ts (Timeseries): The two timeseries being merged must have the
same shape except for axis 0.
Returns:
Resulting merged timeseries which can have duplicate time points. | [
"Merge",
"another",
"timeseries",
"with",
"this",
"one",
"Arguments",
":",
"ts",
"(",
"Timeseries",
")",
":",
"The",
"two",
"timeseries",
"being",
"merged",
"must",
"have",
"the",
"same",
"shape",
"except",
"for",
"axis",
"0",
".",
"Returns",
":",
"Resulti... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/timeseries.py#L651-L662 | train |
mattja/nsim | nsim/timeseries.py | Timeseries.expand_dims | def expand_dims(self, axis):
"""Insert a new axis, at a given position in the array shape
Args:
axis (int): Position (amongst axes) where new axis is to be inserted.
"""
if axis == -1:
axis = self.ndim
array = np.expand_dims(self, axis)
if axis == 0:
# prepended an axis: no longer a Timeseries
return array
else:
new_labels = self.labels.insert(axis, None)
return Timeseries(array, self.tspan, new_labels) | python | def expand_dims(self, axis):
"""Insert a new axis, at a given position in the array shape
Args:
axis (int): Position (amongst axes) where new axis is to be inserted.
"""
if axis == -1:
axis = self.ndim
array = np.expand_dims(self, axis)
if axis == 0:
# prepended an axis: no longer a Timeseries
return array
else:
new_labels = self.labels.insert(axis, None)
return Timeseries(array, self.tspan, new_labels) | [
"def",
"expand_dims",
"(",
"self",
",",
"axis",
")",
":",
"if",
"axis",
"==",
"-",
"1",
":",
"axis",
"=",
"self",
".",
"ndim",
"array",
"=",
"np",
".",
"expand_dims",
"(",
"self",
",",
"axis",
")",
"if",
"axis",
"==",
"0",
":",
"# prepended an axis... | Insert a new axis, at a given position in the array shape
Args:
axis (int): Position (amongst axes) where new axis is to be inserted. | [
"Insert",
"a",
"new",
"axis",
"at",
"a",
"given",
"position",
"in",
"the",
"array",
"shape",
"Args",
":",
"axis",
"(",
"int",
")",
":",
"Position",
"(",
"amongst",
"axes",
")",
"where",
"new",
"axis",
"is",
"to",
"be",
"inserted",
"."
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/timeseries.py#L664-L677 | train |
mattja/nsim | nsim/timeseries.py | Timeseries.concatenate | def concatenate(self, tup, axis=0):
"""Join a sequence of Timeseries to this one
Args:
tup (sequence of Timeseries): timeseries to be joined with this one.
They must have the same shape as this Timeseries, except in the
dimension corresponding to `axis`.
axis (int, optional): The axis along which timeseries will be joined.
Returns:
res (Timeseries or ndarray)
"""
if not isinstance(tup, Sequence):
tup = (tup,)
if tup is (None,) or len(tup) is 0:
return self
tup = (self,) + tuple(tup)
new_array = np.concatenate(tup, axis)
if not all(hasattr(ts, 'tspan') and
hasattr(ts, 'labels') for ts in tup):
return new_array
if axis == 0:
starts = [ts.tspan[0] for ts in tup]
ends = [ts.tspan[-1] for ts in tup]
if not all(starts[i] > ends[i-1] for i in range(1, len(starts))):
# series being joined are not ordered in time. not Timeseries
return new_array
else:
new_tspan = np.concatenate([ts.tspan for ts in tup])
else:
new_tspan = self.tspan
new_labels = [None]
for ax in range(1, new_array.ndim):
if ax == axis:
axislabels = []
for ts in tup:
if ts.labels[axis] is None:
axislabels.extend('' * ts.shape[axis])
else:
axislabels.extend(ts.labels[axis])
if all(lab == '' for lab in axislabels):
new_labels.append(None)
else:
new_labels.append(axislabels)
else:
# non-concatenation axis
axlabels = tup[0].labels[ax]
if not all(ts.labels[ax] == axlabels for ts in tup[1:]):
# series to be joined do not agree on labels for this axis
axlabels = None
new_labels.append(axlabels)
return self.__new__(self.__class__, new_array, new_tspan, new_labels) | python | def concatenate(self, tup, axis=0):
"""Join a sequence of Timeseries to this one
Args:
tup (sequence of Timeseries): timeseries to be joined with this one.
They must have the same shape as this Timeseries, except in the
dimension corresponding to `axis`.
axis (int, optional): The axis along which timeseries will be joined.
Returns:
res (Timeseries or ndarray)
"""
if not isinstance(tup, Sequence):
tup = (tup,)
if tup is (None,) or len(tup) is 0:
return self
tup = (self,) + tuple(tup)
new_array = np.concatenate(tup, axis)
if not all(hasattr(ts, 'tspan') and
hasattr(ts, 'labels') for ts in tup):
return new_array
if axis == 0:
starts = [ts.tspan[0] for ts in tup]
ends = [ts.tspan[-1] for ts in tup]
if not all(starts[i] > ends[i-1] for i in range(1, len(starts))):
# series being joined are not ordered in time. not Timeseries
return new_array
else:
new_tspan = np.concatenate([ts.tspan for ts in tup])
else:
new_tspan = self.tspan
new_labels = [None]
for ax in range(1, new_array.ndim):
if ax == axis:
axislabels = []
for ts in tup:
if ts.labels[axis] is None:
axislabels.extend('' * ts.shape[axis])
else:
axislabels.extend(ts.labels[axis])
if all(lab == '' for lab in axislabels):
new_labels.append(None)
else:
new_labels.append(axislabels)
else:
# non-concatenation axis
axlabels = tup[0].labels[ax]
if not all(ts.labels[ax] == axlabels for ts in tup[1:]):
# series to be joined do not agree on labels for this axis
axlabels = None
new_labels.append(axlabels)
return self.__new__(self.__class__, new_array, new_tspan, new_labels) | [
"def",
"concatenate",
"(",
"self",
",",
"tup",
",",
"axis",
"=",
"0",
")",
":",
"if",
"not",
"isinstance",
"(",
"tup",
",",
"Sequence",
")",
":",
"tup",
"=",
"(",
"tup",
",",
")",
"if",
"tup",
"is",
"(",
"None",
",",
")",
"or",
"len",
"(",
"t... | Join a sequence of Timeseries to this one
Args:
tup (sequence of Timeseries): timeseries to be joined with this one.
They must have the same shape as this Timeseries, except in the
dimension corresponding to `axis`.
axis (int, optional): The axis along which timeseries will be joined.
Returns:
res (Timeseries or ndarray) | [
"Join",
"a",
"sequence",
"of",
"Timeseries",
"to",
"this",
"one",
"Args",
":",
"tup",
"(",
"sequence",
"of",
"Timeseries",
")",
":",
"timeseries",
"to",
"be",
"joined",
"with",
"this",
"one",
".",
"They",
"must",
"have",
"the",
"same",
"shape",
"as",
"... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/timeseries.py#L679-L728 | train |
mattja/nsim | nsim/timeseries.py | Timeseries.split | def split(self, indices_or_sections, axis=0):
"""Split a timeseries into multiple sub-timeseries"""
if not isinstance(indices_or_sections, numbers.Integral):
raise Error('splitting by array of indices is not yet implemented')
n = indices_or_sections
if self.shape[axis] % n != 0:
raise ValueError("Array split doesn't result in an equal division")
step = self.shape[axis] / n
pieces = []
start = 0
while start < self.shape[axis]:
stop = start + step
ix = [slice(None)] * self.ndim
ix[axis] = slice(start, stop)
ix = tuple(ix)
pieces.append(self[ix])
start += step
return pieces | python | def split(self, indices_or_sections, axis=0):
"""Split a timeseries into multiple sub-timeseries"""
if not isinstance(indices_or_sections, numbers.Integral):
raise Error('splitting by array of indices is not yet implemented')
n = indices_or_sections
if self.shape[axis] % n != 0:
raise ValueError("Array split doesn't result in an equal division")
step = self.shape[axis] / n
pieces = []
start = 0
while start < self.shape[axis]:
stop = start + step
ix = [slice(None)] * self.ndim
ix[axis] = slice(start, stop)
ix = tuple(ix)
pieces.append(self[ix])
start += step
return pieces | [
"def",
"split",
"(",
"self",
",",
"indices_or_sections",
",",
"axis",
"=",
"0",
")",
":",
"if",
"not",
"isinstance",
"(",
"indices_or_sections",
",",
"numbers",
".",
"Integral",
")",
":",
"raise",
"Error",
"(",
"'splitting by array of indices is not yet implemente... | Split a timeseries into multiple sub-timeseries | [
"Split",
"a",
"timeseries",
"into",
"multiple",
"sub",
"-",
"timeseries"
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/timeseries.py#L730-L747 | train |
mattja/nsim | nsim/analysesN/plots.py | plot | def plot(dts, title=None, points=None, show=True):
"""Plot a distributed timeseries
Args:
dts (DistTimeseries)
title (str, optional)
points (int, optional): Limit the number of time points plotted.
If specified, will downsample to use this total number of time points,
and only fetch back the necessary points to the client for plotting.
Returns:
fig
"""
if points is not None and len(dts.tspan) > points:
# then downsample (TODO: use interpolation)
ix = np.linspace(0, len(dts.tspan) - 1, points).astype(np.int64)
dts = dts[ix, ...]
ts = distob.gather(dts)
return ts.plot(title, show) | python | def plot(dts, title=None, points=None, show=True):
"""Plot a distributed timeseries
Args:
dts (DistTimeseries)
title (str, optional)
points (int, optional): Limit the number of time points plotted.
If specified, will downsample to use this total number of time points,
and only fetch back the necessary points to the client for plotting.
Returns:
fig
"""
if points is not None and len(dts.tspan) > points:
# then downsample (TODO: use interpolation)
ix = np.linspace(0, len(dts.tspan) - 1, points).astype(np.int64)
dts = dts[ix, ...]
ts = distob.gather(dts)
return ts.plot(title, show) | [
"def",
"plot",
"(",
"dts",
",",
"title",
"=",
"None",
",",
"points",
"=",
"None",
",",
"show",
"=",
"True",
")",
":",
"if",
"points",
"is",
"not",
"None",
"and",
"len",
"(",
"dts",
".",
"tspan",
")",
">",
"points",
":",
"# then downsample (TODO: use... | Plot a distributed timeseries
Args:
dts (DistTimeseries)
title (str, optional)
points (int, optional): Limit the number of time points plotted.
If specified, will downsample to use this total number of time points,
and only fetch back the necessary points to the client for plotting.
Returns:
fig | [
"Plot",
"a",
"distributed",
"timeseries",
"Args",
":",
"dts",
"(",
"DistTimeseries",
")",
"title",
"(",
"str",
"optional",
")",
"points",
"(",
"int",
"optional",
")",
":",
"Limit",
"the",
"number",
"of",
"time",
"points",
"plotted",
".",
"If",
"specified",... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analysesN/plots.py#L20-L36 | train |
mattja/nsim | nsim/analysesN/plots.py | phase_histogram | def phase_histogram(dts, times=None, nbins=30, colormap=mpl.cm.Blues):
"""Plot a polar histogram of a phase variable's probability distribution
Args:
dts: DistTimeseries with axis 2 ranging over separate instances of an
oscillator (time series values are assumed to represent an angle)
times (float or sequence of floats): The target times at which
to plot the distribution
nbins (int): number of histogram bins
colormap
"""
if times is None:
times = np.linspace(dts.tspan[0], dts.tspan[-1], num=4)
elif isinstance(times, numbers.Number):
times = np.array([times], dtype=np.float64)
indices = distob.gather(dts.tspan.searchsorted(times))
if indices[-1] == len(dts.tspan):
indices[-1] -= 1
nplots = len(indices)
fig = plt.figure()
n = np.zeros((nbins, nplots))
for i in range(nplots):
index = indices[i]
time = dts.tspan[index]
phases = distob.gather(dts.mod2pi()[index, 0, :])
ax = fig.add_subplot(1, nplots, i + 1, projection='polar')
n[:,i], bins, patches = ax.hist(phases, nbins, (-np.pi, np.pi),
density=True, histtype='bar')
ax.set_title('time = %d s' % time)
ax.set_xticklabels(['0', r'$\frac{\pi}{4}$', r'$\frac{\pi}{2}$',
r'$\frac{3\pi}{4}$', r'$\pi$', r'$\frac{-3\pi}{4}$',
r'$\frac{-\pi}{2}$', r'$\frac{-\pi}{4}$'])
nmin, nmax = n.min(), n.max()
#TODO should make a custom colormap instead of reducing color dynamic range:
norm = mpl.colors.Normalize(1.2*nmin - 0.2*nmax,
0.6*nmin + 0.4*nmax, clip=True)
for i in range(nplots):
ax = fig.get_axes()[i]
ax.set_ylim(0, nmax)
for this_n, thispatch in zip(n[:,i], ax.patches):
color = colormap(norm(this_n))
thispatch.set_facecolor(color)
thispatch.set_edgecolor(color)
fig.show() | python | def phase_histogram(dts, times=None, nbins=30, colormap=mpl.cm.Blues):
"""Plot a polar histogram of a phase variable's probability distribution
Args:
dts: DistTimeseries with axis 2 ranging over separate instances of an
oscillator (time series values are assumed to represent an angle)
times (float or sequence of floats): The target times at which
to plot the distribution
nbins (int): number of histogram bins
colormap
"""
if times is None:
times = np.linspace(dts.tspan[0], dts.tspan[-1], num=4)
elif isinstance(times, numbers.Number):
times = np.array([times], dtype=np.float64)
indices = distob.gather(dts.tspan.searchsorted(times))
if indices[-1] == len(dts.tspan):
indices[-1] -= 1
nplots = len(indices)
fig = plt.figure()
n = np.zeros((nbins, nplots))
for i in range(nplots):
index = indices[i]
time = dts.tspan[index]
phases = distob.gather(dts.mod2pi()[index, 0, :])
ax = fig.add_subplot(1, nplots, i + 1, projection='polar')
n[:,i], bins, patches = ax.hist(phases, nbins, (-np.pi, np.pi),
density=True, histtype='bar')
ax.set_title('time = %d s' % time)
ax.set_xticklabels(['0', r'$\frac{\pi}{4}$', r'$\frac{\pi}{2}$',
r'$\frac{3\pi}{4}$', r'$\pi$', r'$\frac{-3\pi}{4}$',
r'$\frac{-\pi}{2}$', r'$\frac{-\pi}{4}$'])
nmin, nmax = n.min(), n.max()
#TODO should make a custom colormap instead of reducing color dynamic range:
norm = mpl.colors.Normalize(1.2*nmin - 0.2*nmax,
0.6*nmin + 0.4*nmax, clip=True)
for i in range(nplots):
ax = fig.get_axes()[i]
ax.set_ylim(0, nmax)
for this_n, thispatch in zip(n[:,i], ax.patches):
color = colormap(norm(this_n))
thispatch.set_facecolor(color)
thispatch.set_edgecolor(color)
fig.show() | [
"def",
"phase_histogram",
"(",
"dts",
",",
"times",
"=",
"None",
",",
"nbins",
"=",
"30",
",",
"colormap",
"=",
"mpl",
".",
"cm",
".",
"Blues",
")",
":",
"if",
"times",
"is",
"None",
":",
"times",
"=",
"np",
".",
"linspace",
"(",
"dts",
".",
"tsp... | Plot a polar histogram of a phase variable's probability distribution
Args:
dts: DistTimeseries with axis 2 ranging over separate instances of an
oscillator (time series values are assumed to represent an angle)
times (float or sequence of floats): The target times at which
to plot the distribution
nbins (int): number of histogram bins
colormap | [
"Plot",
"a",
"polar",
"histogram",
"of",
"a",
"phase",
"variable",
"s",
"probability",
"distribution",
"Args",
":",
"dts",
":",
"DistTimeseries",
"with",
"axis",
"2",
"ranging",
"over",
"separate",
"instances",
"of",
"an",
"oscillator",
"(",
"time",
"series",
... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analysesN/plots.py#L39-L81 | train |
mattja/nsim | nsim/analyses1/freq.py | psd | def psd(ts, nperseg=1500, noverlap=1200, plot=True):
"""plot Welch estimate of power spectral density, using nperseg samples per
segment, with noverlap samples overlap and Hamming window."""
ts = ts.squeeze()
if ts.ndim is 1:
ts = ts.reshape((-1, 1))
fs = (len(ts) - 1.0) / (ts.tspan[-1] - ts.tspan[0])
window = signal.hamming(nperseg, sym=False)
nfft = max(256, 2**np.int(np.log2(nperseg) + 1))
freqs, pxx = signal.welch(ts, fs, window, nperseg, noverlap, nfft,
detrend='linear', axis=0)
# Discard estimates for freq bins that are too low for the window size.
# (require two full cycles to fit within the window)
index = np.nonzero(freqs >= 2.0*fs/nperseg)[0][0]
if index > 0:
freqs = freqs[index:]
pxx = pxx[index:]
# Discard estimate for last freq bin as too high for Nyquist frequency:
freqs = freqs[:-1]
pxx = pxx[:-1]
if plot is True:
_plot_psd(ts, freqs, pxx)
return freqs, pxx | python | def psd(ts, nperseg=1500, noverlap=1200, plot=True):
"""plot Welch estimate of power spectral density, using nperseg samples per
segment, with noverlap samples overlap and Hamming window."""
ts = ts.squeeze()
if ts.ndim is 1:
ts = ts.reshape((-1, 1))
fs = (len(ts) - 1.0) / (ts.tspan[-1] - ts.tspan[0])
window = signal.hamming(nperseg, sym=False)
nfft = max(256, 2**np.int(np.log2(nperseg) + 1))
freqs, pxx = signal.welch(ts, fs, window, nperseg, noverlap, nfft,
detrend='linear', axis=0)
# Discard estimates for freq bins that are too low for the window size.
# (require two full cycles to fit within the window)
index = np.nonzero(freqs >= 2.0*fs/nperseg)[0][0]
if index > 0:
freqs = freqs[index:]
pxx = pxx[index:]
# Discard estimate for last freq bin as too high for Nyquist frequency:
freqs = freqs[:-1]
pxx = pxx[:-1]
if plot is True:
_plot_psd(ts, freqs, pxx)
return freqs, pxx | [
"def",
"psd",
"(",
"ts",
",",
"nperseg",
"=",
"1500",
",",
"noverlap",
"=",
"1200",
",",
"plot",
"=",
"True",
")",
":",
"ts",
"=",
"ts",
".",
"squeeze",
"(",
")",
"if",
"ts",
".",
"ndim",
"is",
"1",
":",
"ts",
"=",
"ts",
".",
"reshape",
"(",
... | plot Welch estimate of power spectral density, using nperseg samples per
segment, with noverlap samples overlap and Hamming window. | [
"plot",
"Welch",
"estimate",
"of",
"power",
"spectral",
"density",
"using",
"nperseg",
"samples",
"per",
"segment",
"with",
"noverlap",
"samples",
"overlap",
"and",
"Hamming",
"window",
"."
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/freq.py#L31-L53 | train |
mattja/nsim | nsim/analyses1/freq.py | lowpass | def lowpass(ts, cutoff_hz, order=3):
"""forward-backward butterworth low-pass filter"""
orig_ndim = ts.ndim
if ts.ndim is 1:
ts = ts[:, np.newaxis]
channels = ts.shape[1]
fs = (len(ts) - 1.0) / (ts.tspan[-1] - ts.tspan[0])
nyq = 0.5 * fs
cutoff = cutoff_hz/nyq
b, a = signal.butter(order, cutoff, btype='low')
if not np.all(np.abs(np.roots(a)) < 1.0):
raise ValueError('Filter will not be stable with these values.')
dtype = ts.dtype
output = np.zeros((len(ts), channels), dtype)
for i in range(channels):
output[:, i] = signal.filtfilt(b, a, ts[:, i])
if orig_ndim is 1:
output = output[:, 0]
return Timeseries(output, ts.tspan, labels=ts.labels) | python | def lowpass(ts, cutoff_hz, order=3):
"""forward-backward butterworth low-pass filter"""
orig_ndim = ts.ndim
if ts.ndim is 1:
ts = ts[:, np.newaxis]
channels = ts.shape[1]
fs = (len(ts) - 1.0) / (ts.tspan[-1] - ts.tspan[0])
nyq = 0.5 * fs
cutoff = cutoff_hz/nyq
b, a = signal.butter(order, cutoff, btype='low')
if not np.all(np.abs(np.roots(a)) < 1.0):
raise ValueError('Filter will not be stable with these values.')
dtype = ts.dtype
output = np.zeros((len(ts), channels), dtype)
for i in range(channels):
output[:, i] = signal.filtfilt(b, a, ts[:, i])
if orig_ndim is 1:
output = output[:, 0]
return Timeseries(output, ts.tspan, labels=ts.labels) | [
"def",
"lowpass",
"(",
"ts",
",",
"cutoff_hz",
",",
"order",
"=",
"3",
")",
":",
"orig_ndim",
"=",
"ts",
".",
"ndim",
"if",
"ts",
".",
"ndim",
"is",
"1",
":",
"ts",
"=",
"ts",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"channels",
"=",
"ts",
"... | forward-backward butterworth low-pass filter | [
"forward",
"-",
"backward",
"butterworth",
"low",
"-",
"pass",
"filter"
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/freq.py#L98-L116 | train |
mattja/nsim | nsim/analyses1/freq.py | bandpass | def bandpass(ts, low_hz, high_hz, order=3):
"""forward-backward butterworth band-pass filter"""
orig_ndim = ts.ndim
if ts.ndim is 1:
ts = ts[:, np.newaxis]
channels = ts.shape[1]
fs = (len(ts) - 1.0) / (ts.tspan[-1] - ts.tspan[0])
nyq = 0.5 * fs
low = low_hz/nyq
high = high_hz/nyq
b, a = signal.butter(order, [low, high], btype='band')
if not np.all(np.abs(np.roots(a)) < 1.0):
raise ValueError('Filter will not be stable with these values.')
dtype = ts.dtype
output = np.zeros((len(ts), channels), dtype)
for i in range(channels):
output[:, i] = signal.filtfilt(b, a, ts[:, i])
if orig_ndim is 1:
output = output[:, 0]
return Timeseries(output, ts.tspan, labels=ts.labels) | python | def bandpass(ts, low_hz, high_hz, order=3):
"""forward-backward butterworth band-pass filter"""
orig_ndim = ts.ndim
if ts.ndim is 1:
ts = ts[:, np.newaxis]
channels = ts.shape[1]
fs = (len(ts) - 1.0) / (ts.tspan[-1] - ts.tspan[0])
nyq = 0.5 * fs
low = low_hz/nyq
high = high_hz/nyq
b, a = signal.butter(order, [low, high], btype='band')
if not np.all(np.abs(np.roots(a)) < 1.0):
raise ValueError('Filter will not be stable with these values.')
dtype = ts.dtype
output = np.zeros((len(ts), channels), dtype)
for i in range(channels):
output[:, i] = signal.filtfilt(b, a, ts[:, i])
if orig_ndim is 1:
output = output[:, 0]
return Timeseries(output, ts.tspan, labels=ts.labels) | [
"def",
"bandpass",
"(",
"ts",
",",
"low_hz",
",",
"high_hz",
",",
"order",
"=",
"3",
")",
":",
"orig_ndim",
"=",
"ts",
".",
"ndim",
"if",
"ts",
".",
"ndim",
"is",
"1",
":",
"ts",
"=",
"ts",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"channels",
... | forward-backward butterworth band-pass filter | [
"forward",
"-",
"backward",
"butterworth",
"band",
"-",
"pass",
"filter"
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/freq.py#L140-L159 | train |
mattja/nsim | nsim/analyses1/freq.py | notch | def notch(ts, freq_hz, bandwidth_hz=1.0):
"""notch filter to remove remove a particular frequency
Adapted from code by Sturla Molden
"""
orig_ndim = ts.ndim
if ts.ndim is 1:
ts = ts[:, np.newaxis]
channels = ts.shape[1]
fs = (len(ts) - 1.0) / (ts.tspan[-1] - ts.tspan[0])
nyq = 0.5 * fs
freq = freq_hz/nyq
bandwidth = bandwidth_hz/nyq
R = 1.0 - 3.0*(bandwidth/2.0)
K = ((1.0 - 2.0*R*np.cos(np.pi*freq) + R**2) /
(2.0 - 2.0*np.cos(np.pi*freq)))
b, a = np.zeros(3), np.zeros(3)
a[0] = 1.0
a[1] = -2.0*R*np.cos(np.pi*freq)
a[2] = R**2
b[0] = K
b[1] = -2*K*np.cos(np.pi*freq)
b[2] = K
if not np.all(np.abs(np.roots(a)) < 1.0):
raise ValueError('Filter will not be stable with these values.')
dtype = ts.dtype
output = np.zeros((len(ts), channels), dtype)
for i in range(channels):
output[:, i] = signal.filtfilt(b, a, ts[:, i])
if orig_ndim is 1:
output = output[:, 0]
return Timeseries(output, ts.tspan, labels=ts.labels) | python | def notch(ts, freq_hz, bandwidth_hz=1.0):
"""notch filter to remove remove a particular frequency
Adapted from code by Sturla Molden
"""
orig_ndim = ts.ndim
if ts.ndim is 1:
ts = ts[:, np.newaxis]
channels = ts.shape[1]
fs = (len(ts) - 1.0) / (ts.tspan[-1] - ts.tspan[0])
nyq = 0.5 * fs
freq = freq_hz/nyq
bandwidth = bandwidth_hz/nyq
R = 1.0 - 3.0*(bandwidth/2.0)
K = ((1.0 - 2.0*R*np.cos(np.pi*freq) + R**2) /
(2.0 - 2.0*np.cos(np.pi*freq)))
b, a = np.zeros(3), np.zeros(3)
a[0] = 1.0
a[1] = -2.0*R*np.cos(np.pi*freq)
a[2] = R**2
b[0] = K
b[1] = -2*K*np.cos(np.pi*freq)
b[2] = K
if not np.all(np.abs(np.roots(a)) < 1.0):
raise ValueError('Filter will not be stable with these values.')
dtype = ts.dtype
output = np.zeros((len(ts), channels), dtype)
for i in range(channels):
output[:, i] = signal.filtfilt(b, a, ts[:, i])
if orig_ndim is 1:
output = output[:, 0]
return Timeseries(output, ts.tspan, labels=ts.labels) | [
"def",
"notch",
"(",
"ts",
",",
"freq_hz",
",",
"bandwidth_hz",
"=",
"1.0",
")",
":",
"orig_ndim",
"=",
"ts",
".",
"ndim",
"if",
"ts",
".",
"ndim",
"is",
"1",
":",
"ts",
"=",
"ts",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"channels",
"=",
"ts"... | notch filter to remove remove a particular frequency
Adapted from code by Sturla Molden | [
"notch",
"filter",
"to",
"remove",
"remove",
"a",
"particular",
"frequency",
"Adapted",
"from",
"code",
"by",
"Sturla",
"Molden"
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/freq.py#L162-L192 | train |
mattja/nsim | nsim/analyses1/freq.py | hilbert | def hilbert(ts):
"""Analytic signal, using the Hilbert transform"""
output = signal.hilbert(signal.detrend(ts, axis=0), axis=0)
return Timeseries(output, ts.tspan, labels=ts.labels) | python | def hilbert(ts):
"""Analytic signal, using the Hilbert transform"""
output = signal.hilbert(signal.detrend(ts, axis=0), axis=0)
return Timeseries(output, ts.tspan, labels=ts.labels) | [
"def",
"hilbert",
"(",
"ts",
")",
":",
"output",
"=",
"signal",
".",
"hilbert",
"(",
"signal",
".",
"detrend",
"(",
"ts",
",",
"axis",
"=",
"0",
")",
",",
"axis",
"=",
"0",
")",
"return",
"Timeseries",
"(",
"output",
",",
"ts",
".",
"tspan",
",",... | Analytic signal, using the Hilbert transform | [
"Analytic",
"signal",
"using",
"the",
"Hilbert",
"transform"
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/freq.py#L195-L198 | train |
mattja/nsim | nsim/analyses1/freq.py | hilbert_amplitude | def hilbert_amplitude(ts):
"""Amplitude of the analytic signal, using the Hilbert transform"""
output = np.abs(signal.hilbert(signal.detrend(ts, axis=0), axis=0))
return Timeseries(output, ts.tspan, labels=ts.labels) | python | def hilbert_amplitude(ts):
"""Amplitude of the analytic signal, using the Hilbert transform"""
output = np.abs(signal.hilbert(signal.detrend(ts, axis=0), axis=0))
return Timeseries(output, ts.tspan, labels=ts.labels) | [
"def",
"hilbert_amplitude",
"(",
"ts",
")",
":",
"output",
"=",
"np",
".",
"abs",
"(",
"signal",
".",
"hilbert",
"(",
"signal",
".",
"detrend",
"(",
"ts",
",",
"axis",
"=",
"0",
")",
",",
"axis",
"=",
"0",
")",
")",
"return",
"Timeseries",
"(",
"... | Amplitude of the analytic signal, using the Hilbert transform | [
"Amplitude",
"of",
"the",
"analytic",
"signal",
"using",
"the",
"Hilbert",
"transform"
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/freq.py#L201-L204 | train |
mattja/nsim | nsim/analyses1/freq.py | hilbert_phase | def hilbert_phase(ts):
"""Phase of the analytic signal, using the Hilbert transform"""
output = np.angle(signal.hilbert(signal.detrend(ts, axis=0), axis=0))
return Timeseries(output, ts.tspan, labels=ts.labels) | python | def hilbert_phase(ts):
"""Phase of the analytic signal, using the Hilbert transform"""
output = np.angle(signal.hilbert(signal.detrend(ts, axis=0), axis=0))
return Timeseries(output, ts.tspan, labels=ts.labels) | [
"def",
"hilbert_phase",
"(",
"ts",
")",
":",
"output",
"=",
"np",
".",
"angle",
"(",
"signal",
".",
"hilbert",
"(",
"signal",
".",
"detrend",
"(",
"ts",
",",
"axis",
"=",
"0",
")",
",",
"axis",
"=",
"0",
")",
")",
"return",
"Timeseries",
"(",
"ou... | Phase of the analytic signal, using the Hilbert transform | [
"Phase",
"of",
"the",
"analytic",
"signal",
"using",
"the",
"Hilbert",
"transform"
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/freq.py#L207-L210 | train |
mattja/nsim | nsim/analyses1/freq.py | cwt | def cwt(ts, freqs=np.logspace(0, 2), wavelet=cwtmorlet, plot=True):
"""Continuous wavelet transform
Note the full results can use a huge amount of memory at 64-bit precision
Args:
ts: Timeseries of m variables, shape (n, m). Assumed constant timestep.
freqs: list of frequencies (in Hz) to use for the tranform.
(default is 50 frequency bins logarithmic from 1Hz to 100Hz)
wavelet: the wavelet to use. may be complex. see scipy.signal.wavelets
plot: whether to plot time-resolved power spectrum
Returns:
coefs: Continuous wavelet transform output array, shape (n,len(freqs),m)
"""
orig_ndim = ts.ndim
if ts.ndim is 1:
ts = ts[:, np.newaxis]
channels = ts.shape[1]
fs = (len(ts) - 1.0) / (1.0*ts.tspan[-1] - ts.tspan[0])
x = signal.detrend(ts, axis=0)
dtype = wavelet(fs/freqs[0], fs/freqs[0]).dtype
coefs = np.zeros((len(ts), len(freqs), channels), dtype)
for i in range(channels):
coefs[:, :, i] = roughcwt(x[:, i], cwtmorlet, fs/freqs).T
if plot:
_plot_cwt(ts, coefs, freqs)
if orig_ndim is 1:
coefs = coefs[:, :, 0]
return coefs | python | def cwt(ts, freqs=np.logspace(0, 2), wavelet=cwtmorlet, plot=True):
"""Continuous wavelet transform
Note the full results can use a huge amount of memory at 64-bit precision
Args:
ts: Timeseries of m variables, shape (n, m). Assumed constant timestep.
freqs: list of frequencies (in Hz) to use for the tranform.
(default is 50 frequency bins logarithmic from 1Hz to 100Hz)
wavelet: the wavelet to use. may be complex. see scipy.signal.wavelets
plot: whether to plot time-resolved power spectrum
Returns:
coefs: Continuous wavelet transform output array, shape (n,len(freqs),m)
"""
orig_ndim = ts.ndim
if ts.ndim is 1:
ts = ts[:, np.newaxis]
channels = ts.shape[1]
fs = (len(ts) - 1.0) / (1.0*ts.tspan[-1] - ts.tspan[0])
x = signal.detrend(ts, axis=0)
dtype = wavelet(fs/freqs[0], fs/freqs[0]).dtype
coefs = np.zeros((len(ts), len(freqs), channels), dtype)
for i in range(channels):
coefs[:, :, i] = roughcwt(x[:, i], cwtmorlet, fs/freqs).T
if plot:
_plot_cwt(ts, coefs, freqs)
if orig_ndim is 1:
coefs = coefs[:, :, 0]
return coefs | [
"def",
"cwt",
"(",
"ts",
",",
"freqs",
"=",
"np",
".",
"logspace",
"(",
"0",
",",
"2",
")",
",",
"wavelet",
"=",
"cwtmorlet",
",",
"plot",
"=",
"True",
")",
":",
"orig_ndim",
"=",
"ts",
".",
"ndim",
"if",
"ts",
".",
"ndim",
"is",
"1",
":",
"t... | Continuous wavelet transform
Note the full results can use a huge amount of memory at 64-bit precision
Args:
ts: Timeseries of m variables, shape (n, m). Assumed constant timestep.
freqs: list of frequencies (in Hz) to use for the tranform.
(default is 50 frequency bins logarithmic from 1Hz to 100Hz)
wavelet: the wavelet to use. may be complex. see scipy.signal.wavelets
plot: whether to plot time-resolved power spectrum
Returns:
coefs: Continuous wavelet transform output array, shape (n,len(freqs),m) | [
"Continuous",
"wavelet",
"transform",
"Note",
"the",
"full",
"results",
"can",
"use",
"a",
"huge",
"amount",
"of",
"memory",
"at",
"64",
"-",
"bit",
"precision"
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/freq.py#L213-L241 | train |
mattja/nsim | nsim/analyses1/freq.py | cwt_distributed | def cwt_distributed(ts, freqs=np.logspace(0, 2), wavelet=cwtmorlet, plot=True):
"""Continuous wavelet transform using distributed computation.
(Currently just splits the data by channel. TODO split it further.)
Note: this function requires an IPython cluster to be started first.
Args:
ts: Timeseries of m variables, shape (n, m). Assumed constant timestep.
freqs: list of frequencies (in Hz) to use for the tranform.
(default is 50 frequency bins logarithmic from 1Hz to 100Hz)
wavelet: the wavelet to use. may be complex. see scipy.signal.wavelets
plot: whether to plot time-resolved power spectrum
Returns:
coefs: Continuous wavelet transform output array, shape (n,len(freqs),m)
"""
if ts.ndim is 1 or ts.shape[1] is 1:
return cwt(ts, freqs, wavelet, plot)
import distob
vcwt = distob.vectorize(cwt)
coefs = vcwt(ts, freqs, wavelet, plot=False)
if plot:
_plot_cwt(ts, coefs, freqs)
return coefs | python | def cwt_distributed(ts, freqs=np.logspace(0, 2), wavelet=cwtmorlet, plot=True):
"""Continuous wavelet transform using distributed computation.
(Currently just splits the data by channel. TODO split it further.)
Note: this function requires an IPython cluster to be started first.
Args:
ts: Timeseries of m variables, shape (n, m). Assumed constant timestep.
freqs: list of frequencies (in Hz) to use for the tranform.
(default is 50 frequency bins logarithmic from 1Hz to 100Hz)
wavelet: the wavelet to use. may be complex. see scipy.signal.wavelets
plot: whether to plot time-resolved power spectrum
Returns:
coefs: Continuous wavelet transform output array, shape (n,len(freqs),m)
"""
if ts.ndim is 1 or ts.shape[1] is 1:
return cwt(ts, freqs, wavelet, plot)
import distob
vcwt = distob.vectorize(cwt)
coefs = vcwt(ts, freqs, wavelet, plot=False)
if plot:
_plot_cwt(ts, coefs, freqs)
return coefs | [
"def",
"cwt_distributed",
"(",
"ts",
",",
"freqs",
"=",
"np",
".",
"logspace",
"(",
"0",
",",
"2",
")",
",",
"wavelet",
"=",
"cwtmorlet",
",",
"plot",
"=",
"True",
")",
":",
"if",
"ts",
".",
"ndim",
"is",
"1",
"or",
"ts",
".",
"shape",
"[",
"1"... | Continuous wavelet transform using distributed computation.
(Currently just splits the data by channel. TODO split it further.)
Note: this function requires an IPython cluster to be started first.
Args:
ts: Timeseries of m variables, shape (n, m). Assumed constant timestep.
freqs: list of frequencies (in Hz) to use for the tranform.
(default is 50 frequency bins logarithmic from 1Hz to 100Hz)
wavelet: the wavelet to use. may be complex. see scipy.signal.wavelets
plot: whether to plot time-resolved power spectrum
Returns:
coefs: Continuous wavelet transform output array, shape (n,len(freqs),m) | [
"Continuous",
"wavelet",
"transform",
"using",
"distributed",
"computation",
".",
"(",
"Currently",
"just",
"splits",
"the",
"data",
"by",
"channel",
".",
"TODO",
"split",
"it",
"further",
".",
")",
"Note",
":",
"this",
"function",
"requires",
"an",
"IPython",... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/freq.py#L244-L266 | train |
mattja/nsim | nsim/analyses1/freq.py | _plot_cwt | def _plot_cwt(ts, coefs, freqs, tsize=1024, fsize=512):
"""Plot time resolved power spectral density from cwt results
Args:
ts: the original Timeseries
coefs: continuous wavelet transform coefficients as calculated by cwt()
freqs: list of frequencies (in Hz) corresponding to coefs.
tsize, fsize: size of the plot (time axis and frequency axis, in pixels)
"""
import matplotlib.style
import matplotlib as mpl
mpl.style.use('classic')
import matplotlib.pyplot as plt
from scipy import interpolate
channels = ts.shape[1]
fig = plt.figure()
for i in range(channels):
rect = (0.1, 0.85*(channels - i - 1)/channels + 0.1,
0.8, 0.85/channels)
ax = fig.add_axes(rect)
logpowers = np.log((coefs[:, :, i] * coefs[:, :, i].conj()).real)
tmin, tmax = ts.tspan[0], ts.tspan[-1]
fmin, fmax = freqs[0], freqs[-1]
tgrid, fgrid = np.mgrid[tmin:tmax:tsize*1j, fmin:fmax:fsize*1j]
gd = interpolate.interpn((ts.tspan, freqs), logpowers,
(tgrid, fgrid)).T
ax.imshow(gd, cmap='gnuplot2', aspect='auto', origin='lower',
extent=(tmin, tmax, fmin, fmax))
ax.set_ylabel('freq (Hz)')
fig.axes[0].set_title(u'log(power spectral density)')
fig.axes[channels - 1].set_xlabel('time (s)')
fig.show() | python | def _plot_cwt(ts, coefs, freqs, tsize=1024, fsize=512):
"""Plot time resolved power spectral density from cwt results
Args:
ts: the original Timeseries
coefs: continuous wavelet transform coefficients as calculated by cwt()
freqs: list of frequencies (in Hz) corresponding to coefs.
tsize, fsize: size of the plot (time axis and frequency axis, in pixels)
"""
import matplotlib.style
import matplotlib as mpl
mpl.style.use('classic')
import matplotlib.pyplot as plt
from scipy import interpolate
channels = ts.shape[1]
fig = plt.figure()
for i in range(channels):
rect = (0.1, 0.85*(channels - i - 1)/channels + 0.1,
0.8, 0.85/channels)
ax = fig.add_axes(rect)
logpowers = np.log((coefs[:, :, i] * coefs[:, :, i].conj()).real)
tmin, tmax = ts.tspan[0], ts.tspan[-1]
fmin, fmax = freqs[0], freqs[-1]
tgrid, fgrid = np.mgrid[tmin:tmax:tsize*1j, fmin:fmax:fsize*1j]
gd = interpolate.interpn((ts.tspan, freqs), logpowers,
(tgrid, fgrid)).T
ax.imshow(gd, cmap='gnuplot2', aspect='auto', origin='lower',
extent=(tmin, tmax, fmin, fmax))
ax.set_ylabel('freq (Hz)')
fig.axes[0].set_title(u'log(power spectral density)')
fig.axes[channels - 1].set_xlabel('time (s)')
fig.show() | [
"def",
"_plot_cwt",
"(",
"ts",
",",
"coefs",
",",
"freqs",
",",
"tsize",
"=",
"1024",
",",
"fsize",
"=",
"512",
")",
":",
"import",
"matplotlib",
".",
"style",
"import",
"matplotlib",
"as",
"mpl",
"mpl",
".",
"style",
".",
"use",
"(",
"'classic'",
")... | Plot time resolved power spectral density from cwt results
Args:
ts: the original Timeseries
coefs: continuous wavelet transform coefficients as calculated by cwt()
freqs: list of frequencies (in Hz) corresponding to coefs.
tsize, fsize: size of the plot (time axis and frequency axis, in pixels) | [
"Plot",
"time",
"resolved",
"power",
"spectral",
"density",
"from",
"cwt",
"results",
"Args",
":",
"ts",
":",
"the",
"original",
"Timeseries",
"coefs",
":",
"continuous",
"wavelet",
"transform",
"coefficients",
"as",
"calculated",
"by",
"cwt",
"()",
"freqs",
"... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/freq.py#L269-L299 | train |
mattja/nsim | nsim/analysesN/misc.py | first_return_times | def first_return_times(dts, c=None, d=0.0):
"""For an ensemble of time series, return the set of all time intervals
between successive returns to value c for all instances in the ensemble.
If c is not given, the default is the mean across all times and across all
time series in the ensemble.
Args:
dts (DistTimeseries)
c (float): Optional target value (default is the ensemble mean value)
d (float): Optional min distance from c to be attained between returns
Returns:
array of time intervals (Can take the mean of these to estimate the
expected first return time for the whole ensemble)
"""
if c is None:
c = dts.mean()
vmrt = distob.vectorize(analyses1.first_return_times)
all_intervals = vmrt(dts, c, d)
if hasattr(type(all_intervals), '__array_interface__'):
return np.ravel(all_intervals)
else:
return np.hstack([distob.gather(ilist) for ilist in all_intervals]) | python | def first_return_times(dts, c=None, d=0.0):
"""For an ensemble of time series, return the set of all time intervals
between successive returns to value c for all instances in the ensemble.
If c is not given, the default is the mean across all times and across all
time series in the ensemble.
Args:
dts (DistTimeseries)
c (float): Optional target value (default is the ensemble mean value)
d (float): Optional min distance from c to be attained between returns
Returns:
array of time intervals (Can take the mean of these to estimate the
expected first return time for the whole ensemble)
"""
if c is None:
c = dts.mean()
vmrt = distob.vectorize(analyses1.first_return_times)
all_intervals = vmrt(dts, c, d)
if hasattr(type(all_intervals), '__array_interface__'):
return np.ravel(all_intervals)
else:
return np.hstack([distob.gather(ilist) for ilist in all_intervals]) | [
"def",
"first_return_times",
"(",
"dts",
",",
"c",
"=",
"None",
",",
"d",
"=",
"0.0",
")",
":",
"if",
"c",
"is",
"None",
":",
"c",
"=",
"dts",
".",
"mean",
"(",
")",
"vmrt",
"=",
"distob",
".",
"vectorize",
"(",
"analyses1",
".",
"first_return_time... | For an ensemble of time series, return the set of all time intervals
between successive returns to value c for all instances in the ensemble.
If c is not given, the default is the mean across all times and across all
time series in the ensemble.
Args:
dts (DistTimeseries)
c (float): Optional target value (default is the ensemble mean value)
d (float): Optional min distance from c to be attained between returns
Returns:
array of time intervals (Can take the mean of these to estimate the
expected first return time for the whole ensemble) | [
"For",
"an",
"ensemble",
"of",
"time",
"series",
"return",
"the",
"set",
"of",
"all",
"time",
"intervals",
"between",
"successive",
"returns",
"to",
"value",
"c",
"for",
"all",
"instances",
"in",
"the",
"ensemble",
".",
"If",
"c",
"is",
"not",
"given",
"... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analysesN/misc.py#L16-L40 | train |
mattja/nsim | nsim/analysesN/epochs.py | variability_fp | def variability_fp(ts, freqs=None, ncycles=6, plot=True):
"""Example variability function.
Gives two continuous, time-resolved measures of the variability of a
time series, ranging between -1 and 1.
The two measures are based on variance of the centroid frequency and
variance of the height of the spectral peak, respectively.
(Centroid frequency meaning the power-weighted average frequency)
These measures are calculated over sliding time windows of variable size.
See also: Blenkinsop et al. (2012) The dynamic evolution of focal-onset
epilepsies - combining theoretical and clinical observations
Args:
ts Timeseries of m variables, shape (n, m). Assumed constant timestep.
freqs (optional) List of frequencies to examine. If None, defaults to
50 frequency bands ranging 1Hz to 60Hz, logarithmically spaced.
ncycles Window size, in number of cycles of the centroid frequency.
plot bool Whether to display the output
Returns:
variability Timeseries of shape (n, m, 2)
variability[:, :, 0] gives a measure of variability
between -1 and 1 based on variance of centroid frequency.
variability[:, :, 1] gives a measure of variability
between -1 and 1 based on variance of maximum power.
"""
if ts.ndim <= 2:
return analyses1.variability_fp(ts, freqs, ncycles, plot)
else:
return distob.vectorize(analyses1.variability_fp)(
ts, freqs, ncycles, plot) | python | def variability_fp(ts, freqs=None, ncycles=6, plot=True):
"""Example variability function.
Gives two continuous, time-resolved measures of the variability of a
time series, ranging between -1 and 1.
The two measures are based on variance of the centroid frequency and
variance of the height of the spectral peak, respectively.
(Centroid frequency meaning the power-weighted average frequency)
These measures are calculated over sliding time windows of variable size.
See also: Blenkinsop et al. (2012) The dynamic evolution of focal-onset
epilepsies - combining theoretical and clinical observations
Args:
ts Timeseries of m variables, shape (n, m). Assumed constant timestep.
freqs (optional) List of frequencies to examine. If None, defaults to
50 frequency bands ranging 1Hz to 60Hz, logarithmically spaced.
ncycles Window size, in number of cycles of the centroid frequency.
plot bool Whether to display the output
Returns:
variability Timeseries of shape (n, m, 2)
variability[:, :, 0] gives a measure of variability
between -1 and 1 based on variance of centroid frequency.
variability[:, :, 1] gives a measure of variability
between -1 and 1 based on variance of maximum power.
"""
if ts.ndim <= 2:
return analyses1.variability_fp(ts, freqs, ncycles, plot)
else:
return distob.vectorize(analyses1.variability_fp)(
ts, freqs, ncycles, plot) | [
"def",
"variability_fp",
"(",
"ts",
",",
"freqs",
"=",
"None",
",",
"ncycles",
"=",
"6",
",",
"plot",
"=",
"True",
")",
":",
"if",
"ts",
".",
"ndim",
"<=",
"2",
":",
"return",
"analyses1",
".",
"variability_fp",
"(",
"ts",
",",
"freqs",
",",
"ncycl... | Example variability function.
Gives two continuous, time-resolved measures of the variability of a
time series, ranging between -1 and 1.
The two measures are based on variance of the centroid frequency and
variance of the height of the spectral peak, respectively.
(Centroid frequency meaning the power-weighted average frequency)
These measures are calculated over sliding time windows of variable size.
See also: Blenkinsop et al. (2012) The dynamic evolution of focal-onset
epilepsies - combining theoretical and clinical observations
Args:
ts Timeseries of m variables, shape (n, m). Assumed constant timestep.
freqs (optional) List of frequencies to examine. If None, defaults to
50 frequency bands ranging 1Hz to 60Hz, logarithmically spaced.
ncycles Window size, in number of cycles of the centroid frequency.
plot bool Whether to display the output
Returns:
variability Timeseries of shape (n, m, 2)
variability[:, :, 0] gives a measure of variability
between -1 and 1 based on variance of centroid frequency.
variability[:, :, 1] gives a measure of variability
between -1 and 1 based on variance of maximum power. | [
"Example",
"variability",
"function",
".",
"Gives",
"two",
"continuous",
"time",
"-",
"resolved",
"measures",
"of",
"the",
"variability",
"of",
"a",
"time",
"series",
"ranging",
"between",
"-",
"1",
"and",
"1",
".",
"The",
"two",
"measures",
"are",
"based",
... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analysesN/epochs.py#L29-L57 | train |
mattja/nsim | nsim/analysesN/epochs.py | epochs | def epochs(ts, variability=None, threshold=0.0, minlength=1.0, plot=True):
"""Identify "stationary" epochs within a time series, based on a
continuous measure of variability.
Epochs are defined to contain the points of minimal variability, and to
extend as wide as possible with variability not exceeding the threshold.
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m, q), giving q scalar
measures of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
Epochs require the mean of these to be below the threshold.
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
plot bool Whether to display the output
Returns: (variability, allchannels_epochs)
variability: as above
allchannels_epochs: (list of) list of tuples
For each variable, a list of tuples (start, end) that give the
starting and ending indices of stationary epochs.
(epochs are inclusive of start point but not the end point)
"""
if ts.ndim <= 2:
return analyses1.epochs_distributed(
ts, variability, threshold, minlength, plot)
else:
return distob.vectorize(analyses1.epochs)(
ts, variability, threshold, minlength, plot) | python | def epochs(ts, variability=None, threshold=0.0, minlength=1.0, plot=True):
"""Identify "stationary" epochs within a time series, based on a
continuous measure of variability.
Epochs are defined to contain the points of minimal variability, and to
extend as wide as possible with variability not exceeding the threshold.
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m, q), giving q scalar
measures of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
Epochs require the mean of these to be below the threshold.
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
plot bool Whether to display the output
Returns: (variability, allchannels_epochs)
variability: as above
allchannels_epochs: (list of) list of tuples
For each variable, a list of tuples (start, end) that give the
starting and ending indices of stationary epochs.
(epochs are inclusive of start point but not the end point)
"""
if ts.ndim <= 2:
return analyses1.epochs_distributed(
ts, variability, threshold, minlength, plot)
else:
return distob.vectorize(analyses1.epochs)(
ts, variability, threshold, minlength, plot) | [
"def",
"epochs",
"(",
"ts",
",",
"variability",
"=",
"None",
",",
"threshold",
"=",
"0.0",
",",
"minlength",
"=",
"1.0",
",",
"plot",
"=",
"True",
")",
":",
"if",
"ts",
".",
"ndim",
"<=",
"2",
":",
"return",
"analyses1",
".",
"epochs_distributed",
"(... | Identify "stationary" epochs within a time series, based on a
continuous measure of variability.
Epochs are defined to contain the points of minimal variability, and to
extend as wide as possible with variability not exceeding the threshold.
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m, q), giving q scalar
measures of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
Epochs require the mean of these to be below the threshold.
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
plot bool Whether to display the output
Returns: (variability, allchannels_epochs)
variability: as above
allchannels_epochs: (list of) list of tuples
For each variable, a list of tuples (start, end) that give the
starting and ending indices of stationary epochs.
(epochs are inclusive of start point but not the end point) | [
"Identify",
"stationary",
"epochs",
"within",
"a",
"time",
"series",
"based",
"on",
"a",
"continuous",
"measure",
"of",
"variability",
".",
"Epochs",
"are",
"defined",
"to",
"contain",
"the",
"points",
"of",
"minimal",
"variability",
"and",
"to",
"extend",
"as... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analysesN/epochs.py#L60-L88 | train |
mattja/nsim | nsim/analysesN/epochs.py | epochs_joint | def epochs_joint(ts, variability=None, threshold=0.0, minlength=1.0,
proportion=0.75, plot=True):
"""Identify epochs within a multivariate time series where at least a
certain proportion of channels are "stationary", based on a previously
computed variability measure.
(Note: This requires an IPython cluster to be started first,
e.g. on a workstation type 'ipcluster start')
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m), giving a scalar
measure of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
proportion Require at least this fraction of channels to be "stationary"
plot bool Whether to display the output
Returns: (variability, joint_epochs)
joint_epochs: list of tuples
A list of tuples (start, end) that give the starting and ending indices
of time epochs that are stationary for at least `proportion` of channels.
(epochs are inclusive of start point but not the end point)
"""
if ts.ndim <= 2:
return analyses1.epochs_joint(
ts, variability, threshold, minlength, plot)
else:
return distob.vectorize(analyses1.epochs_joint)(
ts, variability, threshold, minlength, plot) | python | def epochs_joint(ts, variability=None, threshold=0.0, minlength=1.0,
proportion=0.75, plot=True):
"""Identify epochs within a multivariate time series where at least a
certain proportion of channels are "stationary", based on a previously
computed variability measure.
(Note: This requires an IPython cluster to be started first,
e.g. on a workstation type 'ipcluster start')
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m), giving a scalar
measure of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
proportion Require at least this fraction of channels to be "stationary"
plot bool Whether to display the output
Returns: (variability, joint_epochs)
joint_epochs: list of tuples
A list of tuples (start, end) that give the starting and ending indices
of time epochs that are stationary for at least `proportion` of channels.
(epochs are inclusive of start point but not the end point)
"""
if ts.ndim <= 2:
return analyses1.epochs_joint(
ts, variability, threshold, minlength, plot)
else:
return distob.vectorize(analyses1.epochs_joint)(
ts, variability, threshold, minlength, plot) | [
"def",
"epochs_joint",
"(",
"ts",
",",
"variability",
"=",
"None",
",",
"threshold",
"=",
"0.0",
",",
"minlength",
"=",
"1.0",
",",
"proportion",
"=",
"0.75",
",",
"plot",
"=",
"True",
")",
":",
"if",
"ts",
".",
"ndim",
"<=",
"2",
":",
"return",
"a... | Identify epochs within a multivariate time series where at least a
certain proportion of channels are "stationary", based on a previously
computed variability measure.
(Note: This requires an IPython cluster to be started first,
e.g. on a workstation type 'ipcluster start')
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m), giving a scalar
measure of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
proportion Require at least this fraction of channels to be "stationary"
plot bool Whether to display the output
Returns: (variability, joint_epochs)
joint_epochs: list of tuples
A list of tuples (start, end) that give the starting and ending indices
of time epochs that are stationary for at least `proportion` of channels.
(epochs are inclusive of start point but not the end point) | [
"Identify",
"epochs",
"within",
"a",
"multivariate",
"time",
"series",
"where",
"at",
"least",
"a",
"certain",
"proportion",
"of",
"channels",
"are",
"stationary",
"based",
"on",
"a",
"previously",
"computed",
"variability",
"measure",
"."
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analysesN/epochs.py#L128-L158 | train |
mattja/nsim | nsim/analysesN/phase.py | periods | def periods(dts, phi=0.0):
"""For an ensemble of oscillators, return the set of periods lengths of
all successive oscillations of all oscillators.
An individual oscillation is defined to start and end when the phase
passes phi (by default zero) after completing a full cycle.
If the timeseries of an oscillator phase begins (or ends) exactly at phi,
then the first (or last) oscillation will be included.
Arguments:
dts (DistTimeseries): where dts.shape[1] is 1 (single output variable
representing phase) and axis 2 ranges over multiple realizations of
the oscillator.
phi=0.0: float
A single oscillation starts and ends at phase phi (by default zero).
"""
vperiods = distob.vectorize(analyses1.periods)
all_periods = vperiods(dts, phi)
if hasattr(type(all_periods), '__array_interface__'):
return np.ravel(all_periods)
else:
return np.hstack([distob.gather(plist) for plist in all_periods]) | python | def periods(dts, phi=0.0):
"""For an ensemble of oscillators, return the set of periods lengths of
all successive oscillations of all oscillators.
An individual oscillation is defined to start and end when the phase
passes phi (by default zero) after completing a full cycle.
If the timeseries of an oscillator phase begins (or ends) exactly at phi,
then the first (or last) oscillation will be included.
Arguments:
dts (DistTimeseries): where dts.shape[1] is 1 (single output variable
representing phase) and axis 2 ranges over multiple realizations of
the oscillator.
phi=0.0: float
A single oscillation starts and ends at phase phi (by default zero).
"""
vperiods = distob.vectorize(analyses1.periods)
all_periods = vperiods(dts, phi)
if hasattr(type(all_periods), '__array_interface__'):
return np.ravel(all_periods)
else:
return np.hstack([distob.gather(plist) for plist in all_periods]) | [
"def",
"periods",
"(",
"dts",
",",
"phi",
"=",
"0.0",
")",
":",
"vperiods",
"=",
"distob",
".",
"vectorize",
"(",
"analyses1",
".",
"periods",
")",
"all_periods",
"=",
"vperiods",
"(",
"dts",
",",
"phi",
")",
"if",
"hasattr",
"(",
"type",
"(",
"all_p... | For an ensemble of oscillators, return the set of periods lengths of
all successive oscillations of all oscillators.
An individual oscillation is defined to start and end when the phase
passes phi (by default zero) after completing a full cycle.
If the timeseries of an oscillator phase begins (or ends) exactly at phi,
then the first (or last) oscillation will be included.
Arguments:
dts (DistTimeseries): where dts.shape[1] is 1 (single output variable
representing phase) and axis 2 ranges over multiple realizations of
the oscillator.
phi=0.0: float
A single oscillation starts and ends at phase phi (by default zero). | [
"For",
"an",
"ensemble",
"of",
"oscillators",
"return",
"the",
"set",
"of",
"periods",
"lengths",
"of",
"all",
"successive",
"oscillations",
"of",
"all",
"oscillators",
"."
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analysesN/phase.py#L16-L39 | train |
mattja/nsim | nsim/analysesN/phase.py | circmean | def circmean(dts, axis=2):
"""Circular mean phase"""
return np.exp(1.0j * dts).mean(axis=axis).angle() | python | def circmean(dts, axis=2):
"""Circular mean phase"""
return np.exp(1.0j * dts).mean(axis=axis).angle() | [
"def",
"circmean",
"(",
"dts",
",",
"axis",
"=",
"2",
")",
":",
"return",
"np",
".",
"exp",
"(",
"1.0j",
"*",
"dts",
")",
".",
"mean",
"(",
"axis",
"=",
"axis",
")",
".",
"angle",
"(",
")"
] | Circular mean phase | [
"Circular",
"mean",
"phase"
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analysesN/phase.py#L42-L44 | train |
mattja/nsim | nsim/analysesN/phase.py | order_param | def order_param(dts, axis=2):
"""Order parameter of phase synchronization"""
return np.abs(np.exp(1.0j * dts).mean(axis=axis)) | python | def order_param(dts, axis=2):
"""Order parameter of phase synchronization"""
return np.abs(np.exp(1.0j * dts).mean(axis=axis)) | [
"def",
"order_param",
"(",
"dts",
",",
"axis",
"=",
"2",
")",
":",
"return",
"np",
".",
"abs",
"(",
"np",
".",
"exp",
"(",
"1.0j",
"*",
"dts",
")",
".",
"mean",
"(",
"axis",
"=",
"axis",
")",
")"
] | Order parameter of phase synchronization | [
"Order",
"parameter",
"of",
"phase",
"synchronization"
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analysesN/phase.py#L47-L49 | train |
mattja/nsim | nsim/analysesN/phase.py | circstd | def circstd(dts, axis=2):
"""Circular standard deviation"""
R = np.abs(np.exp(1.0j * dts).mean(axis=axis))
return np.sqrt(-2.0 * np.log(R)) | python | def circstd(dts, axis=2):
"""Circular standard deviation"""
R = np.abs(np.exp(1.0j * dts).mean(axis=axis))
return np.sqrt(-2.0 * np.log(R)) | [
"def",
"circstd",
"(",
"dts",
",",
"axis",
"=",
"2",
")",
":",
"R",
"=",
"np",
".",
"abs",
"(",
"np",
".",
"exp",
"(",
"1.0j",
"*",
"dts",
")",
".",
"mean",
"(",
"axis",
"=",
"axis",
")",
")",
"return",
"np",
".",
"sqrt",
"(",
"-",
"2.0",
... | Circular standard deviation | [
"Circular",
"standard",
"deviation"
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analysesN/phase.py#L52-L55 | train |
mattja/nsim | nsim/models/neural_mass.py | JansenRit.f | def f(self, v, t):
"""Aburn2012 equations right hand side, noise free term
Args:
v: (8,) array
state vector
t: number
scalar time
Returns:
(8,) array
"""
ret = np.zeros(8)
ret[0] = v[4]
ret[4] = (self.He1*self.ke1*(self.g1*self.S(v[1]-v[2]) + self.u_mean) -
2*self.ke1*v[4] - self.ke1*self.ke1*v[0])
ret[1] = v[5]
ret[5] = (self.He2*self.ke2*(self.g2*self.S(v[0]) + self.p_mean) -
2*self.ke2*v[5] - self.ke2*self.ke2*v[1])
ret[2] = v[6]
ret[6] = (self.Hi*self.ki*self.g4*self.S(v[3]) - 2*self.ki*v[6] -
self.ki*self.ki*v[2])
ret[3] = v[7]
ret[7] = (self.He3*self.ke3*self.g3*self.S(v[1]-v[2]) -
2*self.ke3*v[7] - self.ke3*self.ke3*v[3])
return ret | python | def f(self, v, t):
"""Aburn2012 equations right hand side, noise free term
Args:
v: (8,) array
state vector
t: number
scalar time
Returns:
(8,) array
"""
ret = np.zeros(8)
ret[0] = v[4]
ret[4] = (self.He1*self.ke1*(self.g1*self.S(v[1]-v[2]) + self.u_mean) -
2*self.ke1*v[4] - self.ke1*self.ke1*v[0])
ret[1] = v[5]
ret[5] = (self.He2*self.ke2*(self.g2*self.S(v[0]) + self.p_mean) -
2*self.ke2*v[5] - self.ke2*self.ke2*v[1])
ret[2] = v[6]
ret[6] = (self.Hi*self.ki*self.g4*self.S(v[3]) - 2*self.ki*v[6] -
self.ki*self.ki*v[2])
ret[3] = v[7]
ret[7] = (self.He3*self.ke3*self.g3*self.S(v[1]-v[2]) -
2*self.ke3*v[7] - self.ke3*self.ke3*v[3])
return ret | [
"def",
"f",
"(",
"self",
",",
"v",
",",
"t",
")",
":",
"ret",
"=",
"np",
".",
"zeros",
"(",
"8",
")",
"ret",
"[",
"0",
"]",
"=",
"v",
"[",
"4",
"]",
"ret",
"[",
"4",
"]",
"=",
"(",
"self",
".",
"He1",
"*",
"self",
".",
"ke1",
"*",
"("... | Aburn2012 equations right hand side, noise free term
Args:
v: (8,) array
state vector
t: number
scalar time
Returns:
(8,) array | [
"Aburn2012",
"equations",
"right",
"hand",
"side",
"noise",
"free",
"term",
"Args",
":",
"v",
":",
"(",
"8",
")",
"array",
"state",
"vector",
"t",
":",
"number",
"scalar",
"time",
"Returns",
":",
"(",
"8",
")",
"array"
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/models/neural_mass.py#L79-L105 | train |
mattja/nsim | nsim/models/neural_mass.py | JansenRit.G | def G(self, v, t):
"""Aburn2012 equations right hand side, noise term
Args:
v: (8,) array
state vector
t: number
scalar time
Returns:
(8,1) array
Only one matrix column, meaning that in this example we are modelling
the noise input to pyramidal and spiny populations as fully
correlated. To simulate uncorrelated inputs instead, use an array of
shape (8, 2) with the second noise element [5,1] instead of [5,0].
"""
ret = np.zeros((8, 1))
ret[4,0] = self.ke1 * self.He1 * self.u_sdev
ret[5,0] = self.ke2 * self.He2 * self.p_sdev
return ret | python | def G(self, v, t):
"""Aburn2012 equations right hand side, noise term
Args:
v: (8,) array
state vector
t: number
scalar time
Returns:
(8,1) array
Only one matrix column, meaning that in this example we are modelling
the noise input to pyramidal and spiny populations as fully
correlated. To simulate uncorrelated inputs instead, use an array of
shape (8, 2) with the second noise element [5,1] instead of [5,0].
"""
ret = np.zeros((8, 1))
ret[4,0] = self.ke1 * self.He1 * self.u_sdev
ret[5,0] = self.ke2 * self.He2 * self.p_sdev
return ret | [
"def",
"G",
"(",
"self",
",",
"v",
",",
"t",
")",
":",
"ret",
"=",
"np",
".",
"zeros",
"(",
"(",
"8",
",",
"1",
")",
")",
"ret",
"[",
"4",
",",
"0",
"]",
"=",
"self",
".",
"ke1",
"*",
"self",
".",
"He1",
"*",
"self",
".",
"u_sdev",
"ret... | Aburn2012 equations right hand side, noise term
Args:
v: (8,) array
state vector
t: number
scalar time
Returns:
(8,1) array
Only one matrix column, meaning that in this example we are modelling
the noise input to pyramidal and spiny populations as fully
correlated. To simulate uncorrelated inputs instead, use an array of
shape (8, 2) with the second noise element [5,1] instead of [5,0]. | [
"Aburn2012",
"equations",
"right",
"hand",
"side",
"noise",
"term",
"Args",
":",
"v",
":",
"(",
"8",
")",
"array",
"state",
"vector",
"t",
":",
"number",
"scalar",
"time",
"Returns",
":",
"(",
"8",
"1",
")",
"array",
"Only",
"one",
"matrix",
"column",
... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/models/neural_mass.py#L107-L124 | train |
mattja/nsim | nsim/models/neural_mass.py | JansenRit.coupling | def coupling(self, source_y, target_y, weight):
"""How to couple the output of one node to the input of another.
Args:
source_y (array of shape (8,)): state of the source node
target_y (array of shape (8,)): state of the target node
weight (float): the connection strength
Returns:
input (array of shape (8,)): value to drive each variable of the
target node.
"""
v_pyramidal = source_y[1] - source_y[2]
return (np.array([0, 0, 0, 0, 0, 1.0, 0, 0]) *
(weight*self.g1*self.He2*self.ke2*self.S(v_pyramidal))) | python | def coupling(self, source_y, target_y, weight):
"""How to couple the output of one node to the input of another.
Args:
source_y (array of shape (8,)): state of the source node
target_y (array of shape (8,)): state of the target node
weight (float): the connection strength
Returns:
input (array of shape (8,)): value to drive each variable of the
target node.
"""
v_pyramidal = source_y[1] - source_y[2]
return (np.array([0, 0, 0, 0, 0, 1.0, 0, 0]) *
(weight*self.g1*self.He2*self.ke2*self.S(v_pyramidal))) | [
"def",
"coupling",
"(",
"self",
",",
"source_y",
",",
"target_y",
",",
"weight",
")",
":",
"v_pyramidal",
"=",
"source_y",
"[",
"1",
"]",
"-",
"source_y",
"[",
"2",
"]",
"return",
"(",
"np",
".",
"array",
"(",
"[",
"0",
",",
"0",
",",
"0",
",",
... | How to couple the output of one node to the input of another.
Args:
source_y (array of shape (8,)): state of the source node
target_y (array of shape (8,)): state of the target node
weight (float): the connection strength
Returns:
input (array of shape (8,)): value to drive each variable of the
target node. | [
"How",
"to",
"couple",
"the",
"output",
"of",
"one",
"node",
"to",
"the",
"input",
"of",
"another",
".",
"Args",
":",
"source_y",
"(",
"array",
"of",
"shape",
"(",
"8",
"))",
":",
"state",
"of",
"the",
"source",
"node",
"target_y",
"(",
"array",
"of"... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/models/neural_mass.py#L126-L138 | train |
mattja/nsim | nsim/analyses1/pyeeg.py | hurst | def hurst(X):
""" Compute the Hurst exponent of X. If the output H=0.5,the behavior
of the time-series is similar to random walk. If H<0.5, the time-series
cover less "distance" than a random walk, vice verse.
Parameters
----------
X
list
a time series
Returns
-------
H
float
Hurst exponent
Notes
--------
Author of this function is Xin Liu
Examples
--------
>>> import pyeeg
>>> from numpy.random import randn
>>> a = randn(4096)
>>> pyeeg.hurst(a)
0.5057444
"""
X = numpy.array(X)
N = X.size
T = numpy.arange(1, N + 1)
Y = numpy.cumsum(X)
Ave_T = Y / T
S_T = numpy.zeros(N)
R_T = numpy.zeros(N)
for i in range(N):
S_T[i] = numpy.std(X[:i + 1])
X_T = Y - T * Ave_T[i]
R_T[i] = numpy.ptp(X_T[:i + 1])
R_S = R_T / S_T
R_S = numpy.log(R_S)[1:]
n = numpy.log(T)[1:]
A = numpy.column_stack((n, numpy.ones(n.size)))
[m, c] = numpy.linalg.lstsq(A, R_S)[0]
H = m
return H | python | def hurst(X):
""" Compute the Hurst exponent of X. If the output H=0.5,the behavior
of the time-series is similar to random walk. If H<0.5, the time-series
cover less "distance" than a random walk, vice verse.
Parameters
----------
X
list
a time series
Returns
-------
H
float
Hurst exponent
Notes
--------
Author of this function is Xin Liu
Examples
--------
>>> import pyeeg
>>> from numpy.random import randn
>>> a = randn(4096)
>>> pyeeg.hurst(a)
0.5057444
"""
X = numpy.array(X)
N = X.size
T = numpy.arange(1, N + 1)
Y = numpy.cumsum(X)
Ave_T = Y / T
S_T = numpy.zeros(N)
R_T = numpy.zeros(N)
for i in range(N):
S_T[i] = numpy.std(X[:i + 1])
X_T = Y - T * Ave_T[i]
R_T[i] = numpy.ptp(X_T[:i + 1])
R_S = R_T / S_T
R_S = numpy.log(R_S)[1:]
n = numpy.log(T)[1:]
A = numpy.column_stack((n, numpy.ones(n.size)))
[m, c] = numpy.linalg.lstsq(A, R_S)[0]
H = m
return H | [
"def",
"hurst",
"(",
"X",
")",
":",
"X",
"=",
"numpy",
".",
"array",
"(",
"X",
")",
"N",
"=",
"X",
".",
"size",
"T",
"=",
"numpy",
".",
"arange",
"(",
"1",
",",
"N",
"+",
"1",
")",
"Y",
"=",
"numpy",
".",
"cumsum",
"(",
"X",
")",
"Ave_T",... | Compute the Hurst exponent of X. If the output H=0.5,the behavior
of the time-series is similar to random walk. If H<0.5, the time-series
cover less "distance" than a random walk, vice verse.
Parameters
----------
X
list
a time series
Returns
-------
H
float
Hurst exponent
Notes
--------
Author of this function is Xin Liu
Examples
--------
>>> import pyeeg
>>> from numpy.random import randn
>>> a = randn(4096)
>>> pyeeg.hurst(a)
0.5057444 | [
"Compute",
"the",
"Hurst",
"exponent",
"of",
"X",
".",
"If",
"the",
"output",
"H",
"=",
"0",
".",
"5",
"the",
"behavior",
"of",
"the",
"time",
"-",
"series",
"is",
"similar",
"to",
"random",
"walk",
".",
"If",
"H<0",
".",
"5",
"the",
"time",
"-",
... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/pyeeg.py#L40-L96 | train |
mattja/nsim | nsim/analyses1/pyeeg.py | embed_seq | def embed_seq(X, Tau, D):
"""Build a set of embedding sequences from given time series X with lag Tau
and embedding dimension DE. Let X = [x(1), x(2), ... , x(N)], then for each
i such that 1 < i < N - (D - 1) * Tau, we build an embedding sequence,
Y(i) = [x(i), x(i + Tau), ... , x(i + (D - 1) * Tau)]. All embedding
sequence are placed in a matrix Y.
Parameters
----------
X
list
a time series
Tau
integer
the lag or delay when building embedding sequence
D
integer
the embedding dimension
Returns
-------
Y
2-D list
embedding matrix built
Examples
---------------
>>> import pyeeg
>>> a=range(0,9)
>>> pyeeg.embed_seq(a,1,4)
array([[ 0., 1., 2., 3.],
[ 1., 2., 3., 4.],
[ 2., 3., 4., 5.],
[ 3., 4., 5., 6.],
[ 4., 5., 6., 7.],
[ 5., 6., 7., 8.]])
>>> pyeeg.embed_seq(a,2,3)
array([[ 0., 2., 4.],
[ 1., 3., 5.],
[ 2., 4., 6.],
[ 3., 5., 7.],
[ 4., 6., 8.]])
>>> pyeeg.embed_seq(a,4,1)
array([[ 0.],
[ 1.],
[ 2.],
[ 3.],
[ 4.],
[ 5.],
[ 6.],
[ 7.],
[ 8.]])
"""
shape = (X.size - Tau * (D - 1), D)
strides = (X.itemsize, Tau * X.itemsize)
return numpy.lib.stride_tricks.as_strided(X, shape=shape, strides=strides) | python | def embed_seq(X, Tau, D):
"""Build a set of embedding sequences from given time series X with lag Tau
and embedding dimension DE. Let X = [x(1), x(2), ... , x(N)], then for each
i such that 1 < i < N - (D - 1) * Tau, we build an embedding sequence,
Y(i) = [x(i), x(i + Tau), ... , x(i + (D - 1) * Tau)]. All embedding
sequence are placed in a matrix Y.
Parameters
----------
X
list
a time series
Tau
integer
the lag or delay when building embedding sequence
D
integer
the embedding dimension
Returns
-------
Y
2-D list
embedding matrix built
Examples
---------------
>>> import pyeeg
>>> a=range(0,9)
>>> pyeeg.embed_seq(a,1,4)
array([[ 0., 1., 2., 3.],
[ 1., 2., 3., 4.],
[ 2., 3., 4., 5.],
[ 3., 4., 5., 6.],
[ 4., 5., 6., 7.],
[ 5., 6., 7., 8.]])
>>> pyeeg.embed_seq(a,2,3)
array([[ 0., 2., 4.],
[ 1., 3., 5.],
[ 2., 4., 6.],
[ 3., 5., 7.],
[ 4., 6., 8.]])
>>> pyeeg.embed_seq(a,4,1)
array([[ 0.],
[ 1.],
[ 2.],
[ 3.],
[ 4.],
[ 5.],
[ 6.],
[ 7.],
[ 8.]])
"""
shape = (X.size - Tau * (D - 1), D)
strides = (X.itemsize, Tau * X.itemsize)
return numpy.lib.stride_tricks.as_strided(X, shape=shape, strides=strides) | [
"def",
"embed_seq",
"(",
"X",
",",
"Tau",
",",
"D",
")",
":",
"shape",
"=",
"(",
"X",
".",
"size",
"-",
"Tau",
"*",
"(",
"D",
"-",
"1",
")",
",",
"D",
")",
"strides",
"=",
"(",
"X",
".",
"itemsize",
",",
"Tau",
"*",
"X",
".",
"itemsize",
... | Build a set of embedding sequences from given time series X with lag Tau
and embedding dimension DE. Let X = [x(1), x(2), ... , x(N)], then for each
i such that 1 < i < N - (D - 1) * Tau, we build an embedding sequence,
Y(i) = [x(i), x(i + Tau), ... , x(i + (D - 1) * Tau)]. All embedding
sequence are placed in a matrix Y.
Parameters
----------
X
list
a time series
Tau
integer
the lag or delay when building embedding sequence
D
integer
the embedding dimension
Returns
-------
Y
2-D list
embedding matrix built
Examples
---------------
>>> import pyeeg
>>> a=range(0,9)
>>> pyeeg.embed_seq(a,1,4)
array([[ 0., 1., 2., 3.],
[ 1., 2., 3., 4.],
[ 2., 3., 4., 5.],
[ 3., 4., 5., 6.],
[ 4., 5., 6., 7.],
[ 5., 6., 7., 8.]])
>>> pyeeg.embed_seq(a,2,3)
array([[ 0., 2., 4.],
[ 1., 3., 5.],
[ 2., 4., 6.],
[ 3., 5., 7.],
[ 4., 6., 8.]])
>>> pyeeg.embed_seq(a,4,1)
array([[ 0.],
[ 1.],
[ 2.],
[ 3.],
[ 4.],
[ 5.],
[ 6.],
[ 7.],
[ 8.]]) | [
"Build",
"a",
"set",
"of",
"embedding",
"sequences",
"from",
"given",
"time",
"series",
"X",
"with",
"lag",
"Tau",
"and",
"embedding",
"dimension",
"DE",
".",
"Let",
"X",
"=",
"[",
"x",
"(",
"1",
")",
"x",
"(",
"2",
")",
"...",
"x",
"(",
"N",
")"... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/pyeeg.py#L99-L163 | train |
mattja/nsim | nsim/analyses1/pyeeg.py | bin_power | def bin_power(X, Band, Fs):
"""Compute power in each frequency bin specified by Band from FFT result of
X. By default, X is a real signal.
Note
-----
A real signal can be synthesized, thus not real.
Parameters
-----------
Band
list
boundary frequencies (in Hz) of bins. They can be unequal bins, e.g.
[0.5,4,7,12,30] which are delta, theta, alpha and beta respectively.
You can also use range() function of Python to generate equal bins and
pass the generated list to this function.
Each element of Band is a physical frequency and shall not exceed the
Nyquist frequency, i.e., half of sampling frequency.
X
list
a 1-D real time series.
Fs
integer
the sampling rate in physical frequency
Returns
-------
Power
list
spectral power in each frequency bin.
Power_ratio
list
spectral power in each frequency bin normalized by total power in ALL
frequency bins.
"""
C = numpy.fft.fft(X)
C = abs(C)
Power = numpy.zeros(len(Band) - 1)
for Freq_Index in range(0, len(Band) - 1):
Freq = float(Band[Freq_Index])
Next_Freq = float(Band[Freq_Index + 1])
Power[Freq_Index] = sum(
C[numpy.floor(
Freq / Fs * len(X)
): numpy.floor(Next_Freq / Fs * len(X))]
)
Power_Ratio = Power / sum(Power)
return Power, Power_Ratio | python | def bin_power(X, Band, Fs):
"""Compute power in each frequency bin specified by Band from FFT result of
X. By default, X is a real signal.
Note
-----
A real signal can be synthesized, thus not real.
Parameters
-----------
Band
list
boundary frequencies (in Hz) of bins. They can be unequal bins, e.g.
[0.5,4,7,12,30] which are delta, theta, alpha and beta respectively.
You can also use range() function of Python to generate equal bins and
pass the generated list to this function.
Each element of Band is a physical frequency and shall not exceed the
Nyquist frequency, i.e., half of sampling frequency.
X
list
a 1-D real time series.
Fs
integer
the sampling rate in physical frequency
Returns
-------
Power
list
spectral power in each frequency bin.
Power_ratio
list
spectral power in each frequency bin normalized by total power in ALL
frequency bins.
"""
C = numpy.fft.fft(X)
C = abs(C)
Power = numpy.zeros(len(Band) - 1)
for Freq_Index in range(0, len(Band) - 1):
Freq = float(Band[Freq_Index])
Next_Freq = float(Band[Freq_Index + 1])
Power[Freq_Index] = sum(
C[numpy.floor(
Freq / Fs * len(X)
): numpy.floor(Next_Freq / Fs * len(X))]
)
Power_Ratio = Power / sum(Power)
return Power, Power_Ratio | [
"def",
"bin_power",
"(",
"X",
",",
"Band",
",",
"Fs",
")",
":",
"C",
"=",
"numpy",
".",
"fft",
".",
"fft",
"(",
"X",
")",
"C",
"=",
"abs",
"(",
"C",
")",
"Power",
"=",
"numpy",
".",
"zeros",
"(",
"len",
"(",
"Band",
")",
"-",
"1",
")",
"f... | Compute power in each frequency bin specified by Band from FFT result of
X. By default, X is a real signal.
Note
-----
A real signal can be synthesized, thus not real.
Parameters
-----------
Band
list
boundary frequencies (in Hz) of bins. They can be unequal bins, e.g.
[0.5,4,7,12,30] which are delta, theta, alpha and beta respectively.
You can also use range() function of Python to generate equal bins and
pass the generated list to this function.
Each element of Band is a physical frequency and shall not exceed the
Nyquist frequency, i.e., half of sampling frequency.
X
list
a 1-D real time series.
Fs
integer
the sampling rate in physical frequency
Returns
-------
Power
list
spectral power in each frequency bin.
Power_ratio
list
spectral power in each frequency bin normalized by total power in ALL
frequency bins. | [
"Compute",
"power",
"in",
"each",
"frequency",
"bin",
"specified",
"by",
"Band",
"from",
"FFT",
"result",
"of",
"X",
".",
"By",
"default",
"X",
"is",
"a",
"real",
"signal",
"."
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/pyeeg.py#L166-L226 | train |
mattja/nsim | nsim/analyses1/pyeeg.py | pfd | def pfd(X, D=None):
"""Compute Petrosian Fractal Dimension of a time series from either two
cases below:
1. X, the time series of type list (default)
2. D, the first order differential sequence of X (if D is provided,
recommended to speed up)
In case 1, D is computed using Numpy's difference function.
To speed up, it is recommended to compute D before calling this function
because D may also be used by other functions whereas computing it here
again will slow down.
"""
if D is None:
D = numpy.diff(X)
D = D.tolist()
N_delta = 0 # number of sign changes in derivative of the signal
for i in range(1, len(D)):
if D[i] * D[i - 1] < 0:
N_delta += 1
n = len(X)
return numpy.log10(n) / (
numpy.log10(n) + numpy.log10(n / n + 0.4 * N_delta)
) | python | def pfd(X, D=None):
"""Compute Petrosian Fractal Dimension of a time series from either two
cases below:
1. X, the time series of type list (default)
2. D, the first order differential sequence of X (if D is provided,
recommended to speed up)
In case 1, D is computed using Numpy's difference function.
To speed up, it is recommended to compute D before calling this function
because D may also be used by other functions whereas computing it here
again will slow down.
"""
if D is None:
D = numpy.diff(X)
D = D.tolist()
N_delta = 0 # number of sign changes in derivative of the signal
for i in range(1, len(D)):
if D[i] * D[i - 1] < 0:
N_delta += 1
n = len(X)
return numpy.log10(n) / (
numpy.log10(n) + numpy.log10(n / n + 0.4 * N_delta)
) | [
"def",
"pfd",
"(",
"X",
",",
"D",
"=",
"None",
")",
":",
"if",
"D",
"is",
"None",
":",
"D",
"=",
"numpy",
".",
"diff",
"(",
"X",
")",
"D",
"=",
"D",
".",
"tolist",
"(",
")",
"N_delta",
"=",
"0",
"# number of sign changes in derivative of the signal",... | Compute Petrosian Fractal Dimension of a time series from either two
cases below:
1. X, the time series of type list (default)
2. D, the first order differential sequence of X (if D is provided,
recommended to speed up)
In case 1, D is computed using Numpy's difference function.
To speed up, it is recommended to compute D before calling this function
because D may also be used by other functions whereas computing it here
again will slow down. | [
"Compute",
"Petrosian",
"Fractal",
"Dimension",
"of",
"a",
"time",
"series",
"from",
"either",
"two",
"cases",
"below",
":",
"1",
".",
"X",
"the",
"time",
"series",
"of",
"type",
"list",
"(",
"default",
")",
"2",
".",
"D",
"the",
"first",
"order",
"dif... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/pyeeg.py#L229-L252 | train |
mattja/nsim | nsim/analyses1/pyeeg.py | hfd | def hfd(X, Kmax):
""" Compute Hjorth Fractal Dimension of a time series X, kmax
is an HFD parameter
"""
L = []
x = []
N = len(X)
for k in range(1, Kmax):
Lk = []
for m in range(0, k):
Lmk = 0
for i in range(1, int(numpy.floor((N - m) / k))):
Lmk += abs(X[m + i * k] - X[m + i * k - k])
Lmk = Lmk * (N - 1) / numpy.floor((N - m) / float(k)) / k
Lk.append(Lmk)
L.append(numpy.log(numpy.mean(Lk)))
x.append([numpy.log(float(1) / k), 1])
(p, r1, r2, s) = numpy.linalg.lstsq(x, L)
return p[0] | python | def hfd(X, Kmax):
""" Compute Hjorth Fractal Dimension of a time series X, kmax
is an HFD parameter
"""
L = []
x = []
N = len(X)
for k in range(1, Kmax):
Lk = []
for m in range(0, k):
Lmk = 0
for i in range(1, int(numpy.floor((N - m) / k))):
Lmk += abs(X[m + i * k] - X[m + i * k - k])
Lmk = Lmk * (N - 1) / numpy.floor((N - m) / float(k)) / k
Lk.append(Lmk)
L.append(numpy.log(numpy.mean(Lk)))
x.append([numpy.log(float(1) / k), 1])
(p, r1, r2, s) = numpy.linalg.lstsq(x, L)
return p[0] | [
"def",
"hfd",
"(",
"X",
",",
"Kmax",
")",
":",
"L",
"=",
"[",
"]",
"x",
"=",
"[",
"]",
"N",
"=",
"len",
"(",
"X",
")",
"for",
"k",
"in",
"range",
"(",
"1",
",",
"Kmax",
")",
":",
"Lk",
"=",
"[",
"]",
"for",
"m",
"in",
"range",
"(",
"0... | Compute Hjorth Fractal Dimension of a time series X, kmax
is an HFD parameter | [
"Compute",
"Hjorth",
"Fractal",
"Dimension",
"of",
"a",
"time",
"series",
"X",
"kmax",
"is",
"an",
"HFD",
"parameter"
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/pyeeg.py#L255-L274 | train |
mattja/nsim | nsim/analyses1/pyeeg.py | hjorth | def hjorth(X, D=None):
""" Compute Hjorth mobility and complexity of a time series from either two
cases below:
1. X, the time series of type list (default)
2. D, a first order differential sequence of X (if D is provided,
recommended to speed up)
In case 1, D is computed using Numpy's Difference function.
Notes
-----
To speed up, it is recommended to compute D before calling this function
because D may also be used by other functions whereas computing it here
again will slow down.
Parameters
----------
X
list
a time series
D
list
first order differential sequence of a time series
Returns
-------
As indicated in return line
Hjorth mobility and complexity
"""
if D is None:
D = numpy.diff(X)
D = D.tolist()
D.insert(0, X[0]) # pad the first difference
D = numpy.array(D)
n = len(X)
M2 = float(sum(D ** 2)) / n
TP = sum(numpy.array(X) ** 2)
M4 = 0
for i in range(1, len(D)):
M4 += (D[i] - D[i - 1]) ** 2
M4 = M4 / n
return numpy.sqrt(M2 / TP), numpy.sqrt(
float(M4) * TP / M2 / M2
) | python | def hjorth(X, D=None):
""" Compute Hjorth mobility and complexity of a time series from either two
cases below:
1. X, the time series of type list (default)
2. D, a first order differential sequence of X (if D is provided,
recommended to speed up)
In case 1, D is computed using Numpy's Difference function.
Notes
-----
To speed up, it is recommended to compute D before calling this function
because D may also be used by other functions whereas computing it here
again will slow down.
Parameters
----------
X
list
a time series
D
list
first order differential sequence of a time series
Returns
-------
As indicated in return line
Hjorth mobility and complexity
"""
if D is None:
D = numpy.diff(X)
D = D.tolist()
D.insert(0, X[0]) # pad the first difference
D = numpy.array(D)
n = len(X)
M2 = float(sum(D ** 2)) / n
TP = sum(numpy.array(X) ** 2)
M4 = 0
for i in range(1, len(D)):
M4 += (D[i] - D[i - 1]) ** 2
M4 = M4 / n
return numpy.sqrt(M2 / TP), numpy.sqrt(
float(M4) * TP / M2 / M2
) | [
"def",
"hjorth",
"(",
"X",
",",
"D",
"=",
"None",
")",
":",
"if",
"D",
"is",
"None",
":",
"D",
"=",
"numpy",
".",
"diff",
"(",
"X",
")",
"D",
"=",
"D",
".",
"tolist",
"(",
")",
"D",
".",
"insert",
"(",
"0",
",",
"X",
"[",
"0",
"]",
")",... | Compute Hjorth mobility and complexity of a time series from either two
cases below:
1. X, the time series of type list (default)
2. D, a first order differential sequence of X (if D is provided,
recommended to speed up)
In case 1, D is computed using Numpy's Difference function.
Notes
-----
To speed up, it is recommended to compute D before calling this function
because D may also be used by other functions whereas computing it here
again will slow down.
Parameters
----------
X
list
a time series
D
list
first order differential sequence of a time series
Returns
-------
As indicated in return line
Hjorth mobility and complexity | [
"Compute",
"Hjorth",
"mobility",
"and",
"complexity",
"of",
"a",
"time",
"series",
"from",
"either",
"two",
"cases",
"below",
":",
"1",
".",
"X",
"the",
"time",
"series",
"of",
"type",
"list",
"(",
"default",
")",
"2",
".",
"D",
"a",
"first",
"order",
... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/pyeeg.py#L277-L332 | train |
mattja/nsim | nsim/analyses1/pyeeg.py | spectral_entropy | def spectral_entropy(X, Band, Fs, Power_Ratio=None):
"""Compute spectral entropy of a time series from either two cases below:
1. X, the time series (default)
2. Power_Ratio, a list of normalized signal power in a set of frequency
bins defined in Band (if Power_Ratio is provided, recommended to speed up)
In case 1, Power_Ratio is computed by bin_power() function.
Notes
-----
To speed up, it is recommended to compute Power_Ratio before calling this
function because it may also be used by other functions whereas computing
it here again will slow down.
Parameters
----------
Band
list
boundary frequencies (in Hz) of bins. They can be unequal bins, e.g.
[0.5,4,7,12,30] which are delta, theta, alpha and beta respectively.
You can also use range() function of Python to generate equal bins and
pass the generated list to this function.
Each element of Band is a physical frequency and shall not exceed the
Nyquist frequency, i.e., half of sampling frequency.
X
list
a 1-D real time series.
Fs
integer
the sampling rate in physical frequency
Returns
-------
As indicated in return line
See Also
--------
bin_power: pyeeg function that computes spectral power in frequency bins
"""
if Power_Ratio is None:
Power, Power_Ratio = bin_power(X, Band, Fs)
Spectral_Entropy = 0
for i in range(0, len(Power_Ratio) - 1):
Spectral_Entropy += Power_Ratio[i] * numpy.log(Power_Ratio[i])
Spectral_Entropy /= numpy.log(
len(Power_Ratio)
) # to save time, minus one is omitted
return -1 * Spectral_Entropy | python | def spectral_entropy(X, Band, Fs, Power_Ratio=None):
"""Compute spectral entropy of a time series from either two cases below:
1. X, the time series (default)
2. Power_Ratio, a list of normalized signal power in a set of frequency
bins defined in Band (if Power_Ratio is provided, recommended to speed up)
In case 1, Power_Ratio is computed by bin_power() function.
Notes
-----
To speed up, it is recommended to compute Power_Ratio before calling this
function because it may also be used by other functions whereas computing
it here again will slow down.
Parameters
----------
Band
list
boundary frequencies (in Hz) of bins. They can be unequal bins, e.g.
[0.5,4,7,12,30] which are delta, theta, alpha and beta respectively.
You can also use range() function of Python to generate equal bins and
pass the generated list to this function.
Each element of Band is a physical frequency and shall not exceed the
Nyquist frequency, i.e., half of sampling frequency.
X
list
a 1-D real time series.
Fs
integer
the sampling rate in physical frequency
Returns
-------
As indicated in return line
See Also
--------
bin_power: pyeeg function that computes spectral power in frequency bins
"""
if Power_Ratio is None:
Power, Power_Ratio = bin_power(X, Band, Fs)
Spectral_Entropy = 0
for i in range(0, len(Power_Ratio) - 1):
Spectral_Entropy += Power_Ratio[i] * numpy.log(Power_Ratio[i])
Spectral_Entropy /= numpy.log(
len(Power_Ratio)
) # to save time, minus one is omitted
return -1 * Spectral_Entropy | [
"def",
"spectral_entropy",
"(",
"X",
",",
"Band",
",",
"Fs",
",",
"Power_Ratio",
"=",
"None",
")",
":",
"if",
"Power_Ratio",
"is",
"None",
":",
"Power",
",",
"Power_Ratio",
"=",
"bin_power",
"(",
"X",
",",
"Band",
",",
"Fs",
")",
"Spectral_Entropy",
"=... | Compute spectral entropy of a time series from either two cases below:
1. X, the time series (default)
2. Power_Ratio, a list of normalized signal power in a set of frequency
bins defined in Band (if Power_Ratio is provided, recommended to speed up)
In case 1, Power_Ratio is computed by bin_power() function.
Notes
-----
To speed up, it is recommended to compute Power_Ratio before calling this
function because it may also be used by other functions whereas computing
it here again will slow down.
Parameters
----------
Band
list
boundary frequencies (in Hz) of bins. They can be unequal bins, e.g.
[0.5,4,7,12,30] which are delta, theta, alpha and beta respectively.
You can also use range() function of Python to generate equal bins and
pass the generated list to this function.
Each element of Band is a physical frequency and shall not exceed the
Nyquist frequency, i.e., half of sampling frequency.
X
list
a 1-D real time series.
Fs
integer
the sampling rate in physical frequency
Returns
-------
As indicated in return line
See Also
--------
bin_power: pyeeg function that computes spectral power in frequency bins | [
"Compute",
"spectral",
"entropy",
"of",
"a",
"time",
"series",
"from",
"either",
"two",
"cases",
"below",
":",
"1",
".",
"X",
"the",
"time",
"series",
"(",
"default",
")",
"2",
".",
"Power_Ratio",
"a",
"list",
"of",
"normalized",
"signal",
"power",
"in",... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/pyeeg.py#L335-L393 | train |
mattja/nsim | nsim/analyses1/pyeeg.py | svd_entropy | def svd_entropy(X, Tau, DE, W=None):
"""Compute SVD Entropy from either two cases below:
1. a time series X, with lag tau and embedding dimension dE (default)
2. a list, W, of normalized singular values of a matrix (if W is provided,
recommend to speed up.)
If W is None, the function will do as follows to prepare singular spectrum:
First, computer an embedding matrix from X, Tau and DE using pyeeg
function embed_seq():
M = embed_seq(X, Tau, DE)
Second, use scipy.linalg function svd to decompose the embedding matrix
M and obtain a list of singular values:
W = svd(M, compute_uv=0)
At last, normalize W:
W /= sum(W)
Notes
-------------
To speed up, it is recommended to compute W before calling this function
because W may also be used by other functions whereas computing it here
again will slow down.
"""
if W is None:
Y = embed_seq(X, Tau, DE)
W = numpy.linalg.svd(Y, compute_uv=0)
W /= sum(W) # normalize singular values
return -1 * sum(W * numpy.log(W)) | python | def svd_entropy(X, Tau, DE, W=None):
"""Compute SVD Entropy from either two cases below:
1. a time series X, with lag tau and embedding dimension dE (default)
2. a list, W, of normalized singular values of a matrix (if W is provided,
recommend to speed up.)
If W is None, the function will do as follows to prepare singular spectrum:
First, computer an embedding matrix from X, Tau and DE using pyeeg
function embed_seq():
M = embed_seq(X, Tau, DE)
Second, use scipy.linalg function svd to decompose the embedding matrix
M and obtain a list of singular values:
W = svd(M, compute_uv=0)
At last, normalize W:
W /= sum(W)
Notes
-------------
To speed up, it is recommended to compute W before calling this function
because W may also be used by other functions whereas computing it here
again will slow down.
"""
if W is None:
Y = embed_seq(X, Tau, DE)
W = numpy.linalg.svd(Y, compute_uv=0)
W /= sum(W) # normalize singular values
return -1 * sum(W * numpy.log(W)) | [
"def",
"svd_entropy",
"(",
"X",
",",
"Tau",
",",
"DE",
",",
"W",
"=",
"None",
")",
":",
"if",
"W",
"is",
"None",
":",
"Y",
"=",
"embed_seq",
"(",
"X",
",",
"Tau",
",",
"DE",
")",
"W",
"=",
"numpy",
".",
"linalg",
".",
"svd",
"(",
"Y",
",",
... | Compute SVD Entropy from either two cases below:
1. a time series X, with lag tau and embedding dimension dE (default)
2. a list, W, of normalized singular values of a matrix (if W is provided,
recommend to speed up.)
If W is None, the function will do as follows to prepare singular spectrum:
First, computer an embedding matrix from X, Tau and DE using pyeeg
function embed_seq():
M = embed_seq(X, Tau, DE)
Second, use scipy.linalg function svd to decompose the embedding matrix
M and obtain a list of singular values:
W = svd(M, compute_uv=0)
At last, normalize W:
W /= sum(W)
Notes
-------------
To speed up, it is recommended to compute W before calling this function
because W may also be used by other functions whereas computing it here
again will slow down. | [
"Compute",
"SVD",
"Entropy",
"from",
"either",
"two",
"cases",
"below",
":",
"1",
".",
"a",
"time",
"series",
"X",
"with",
"lag",
"tau",
"and",
"embedding",
"dimension",
"dE",
"(",
"default",
")",
"2",
".",
"a",
"list",
"W",
"of",
"normalized",
"singul... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/pyeeg.py#L396-L428 | train |
mattja/nsim | nsim/analyses1/pyeeg.py | ap_entropy | def ap_entropy(X, M, R):
"""Computer approximate entropy (ApEN) of series X, specified by M and R.
Suppose given time series is X = [x(1), x(2), ... , x(N)]. We first build
embedding matrix Em, of dimension (N-M+1)-by-M, such that the i-th row of
Em is x(i),x(i+1), ... , x(i+M-1). Hence, the embedding lag and dimension
are 1 and M-1 respectively. Such a matrix can be built by calling pyeeg
function as Em = embed_seq(X, 1, M). Then we build matrix Emp, whose only
difference with Em is that the length of each embedding sequence is M + 1
Denote the i-th and j-th row of Em as Em[i] and Em[j]. Their k-th elements
are Em[i][k] and Em[j][k] respectively. The distance between Em[i] and
Em[j] is defined as 1) the maximum difference of their corresponding scalar
components, thus, max(Em[i]-Em[j]), or 2) Euclidean distance. We say two
1-D vectors Em[i] and Em[j] *match* in *tolerance* R, if the distance
between them is no greater than R, thus, max(Em[i]-Em[j]) <= R. Mostly, the
value of R is defined as 20% - 30% of standard deviation of X.
Pick Em[i] as a template, for all j such that 0 < j < N - M + 1, we can
check whether Em[j] matches with Em[i]. Denote the number of Em[j],
which is in the range of Em[i], as k[i], which is the i-th element of the
vector k. The probability that a random row in Em matches Em[i] is
\simga_1^{N-M+1} k[i] / (N - M + 1), thus sum(k)/ (N - M + 1),
denoted as Cm[i].
We repeat the same process on Emp and obtained Cmp[i], but here 0<i<N-M
since the length of each sequence in Emp is M + 1.
The probability that any two embedding sequences in Em match is then
sum(Cm)/ (N - M +1 ). We define Phi_m = sum(log(Cm)) / (N - M + 1) and
Phi_mp = sum(log(Cmp)) / (N - M ).
And the ApEn is defined as Phi_m - Phi_mp.
Notes
-----
Please be aware that self-match is also counted in ApEn.
References
----------
Costa M, Goldberger AL, Peng CK, Multiscale entropy analysis of biological
signals, Physical Review E, 71:021906, 2005
See also
--------
samp_entropy: sample entropy of a time series
"""
N = len(X)
Em = embed_seq(X, 1, M)
A = numpy.tile(Em, (len(Em), 1, 1))
B = numpy.transpose(A, [1, 0, 2])
D = numpy.abs(A - B) # D[i,j,k] = |Em[i][k] - Em[j][k]|
InRange = numpy.max(D, axis=2) <= R
Cm = InRange.mean(axis=0) # Probability that random M-sequences are in range
# M+1-sequences in range iff M-sequences are in range & last values are close
Dp = numpy.abs(numpy.tile(X[M:], (N - M, 1)) - numpy.tile(X[M:], (N - M, 1)).T)
Cmp = numpy.logical_and(Dp <= R, InRange[:-1, :-1]).mean(axis=0)
# Uncomment for old (miscounted) version
#Cm += 1 / (N - M +1); Cm[-1] -= 1 / (N - M + 1)
#Cmp += 1 / (N - M)
Phi_m, Phi_mp = numpy.sum(numpy.log(Cm)), numpy.sum(numpy.log(Cmp))
Ap_En = (Phi_m - Phi_mp) / (N - M)
return Ap_En | python | def ap_entropy(X, M, R):
"""Computer approximate entropy (ApEN) of series X, specified by M and R.
Suppose given time series is X = [x(1), x(2), ... , x(N)]. We first build
embedding matrix Em, of dimension (N-M+1)-by-M, such that the i-th row of
Em is x(i),x(i+1), ... , x(i+M-1). Hence, the embedding lag and dimension
are 1 and M-1 respectively. Such a matrix can be built by calling pyeeg
function as Em = embed_seq(X, 1, M). Then we build matrix Emp, whose only
difference with Em is that the length of each embedding sequence is M + 1
Denote the i-th and j-th row of Em as Em[i] and Em[j]. Their k-th elements
are Em[i][k] and Em[j][k] respectively. The distance between Em[i] and
Em[j] is defined as 1) the maximum difference of their corresponding scalar
components, thus, max(Em[i]-Em[j]), or 2) Euclidean distance. We say two
1-D vectors Em[i] and Em[j] *match* in *tolerance* R, if the distance
between them is no greater than R, thus, max(Em[i]-Em[j]) <= R. Mostly, the
value of R is defined as 20% - 30% of standard deviation of X.
Pick Em[i] as a template, for all j such that 0 < j < N - M + 1, we can
check whether Em[j] matches with Em[i]. Denote the number of Em[j],
which is in the range of Em[i], as k[i], which is the i-th element of the
vector k. The probability that a random row in Em matches Em[i] is
\simga_1^{N-M+1} k[i] / (N - M + 1), thus sum(k)/ (N - M + 1),
denoted as Cm[i].
We repeat the same process on Emp and obtained Cmp[i], but here 0<i<N-M
since the length of each sequence in Emp is M + 1.
The probability that any two embedding sequences in Em match is then
sum(Cm)/ (N - M +1 ). We define Phi_m = sum(log(Cm)) / (N - M + 1) and
Phi_mp = sum(log(Cmp)) / (N - M ).
And the ApEn is defined as Phi_m - Phi_mp.
Notes
-----
Please be aware that self-match is also counted in ApEn.
References
----------
Costa M, Goldberger AL, Peng CK, Multiscale entropy analysis of biological
signals, Physical Review E, 71:021906, 2005
See also
--------
samp_entropy: sample entropy of a time series
"""
N = len(X)
Em = embed_seq(X, 1, M)
A = numpy.tile(Em, (len(Em), 1, 1))
B = numpy.transpose(A, [1, 0, 2])
D = numpy.abs(A - B) # D[i,j,k] = |Em[i][k] - Em[j][k]|
InRange = numpy.max(D, axis=2) <= R
Cm = InRange.mean(axis=0) # Probability that random M-sequences are in range
# M+1-sequences in range iff M-sequences are in range & last values are close
Dp = numpy.abs(numpy.tile(X[M:], (N - M, 1)) - numpy.tile(X[M:], (N - M, 1)).T)
Cmp = numpy.logical_and(Dp <= R, InRange[:-1, :-1]).mean(axis=0)
# Uncomment for old (miscounted) version
#Cm += 1 / (N - M +1); Cm[-1] -= 1 / (N - M + 1)
#Cmp += 1 / (N - M)
Phi_m, Phi_mp = numpy.sum(numpy.log(Cm)), numpy.sum(numpy.log(Cmp))
Ap_En = (Phi_m - Phi_mp) / (N - M)
return Ap_En | [
"def",
"ap_entropy",
"(",
"X",
",",
"M",
",",
"R",
")",
":",
"N",
"=",
"len",
"(",
"X",
")",
"Em",
"=",
"embed_seq",
"(",
"X",
",",
"1",
",",
"M",
")",
"A",
"=",
"numpy",
".",
"tile",
"(",
"Em",
",",
"(",
"len",
"(",
"Em",
")",
",",
"1"... | Computer approximate entropy (ApEN) of series X, specified by M and R.
Suppose given time series is X = [x(1), x(2), ... , x(N)]. We first build
embedding matrix Em, of dimension (N-M+1)-by-M, such that the i-th row of
Em is x(i),x(i+1), ... , x(i+M-1). Hence, the embedding lag and dimension
are 1 and M-1 respectively. Such a matrix can be built by calling pyeeg
function as Em = embed_seq(X, 1, M). Then we build matrix Emp, whose only
difference with Em is that the length of each embedding sequence is M + 1
Denote the i-th and j-th row of Em as Em[i] and Em[j]. Their k-th elements
are Em[i][k] and Em[j][k] respectively. The distance between Em[i] and
Em[j] is defined as 1) the maximum difference of their corresponding scalar
components, thus, max(Em[i]-Em[j]), or 2) Euclidean distance. We say two
1-D vectors Em[i] and Em[j] *match* in *tolerance* R, if the distance
between them is no greater than R, thus, max(Em[i]-Em[j]) <= R. Mostly, the
value of R is defined as 20% - 30% of standard deviation of X.
Pick Em[i] as a template, for all j such that 0 < j < N - M + 1, we can
check whether Em[j] matches with Em[i]. Denote the number of Em[j],
which is in the range of Em[i], as k[i], which is the i-th element of the
vector k. The probability that a random row in Em matches Em[i] is
\simga_1^{N-M+1} k[i] / (N - M + 1), thus sum(k)/ (N - M + 1),
denoted as Cm[i].
We repeat the same process on Emp and obtained Cmp[i], but here 0<i<N-M
since the length of each sequence in Emp is M + 1.
The probability that any two embedding sequences in Em match is then
sum(Cm)/ (N - M +1 ). We define Phi_m = sum(log(Cm)) / (N - M + 1) and
Phi_mp = sum(log(Cmp)) / (N - M ).
And the ApEn is defined as Phi_m - Phi_mp.
Notes
-----
Please be aware that self-match is also counted in ApEn.
References
----------
Costa M, Goldberger AL, Peng CK, Multiscale entropy analysis of biological
signals, Physical Review E, 71:021906, 2005
See also
--------
samp_entropy: sample entropy of a time series | [
"Computer",
"approximate",
"entropy",
"(",
"ApEN",
")",
"of",
"series",
"X",
"specified",
"by",
"M",
"and",
"R",
"."
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/pyeeg.py#L466-L536 | train |
mattja/nsim | nsim/analyses1/pyeeg.py | samp_entropy | def samp_entropy(X, M, R):
"""Computer sample entropy (SampEn) of series X, specified by M and R.
SampEn is very close to ApEn.
Suppose given time series is X = [x(1), x(2), ... , x(N)]. We first build
embedding matrix Em, of dimension (N-M+1)-by-M, such that the i-th row of
Em is x(i),x(i+1), ... , x(i+M-1). Hence, the embedding lag and dimension
are 1 and M-1 respectively. Such a matrix can be built by calling pyeeg
function as Em = embed_seq(X, 1, M). Then we build matrix Emp, whose only
difference with Em is that the length of each embedding sequence is M + 1
Denote the i-th and j-th row of Em as Em[i] and Em[j]. Their k-th elements
are Em[i][k] and Em[j][k] respectively. The distance between Em[i] and
Em[j] is defined as 1) the maximum difference of their corresponding scalar
components, thus, max(Em[i]-Em[j]), or 2) Euclidean distance. We say two
1-D vectors Em[i] and Em[j] *match* in *tolerance* R, if the distance
between them is no greater than R, thus, max(Em[i]-Em[j]) <= R. Mostly, the
value of R is defined as 20% - 30% of standard deviation of X.
Pick Em[i] as a template, for all j such that 0 < j < N - M , we can
check whether Em[j] matches with Em[i]. Denote the number of Em[j],
which is in the range of Em[i], as k[i], which is the i-th element of the
vector k.
We repeat the same process on Emp and obtained Cmp[i], 0 < i < N - M.
The SampEn is defined as log(sum(Cm)/sum(Cmp))
References
----------
Costa M, Goldberger AL, Peng C-K, Multiscale entropy analysis of biological
signals, Physical Review E, 71:021906, 2005
See also
--------
ap_entropy: approximate entropy of a time series
"""
N = len(X)
Em = embed_seq(X, 1, M)
A = numpy.tile(Em, (len(Em), 1, 1))
B = numpy.transpose(A, [1, 0, 2])
D = numpy.abs(A - B) # D[i,j,k] = |Em[i][k] - Em[j][k]|
InRange = numpy.max(D, axis=2) <= R
numpy.fill_diagonal(InRange, 0) # Don't count self-matches
Cm = InRange.sum(axis=0) # Probability that random M-sequences are in range
Dp = numpy.abs(numpy.tile(X[M:], (N - M, 1)) - numpy.tile(X[M:], (N - M, 1)).T)
Cmp = numpy.logical_and(Dp <= R, InRange[:-1,:-1]).sum(axis=0)
# Uncomment below for old (miscounted) version
#InRange[numpy.triu_indices(len(InRange))] = 0
#InRange = InRange[:-1,:-2]
#Cm = InRange.sum(axis=0) # Probability that random M-sequences are in range
#Dp = numpy.abs(numpy.tile(X[M:], (N - M, 1)) - numpy.tile(X[M:], (N - M, 1)).T)
#Dp = Dp[:,:-1]
#Cmp = numpy.logical_and(Dp <= R, InRange).sum(axis=0)
# Avoid taking log(0)
Samp_En = numpy.log(numpy.sum(Cm + 1e-100) / numpy.sum(Cmp + 1e-100))
return Samp_En | python | def samp_entropy(X, M, R):
"""Computer sample entropy (SampEn) of series X, specified by M and R.
SampEn is very close to ApEn.
Suppose given time series is X = [x(1), x(2), ... , x(N)]. We first build
embedding matrix Em, of dimension (N-M+1)-by-M, such that the i-th row of
Em is x(i),x(i+1), ... , x(i+M-1). Hence, the embedding lag and dimension
are 1 and M-1 respectively. Such a matrix can be built by calling pyeeg
function as Em = embed_seq(X, 1, M). Then we build matrix Emp, whose only
difference with Em is that the length of each embedding sequence is M + 1
Denote the i-th and j-th row of Em as Em[i] and Em[j]. Their k-th elements
are Em[i][k] and Em[j][k] respectively. The distance between Em[i] and
Em[j] is defined as 1) the maximum difference of their corresponding scalar
components, thus, max(Em[i]-Em[j]), or 2) Euclidean distance. We say two
1-D vectors Em[i] and Em[j] *match* in *tolerance* R, if the distance
between them is no greater than R, thus, max(Em[i]-Em[j]) <= R. Mostly, the
value of R is defined as 20% - 30% of standard deviation of X.
Pick Em[i] as a template, for all j such that 0 < j < N - M , we can
check whether Em[j] matches with Em[i]. Denote the number of Em[j],
which is in the range of Em[i], as k[i], which is the i-th element of the
vector k.
We repeat the same process on Emp and obtained Cmp[i], 0 < i < N - M.
The SampEn is defined as log(sum(Cm)/sum(Cmp))
References
----------
Costa M, Goldberger AL, Peng C-K, Multiscale entropy analysis of biological
signals, Physical Review E, 71:021906, 2005
See also
--------
ap_entropy: approximate entropy of a time series
"""
N = len(X)
Em = embed_seq(X, 1, M)
A = numpy.tile(Em, (len(Em), 1, 1))
B = numpy.transpose(A, [1, 0, 2])
D = numpy.abs(A - B) # D[i,j,k] = |Em[i][k] - Em[j][k]|
InRange = numpy.max(D, axis=2) <= R
numpy.fill_diagonal(InRange, 0) # Don't count self-matches
Cm = InRange.sum(axis=0) # Probability that random M-sequences are in range
Dp = numpy.abs(numpy.tile(X[M:], (N - M, 1)) - numpy.tile(X[M:], (N - M, 1)).T)
Cmp = numpy.logical_and(Dp <= R, InRange[:-1,:-1]).sum(axis=0)
# Uncomment below for old (miscounted) version
#InRange[numpy.triu_indices(len(InRange))] = 0
#InRange = InRange[:-1,:-2]
#Cm = InRange.sum(axis=0) # Probability that random M-sequences are in range
#Dp = numpy.abs(numpy.tile(X[M:], (N - M, 1)) - numpy.tile(X[M:], (N - M, 1)).T)
#Dp = Dp[:,:-1]
#Cmp = numpy.logical_and(Dp <= R, InRange).sum(axis=0)
# Avoid taking log(0)
Samp_En = numpy.log(numpy.sum(Cm + 1e-100) / numpy.sum(Cmp + 1e-100))
return Samp_En | [
"def",
"samp_entropy",
"(",
"X",
",",
"M",
",",
"R",
")",
":",
"N",
"=",
"len",
"(",
"X",
")",
"Em",
"=",
"embed_seq",
"(",
"X",
",",
"1",
",",
"M",
")",
"A",
"=",
"numpy",
".",
"tile",
"(",
"Em",
",",
"(",
"len",
"(",
"Em",
")",
",",
"... | Computer sample entropy (SampEn) of series X, specified by M and R.
SampEn is very close to ApEn.
Suppose given time series is X = [x(1), x(2), ... , x(N)]. We first build
embedding matrix Em, of dimension (N-M+1)-by-M, such that the i-th row of
Em is x(i),x(i+1), ... , x(i+M-1). Hence, the embedding lag and dimension
are 1 and M-1 respectively. Such a matrix can be built by calling pyeeg
function as Em = embed_seq(X, 1, M). Then we build matrix Emp, whose only
difference with Em is that the length of each embedding sequence is M + 1
Denote the i-th and j-th row of Em as Em[i] and Em[j]. Their k-th elements
are Em[i][k] and Em[j][k] respectively. The distance between Em[i] and
Em[j] is defined as 1) the maximum difference of their corresponding scalar
components, thus, max(Em[i]-Em[j]), or 2) Euclidean distance. We say two
1-D vectors Em[i] and Em[j] *match* in *tolerance* R, if the distance
between them is no greater than R, thus, max(Em[i]-Em[j]) <= R. Mostly, the
value of R is defined as 20% - 30% of standard deviation of X.
Pick Em[i] as a template, for all j such that 0 < j < N - M , we can
check whether Em[j] matches with Em[i]. Denote the number of Em[j],
which is in the range of Em[i], as k[i], which is the i-th element of the
vector k.
We repeat the same process on Emp and obtained Cmp[i], 0 < i < N - M.
The SampEn is defined as log(sum(Cm)/sum(Cmp))
References
----------
Costa M, Goldberger AL, Peng C-K, Multiscale entropy analysis of biological
signals, Physical Review E, 71:021906, 2005
See also
--------
ap_entropy: approximate entropy of a time series | [
"Computer",
"sample",
"entropy",
"(",
"SampEn",
")",
"of",
"series",
"X",
"specified",
"by",
"M",
"and",
"R",
"."
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/pyeeg.py#L539-L603 | train |
mattja/nsim | nsim/analyses1/pyeeg.py | dfa | def dfa(X, Ave=None, L=None):
"""Compute Detrended Fluctuation Analysis from a time series X and length of
boxes L.
The first step to compute DFA is to integrate the signal. Let original
series be X= [x(1), x(2), ..., x(N)].
The integrated signal Y = [y(1), y(2), ..., y(N)] is obtained as follows
y(k) = \sum_{i=1}^{k}{x(i)-Ave} where Ave is the mean of X.
The second step is to partition/slice/segment the integrated sequence Y
into boxes. At least two boxes are needed for computing DFA. Box sizes are
specified by the L argument of this function. By default, it is from 1/5 of
signal length to one (x-5)-th of the signal length, where x is the nearest
power of 2 from the length of the signal, i.e., 1/16, 1/32, 1/64, 1/128,
...
In each box, a linear least square fitting is employed on data in the box.
Denote the series on fitted line as Yn. Its k-th elements, yn(k),
corresponds to y(k).
For fitting in each box, there is a residue, the sum of squares of all
offsets, difference between actual points and points on fitted line.
F(n) denotes the square root of average total residue in all boxes when box
length is n, thus
Total_Residue = \sum_{k=1}^{N}{(y(k)-yn(k))}
F(n) = \sqrt(Total_Residue/N)
The computing to F(n) is carried out for every box length n. Therefore, a
relationship between n and F(n) can be obtained. In general, F(n) increases
when n increases.
Finally, the relationship between F(n) and n is analyzed. A least square
fitting is performed between log(F(n)) and log(n). The slope of the fitting
line is the DFA value, denoted as Alpha. To white noise, Alpha should be
0.5. Higher level of signal complexity is related to higher Alpha.
Parameters
----------
X:
1-D Python list or numpy array
a time series
Ave:
integer, optional
The average value of the time series
L:
1-D Python list of integers
A list of box size, integers in ascending order
Returns
-------
Alpha:
integer
the result of DFA analysis, thus the slope of fitting line of log(F(n))
vs. log(n). where n is the
Examples
--------
>>> import pyeeg
>>> from numpy.random import randn
>>> print(pyeeg.dfa(randn(4096)))
0.490035110345
Reference
---------
Peng C-K, Havlin S, Stanley HE, Goldberger AL. Quantification of scaling
exponents and crossover phenomena in nonstationary heartbeat time series.
_Chaos_ 1995;5:82-87
Notes
-----
This value depends on the box sizes very much. When the input is a white
noise, this value should be 0.5. But, some choices on box sizes can lead to
the value lower or higher than 0.5, e.g. 0.38 or 0.58.
Based on many test, I set the box sizes from 1/5 of signal length to one
(x-5)-th of the signal length, where x is the nearest power of 2 from the
length of the signal, i.e., 1/16, 1/32, 1/64, 1/128, ...
You may generate a list of box sizes and pass in such a list as a
parameter.
"""
X = numpy.array(X)
if Ave is None:
Ave = numpy.mean(X)
Y = numpy.cumsum(X)
Y -= Ave
if L is None:
L = numpy.floor(len(X) * 1 / (
2 ** numpy.array(list(range(4, int(numpy.log2(len(X))) - 4))))
)
F = numpy.zeros(len(L)) # F(n) of different given box length n
for i in range(0, len(L)):
n = int(L[i]) # for each box length L[i]
if n == 0:
print("time series is too short while the box length is too big")
print("abort")
exit()
for j in range(0, len(X), n): # for each box
if j + n < len(X):
c = list(range(j, j + n))
# coordinates of time in the box
c = numpy.vstack([c, numpy.ones(n)]).T
# the value of data in the box
y = Y[j:j + n]
# add residue in this box
F[i] += numpy.linalg.lstsq(c, y)[1]
F[i] /= ((len(X) / n) * n)
F = numpy.sqrt(F)
Alpha = numpy.linalg.lstsq(numpy.vstack(
[numpy.log(L), numpy.ones(len(L))]
).T, numpy.log(F))[0][0]
return Alpha | python | def dfa(X, Ave=None, L=None):
"""Compute Detrended Fluctuation Analysis from a time series X and length of
boxes L.
The first step to compute DFA is to integrate the signal. Let original
series be X= [x(1), x(2), ..., x(N)].
The integrated signal Y = [y(1), y(2), ..., y(N)] is obtained as follows
y(k) = \sum_{i=1}^{k}{x(i)-Ave} where Ave is the mean of X.
The second step is to partition/slice/segment the integrated sequence Y
into boxes. At least two boxes are needed for computing DFA. Box sizes are
specified by the L argument of this function. By default, it is from 1/5 of
signal length to one (x-5)-th of the signal length, where x is the nearest
power of 2 from the length of the signal, i.e., 1/16, 1/32, 1/64, 1/128,
...
In each box, a linear least square fitting is employed on data in the box.
Denote the series on fitted line as Yn. Its k-th elements, yn(k),
corresponds to y(k).
For fitting in each box, there is a residue, the sum of squares of all
offsets, difference between actual points and points on fitted line.
F(n) denotes the square root of average total residue in all boxes when box
length is n, thus
Total_Residue = \sum_{k=1}^{N}{(y(k)-yn(k))}
F(n) = \sqrt(Total_Residue/N)
The computing to F(n) is carried out for every box length n. Therefore, a
relationship between n and F(n) can be obtained. In general, F(n) increases
when n increases.
Finally, the relationship between F(n) and n is analyzed. A least square
fitting is performed between log(F(n)) and log(n). The slope of the fitting
line is the DFA value, denoted as Alpha. To white noise, Alpha should be
0.5. Higher level of signal complexity is related to higher Alpha.
Parameters
----------
X:
1-D Python list or numpy array
a time series
Ave:
integer, optional
The average value of the time series
L:
1-D Python list of integers
A list of box size, integers in ascending order
Returns
-------
Alpha:
integer
the result of DFA analysis, thus the slope of fitting line of log(F(n))
vs. log(n). where n is the
Examples
--------
>>> import pyeeg
>>> from numpy.random import randn
>>> print(pyeeg.dfa(randn(4096)))
0.490035110345
Reference
---------
Peng C-K, Havlin S, Stanley HE, Goldberger AL. Quantification of scaling
exponents and crossover phenomena in nonstationary heartbeat time series.
_Chaos_ 1995;5:82-87
Notes
-----
This value depends on the box sizes very much. When the input is a white
noise, this value should be 0.5. But, some choices on box sizes can lead to
the value lower or higher than 0.5, e.g. 0.38 or 0.58.
Based on many test, I set the box sizes from 1/5 of signal length to one
(x-5)-th of the signal length, where x is the nearest power of 2 from the
length of the signal, i.e., 1/16, 1/32, 1/64, 1/128, ...
You may generate a list of box sizes and pass in such a list as a
parameter.
"""
X = numpy.array(X)
if Ave is None:
Ave = numpy.mean(X)
Y = numpy.cumsum(X)
Y -= Ave
if L is None:
L = numpy.floor(len(X) * 1 / (
2 ** numpy.array(list(range(4, int(numpy.log2(len(X))) - 4))))
)
F = numpy.zeros(len(L)) # F(n) of different given box length n
for i in range(0, len(L)):
n = int(L[i]) # for each box length L[i]
if n == 0:
print("time series is too short while the box length is too big")
print("abort")
exit()
for j in range(0, len(X), n): # for each box
if j + n < len(X):
c = list(range(j, j + n))
# coordinates of time in the box
c = numpy.vstack([c, numpy.ones(n)]).T
# the value of data in the box
y = Y[j:j + n]
# add residue in this box
F[i] += numpy.linalg.lstsq(c, y)[1]
F[i] /= ((len(X) / n) * n)
F = numpy.sqrt(F)
Alpha = numpy.linalg.lstsq(numpy.vstack(
[numpy.log(L), numpy.ones(len(L))]
).T, numpy.log(F))[0][0]
return Alpha | [
"def",
"dfa",
"(",
"X",
",",
"Ave",
"=",
"None",
",",
"L",
"=",
"None",
")",
":",
"X",
"=",
"numpy",
".",
"array",
"(",
"X",
")",
"if",
"Ave",
"is",
"None",
":",
"Ave",
"=",
"numpy",
".",
"mean",
"(",
"X",
")",
"Y",
"=",
"numpy",
".",
"cu... | Compute Detrended Fluctuation Analysis from a time series X and length of
boxes L.
The first step to compute DFA is to integrate the signal. Let original
series be X= [x(1), x(2), ..., x(N)].
The integrated signal Y = [y(1), y(2), ..., y(N)] is obtained as follows
y(k) = \sum_{i=1}^{k}{x(i)-Ave} where Ave is the mean of X.
The second step is to partition/slice/segment the integrated sequence Y
into boxes. At least two boxes are needed for computing DFA. Box sizes are
specified by the L argument of this function. By default, it is from 1/5 of
signal length to one (x-5)-th of the signal length, where x is the nearest
power of 2 from the length of the signal, i.e., 1/16, 1/32, 1/64, 1/128,
...
In each box, a linear least square fitting is employed on data in the box.
Denote the series on fitted line as Yn. Its k-th elements, yn(k),
corresponds to y(k).
For fitting in each box, there is a residue, the sum of squares of all
offsets, difference between actual points and points on fitted line.
F(n) denotes the square root of average total residue in all boxes when box
length is n, thus
Total_Residue = \sum_{k=1}^{N}{(y(k)-yn(k))}
F(n) = \sqrt(Total_Residue/N)
The computing to F(n) is carried out for every box length n. Therefore, a
relationship between n and F(n) can be obtained. In general, F(n) increases
when n increases.
Finally, the relationship between F(n) and n is analyzed. A least square
fitting is performed between log(F(n)) and log(n). The slope of the fitting
line is the DFA value, denoted as Alpha. To white noise, Alpha should be
0.5. Higher level of signal complexity is related to higher Alpha.
Parameters
----------
X:
1-D Python list or numpy array
a time series
Ave:
integer, optional
The average value of the time series
L:
1-D Python list of integers
A list of box size, integers in ascending order
Returns
-------
Alpha:
integer
the result of DFA analysis, thus the slope of fitting line of log(F(n))
vs. log(n). where n is the
Examples
--------
>>> import pyeeg
>>> from numpy.random import randn
>>> print(pyeeg.dfa(randn(4096)))
0.490035110345
Reference
---------
Peng C-K, Havlin S, Stanley HE, Goldberger AL. Quantification of scaling
exponents and crossover phenomena in nonstationary heartbeat time series.
_Chaos_ 1995;5:82-87
Notes
-----
This value depends on the box sizes very much. When the input is a white
noise, this value should be 0.5. But, some choices on box sizes can lead to
the value lower or higher than 0.5, e.g. 0.38 or 0.58.
Based on many test, I set the box sizes from 1/5 of signal length to one
(x-5)-th of the signal length, where x is the nearest power of 2 from the
length of the signal, i.e., 1/16, 1/32, 1/64, 1/128, ...
You may generate a list of box sizes and pass in such a list as a
parameter. | [
"Compute",
"Detrended",
"Fluctuation",
"Analysis",
"from",
"a",
"time",
"series",
"X",
"and",
"length",
"of",
"boxes",
"L",
"."
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/pyeeg.py#L606-L733 | train |
mattja/nsim | nsim/analyses1/pyeeg.py | permutation_entropy | def permutation_entropy(x, n, tau):
"""Compute Permutation Entropy of a given time series x, specified by
permutation order n and embedding lag tau.
Parameters
----------
x
list
a time series
n
integer
Permutation order
tau
integer
Embedding lag
Returns
----------
PE
float
permutation entropy
Notes
----------
Suppose the given time series is X =[x(1),x(2),x(3),...,x(N)].
We first build embedding matrix Em, of dimension(n*N-n+1),
such that the ith row of Em is x(i),x(i+1),..x(i+n-1). Hence
the embedding lag and the embedding dimension are 1 and n
respectively. We build this matrix from a given time series,
X, by calling pyEEg function embed_seq(x,1,n).
We then transform each row of the embedding matrix into
a new sequence, comprising a set of integers in range of 0,..,n-1.
The order in which the integers are placed within a row is the
same as those of the original elements:0 is placed where the smallest
element of the row was and n-1 replaces the largest element of the row.
To calculate the Permutation entropy, we calculate the entropy of PeSeq.
In doing so, we count the number of occurrences of each permutation
in PeSeq and write it in a sequence, RankMat. We then use this sequence to
calculate entropy by using Shannon's entropy formula.
Permutation entropy is usually calculated with n in range of 3 and 7.
References
----------
Bandt, Christoph, and Bernd Pompe. "Permutation entropy: a natural
complexity measure for time series." Physical Review Letters 88.17
(2002): 174102.
Examples
----------
>>> import pyeeg
>>> x = [1,2,4,5,12,3,4,5]
>>> pyeeg.permutation_entropy(x,5,1)
2.0
"""
PeSeq = []
Em = embed_seq(x, tau, n)
for i in range(0, len(Em)):
r = []
z = []
for j in range(0, len(Em[i])):
z.append(Em[i][j])
for j in range(0, len(Em[i])):
z.sort()
r.append(z.index(Em[i][j]))
z[z.index(Em[i][j])] = -1
PeSeq.append(r)
RankMat = []
while len(PeSeq) > 0:
RankMat.append(PeSeq.count(PeSeq[0]))
x = PeSeq[0]
for j in range(0, PeSeq.count(PeSeq[0])):
PeSeq.pop(PeSeq.index(x))
RankMat = numpy.array(RankMat)
RankMat = numpy.true_divide(RankMat, RankMat.sum())
EntropyMat = numpy.multiply(numpy.log2(RankMat), RankMat)
PE = -1 * EntropyMat.sum()
return PE | python | def permutation_entropy(x, n, tau):
"""Compute Permutation Entropy of a given time series x, specified by
permutation order n and embedding lag tau.
Parameters
----------
x
list
a time series
n
integer
Permutation order
tau
integer
Embedding lag
Returns
----------
PE
float
permutation entropy
Notes
----------
Suppose the given time series is X =[x(1),x(2),x(3),...,x(N)].
We first build embedding matrix Em, of dimension(n*N-n+1),
such that the ith row of Em is x(i),x(i+1),..x(i+n-1). Hence
the embedding lag and the embedding dimension are 1 and n
respectively. We build this matrix from a given time series,
X, by calling pyEEg function embed_seq(x,1,n).
We then transform each row of the embedding matrix into
a new sequence, comprising a set of integers in range of 0,..,n-1.
The order in which the integers are placed within a row is the
same as those of the original elements:0 is placed where the smallest
element of the row was and n-1 replaces the largest element of the row.
To calculate the Permutation entropy, we calculate the entropy of PeSeq.
In doing so, we count the number of occurrences of each permutation
in PeSeq and write it in a sequence, RankMat. We then use this sequence to
calculate entropy by using Shannon's entropy formula.
Permutation entropy is usually calculated with n in range of 3 and 7.
References
----------
Bandt, Christoph, and Bernd Pompe. "Permutation entropy: a natural
complexity measure for time series." Physical Review Letters 88.17
(2002): 174102.
Examples
----------
>>> import pyeeg
>>> x = [1,2,4,5,12,3,4,5]
>>> pyeeg.permutation_entropy(x,5,1)
2.0
"""
PeSeq = []
Em = embed_seq(x, tau, n)
for i in range(0, len(Em)):
r = []
z = []
for j in range(0, len(Em[i])):
z.append(Em[i][j])
for j in range(0, len(Em[i])):
z.sort()
r.append(z.index(Em[i][j]))
z[z.index(Em[i][j])] = -1
PeSeq.append(r)
RankMat = []
while len(PeSeq) > 0:
RankMat.append(PeSeq.count(PeSeq[0]))
x = PeSeq[0]
for j in range(0, PeSeq.count(PeSeq[0])):
PeSeq.pop(PeSeq.index(x))
RankMat = numpy.array(RankMat)
RankMat = numpy.true_divide(RankMat, RankMat.sum())
EntropyMat = numpy.multiply(numpy.log2(RankMat), RankMat)
PE = -1 * EntropyMat.sum()
return PE | [
"def",
"permutation_entropy",
"(",
"x",
",",
"n",
",",
"tau",
")",
":",
"PeSeq",
"=",
"[",
"]",
"Em",
"=",
"embed_seq",
"(",
"x",
",",
"tau",
",",
"n",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"Em",
")",
")",
":",
"r",
"="... | Compute Permutation Entropy of a given time series x, specified by
permutation order n and embedding lag tau.
Parameters
----------
x
list
a time series
n
integer
Permutation order
tau
integer
Embedding lag
Returns
----------
PE
float
permutation entropy
Notes
----------
Suppose the given time series is X =[x(1),x(2),x(3),...,x(N)].
We first build embedding matrix Em, of dimension(n*N-n+1),
such that the ith row of Em is x(i),x(i+1),..x(i+n-1). Hence
the embedding lag and the embedding dimension are 1 and n
respectively. We build this matrix from a given time series,
X, by calling pyEEg function embed_seq(x,1,n).
We then transform each row of the embedding matrix into
a new sequence, comprising a set of integers in range of 0,..,n-1.
The order in which the integers are placed within a row is the
same as those of the original elements:0 is placed where the smallest
element of the row was and n-1 replaces the largest element of the row.
To calculate the Permutation entropy, we calculate the entropy of PeSeq.
In doing so, we count the number of occurrences of each permutation
in PeSeq and write it in a sequence, RankMat. We then use this sequence to
calculate entropy by using Shannon's entropy formula.
Permutation entropy is usually calculated with n in range of 3 and 7.
References
----------
Bandt, Christoph, and Bernd Pompe. "Permutation entropy: a natural
complexity measure for time series." Physical Review Letters 88.17
(2002): 174102.
Examples
----------
>>> import pyeeg
>>> x = [1,2,4,5,12,3,4,5]
>>> pyeeg.permutation_entropy(x,5,1)
2.0 | [
"Compute",
"Permutation",
"Entropy",
"of",
"a",
"given",
"time",
"series",
"x",
"specified",
"by",
"permutation",
"order",
"n",
"and",
"embedding",
"lag",
"tau",
"."
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/pyeeg.py#L736-L834 | train |
mattja/nsim | nsim/analyses1/pyeeg.py | information_based_similarity | def information_based_similarity(x, y, n):
"""Calculates the information based similarity of two time series x
and y.
Parameters
----------
x
list
a time series
y
list
a time series
n
integer
word order
Returns
----------
IBS
float
Information based similarity
Notes
----------
Information based similarity is a measure of dissimilarity between
two time series. Let the sequences be x and y. Each sequence is first
replaced by its first ordered difference(Encoder). Calculating the
Heaviside of the resulting sequences, we get two binary sequences,
SymbolicSeq. Using PyEEG function, embed_seq, with lag of 1 and dimension
of n, we build an embedding matrix from the latter sequence.
Each row of this embedding matrix is called a word. Information based
similarity measures the distance between two sequence by comparing the
rank of words in the sequences; more explicitly, the distance, D, is
calculated using the formula:
"1/2^(n-1) * sum( abs(Rank(0)(k)-R(1)(k)) * F(k) )" where Rank(0)(k)
and Rank(1)(k) are the rank of the k-th word in each of the input
sequences. F(k) is a modified "shannon" weighing function that increases
the weight of each word in the calculations when they are more frequent in
the sequences.
It is advisable to calculate IBS for numerical sequences using 8-tupple
words.
References
----------
Yang AC, Hseu SS, Yien HW, Goldberger AL, Peng CK: Linguistic analysis of
the human heartbeat using frequency and rank order statistics. Phys Rev
Lett 2003, 90: 108103
Examples
----------
>>> import pyeeg
>>> from numpy.random import randn
>>> x = randn(100)
>>> y = randn(100)
>>> pyeeg.information_based_similarity(x,y,8)
0.64512947848249214
"""
Wordlist = []
Space = [[0, 0], [0, 1], [1, 0], [1, 1]]
Sample = [0, 1]
if (n == 1):
Wordlist = Sample
if (n == 2):
Wordlist = Space
elif (n > 1):
Wordlist = Space
Buff = []
for k in range(0, n - 2):
Buff = []
for i in range(0, len(Wordlist)):
Buff.append(tuple(Wordlist[i]))
Buff = tuple(Buff)
Wordlist = []
for i in range(0, len(Buff)):
for j in range(0, len(Sample)):
Wordlist.append(list(Buff[i]))
Wordlist[len(Wordlist) - 1].append(Sample[j])
Wordlist.sort()
Input = [[], []]
Input[0] = x
Input[1] = y
SymbolicSeq = [[], []]
for i in range(0, 2):
Encoder = numpy.diff(Input[i])
for j in range(0, len(Input[i]) - 1):
if(Encoder[j] > 0):
SymbolicSeq[i].append(1)
else:
SymbolicSeq[i].append(0)
Wm = []
Wm.append(embed_seq(SymbolicSeq[0], 1, n).tolist())
Wm.append(embed_seq(SymbolicSeq[1], 1, n).tolist())
Count = [[], []]
for i in range(0, 2):
for k in range(0, len(Wordlist)):
Count[i].append(Wm[i].count(Wordlist[k]))
Prob = [[], []]
for i in range(0, 2):
Sigma = 0
for j in range(0, len(Wordlist)):
Sigma += Count[i][j]
for k in range(0, len(Wordlist)):
Prob[i].append(numpy.true_divide(Count[i][k], Sigma))
Entropy = [[], []]
for i in range(0, 2):
for k in range(0, len(Wordlist)):
if (Prob[i][k] == 0):
Entropy[i].append(0)
else:
Entropy[i].append(Prob[i][k] * (numpy.log2(Prob[i][k])))
Rank = [[], []]
Buff = [[], []]
Buff[0] = tuple(Count[0])
Buff[1] = tuple(Count[1])
for i in range(0, 2):
Count[i].sort()
Count[i].reverse()
for k in range(0, len(Wordlist)):
Rank[i].append(Count[i].index(Buff[i][k]))
Count[i][Count[i].index(Buff[i][k])] = -1
IBS = 0
Z = 0
n = 0
for k in range(0, len(Wordlist)):
if ((Buff[0][k] != 0) & (Buff[1][k] != 0)):
F = -Entropy[0][k] - Entropy[1][k]
IBS += numpy.multiply(numpy.absolute(Rank[0][k] - Rank[1][k]), F)
Z += F
else:
n += 1
IBS = numpy.true_divide(IBS, Z)
IBS = numpy.true_divide(IBS, len(Wordlist) - n)
return IBS | python | def information_based_similarity(x, y, n):
"""Calculates the information based similarity of two time series x
and y.
Parameters
----------
x
list
a time series
y
list
a time series
n
integer
word order
Returns
----------
IBS
float
Information based similarity
Notes
----------
Information based similarity is a measure of dissimilarity between
two time series. Let the sequences be x and y. Each sequence is first
replaced by its first ordered difference(Encoder). Calculating the
Heaviside of the resulting sequences, we get two binary sequences,
SymbolicSeq. Using PyEEG function, embed_seq, with lag of 1 and dimension
of n, we build an embedding matrix from the latter sequence.
Each row of this embedding matrix is called a word. Information based
similarity measures the distance between two sequence by comparing the
rank of words in the sequences; more explicitly, the distance, D, is
calculated using the formula:
"1/2^(n-1) * sum( abs(Rank(0)(k)-R(1)(k)) * F(k) )" where Rank(0)(k)
and Rank(1)(k) are the rank of the k-th word in each of the input
sequences. F(k) is a modified "shannon" weighing function that increases
the weight of each word in the calculations when they are more frequent in
the sequences.
It is advisable to calculate IBS for numerical sequences using 8-tupple
words.
References
----------
Yang AC, Hseu SS, Yien HW, Goldberger AL, Peng CK: Linguistic analysis of
the human heartbeat using frequency and rank order statistics. Phys Rev
Lett 2003, 90: 108103
Examples
----------
>>> import pyeeg
>>> from numpy.random import randn
>>> x = randn(100)
>>> y = randn(100)
>>> pyeeg.information_based_similarity(x,y,8)
0.64512947848249214
"""
Wordlist = []
Space = [[0, 0], [0, 1], [1, 0], [1, 1]]
Sample = [0, 1]
if (n == 1):
Wordlist = Sample
if (n == 2):
Wordlist = Space
elif (n > 1):
Wordlist = Space
Buff = []
for k in range(0, n - 2):
Buff = []
for i in range(0, len(Wordlist)):
Buff.append(tuple(Wordlist[i]))
Buff = tuple(Buff)
Wordlist = []
for i in range(0, len(Buff)):
for j in range(0, len(Sample)):
Wordlist.append(list(Buff[i]))
Wordlist[len(Wordlist) - 1].append(Sample[j])
Wordlist.sort()
Input = [[], []]
Input[0] = x
Input[1] = y
SymbolicSeq = [[], []]
for i in range(0, 2):
Encoder = numpy.diff(Input[i])
for j in range(0, len(Input[i]) - 1):
if(Encoder[j] > 0):
SymbolicSeq[i].append(1)
else:
SymbolicSeq[i].append(0)
Wm = []
Wm.append(embed_seq(SymbolicSeq[0], 1, n).tolist())
Wm.append(embed_seq(SymbolicSeq[1], 1, n).tolist())
Count = [[], []]
for i in range(0, 2):
for k in range(0, len(Wordlist)):
Count[i].append(Wm[i].count(Wordlist[k]))
Prob = [[], []]
for i in range(0, 2):
Sigma = 0
for j in range(0, len(Wordlist)):
Sigma += Count[i][j]
for k in range(0, len(Wordlist)):
Prob[i].append(numpy.true_divide(Count[i][k], Sigma))
Entropy = [[], []]
for i in range(0, 2):
for k in range(0, len(Wordlist)):
if (Prob[i][k] == 0):
Entropy[i].append(0)
else:
Entropy[i].append(Prob[i][k] * (numpy.log2(Prob[i][k])))
Rank = [[], []]
Buff = [[], []]
Buff[0] = tuple(Count[0])
Buff[1] = tuple(Count[1])
for i in range(0, 2):
Count[i].sort()
Count[i].reverse()
for k in range(0, len(Wordlist)):
Rank[i].append(Count[i].index(Buff[i][k]))
Count[i][Count[i].index(Buff[i][k])] = -1
IBS = 0
Z = 0
n = 0
for k in range(0, len(Wordlist)):
if ((Buff[0][k] != 0) & (Buff[1][k] != 0)):
F = -Entropy[0][k] - Entropy[1][k]
IBS += numpy.multiply(numpy.absolute(Rank[0][k] - Rank[1][k]), F)
Z += F
else:
n += 1
IBS = numpy.true_divide(IBS, Z)
IBS = numpy.true_divide(IBS, len(Wordlist) - n)
return IBS | [
"def",
"information_based_similarity",
"(",
"x",
",",
"y",
",",
"n",
")",
":",
"Wordlist",
"=",
"[",
"]",
"Space",
"=",
"[",
"[",
"0",
",",
"0",
"]",
",",
"[",
"0",
",",
"1",
"]",
",",
"[",
"1",
",",
"0",
"]",
",",
"[",
"1",
",",
"1",
"]"... | Calculates the information based similarity of two time series x
and y.
Parameters
----------
x
list
a time series
y
list
a time series
n
integer
word order
Returns
----------
IBS
float
Information based similarity
Notes
----------
Information based similarity is a measure of dissimilarity between
two time series. Let the sequences be x and y. Each sequence is first
replaced by its first ordered difference(Encoder). Calculating the
Heaviside of the resulting sequences, we get two binary sequences,
SymbolicSeq. Using PyEEG function, embed_seq, with lag of 1 and dimension
of n, we build an embedding matrix from the latter sequence.
Each row of this embedding matrix is called a word. Information based
similarity measures the distance between two sequence by comparing the
rank of words in the sequences; more explicitly, the distance, D, is
calculated using the formula:
"1/2^(n-1) * sum( abs(Rank(0)(k)-R(1)(k)) * F(k) )" where Rank(0)(k)
and Rank(1)(k) are the rank of the k-th word in each of the input
sequences. F(k) is a modified "shannon" weighing function that increases
the weight of each word in the calculations when they are more frequent in
the sequences.
It is advisable to calculate IBS for numerical sequences using 8-tupple
words.
References
----------
Yang AC, Hseu SS, Yien HW, Goldberger AL, Peng CK: Linguistic analysis of
the human heartbeat using frequency and rank order statistics. Phys Rev
Lett 2003, 90: 108103
Examples
----------
>>> import pyeeg
>>> from numpy.random import randn
>>> x = randn(100)
>>> y = randn(100)
>>> pyeeg.information_based_similarity(x,y,8)
0.64512947848249214 | [
"Calculates",
"the",
"information",
"based",
"similarity",
"of",
"two",
"time",
"series",
"x",
"and",
"y",
"."
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/pyeeg.py#L837-L1004 | train |
mattja/nsim | nsim/analyses1/pyeeg.py | LLE | def LLE(x, tau, n, T, fs):
"""Calculate largest Lyauponov exponent of a given time series x using
Rosenstein algorithm.
Parameters
----------
x
list
a time series
n
integer
embedding dimension
tau
integer
Embedding lag
fs
integer
Sampling frequency
T
integer
Mean period
Returns
----------
Lexp
float
Largest Lyapunov Exponent
Notes
----------
A n-dimensional trajectory is first reconstructed from the observed data by
use of embedding delay of tau, using pyeeg function, embed_seq(x, tau, n).
Algorithm then searches for nearest neighbour of each point on the
reconstructed trajectory; temporal separation of nearest neighbours must be
greater than mean period of the time series: the mean period can be
estimated as the reciprocal of the mean frequency in power spectrum
Each pair of nearest neighbours is assumed to diverge exponentially at a
rate given by largest Lyapunov exponent. Now having a collection of
neighbours, a least square fit to the average exponential divergence is
calculated. The slope of this line gives an accurate estimate of the
largest Lyapunov exponent.
References
----------
Rosenstein, Michael T., James J. Collins, and Carlo J. De Luca. "A
practical method for calculating largest Lyapunov exponents from small data
sets." Physica D: Nonlinear Phenomena 65.1 (1993): 117-134.
Examples
----------
>>> import pyeeg
>>> X = numpy.array([3,4,1,2,4,51,4,32,24,12,3,45])
>>> pyeeg.LLE(X,2,4,1,1)
>>> 0.18771136179353307
"""
Em = embed_seq(x, tau, n)
M = len(Em)
A = numpy.tile(Em, (len(Em), 1, 1))
B = numpy.transpose(A, [1, 0, 2])
square_dists = (A - B) ** 2 # square_dists[i,j,k] = (Em[i][k]-Em[j][k])^2
D = numpy.sqrt(square_dists[:,:,:].sum(axis=2)) # D[i,j] = ||Em[i]-Em[j]||_2
# Exclude elements within T of the diagonal
band = numpy.tri(D.shape[0], k=T) - numpy.tri(D.shape[0], k=-T-1)
band[band == 1] = numpy.inf
neighbors = (D + band).argmin(axis=0) # nearest neighbors more than T steps away
# in_bounds[i,j] = (i+j <= M-1 and i+neighbors[j] <= M-1)
inc = numpy.tile(numpy.arange(M), (M, 1))
row_inds = (numpy.tile(numpy.arange(M), (M, 1)).T + inc)
col_inds = (numpy.tile(neighbors, (M, 1)) + inc.T)
in_bounds = numpy.logical_and(row_inds <= M - 1, col_inds <= M - 1)
# Uncomment for old (miscounted) version
#in_bounds = numpy.logical_and(row_inds < M - 1, col_inds < M - 1)
row_inds[-in_bounds] = 0
col_inds[-in_bounds] = 0
# neighbor_dists[i,j] = ||Em[i+j]-Em[i+neighbors[j]]||_2
neighbor_dists = numpy.ma.MaskedArray(D[row_inds, col_inds], -in_bounds)
J = (-neighbor_dists.mask).sum(axis=1) # number of in-bounds indices by row
# Set invalid (zero) values to 1; log(1) = 0 so sum is unchanged
neighbor_dists[neighbor_dists == 0] = 1
d_ij = numpy.sum(numpy.log(neighbor_dists.data), axis=1)
mean_d = d_ij[J > 0] / J[J > 0]
x = numpy.arange(len(mean_d))
X = numpy.vstack((x, numpy.ones(len(mean_d)))).T
[m, c] = numpy.linalg.lstsq(X, mean_d)[0]
Lexp = fs * m
return Lexp | python | def LLE(x, tau, n, T, fs):
"""Calculate largest Lyauponov exponent of a given time series x using
Rosenstein algorithm.
Parameters
----------
x
list
a time series
n
integer
embedding dimension
tau
integer
Embedding lag
fs
integer
Sampling frequency
T
integer
Mean period
Returns
----------
Lexp
float
Largest Lyapunov Exponent
Notes
----------
A n-dimensional trajectory is first reconstructed from the observed data by
use of embedding delay of tau, using pyeeg function, embed_seq(x, tau, n).
Algorithm then searches for nearest neighbour of each point on the
reconstructed trajectory; temporal separation of nearest neighbours must be
greater than mean period of the time series: the mean period can be
estimated as the reciprocal of the mean frequency in power spectrum
Each pair of nearest neighbours is assumed to diverge exponentially at a
rate given by largest Lyapunov exponent. Now having a collection of
neighbours, a least square fit to the average exponential divergence is
calculated. The slope of this line gives an accurate estimate of the
largest Lyapunov exponent.
References
----------
Rosenstein, Michael T., James J. Collins, and Carlo J. De Luca. "A
practical method for calculating largest Lyapunov exponents from small data
sets." Physica D: Nonlinear Phenomena 65.1 (1993): 117-134.
Examples
----------
>>> import pyeeg
>>> X = numpy.array([3,4,1,2,4,51,4,32,24,12,3,45])
>>> pyeeg.LLE(X,2,4,1,1)
>>> 0.18771136179353307
"""
Em = embed_seq(x, tau, n)
M = len(Em)
A = numpy.tile(Em, (len(Em), 1, 1))
B = numpy.transpose(A, [1, 0, 2])
square_dists = (A - B) ** 2 # square_dists[i,j,k] = (Em[i][k]-Em[j][k])^2
D = numpy.sqrt(square_dists[:,:,:].sum(axis=2)) # D[i,j] = ||Em[i]-Em[j]||_2
# Exclude elements within T of the diagonal
band = numpy.tri(D.shape[0], k=T) - numpy.tri(D.shape[0], k=-T-1)
band[band == 1] = numpy.inf
neighbors = (D + band).argmin(axis=0) # nearest neighbors more than T steps away
# in_bounds[i,j] = (i+j <= M-1 and i+neighbors[j] <= M-1)
inc = numpy.tile(numpy.arange(M), (M, 1))
row_inds = (numpy.tile(numpy.arange(M), (M, 1)).T + inc)
col_inds = (numpy.tile(neighbors, (M, 1)) + inc.T)
in_bounds = numpy.logical_and(row_inds <= M - 1, col_inds <= M - 1)
# Uncomment for old (miscounted) version
#in_bounds = numpy.logical_and(row_inds < M - 1, col_inds < M - 1)
row_inds[-in_bounds] = 0
col_inds[-in_bounds] = 0
# neighbor_dists[i,j] = ||Em[i+j]-Em[i+neighbors[j]]||_2
neighbor_dists = numpy.ma.MaskedArray(D[row_inds, col_inds], -in_bounds)
J = (-neighbor_dists.mask).sum(axis=1) # number of in-bounds indices by row
# Set invalid (zero) values to 1; log(1) = 0 so sum is unchanged
neighbor_dists[neighbor_dists == 0] = 1
d_ij = numpy.sum(numpy.log(neighbor_dists.data), axis=1)
mean_d = d_ij[J > 0] / J[J > 0]
x = numpy.arange(len(mean_d))
X = numpy.vstack((x, numpy.ones(len(mean_d)))).T
[m, c] = numpy.linalg.lstsq(X, mean_d)[0]
Lexp = fs * m
return Lexp | [
"def",
"LLE",
"(",
"x",
",",
"tau",
",",
"n",
",",
"T",
",",
"fs",
")",
":",
"Em",
"=",
"embed_seq",
"(",
"x",
",",
"tau",
",",
"n",
")",
"M",
"=",
"len",
"(",
"Em",
")",
"A",
"=",
"numpy",
".",
"tile",
"(",
"Em",
",",
"(",
"len",
"(",
... | Calculate largest Lyauponov exponent of a given time series x using
Rosenstein algorithm.
Parameters
----------
x
list
a time series
n
integer
embedding dimension
tau
integer
Embedding lag
fs
integer
Sampling frequency
T
integer
Mean period
Returns
----------
Lexp
float
Largest Lyapunov Exponent
Notes
----------
A n-dimensional trajectory is first reconstructed from the observed data by
use of embedding delay of tau, using pyeeg function, embed_seq(x, tau, n).
Algorithm then searches for nearest neighbour of each point on the
reconstructed trajectory; temporal separation of nearest neighbours must be
greater than mean period of the time series: the mean period can be
estimated as the reciprocal of the mean frequency in power spectrum
Each pair of nearest neighbours is assumed to diverge exponentially at a
rate given by largest Lyapunov exponent. Now having a collection of
neighbours, a least square fit to the average exponential divergence is
calculated. The slope of this line gives an accurate estimate of the
largest Lyapunov exponent.
References
----------
Rosenstein, Michael T., James J. Collins, and Carlo J. De Luca. "A
practical method for calculating largest Lyapunov exponents from small data
sets." Physica D: Nonlinear Phenomena 65.1 (1993): 117-134.
Examples
----------
>>> import pyeeg
>>> X = numpy.array([3,4,1,2,4,51,4,32,24,12,3,45])
>>> pyeeg.LLE(X,2,4,1,1)
>>> 0.18771136179353307 | [
"Calculate",
"largest",
"Lyauponov",
"exponent",
"of",
"a",
"given",
"time",
"series",
"x",
"using",
"Rosenstein",
"algorithm",
"."
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/pyeeg.py#L1007-L1112 | train |
mattja/nsim | nsim/analyses1/phase.py | mod2pi | def mod2pi(ts):
"""For a timeseries where all variables represent phases (in radians),
return an equivalent timeseries where all values are in the range (-pi, pi]
"""
return np.pi - np.mod(np.pi - ts, 2*np.pi) | python | def mod2pi(ts):
"""For a timeseries where all variables represent phases (in radians),
return an equivalent timeseries where all values are in the range (-pi, pi]
"""
return np.pi - np.mod(np.pi - ts, 2*np.pi) | [
"def",
"mod2pi",
"(",
"ts",
")",
":",
"return",
"np",
".",
"pi",
"-",
"np",
".",
"mod",
"(",
"np",
".",
"pi",
"-",
"ts",
",",
"2",
"*",
"np",
".",
"pi",
")"
] | For a timeseries where all variables represent phases (in radians),
return an equivalent timeseries where all values are in the range (-pi, pi] | [
"For",
"a",
"timeseries",
"where",
"all",
"variables",
"represent",
"phases",
"(",
"in",
"radians",
")",
"return",
"an",
"equivalent",
"timeseries",
"where",
"all",
"values",
"are",
"in",
"the",
"range",
"(",
"-",
"pi",
"pi",
"]"
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/phase.py#L17-L21 | train |
mattja/nsim | nsim/analyses1/phase.py | phase_crossings | def phase_crossings(ts, phi=0.0):
"""For a single variable timeseries representing the phase of an oscillator,
find the times at which the phase crosses angle phi,
with the condition that the phase must visit phi+pi between crossings.
(Thus if noise causes the phase to wander back and forth across angle phi
without the oscillator doing a full revolution, then this is recorded as
a single crossing event, giving the time of the earliest arrival.)
If the timeseries begins (or ends) exactly at phi, then time zero
(or the ending time) is also included as a crossing event,
so that the boundaries of the first and last oscillations are included.
If the actual crossing time falls between two time steps, linear
interpolation is used to estimate the crossing time.
Arguments:
ts: Timeseries (single variable)
The timeseries of an angle variable (radians)
phi (float): Critical phase angle (radians) at which to report crossings.
Returns:
array of float
"""
#TODO support multivariate time series
ts = ts.squeeze()
if ts.ndim is not 1:
raise ValueError('Currently can only use on single variable timeseries')
# Interpret the timeseries as belonging to a phase variable.
# Map its range to the interval (-pi, pi] with critical angle at zero:
ts = mod2pi(ts - phi)
tsa = ts[0:-1]
tsb = ts[1:]
p2 = np.pi/2
# Time indices where phase crosses or reaches zero from below or above
zc = np.nonzero((tsa > -p2) & (tsa < 0) & (tsb >= 0) & (tsb < p2) |
(tsa < p2) & (tsa > 0) & (tsb <= 0) & (tsb > -p2))[0] + 1
# Estimate crossing time interpolated linearly within a single time step
va = ts[zc-1]
vb = ts[zc]
ct = (np.abs(vb)*ts.tspan[zc-1] +
np.abs(va)*ts.tspan[zc]) / np.abs(vb - va) # denominator always !=0
# Also include starting time if we started exactly at zero
if ts[0] == 0.0:
zc = np.r_[np.array([0]), zc]
ct = np.r_[np.array([ts.tspan[0]]), ct]
# Time indices where phase crosses pi
pc = np.nonzero((tsa > p2) & (tsb < -p2) | (tsa < -p2) & (tsb > p2))[0] + 1
# Select those zero-crossings separated by at least one pi-crossing
splice = np.searchsorted(pc, zc)
which_zc = np.r_[np.array([0]), np.nonzero(splice[0:-1] - splice[1:])[0] +1]
if ct.shape[0] is 0:
return ct
else:
return ct[which_zc] | python | def phase_crossings(ts, phi=0.0):
"""For a single variable timeseries representing the phase of an oscillator,
find the times at which the phase crosses angle phi,
with the condition that the phase must visit phi+pi between crossings.
(Thus if noise causes the phase to wander back and forth across angle phi
without the oscillator doing a full revolution, then this is recorded as
a single crossing event, giving the time of the earliest arrival.)
If the timeseries begins (or ends) exactly at phi, then time zero
(or the ending time) is also included as a crossing event,
so that the boundaries of the first and last oscillations are included.
If the actual crossing time falls between two time steps, linear
interpolation is used to estimate the crossing time.
Arguments:
ts: Timeseries (single variable)
The timeseries of an angle variable (radians)
phi (float): Critical phase angle (radians) at which to report crossings.
Returns:
array of float
"""
#TODO support multivariate time series
ts = ts.squeeze()
if ts.ndim is not 1:
raise ValueError('Currently can only use on single variable timeseries')
# Interpret the timeseries as belonging to a phase variable.
# Map its range to the interval (-pi, pi] with critical angle at zero:
ts = mod2pi(ts - phi)
tsa = ts[0:-1]
tsb = ts[1:]
p2 = np.pi/2
# Time indices where phase crosses or reaches zero from below or above
zc = np.nonzero((tsa > -p2) & (tsa < 0) & (tsb >= 0) & (tsb < p2) |
(tsa < p2) & (tsa > 0) & (tsb <= 0) & (tsb > -p2))[0] + 1
# Estimate crossing time interpolated linearly within a single time step
va = ts[zc-1]
vb = ts[zc]
ct = (np.abs(vb)*ts.tspan[zc-1] +
np.abs(va)*ts.tspan[zc]) / np.abs(vb - va) # denominator always !=0
# Also include starting time if we started exactly at zero
if ts[0] == 0.0:
zc = np.r_[np.array([0]), zc]
ct = np.r_[np.array([ts.tspan[0]]), ct]
# Time indices where phase crosses pi
pc = np.nonzero((tsa > p2) & (tsb < -p2) | (tsa < -p2) & (tsb > p2))[0] + 1
# Select those zero-crossings separated by at least one pi-crossing
splice = np.searchsorted(pc, zc)
which_zc = np.r_[np.array([0]), np.nonzero(splice[0:-1] - splice[1:])[0] +1]
if ct.shape[0] is 0:
return ct
else:
return ct[which_zc] | [
"def",
"phase_crossings",
"(",
"ts",
",",
"phi",
"=",
"0.0",
")",
":",
"#TODO support multivariate time series",
"ts",
"=",
"ts",
".",
"squeeze",
"(",
")",
"if",
"ts",
".",
"ndim",
"is",
"not",
"1",
":",
"raise",
"ValueError",
"(",
"'Currently can only use o... | For a single variable timeseries representing the phase of an oscillator,
find the times at which the phase crosses angle phi,
with the condition that the phase must visit phi+pi between crossings.
(Thus if noise causes the phase to wander back and forth across angle phi
without the oscillator doing a full revolution, then this is recorded as
a single crossing event, giving the time of the earliest arrival.)
If the timeseries begins (or ends) exactly at phi, then time zero
(or the ending time) is also included as a crossing event,
so that the boundaries of the first and last oscillations are included.
If the actual crossing time falls between two time steps, linear
interpolation is used to estimate the crossing time.
Arguments:
ts: Timeseries (single variable)
The timeseries of an angle variable (radians)
phi (float): Critical phase angle (radians) at which to report crossings.
Returns:
array of float | [
"For",
"a",
"single",
"variable",
"timeseries",
"representing",
"the",
"phase",
"of",
"an",
"oscillator",
"find",
"the",
"times",
"at",
"which",
"the",
"phase",
"crosses",
"angle",
"phi",
"with",
"the",
"condition",
"that",
"the",
"phase",
"must",
"visit",
"... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/phase.py#L24-L82 | train |
mattja/nsim | nsim/analyses1/phase.py | periods | def periods(ts, phi=0.0):
"""For a single variable timeseries representing the phase of an oscillator,
measure the period of each successive oscillation.
An individual oscillation is defined to start and end when the phase
passes phi (by default zero) after completing a full cycle.
If the timeseries begins (or ends) exactly at phi, then the first
(or last) oscillation will be included.
Arguments:
ts: Timeseries (single variable)
The timeseries of an angle variable (radians)
phi (float): A single oscillation starts and ends at phase phi (by
default zero).
"""
ts = np.squeeze(ts)
if ts.ndim <= 1:
return np.diff(phase_crossings(ts, phi))
else:
return np.hstack([ts[...,i].periods(phi) for i in range(ts.shape[-1])]) | python | def periods(ts, phi=0.0):
"""For a single variable timeseries representing the phase of an oscillator,
measure the period of each successive oscillation.
An individual oscillation is defined to start and end when the phase
passes phi (by default zero) after completing a full cycle.
If the timeseries begins (or ends) exactly at phi, then the first
(or last) oscillation will be included.
Arguments:
ts: Timeseries (single variable)
The timeseries of an angle variable (radians)
phi (float): A single oscillation starts and ends at phase phi (by
default zero).
"""
ts = np.squeeze(ts)
if ts.ndim <= 1:
return np.diff(phase_crossings(ts, phi))
else:
return np.hstack([ts[...,i].periods(phi) for i in range(ts.shape[-1])]) | [
"def",
"periods",
"(",
"ts",
",",
"phi",
"=",
"0.0",
")",
":",
"ts",
"=",
"np",
".",
"squeeze",
"(",
"ts",
")",
"if",
"ts",
".",
"ndim",
"<=",
"1",
":",
"return",
"np",
".",
"diff",
"(",
"phase_crossings",
"(",
"ts",
",",
"phi",
")",
")",
"el... | For a single variable timeseries representing the phase of an oscillator,
measure the period of each successive oscillation.
An individual oscillation is defined to start and end when the phase
passes phi (by default zero) after completing a full cycle.
If the timeseries begins (or ends) exactly at phi, then the first
(or last) oscillation will be included.
Arguments:
ts: Timeseries (single variable)
The timeseries of an angle variable (radians)
phi (float): A single oscillation starts and ends at phase phi (by
default zero). | [
"For",
"a",
"single",
"variable",
"timeseries",
"representing",
"the",
"phase",
"of",
"an",
"oscillator",
"measure",
"the",
"period",
"of",
"each",
"successive",
"oscillation",
"."
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/phase.py#L85-L106 | train |
mattja/nsim | nsim/analyses1/phase.py | circmean | def circmean(ts, axis=2):
"""Circular mean phase"""
return np.exp(1.0j * ts).mean(axis=axis).angle() | python | def circmean(ts, axis=2):
"""Circular mean phase"""
return np.exp(1.0j * ts).mean(axis=axis).angle() | [
"def",
"circmean",
"(",
"ts",
",",
"axis",
"=",
"2",
")",
":",
"return",
"np",
".",
"exp",
"(",
"1.0j",
"*",
"ts",
")",
".",
"mean",
"(",
"axis",
"=",
"axis",
")",
".",
"angle",
"(",
")"
] | Circular mean phase | [
"Circular",
"mean",
"phase"
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/phase.py#L109-L111 | train |
mattja/nsim | nsim/analyses1/phase.py | order_param | def order_param(ts, axis=2):
"""Order parameter of phase synchronization"""
return np.abs(np.exp(1.0j * ts).mean(axis=axis)) | python | def order_param(ts, axis=2):
"""Order parameter of phase synchronization"""
return np.abs(np.exp(1.0j * ts).mean(axis=axis)) | [
"def",
"order_param",
"(",
"ts",
",",
"axis",
"=",
"2",
")",
":",
"return",
"np",
".",
"abs",
"(",
"np",
".",
"exp",
"(",
"1.0j",
"*",
"ts",
")",
".",
"mean",
"(",
"axis",
"=",
"axis",
")",
")"
] | Order parameter of phase synchronization | [
"Order",
"parameter",
"of",
"phase",
"synchronization"
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/phase.py#L114-L116 | train |
mattja/nsim | nsim/analyses1/_cwtmorlet.py | cwtmorlet | def cwtmorlet(points, width):
"""complex morlet wavelet function compatible with scipy.signal.cwt
Parameters: points: int
Number of points in `vector`.
width: scalar
Width parameter of wavelet.
Equals (sample rate / fundamental frequency of wavelet)
Returns: `vector`: complex-valued ndarray of shape (points,)
"""
omega = 5.0
s = points / (2.0 * omega * width)
return wavelets.morlet(points, omega, s, complete=True) | python | def cwtmorlet(points, width):
"""complex morlet wavelet function compatible with scipy.signal.cwt
Parameters: points: int
Number of points in `vector`.
width: scalar
Width parameter of wavelet.
Equals (sample rate / fundamental frequency of wavelet)
Returns: `vector`: complex-valued ndarray of shape (points,)
"""
omega = 5.0
s = points / (2.0 * omega * width)
return wavelets.morlet(points, omega, s, complete=True) | [
"def",
"cwtmorlet",
"(",
"points",
",",
"width",
")",
":",
"omega",
"=",
"5.0",
"s",
"=",
"points",
"/",
"(",
"2.0",
"*",
"omega",
"*",
"width",
")",
"return",
"wavelets",
".",
"morlet",
"(",
"points",
",",
"omega",
",",
"s",
",",
"complete",
"=",
... | complex morlet wavelet function compatible with scipy.signal.cwt
Parameters: points: int
Number of points in `vector`.
width: scalar
Width parameter of wavelet.
Equals (sample rate / fundamental frequency of wavelet)
Returns: `vector`: complex-valued ndarray of shape (points,) | [
"complex",
"morlet",
"wavelet",
"function",
"compatible",
"with",
"scipy",
".",
"signal",
".",
"cwt",
"Parameters",
":",
"points",
":",
"int",
"Number",
"of",
"points",
"in",
"vector",
".",
"width",
":",
"scalar",
"Width",
"parameter",
"of",
"wavelet",
".",
... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/_cwtmorlet.py#L5-L16 | train |
mattja/nsim | nsim/analyses1/_cwtmorlet.py | roughcwt | def roughcwt(data, wavelet, widths):
"""
Continuous wavelet transform.
Performs a continuous wavelet transform on `data`,
using the `wavelet` function. A CWT performs a convolution
with `data` using the `wavelet` function, which is characterized
by a width parameter and length parameter.
Parameters
----------
data : (N,) ndarray
data on which to perform the transform.
wavelet : function
Wavelet function, which should take 2 arguments.
The first argument is the number of points that the returned vector
will have (len(wavelet(width,length)) == length).
The second is a width parameter, defining the size of the wavelet
(e.g. standard deviation of a gaussian). See `ricker`, which
satisfies these requirements.
widths : (M,) sequence
Widths to use for transform.
Returns
-------
cwt: (M, N) ndarray
Will have shape of (len(data), len(widths)).
Notes
-----
>>> length = min(10 * width[ii], len(data))
>>> cwt[ii,:] = scipy.signal.convolve(data, wavelet(length,
... width[ii]), mode='same')
Examples
--------
>>> from scipy import signal
>>> sig = np.random.rand(20) - 0.5
>>> wavelet = signal.ricker
>>> widths = np.arange(1, 11)
>>> cwtmatr = signal.cwt(sig, wavelet, widths)
"""
out_dtype = wavelet(widths[0], widths[0]).dtype
output = np.zeros([len(widths), len(data)], dtype=out_dtype)
for ind, width in enumerate(widths):
wavelet_data = wavelet(min(3 * width, len(data)), width)
output[ind, :] = convolve(data, wavelet_data,
mode='same')
return output | python | def roughcwt(data, wavelet, widths):
"""
Continuous wavelet transform.
Performs a continuous wavelet transform on `data`,
using the `wavelet` function. A CWT performs a convolution
with `data` using the `wavelet` function, which is characterized
by a width parameter and length parameter.
Parameters
----------
data : (N,) ndarray
data on which to perform the transform.
wavelet : function
Wavelet function, which should take 2 arguments.
The first argument is the number of points that the returned vector
will have (len(wavelet(width,length)) == length).
The second is a width parameter, defining the size of the wavelet
(e.g. standard deviation of a gaussian). See `ricker`, which
satisfies these requirements.
widths : (M,) sequence
Widths to use for transform.
Returns
-------
cwt: (M, N) ndarray
Will have shape of (len(data), len(widths)).
Notes
-----
>>> length = min(10 * width[ii], len(data))
>>> cwt[ii,:] = scipy.signal.convolve(data, wavelet(length,
... width[ii]), mode='same')
Examples
--------
>>> from scipy import signal
>>> sig = np.random.rand(20) - 0.5
>>> wavelet = signal.ricker
>>> widths = np.arange(1, 11)
>>> cwtmatr = signal.cwt(sig, wavelet, widths)
"""
out_dtype = wavelet(widths[0], widths[0]).dtype
output = np.zeros([len(widths), len(data)], dtype=out_dtype)
for ind, width in enumerate(widths):
wavelet_data = wavelet(min(3 * width, len(data)), width)
output[ind, :] = convolve(data, wavelet_data,
mode='same')
return output | [
"def",
"roughcwt",
"(",
"data",
",",
"wavelet",
",",
"widths",
")",
":",
"out_dtype",
"=",
"wavelet",
"(",
"widths",
"[",
"0",
"]",
",",
"widths",
"[",
"0",
"]",
")",
".",
"dtype",
"output",
"=",
"np",
".",
"zeros",
"(",
"[",
"len",
"(",
"widths"... | Continuous wavelet transform.
Performs a continuous wavelet transform on `data`,
using the `wavelet` function. A CWT performs a convolution
with `data` using the `wavelet` function, which is characterized
by a width parameter and length parameter.
Parameters
----------
data : (N,) ndarray
data on which to perform the transform.
wavelet : function
Wavelet function, which should take 2 arguments.
The first argument is the number of points that the returned vector
will have (len(wavelet(width,length)) == length).
The second is a width parameter, defining the size of the wavelet
(e.g. standard deviation of a gaussian). See `ricker`, which
satisfies these requirements.
widths : (M,) sequence
Widths to use for transform.
Returns
-------
cwt: (M, N) ndarray
        Will have shape of (len(widths), len(data)).
Notes
-----
>>> length = min(10 * width[ii], len(data))
>>> cwt[ii,:] = scipy.signal.convolve(data, wavelet(length,
... width[ii]), mode='same')
Examples
--------
>>> from scipy import signal
>>> sig = np.random.rand(20) - 0.5
>>> wavelet = signal.ricker
>>> widths = np.arange(1, 11)
>>> cwtmatr = signal.cwt(sig, wavelet, widths) | [
"Continuous",
"wavelet",
"transform",
"."
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/_cwtmorlet.py#L20-L69 | train |
def variability_fp(ts, freqs=None, ncycles=6, plot=True):
    """Example variability function.

    Gives two continuous, time-resolved measures of the variability of a
    time series, ranging between -1 and 1.
    The two measures are based on variance of the centroid frequency and
    variance of the height of the spectral peak, respectively.
    (Centroid frequency meaning the power-weighted average frequency)
    These measures are calculated over sliding time windows of variable size.

    See also: Blenkinsop et al. (2012) The dynamic evolution of focal-onset
    epilepsies - combining theoretical and clinical observations

    Args:
      ts  Timeseries of m variables, shape (n, m). Assumed constant timestep.
      freqs  (optional) List of frequencies to examine. If None, defaults to
             50 frequency bands ranging 1Hz to 60Hz, logarithmically spaced.
      ncycles  Window size, in number of cycles of the centroid frequency.
      plot  bool  Whether to display the output

    Returns:
      variability  Timeseries of shape (n, m, 2)
        variability[:, :, 0] gives a measure of variability
        between -1 and 1 based on variance of centroid frequency.
        variability[:, :, 1] gives a measure of variability
        between -1 and 1 based on variance of maximum power.
    """
    if freqs is None:
        freqs = np.logspace(np.log10(1.0), np.log10(60.0), 50)
    else:
        freqs = np.array(freqs)
    # Fixed: compare ndim by value (== 1), not identity (`is 1`), which
    # relies on CPython small-int caching and warns on Python >= 3.8.
    if ts.ndim == 1:
        ts = ts[:, np.newaxis]
    channels = ts.shape[1]
    n = len(ts)
    dt = (1.0*ts.tspan[-1] - ts.tspan[0]) / (n - 1)
    fs = 1.0 / dt
    dtype = ts.dtype
    # Estimate time-resolved power spectra using continuous wavelet transform
    coefs = ts.cwt(freqs, wavelet=cwtmorlet, plot=False)
    # coefs is a huge array, so do the |.|^2 operations in place and free
    # intermediates as soon as possible.
    powers = np.square(np.abs(coefs, coefs), coefs).real.astype(dtype,
                                                                copy=False)
    del coefs
    max_power = np.max(powers, axis=1)
    total_power = np.sum(powers, axis=1, keepdims=True)
    rel_power = np.divide(powers, total_power, powers)
    del powers
    # Power-weighted average frequency at each sample; shape (n, m).
    centroid_freq = np.tensordot(freqs, rel_power, axes=(0, 1))
    del rel_power
    # hw is half window size (in number of samples), shape (n, m):
    # ncycles cycles of the local centroid frequency.
    hw = np.int64(np.ceil(0.5 * ncycles * fs / centroid_freq))
    allchannels_variability = np.zeros((n, channels, 2), dtype)  # output array
    for i in range(channels):
        logvar_centfreq = np.zeros(n, dtype)
        logvar_maxpower = np.zeros(n, dtype)
        for j in range(n):
            # compute variance of two chosen signal properties over a
            # window of 2*hw+1 samples centered on sample number j
            wstart = j - hw[j, i]
            wend = j + hw[j, i]
            if wstart >= 0 and wend < n:
                logvar_centfreq[j] = np.log(centroid_freq[wstart:wend+1].var())
                logvar_maxpower[j] = np.log(max_power[wstart:wend+1].var())
            else:
                # window extends beyond the data: mark as undefined
                logvar_centfreq[j] = np.nan
                logvar_maxpower[j] = np.nan
        allchannels_variability[:, i, 0] = _rescale(logvar_centfreq)
        allchannels_variability[:, i, 1] = _rescale(logvar_maxpower)
    allchannels_variability = Timeseries(allchannels_variability,
                                         ts.tspan, labels=ts.labels)
    if plot:
        _plot_variability(ts, allchannels_variability)
    return allchannels_variability
"""Example variability function.
Gives two continuous, time-resolved measures of the variability of a
time series, ranging between -1 and 1.
The two measures are based on variance of the centroid frequency and
variance of the height of the spectral peak, respectively.
(Centroid frequency meaning the power-weighted average frequency)
These measures are calculated over sliding time windows of variable size.
See also: Blenkinsop et al. (2012) The dynamic evolution of focal-onset
epilepsies - combining theoretical and clinical observations
Args:
ts Timeseries of m variables, shape (n, m). Assumed constant timestep.
freqs (optional) List of frequencies to examine. If None, defaults to
50 frequency bands ranging 1Hz to 60Hz, logarithmically spaced.
ncycles Window size, in number of cycles of the centroid frequency.
plot bool Whether to display the output
Returns:
variability Timeseries of shape (n, m, 2)
variability[:, :, 0] gives a measure of variability
between -1 and 1 based on variance of centroid frequency.
variability[:, :, 1] gives a measure of variability
between -1 and 1 based on variance of maximum power.
"""
if freqs is None:
freqs = np.logspace(np.log10(1.0), np.log10(60.0), 50)
else:
freqs = np.array(freqs)
orig_ndim = ts.ndim
if ts.ndim is 1:
ts = ts[:, np.newaxis]
channels = ts.shape[1]
n = len(ts)
dt = (1.0*ts.tspan[-1] - ts.tspan[0]) / (n - 1)
fs = 1.0 / dt
dtype = ts.dtype
# Estimate time-resolved power spectra using continuous wavelet transform
coefs = ts.cwt(freqs, wavelet=cwtmorlet, plot=False)
# this is a huge array so try to do operations in place
powers = np.square(np.abs(coefs, coefs), coefs).real.astype(dtype,
copy=False)
del coefs
max_power = np.max(powers, axis=1)
total_power = np.sum(powers, axis=1, keepdims=True)
rel_power = np.divide(powers, total_power, powers)
del powers
centroid_freq = np.tensordot(freqs, rel_power, axes=(0, 1)) # shape (n, m)
del rel_power
# hw is half window size (in number of samples)
hw = np.int64(np.ceil(0.5 * ncycles * fs / centroid_freq)) # shape (n, m)
allchannels_variability = np.zeros((n, channels, 2), dtype) # output array
for i in range(channels):
logvar_centfreq = np.zeros(n, dtype)
logvar_maxpower = np.zeros(n, dtype)
for j in range(n):
# compute variance of two chosen signal properties over a
# window of 2*hw+1 samples centered on sample number j
wstart = j - hw[j, i]
wend = j + hw[j, i]
if wstart >= 0 and wend < n:
logvar_centfreq[j] = np.log(centroid_freq[wstart:wend+1].var())
logvar_maxpower[j] = np.log(max_power[wstart:wend+1].var())
else:
logvar_centfreq[j] = np.nan
logvar_maxpower[j] = np.nan
allchannels_variability[:, i, 0] = _rescale(logvar_centfreq)
allchannels_variability[:, i, 1] = _rescale(logvar_maxpower)
allchannels_variability = Timeseries(allchannels_variability,
ts.tspan, labels=ts.labels)
if plot:
_plot_variability(ts, allchannels_variability)
return allchannels_variability | [
"def",
"variability_fp",
"(",
"ts",
",",
"freqs",
"=",
"None",
",",
"ncycles",
"=",
"6",
",",
"plot",
"=",
"True",
")",
":",
"if",
"freqs",
"is",
"None",
":",
"freqs",
"=",
"np",
".",
"logspace",
"(",
"np",
".",
"log10",
"(",
"1.0",
")",
",",
"... | Example variability function.
Gives two continuous, time-resolved measures of the variability of a
time series, ranging between -1 and 1.
The two measures are based on variance of the centroid frequency and
variance of the height of the spectral peak, respectively.
(Centroid frequency meaning the power-weighted average frequency)
These measures are calculated over sliding time windows of variable size.
See also: Blenkinsop et al. (2012) The dynamic evolution of focal-onset
epilepsies - combining theoretical and clinical observations
Args:
ts Timeseries of m variables, shape (n, m). Assumed constant timestep.
freqs (optional) List of frequencies to examine. If None, defaults to
50 frequency bands ranging 1Hz to 60Hz, logarithmically spaced.
ncycles Window size, in number of cycles of the centroid frequency.
plot bool Whether to display the output
Returns:
variability Timeseries of shape (n, m, 2)
variability[:, :, 0] gives a measure of variability
between -1 and 1 based on variance of centroid frequency.
variability[:, :, 1] gives a measure of variability
between -1 and 1 based on variance of maximum power. | [
"Example",
"variability",
"function",
".",
"Gives",
"two",
"continuous",
"time",
"-",
"resolved",
"measures",
"of",
"the",
"variability",
"of",
"a",
"time",
"series",
"ranging",
"between",
"-",
"1",
"and",
"1",
".",
"The",
"two",
"measures",
"are",
"based",
... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/epochs.py#L31-L102 | train |
mattja/nsim | nsim/analyses1/epochs.py | _rescale | def _rescale(ar):
"""Shift and rescale array ar to the interval [-1, 1]"""
max = np.nanmax(ar)
min = np.nanmin(ar)
midpoint = (max + min) / 2.0
return 2.0 * (ar - midpoint) / (max - min) | python | def _rescale(ar):
"""Shift and rescale array ar to the interval [-1, 1]"""
max = np.nanmax(ar)
min = np.nanmin(ar)
midpoint = (max + min) / 2.0
return 2.0 * (ar - midpoint) / (max - min) | [
"def",
"_rescale",
"(",
"ar",
")",
":",
"max",
"=",
"np",
".",
"nanmax",
"(",
"ar",
")",
"min",
"=",
"np",
".",
"nanmin",
"(",
"ar",
")",
"midpoint",
"=",
"(",
"max",
"+",
"min",
")",
"/",
"2.0",
"return",
"2.0",
"*",
"(",
"ar",
"-",
"midpoin... | Shift and rescale array ar to the interval [-1, 1] | [
"Shift",
"and",
"rescale",
"array",
"ar",
"to",
"the",
"interval",
"[",
"-",
"1",
"1",
"]"
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/epochs.py#L105-L110 | train |
def _get_color_list():
    """Get cycle of colors in a way compatible with all matplotlib versions"""
    params = plt.rcParams
    # Newer matplotlib exposes the cycle as 'axes.prop_cycle'; fall back to
    # the legacy 'axes.color_cycle' list on old versions.
    if 'axes.prop_cycle' not in params:
        return params['axes.color_cycle']
    return [entry['color'] for entry in list(params['axes.prop_cycle'])]
"""Get cycle of colors in a way compatible with all matplotlib versions"""
if 'axes.prop_cycle' in plt.rcParams:
return [p['color'] for p in list(plt.rcParams['axes.prop_cycle'])]
else:
return plt.rcParams['axes.color_cycle'] | [
"def",
"_get_color_list",
"(",
")",
":",
"if",
"'axes.prop_cycle'",
"in",
"plt",
".",
"rcParams",
":",
"return",
"[",
"p",
"[",
"'color'",
"]",
"for",
"p",
"in",
"list",
"(",
"plt",
".",
"rcParams",
"[",
"'axes.prop_cycle'",
"]",
")",
"]",
"else",
":",... | Get cycle of colors in a way compatible with all matplotlib versions | [
"Get",
"cycle",
"of",
"colors",
"in",
"a",
"way",
"compatible",
"with",
"all",
"matplotlib",
"versions"
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/epochs.py#L113-L118 | train |
def _plot_variability(ts, variability, threshold=None, epochs=None):
    """Plot the timeseries and variability. Optionally plot epochs.

    Args:
      ts  Timeseries of m variables, shape (n, m).
      variability  array of shape (n,), (n, q) or (n, m, q): q scalar
          variability measures per channel (promoted to 3D below).
      threshold  (optional) horizontal threshold line to draw; also shown
          in the figure title when given.
      epochs  (optional) per-channel lists of (start, end) index tuples to
          highlight as shaded patches.
    """
    import matplotlib.style
    import matplotlib as mpl
    mpl.style.use('classic')
    import matplotlib.pyplot as plt
    # Fixed: `is 1` / `is 2` identity comparisons replaced with ==.
    if variability.ndim == 1:
        variability = variability[:, np.newaxis, np.newaxis]
    elif variability.ndim == 2:
        variability = variability[:, np.newaxis, :]
    vmeasures = variability.shape[2]
    channels = ts.shape[1]
    dt = (1.0*ts.tspan[-1] - ts.tspan[0]) / (len(ts) - 1)
    fig = plt.figure()
    ylabelprops = dict(rotation=0,
                       horizontalalignment='right',
                       verticalalignment='center',
                       x=-0.01)
    for i in range(channels):
        # Stack channel axes vertically, sharing the figure height equally.
        rect = (0.1, 0.85*(channels - i - 1)/channels + 0.1,
                0.8, 0.85/channels)
        axprops = dict()
        if channels > 10:
            axprops['yticks'] = []
        ax = fig.add_axes(rect, **axprops)
        ax.plot(ts.tspan, ts[:, i])
        if ts.labels[1] is None:
            ax.set_ylabel(u'channel %d' % i, **ylabelprops)
        else:
            ax.set_ylabel(ts.labels[1][i], **ylabelprops)
        plt.setp(ax.get_xticklabels(), visible=False)
        # Only the bottom axes keeps tick labels and the time axis label.
        if i == channels - 1:
            plt.setp(ax.get_xticklabels(), visible=True)
            ax.set_xlabel('time (s)')
        ax2 = ax.twinx()
        if vmeasures > 1:
            mean_v = np.nanmean(variability[:, i, :], axis=1)
            ax2.plot(ts.tspan, mean_v, color='g')
            colors = _get_color_list()
            for j in range(vmeasures):
                ax2.plot(ts.tspan, variability[:, i, j], linestyle='dotted',
                         color=colors[(3 + j) % len(colors)])
            if i == 0:
                ax2.legend(['variability (mean)'] +
                           ['variability %d' % j for j in range(vmeasures)],
                           loc='best')
        else:
            ax2.plot(ts.tspan, variability[:, i, 0])
            ax2.legend(('variability',), loc='best')
        if threshold is not None:
            ax2.axhline(y=threshold, color='Gray', linestyle='dashed')
        ax2.set_ylabel('variability')
        ymin = np.nanmin(ts[:, i])
        ymax = np.nanmax(ts[:, i])
        tstart = ts.tspan[0]
        if epochs:
            # highlight epochs using rectangular patches
            for e in epochs[i]:
                t1 = tstart + (e[0] - 1) * dt
                ax.add_patch(mpl.patches.Rectangle(
                    (t1, ymin), (e[1] - e[0])*dt, ymax - ymin, alpha=0.2,
                    color='green', ec='none'))
    # Fixed: the original formatted `threshold` with %g unconditionally,
    # raising TypeError when threshold is None (the default, used by
    # variability_fp's plotting path).
    if threshold is None:
        fig.axes[0].set_title(u'variability')
    else:
        fig.axes[0].set_title(u'variability (threshold = %g)' % threshold)
    fig.show()
"""Plot the timeseries and variability. Optionally plot epochs."""
import matplotlib.style
import matplotlib as mpl
mpl.style.use('classic')
import matplotlib.pyplot as plt
if variability.ndim is 1:
variability = variability[:, np.newaxis, np.newaxis]
elif variability.ndim is 2:
variability = variability[:, np.newaxis, :]
vmeasures = variability.shape[2]
channels = ts.shape[1]
dt = (1.0*ts.tspan[-1] - ts.tspan[0]) / (len(ts) - 1)
fig = plt.figure()
ylabelprops = dict(rotation=0,
horizontalalignment='right',
verticalalignment='center',
x=-0.01)
for i in range(channels):
rect = (0.1, 0.85*(channels - i - 1)/channels + 0.1,
0.8, 0.85/channels)
axprops = dict()
if channels > 10:
axprops['yticks'] = []
ax = fig.add_axes(rect, **axprops)
ax.plot(ts.tspan, ts[:, i])
if ts.labels[1] is None:
ax.set_ylabel(u'channel %d' % i, **ylabelprops)
else:
ax.set_ylabel(ts.labels[1][i], **ylabelprops)
plt.setp(ax.get_xticklabels(), visible=False)
if i is channels - 1:
plt.setp(ax.get_xticklabels(), visible=True)
ax.set_xlabel('time (s)')
ax2 = ax.twinx()
if vmeasures > 1:
mean_v = np.nanmean(variability[:, i, :], axis=1)
ax2.plot(ts.tspan, mean_v, color='g')
colors = _get_color_list()
for j in range(vmeasures):
ax2.plot(ts.tspan, variability[:, i, j], linestyle='dotted',
color=colors[(3 + j) % len(colors)])
if i is 0:
ax2.legend(['variability (mean)'] +
['variability %d' % j for j in range(vmeasures)],
loc='best')
else:
ax2.plot(ts.tspan, variability[:, i, 0])
ax2.legend(('variability',), loc='best')
if threshold is not None:
ax2.axhline(y=threshold, color='Gray', linestyle='dashed')
ax2.set_ylabel('variability')
ymin = np.nanmin(ts[:, i])
ymax = np.nanmax(ts[:, i])
tstart = ts.tspan[0]
if epochs:
# highlight epochs using rectangular patches
for e in epochs[i]:
t1 = tstart + (e[0] - 1) * dt
ax.add_patch(mpl.patches.Rectangle(
(t1, ymin), (e[1] - e[0])*dt, ymax - ymin, alpha=0.2,
color='green', ec='none'))
fig.axes[0].set_title(u'variability (threshold = %g)' % threshold)
fig.show() | [
"def",
"_plot_variability",
"(",
"ts",
",",
"variability",
",",
"threshold",
"=",
"None",
",",
"epochs",
"=",
"None",
")",
":",
"import",
"matplotlib",
".",
"style",
"import",
"matplotlib",
"as",
"mpl",
"mpl",
".",
"style",
".",
"use",
"(",
"'classic'",
... | Plot the timeseries and variability. Optionally plot epochs. | [
"Plot",
"the",
"timeseries",
"and",
"variability",
".",
"Optionally",
"plot",
"epochs",
"."
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/epochs.py#L121-L184 | train |
def epochs(ts, variability=None, threshold=0.0, minlength=1.0, plot=True):
    """Identify "stationary" epochs within a time series, based on a
    continuous measure of variability.

    Epochs are defined to contain the points of minimal variability, and to
    extend as wide as possible with variability not exceeding the threshold.

    Args:
      ts  Timeseries of m variables, shape (n, m).
      variability  (optional) Timeseries of shape (n, m, q), giving q scalar
          measures of the variability of timeseries `ts` near each
          point in time. (if None, we will use variability_fp())
          Epochs require the mean of these to be below the threshold.
      threshold  The maximum variability permitted in stationary epochs.
      minlength  Shortest acceptable epoch length (in seconds)
      plot  bool  Whether to display the output

    Returns: (variability, allchannels_epochs)
      variability: as above
      allchannels_epochs: (list of) list of tuples
          For each variable, a list of tuples (start, end) that give the
          starting and ending indices of stationary epochs.
          (epochs are inclusive of start point but not the end point)
    """
    if variability is None:
        variability = ts.variability_fp(plot=False)
    orig_ndim = ts.ndim
    # Fixed: identity comparisons (`is 1`, `is 2`, `is 0`) replaced by ==.
    if ts.ndim == 1:
        ts = ts[:, np.newaxis]
    if variability.ndim == 1:
        variability = variability[:, np.newaxis, np.newaxis]
    elif variability.ndim == 2:
        variability = variability[:, np.newaxis, :]
    channels = ts.shape[1]
    n = len(ts)
    dt = (1.0*ts.tspan[-1] - ts.tspan[0]) / (n - 1)
    fs = 1.0 / dt
    allchannels_epochs = []
    for i in range(channels):
        v = variability[:, i, :]
        v = np.nanmean(v, axis=1)  # mean of q different variability measures
        # then smooth the variability with a low-pass filter, only over the
        # samples where it is defined (NaN at the ends is left untouched)
        nonnan_ix = np.nonzero(~np.isnan(v))[0]
        crit_freq = 1.0  # Hz
        b, a = signal.butter(3, 2.0 * crit_freq / fs)
        v[nonnan_ix] = signal.filtfilt(b, a, v[nonnan_ix])
        # find all local minima of the variability not exceeding the threshold
        m = v[1:-1]
        l = v[0:-2]
        r = v[2:]
        minima = np.nonzero(~np.isnan(m) & ~np.isnan(l) & ~np.isnan(r) &
                            (m <= threshold) & (m-l < 0) & (r-m > 0))[0] + 1
        if len(minima) == 0:
            print(u'Channel %d: no epochs found using threshold %g' % (
                i, threshold))
            allchannels_epochs.append([])
        else:
            # Sort the list of minima by ascending variability
            minima = minima[np.argsort(v[minima])]
            # (renamed from `epochs`, which shadowed this function's name)
            channel_epochs = []
            for m in minima:
                # Check this minimum is not inside an existing epoch
                overlap = False
                for e in channel_epochs:
                    if m >= e[0] and m <= e[1]:
                        overlap = True
                        break
                if not overlap:
                    # Get largest subthreshold interval surrounding the minimum
                    startix = m - 1
                    endix = m + 1
                    for startix in range(m - 1, 0, -1):
                        if np.isnan(v[startix]) or v[startix] > threshold:
                            startix += 1
                            break
                    for endix in range(m + 1, len(v), 1):
                        if np.isnan(v[endix]) or v[endix] > threshold:
                            break
                    if (endix - startix) * dt >= minlength:
                        channel_epochs.append((startix, endix))
            allchannels_epochs.append(channel_epochs)
    if plot:
        _plot_variability(ts, variability, threshold, allchannels_epochs)
    if orig_ndim == 1:
        # single-channel input: unwrap the per-channel list
        allchannels_epochs = allchannels_epochs[0]
    return (variability, allchannels_epochs)
"""Identify "stationary" epochs within a time series, based on a
continuous measure of variability.
Epochs are defined to contain the points of minimal variability, and to
extend as wide as possible with variability not exceeding the threshold.
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m, q), giving q scalar
measures of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
Epochs require the mean of these to be below the threshold.
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
plot bool Whether to display the output
Returns: (variability, allchannels_epochs)
variability: as above
allchannels_epochs: (list of) list of tuples
For each variable, a list of tuples (start, end) that give the
starting and ending indices of stationary epochs.
(epochs are inclusive of start point but not the end point)
"""
if variability is None:
variability = ts.variability_fp(plot=False)
orig_ndim = ts.ndim
if ts.ndim is 1:
ts = ts[:, np.newaxis]
if variability.ndim is 1:
variability = variability[:, np.newaxis, np.newaxis]
elif variability.ndim is 2:
variability = variability[:, np.newaxis, :]
channels = ts.shape[1]
n = len(ts)
dt = (1.0*ts.tspan[-1] - ts.tspan[0]) / (n - 1)
fs = 1.0 / dt
allchannels_epochs = []
for i in range(channels):
v = variability[:, i, :]
v = np.nanmean(v, axis=1) # mean of q different variability measures
# then smooth the variability with a low-pass filter
nonnan_ix = np.nonzero(~np.isnan(v))[0]
nonnans = slice(nonnan_ix.min(), nonnan_ix.max())
crit_freq = 1.0 # Hz
b, a = signal.butter(3, 2.0 * crit_freq / fs)
#v[nonnans] = signal.filtfilt(b, a, v[nonnans])
v[nonnan_ix] = signal.filtfilt(b, a, v[nonnan_ix])
# find all local minima of the variability not exceeding the threshold
m = v[1:-1]
l = v[0:-2]
r = v[2:]
minima = np.nonzero(~np.isnan(m) & ~np.isnan(l) & ~np.isnan(r) &
(m <= threshold) & (m-l < 0) & (r-m > 0))[0] + 1
if len(minima) is 0:
print(u'Channel %d: no epochs found using threshold %g' % (
i, threshold))
allchannels_epochs.append([])
else:
# Sort the list of minima by ascending variability
minima = minima[np.argsort(v[minima])]
epochs = []
for m in minima:
# Check this minimum is not inside an existing epoch
overlap = False
for e in epochs:
if m >= e[0] and m <= e[1]:
overlap = True
break
if not overlap:
# Get largest subthreshold interval surrounding the minimum
startix = m - 1
endix = m + 1
for startix in range(m - 1, 0, -1):
if np.isnan(v[startix]) or v[startix] > threshold:
startix += 1
break
for endix in range(m + 1, len(v), 1):
if np.isnan(v[endix]) or v[endix] > threshold:
break
if (endix - startix) * dt >= minlength:
epochs.append((startix, endix))
allchannels_epochs.append(epochs)
if plot:
_plot_variability(ts, variability, threshold, allchannels_epochs)
if orig_ndim is 1:
allchannels_epochs = allchannels_epochs[0]
return (variability, allchannels_epochs) | [
"def",
"epochs",
"(",
"ts",
",",
"variability",
"=",
"None",
",",
"threshold",
"=",
"0.0",
",",
"minlength",
"=",
"1.0",
",",
"plot",
"=",
"True",
")",
":",
"if",
"variability",
"is",
"None",
":",
"variability",
"=",
"ts",
".",
"variability_fp",
"(",
... | Identify "stationary" epochs within a time series, based on a
continuous measure of variability.
Epochs are defined to contain the points of minimal variability, and to
extend as wide as possible with variability not exceeding the threshold.
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m, q), giving q scalar
measures of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
Epochs require the mean of these to be below the threshold.
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
plot bool Whether to display the output
Returns: (variability, allchannels_epochs)
variability: as above
allchannels_epochs: (list of) list of tuples
For each variable, a list of tuples (start, end) that give the
starting and ending indices of stationary epochs.
(epochs are inclusive of start point but not the end point) | [
"Identify",
"stationary",
"epochs",
"within",
"a",
"time",
"series",
"based",
"on",
"a",
"continuous",
"measure",
"of",
"variability",
".",
"Epochs",
"are",
"defined",
"to",
"contain",
"the",
"points",
"of",
"minimal",
"variability",
"and",
"to",
"extend",
"as... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/epochs.py#L187-L273 | train |
def epochs_distributed(ts, variability=None, threshold=0.0, minlength=1.0,
                       plot=True):
    """Same as `epochs()`, but computes channels in parallel for speed.

    (Note: This requires an IPython cluster to be started first,
     e.g. on a workstation type 'ipcluster start')

    Identify "stationary" epochs within a time series, based on a
    continuous measure of variability.
    Epochs are defined to contain the points of minimal variability, and to
    extend as wide as possible with variability not exceeding the threshold.

    Args:
      ts  Timeseries of m variables, shape (n, m).
      variability  (optional) Timeseries of shape (n, m, q), giving q scalar
          measures of the variability of timeseries `ts` near each
          point in time. (if None, we will use variability_fp())
          Epochs require the mean of these to be below the threshold.
      threshold  The maximum variability permitted in stationary epochs.
      minlength  Shortest acceptable epoch length (in seconds)
      plot  bool  Whether to display the output

    Returns: (variability, allchannels_epochs)
      variability: as above
      allchannels_epochs: (list of) list of tuples
          For each variable, a list of tuples (start, end) that give the
          starting and ending indices of stationary epochs.
          (epochs are inclusive of start point but not the end point)
    """
    import distob
    # Fixed: `ts.ndim is 1` identity comparison replaced with ==.
    if ts.ndim == 1:
        ts = ts[:, np.newaxis]
    if variability is None:
        # Scatter channels across the cluster and let each engine compute
        # its own variability measure inside epochs().
        dts = distob.scatter(ts, axis=1)
        vepochs = distob.vectorize(epochs)
        results = vepochs(dts, None, threshold, minlength, plot=False)
    else:
        def f(pair):
            return epochs(pair[0], pair[1], threshold, minlength, plot=False)
        allpairs = [(ts[:, i], variability[:, i]) for i in range(ts.shape[1])]
        vf = distob.vectorize(f)
        results = vf(allpairs)
    # (renamed from `vars`, which shadowed the builtin)
    channel_vars, allchannels_epochs = zip(*results)
    variability = distob.hstack(channel_vars)
    if plot:
        _plot_variability(ts, variability, threshold, allchannels_epochs)
    return (variability, allchannels_epochs)
plot=True):
"""Same as `epochs()`, but computes channels in parallel for speed.
(Note: This requires an IPython cluster to be started first,
e.g. on a workstation type 'ipcluster start')
Identify "stationary" epochs within a time series, based on a
continuous measure of variability.
Epochs are defined to contain the points of minimal variability, and to
extend as wide as possible with variability not exceeding the threshold.
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m, q), giving q scalar
measures of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
Epochs require the mean of these to be below the threshold.
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
plot bool Whether to display the output
Returns: (variability, allchannels_epochs)
variability: as above
allchannels_epochs: (list of) list of tuples
For each variable, a list of tuples (start, end) that give the
starting and ending indices of stationary epochs.
(epochs are inclusive of start point but not the end point)
"""
import distob
if ts.ndim is 1:
ts = ts[:, np.newaxis]
if variability is None:
dts = distob.scatter(ts, axis=1)
vepochs = distob.vectorize(epochs)
results = vepochs(dts, None, threshold, minlength, plot=False)
else:
def f(pair):
return epochs(pair[0], pair[1], threshold, minlength, plot=False)
allpairs = [(ts[:, i], variability[:, i]) for i in range(ts.shape[1])]
vf = distob.vectorize(f)
results = vf(allpairs)
vars, allchannels_epochs = zip(*results)
variability = distob.hstack(vars)
if plot:
_plot_variability(ts, variability, threshold, allchannels_epochs)
return (variability, allchannels_epochs) | [
"def",
"epochs_distributed",
"(",
"ts",
",",
"variability",
"=",
"None",
",",
"threshold",
"=",
"0.0",
",",
"minlength",
"=",
"1.0",
",",
"plot",
"=",
"True",
")",
":",
"import",
"distob",
"if",
"ts",
".",
"ndim",
"is",
"1",
":",
"ts",
"=",
"ts",
"... | Same as `epochs()`, but computes channels in parallel for speed.
(Note: This requires an IPython cluster to be started first,
e.g. on a workstation type 'ipcluster start')
Identify "stationary" epochs within a time series, based on a
continuous measure of variability.
Epochs are defined to contain the points of minimal variability, and to
extend as wide as possible with variability not exceeding the threshold.
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m, q), giving q scalar
measures of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
Epochs require the mean of these to be below the threshold.
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
plot bool Whether to display the output
Returns: (variability, allchannels_epochs)
variability: as above
allchannels_epochs: (list of) list of tuples
For each variable, a list of tuples (start, end) that give the
starting and ending indices of stationary epochs.
(epochs are inclusive of start point but not the end point) | [
"Same",
"as",
"epochs",
"()",
"but",
"computes",
"channels",
"in",
"parallel",
"for",
"speed",
"."
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/epochs.py#L276-L322 | train |
mattja/nsim | nsim/analyses1/epochs.py | epochs_joint | def epochs_joint(ts, variability=None, threshold=0.0, minlength=1.0,
proportion=0.75, plot=True):
"""Identify epochs within a multivariate time series where at least a
certain proportion of channels are "stationary", based on a previously
computed variability measure.
(Note: This requires an IPython cluster to be started first,
e.g. on a workstation type 'ipcluster start')
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m), giving a scalar
measure of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
proportion Require at least this fraction of channels to be "stationary"
plot bool Whether to display the output
Returns: (variability, joint_epochs)
joint_epochs: list of tuples
A list of tuples (start, end) that give the starting and ending indices
of time epochs that are stationary for at least `proportion` of channels.
(epochs are inclusive of start point but not the end point)
"""
variability, allchannels_epochs = ts.epochs_distributed(
variability, threshold, minlength, plot=False)
orig_ndim = ts.ndim
if ts.ndim is 1:
ts = ts[:, np.newaxis]
allchannels_epochs = [allchannels_epochs]
variability = variability[:, np.newaxis]
channels = ts.shape[1]
dt = (1.0*ts.tspan[-1] - ts.tspan[0]) / (len(ts) - 1)
starts = [(e[0], 1) for channel in allchannels_epochs for e in channel]
ends = [(e[1], -1) for channel in allchannels_epochs for e in channel]
all = sorted(starts + ends)
joint_epochs = []
in_joint_epoch = False
joint_start = 0.0
inside_count = 0
for bound in all:
inside_count += bound[1]
if not in_joint_epoch and 1.0*inside_count/channels >= proportion:
in_joint_epoch = True
joint_start = bound[0]
if in_joint_epoch and 1.0*inside_count/channels < proportion:
in_joint_epoch = False
joint_end = bound[0]
if (joint_end - joint_start)*dt >= minlength:
joint_epochs.append((joint_start, joint_end))
if plot:
joint_epochs_repeated = [joint_epochs] * channels
_plot_variability(ts, variability, threshold, joint_epochs_repeated)
return (variability, joint_epochs) | python | def epochs_joint(ts, variability=None, threshold=0.0, minlength=1.0,
proportion=0.75, plot=True):
"""Identify epochs within a multivariate time series where at least a
certain proportion of channels are "stationary", based on a previously
computed variability measure.
(Note: This requires an IPython cluster to be started first,
e.g. on a workstation type 'ipcluster start')
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m), giving a scalar
measure of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
proportion Require at least this fraction of channels to be "stationary"
plot bool Whether to display the output
Returns: (variability, joint_epochs)
joint_epochs: list of tuples
A list of tuples (start, end) that give the starting and ending indices
of time epochs that are stationary for at least `proportion` of channels.
(epochs are inclusive of start point but not the end point)
"""
variability, allchannels_epochs = ts.epochs_distributed(
variability, threshold, minlength, plot=False)
orig_ndim = ts.ndim
if ts.ndim is 1:
ts = ts[:, np.newaxis]
allchannels_epochs = [allchannels_epochs]
variability = variability[:, np.newaxis]
channels = ts.shape[1]
dt = (1.0*ts.tspan[-1] - ts.tspan[0]) / (len(ts) - 1)
starts = [(e[0], 1) for channel in allchannels_epochs for e in channel]
ends = [(e[1], -1) for channel in allchannels_epochs for e in channel]
all = sorted(starts + ends)
joint_epochs = []
in_joint_epoch = False
joint_start = 0.0
inside_count = 0
for bound in all:
inside_count += bound[1]
if not in_joint_epoch and 1.0*inside_count/channels >= proportion:
in_joint_epoch = True
joint_start = bound[0]
if in_joint_epoch and 1.0*inside_count/channels < proportion:
in_joint_epoch = False
joint_end = bound[0]
if (joint_end - joint_start)*dt >= minlength:
joint_epochs.append((joint_start, joint_end))
if plot:
joint_epochs_repeated = [joint_epochs] * channels
_plot_variability(ts, variability, threshold, joint_epochs_repeated)
return (variability, joint_epochs) | [
"def",
"epochs_joint",
"(",
"ts",
",",
"variability",
"=",
"None",
",",
"threshold",
"=",
"0.0",
",",
"minlength",
"=",
"1.0",
",",
"proportion",
"=",
"0.75",
",",
"plot",
"=",
"True",
")",
":",
"variability",
",",
"allchannels_epochs",
"=",
"ts",
".",
... | Identify epochs within a multivariate time series where at least a
certain proportion of channels are "stationary", based on a previously
computed variability measure.
(Note: This requires an IPython cluster to be started first,
e.g. on a workstation type 'ipcluster start')
Args:
ts Timeseries of m variables, shape (n, m).
variability (optional) Timeseries of shape (n, m), giving a scalar
measure of the variability of timeseries `ts` near each
point in time. (if None, we will use variability_fp())
threshold The maximum variability permitted in stationary epochs.
minlength Shortest acceptable epoch length (in seconds)
proportion Require at least this fraction of channels to be "stationary"
plot bool Whether to display the output
Returns: (variability, joint_epochs)
joint_epochs: list of tuples
A list of tuples (start, end) that give the starting and ending indices
of time epochs that are stationary for at least `proportion` of channels.
(epochs are inclusive of start point but not the end point) | [
"Identify",
"epochs",
"within",
"a",
"multivariate",
"time",
"series",
"where",
"at",
"least",
"a",
"certain",
"proportion",
"of",
"channels",
"are",
"stationary",
"based",
"on",
"a",
"previously",
"computed",
"variability",
"measure",
"."
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/epochs.py#L325-L379 | train |
mattja/nsim | nsim/analyses1/plots.py | plot | def plot(ts, title=None, show=True):
"""Plot a Timeseries
Args:
ts Timeseries
title str
show bool whether to display the figure or just return a figure object
"""
ts = _remove_pi_crossings(ts)
fig = plt.figure()
ylabelprops = dict(rotation=0,
horizontalalignment='right',
verticalalignment='center',
x=-0.01)
if ts.ndim > 2: # multiple sim timeseries. collapse vars onto each subplot.
num_subplots = ts.shape[ts.ndim - 1]
if title is None:
title = u'time series at each node'
for i in range(num_subplots):
ax = fig.add_subplot(num_subplots, 1, i+1)
ax.plot(ts.tspan, ts[...,i])
if ts.labels[-1] is not None:
ax.set_ylabel(ts.labels[-1][i], **ylabelprops)
else:
ax.set_ylabel('node ' + str(i), **ylabelprops)
plt.setp(ax.get_xticklabels(), visible=False)
fig.axes[0].set_title(title)
plt.setp(fig.axes[num_subplots-1].get_xticklabels(), visible=True)
fig.axes[num_subplots-1].set_xlabel('time (s)')
else: # single sim timeseries. show each variable separately.
if ts.ndim is 1:
ts = ts.reshape((-1, 1))
num_ax = ts.shape[1]
if title is None:
title=u'time series'
axprops = dict()
if num_ax > 10:
axprops['yticks'] = []
colors = _get_color_list()
for i in range(num_ax):
rect = 0.1, 0.85*(num_ax - i - 1)/num_ax + 0.1, 0.8, 0.85/num_ax
ax = fig.add_axes(rect, **axprops)
ax.plot(ts.tspan, ts[...,i], color=colors[i % len(colors)])
plt.setp(ax.get_xticklabels(), visible=False)
if ts.labels[1] is not None:
ax.set_ylabel(ts.labels[1][i], **ylabelprops)
fig.axes[0].set_title(title)
plt.setp(fig.axes[num_ax-1].get_xticklabels(), visible=True)
fig.axes[num_ax-1].set_xlabel('time (s)')
if show:
fig.show()
return fig | python | def plot(ts, title=None, show=True):
"""Plot a Timeseries
Args:
ts Timeseries
title str
show bool whether to display the figure or just return a figure object
"""
ts = _remove_pi_crossings(ts)
fig = plt.figure()
ylabelprops = dict(rotation=0,
horizontalalignment='right',
verticalalignment='center',
x=-0.01)
if ts.ndim > 2: # multiple sim timeseries. collapse vars onto each subplot.
num_subplots = ts.shape[ts.ndim - 1]
if title is None:
title = u'time series at each node'
for i in range(num_subplots):
ax = fig.add_subplot(num_subplots, 1, i+1)
ax.plot(ts.tspan, ts[...,i])
if ts.labels[-1] is not None:
ax.set_ylabel(ts.labels[-1][i], **ylabelprops)
else:
ax.set_ylabel('node ' + str(i), **ylabelprops)
plt.setp(ax.get_xticklabels(), visible=False)
fig.axes[0].set_title(title)
plt.setp(fig.axes[num_subplots-1].get_xticklabels(), visible=True)
fig.axes[num_subplots-1].set_xlabel('time (s)')
else: # single sim timeseries. show each variable separately.
if ts.ndim is 1:
ts = ts.reshape((-1, 1))
num_ax = ts.shape[1]
if title is None:
title=u'time series'
axprops = dict()
if num_ax > 10:
axprops['yticks'] = []
colors = _get_color_list()
for i in range(num_ax):
rect = 0.1, 0.85*(num_ax - i - 1)/num_ax + 0.1, 0.8, 0.85/num_ax
ax = fig.add_axes(rect, **axprops)
ax.plot(ts.tspan, ts[...,i], color=colors[i % len(colors)])
plt.setp(ax.get_xticklabels(), visible=False)
if ts.labels[1] is not None:
ax.set_ylabel(ts.labels[1][i], **ylabelprops)
fig.axes[0].set_title(title)
plt.setp(fig.axes[num_ax-1].get_xticklabels(), visible=True)
fig.axes[num_ax-1].set_xlabel('time (s)')
if show:
fig.show()
return fig | [
"def",
"plot",
"(",
"ts",
",",
"title",
"=",
"None",
",",
"show",
"=",
"True",
")",
":",
"ts",
"=",
"_remove_pi_crossings",
"(",
"ts",
")",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"ylabelprops",
"=",
"dict",
"(",
"rotation",
"=",
"0",
",",
"h... | Plot a Timeseries
Args:
ts Timeseries
title str
show bool whether to display the figure or just return a figure object | [
"Plot",
"a",
"Timeseries",
"Args",
":",
"ts",
"Timeseries",
"title",
"str",
"show",
"bool",
"whether",
"to",
"display",
"the",
"figure",
"or",
"just",
"return",
"a",
"figure",
"object"
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/plots.py#L31-L81 | train |
mattja/nsim | nsim/analyses1/plots.py | _remove_pi_crossings | def _remove_pi_crossings(ts):
"""For each variable in the Timeseries, checks whether it represents
a phase variable ranging from -pi to pi. If so, set all points where the
phase crosses pi to 'nan' so that spurious lines will not be plotted.
If ts does not need adjustment, then return ts.
Otherwise return a modified copy.
"""
orig_ts = ts
if ts.ndim is 1:
ts = ts[:, np.newaxis, np.newaxis]
elif ts.ndim is 2:
ts = ts[:, np.newaxis]
# Get the indices of those variables that have range of approx -pi to pi
tsmax = ts.max(axis=0)
tsmin = ts.min(axis=0)
phase_vars = np.transpose(np.nonzero((np.abs(tsmax - np.pi) < 0.01) &
(np.abs(tsmin + np.pi) < 0.01)))
if len(phase_vars) is 0:
return orig_ts
else:
ts = ts.copy()
for v in phase_vars:
ts1 = np.asarray(ts[:, v[0], v[1]]) # time series of single variable
ts1a = ts1[0:-1]
ts1b = ts1[1:]
p2 = np.pi/2
# Find time indices where phase crosses pi. Set those values to nan.
pc = np.nonzero((ts1a > p2) & (ts1b < -p2) |
(ts1a < -p2) & (ts1b > p2))[0] + 1
ts1[pc] = np.nan
ts[:, v[0], v[1]] = ts1
return ts | python | def _remove_pi_crossings(ts):
"""For each variable in the Timeseries, checks whether it represents
a phase variable ranging from -pi to pi. If so, set all points where the
phase crosses pi to 'nan' so that spurious lines will not be plotted.
If ts does not need adjustment, then return ts.
Otherwise return a modified copy.
"""
orig_ts = ts
if ts.ndim is 1:
ts = ts[:, np.newaxis, np.newaxis]
elif ts.ndim is 2:
ts = ts[:, np.newaxis]
# Get the indices of those variables that have range of approx -pi to pi
tsmax = ts.max(axis=0)
tsmin = ts.min(axis=0)
phase_vars = np.transpose(np.nonzero((np.abs(tsmax - np.pi) < 0.01) &
(np.abs(tsmin + np.pi) < 0.01)))
if len(phase_vars) is 0:
return orig_ts
else:
ts = ts.copy()
for v in phase_vars:
ts1 = np.asarray(ts[:, v[0], v[1]]) # time series of single variable
ts1a = ts1[0:-1]
ts1b = ts1[1:]
p2 = np.pi/2
# Find time indices where phase crosses pi. Set those values to nan.
pc = np.nonzero((ts1a > p2) & (ts1b < -p2) |
(ts1a < -p2) & (ts1b > p2))[0] + 1
ts1[pc] = np.nan
ts[:, v[0], v[1]] = ts1
return ts | [
"def",
"_remove_pi_crossings",
"(",
"ts",
")",
":",
"orig_ts",
"=",
"ts",
"if",
"ts",
".",
"ndim",
"is",
"1",
":",
"ts",
"=",
"ts",
"[",
":",
",",
"np",
".",
"newaxis",
",",
"np",
".",
"newaxis",
"]",
"elif",
"ts",
".",
"ndim",
"is",
"2",
":",
... | For each variable in the Timeseries, checks whether it represents
a phase variable ranging from -pi to pi. If so, set all points where the
phase crosses pi to 'nan' so that spurious lines will not be plotted.
If ts does not need adjustment, then return ts.
Otherwise return a modified copy. | [
"For",
"each",
"variable",
"in",
"the",
"Timeseries",
"checks",
"whether",
"it",
"represents",
"a",
"phase",
"variable",
"ranging",
"from",
"-",
"pi",
"to",
"pi",
".",
"If",
"so",
"set",
"all",
"points",
"where",
"the",
"phase",
"crosses",
"pi",
"to",
"n... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/plots.py#L84-L116 | train |
mattja/nsim | nsim/readfile.py | timeseries_from_mat | def timeseries_from_mat(filename, varname=None, fs=1.0):
"""load a multi-channel Timeseries from a MATLAB .mat file
Args:
filename (str): .mat file to load
varname (str): variable name. only needed if there is more than one
variable saved in the .mat file
fs (scalar): sample rate of timeseries in Hz. (constant timestep assumed)
Returns:
Timeseries
"""
import scipy.io as sio
if varname is None:
mat_dict = sio.loadmat(filename)
if len(mat_dict) > 1:
raise ValueError('Must specify varname: file contains '
'more than one variable. ')
else:
mat_dict = sio.loadmat(filename, variable_names=(varname,))
array = mat_dict.popitem()[1]
return Timeseries(array, fs=fs) | python | def timeseries_from_mat(filename, varname=None, fs=1.0):
"""load a multi-channel Timeseries from a MATLAB .mat file
Args:
filename (str): .mat file to load
varname (str): variable name. only needed if there is more than one
variable saved in the .mat file
fs (scalar): sample rate of timeseries in Hz. (constant timestep assumed)
Returns:
Timeseries
"""
import scipy.io as sio
if varname is None:
mat_dict = sio.loadmat(filename)
if len(mat_dict) > 1:
raise ValueError('Must specify varname: file contains '
'more than one variable. ')
else:
mat_dict = sio.loadmat(filename, variable_names=(varname,))
array = mat_dict.popitem()[1]
return Timeseries(array, fs=fs) | [
"def",
"timeseries_from_mat",
"(",
"filename",
",",
"varname",
"=",
"None",
",",
"fs",
"=",
"1.0",
")",
":",
"import",
"scipy",
".",
"io",
"as",
"sio",
"if",
"varname",
"is",
"None",
":",
"mat_dict",
"=",
"sio",
".",
"loadmat",
"(",
"filename",
")",
... | load a multi-channel Timeseries from a MATLAB .mat file
Args:
filename (str): .mat file to load
varname (str): variable name. only needed if there is more than one
variable saved in the .mat file
fs (scalar): sample rate of timeseries in Hz. (constant timestep assumed)
Returns:
Timeseries | [
"load",
"a",
"multi",
"-",
"channel",
"Timeseries",
"from",
"a",
"MATLAB",
".",
"mat",
"file"
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/readfile.py#L21-L42 | train |
mattja/nsim | nsim/readfile.py | save_mat | def save_mat(ts, filename):
"""save a Timeseries to a MATLAB .mat file
Args:
ts (Timeseries): the timeseries to save
filename (str): .mat filename to save to
"""
import scipy.io as sio
tspan = ts.tspan
fs = (1.0*len(tspan) - 1) / (tspan[-1] - tspan[0])
mat_dict = {'data': np.asarray(ts),
'fs': fs,
'labels': ts.labels[1]}
sio.savemat(filename, mat_dict, do_compression=True)
return | python | def save_mat(ts, filename):
"""save a Timeseries to a MATLAB .mat file
Args:
ts (Timeseries): the timeseries to save
filename (str): .mat filename to save to
"""
import scipy.io as sio
tspan = ts.tspan
fs = (1.0*len(tspan) - 1) / (tspan[-1] - tspan[0])
mat_dict = {'data': np.asarray(ts),
'fs': fs,
'labels': ts.labels[1]}
sio.savemat(filename, mat_dict, do_compression=True)
return | [
"def",
"save_mat",
"(",
"ts",
",",
"filename",
")",
":",
"import",
"scipy",
".",
"io",
"as",
"sio",
"tspan",
"=",
"ts",
".",
"tspan",
"fs",
"=",
"(",
"1.0",
"*",
"len",
"(",
"tspan",
")",
"-",
"1",
")",
"/",
"(",
"tspan",
"[",
"-",
"1",
"]",
... | save a Timeseries to a MATLAB .mat file
Args:
ts (Timeseries): the timeseries to save
filename (str): .mat filename to save to | [
"save",
"a",
"Timeseries",
"to",
"a",
"MATLAB",
".",
"mat",
"file",
"Args",
":",
"ts",
"(",
"Timeseries",
")",
":",
"the",
"timeseries",
"to",
"save",
"filename",
"(",
"str",
")",
":",
".",
"mat",
"filename",
"to",
"save",
"to"
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/readfile.py#L45-L58 | train |
mattja/nsim | nsim/readfile.py | timeseries_from_file | def timeseries_from_file(filename):
"""Load a multi-channel Timeseries from any file type supported by `biosig`
Supported file formats include EDF/EDF+, BDF/BDF+, EEG, CNT and GDF.
Full list is here: http://pub.ist.ac.at/~schloegl/biosig/TESTED
For EDF, EDF+, BDF and BDF+ files, we will use python-edf
if it is installed, otherwise will fall back to python-biosig.
Args:
filename
Returns:
Timeseries
"""
if not path.isfile(filename):
raise Error("file not found: '%s'" % filename)
is_edf_bdf = (filename[-4:].lower() in ['.edf', '.bdf'])
if is_edf_bdf:
try:
import edflib
return _load_edflib(filename)
except ImportError:
print('python-edf not installed. trying python-biosig instead...')
try:
import biosig
return _load_biosig(filename)
except ImportError:
message = (
"""To load timeseries from file, ensure python-biosig is installed
e.g. on Ubuntu or Debian type `apt-get install python-biosig`
or get it from http://biosig.sf.net/download.html""")
if is_edf_bdf:
message += """\n(For EDF/BDF files, can instead install python-edf:
https://bitbucket.org/cleemesser/python-edf/ )"""
raise Error(message) | python | def timeseries_from_file(filename):
"""Load a multi-channel Timeseries from any file type supported by `biosig`
Supported file formats include EDF/EDF+, BDF/BDF+, EEG, CNT and GDF.
Full list is here: http://pub.ist.ac.at/~schloegl/biosig/TESTED
For EDF, EDF+, BDF and BDF+ files, we will use python-edf
if it is installed, otherwise will fall back to python-biosig.
Args:
filename
Returns:
Timeseries
"""
if not path.isfile(filename):
raise Error("file not found: '%s'" % filename)
is_edf_bdf = (filename[-4:].lower() in ['.edf', '.bdf'])
if is_edf_bdf:
try:
import edflib
return _load_edflib(filename)
except ImportError:
print('python-edf not installed. trying python-biosig instead...')
try:
import biosig
return _load_biosig(filename)
except ImportError:
message = (
"""To load timeseries from file, ensure python-biosig is installed
e.g. on Ubuntu or Debian type `apt-get install python-biosig`
or get it from http://biosig.sf.net/download.html""")
if is_edf_bdf:
message += """\n(For EDF/BDF files, can instead install python-edf:
https://bitbucket.org/cleemesser/python-edf/ )"""
raise Error(message) | [
"def",
"timeseries_from_file",
"(",
"filename",
")",
":",
"if",
"not",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"raise",
"Error",
"(",
"\"file not found: '%s'\"",
"%",
"filename",
")",
"is_edf_bdf",
"=",
"(",
"filename",
"[",
"-",
"4",
":",
"]",
... | Load a multi-channel Timeseries from any file type supported by `biosig`
Supported file formats include EDF/EDF+, BDF/BDF+, EEG, CNT and GDF.
Full list is here: http://pub.ist.ac.at/~schloegl/biosig/TESTED
For EDF, EDF+, BDF and BDF+ files, we will use python-edf
if it is installed, otherwise will fall back to python-biosig.
Args:
filename
Returns:
Timeseries | [
"Load",
"a",
"multi",
"-",
"channel",
"Timeseries",
"from",
"any",
"file",
"type",
"supported",
"by",
"biosig"
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/readfile.py#L61-L96 | train |
mattja/nsim | nsim/readfile.py | _load_edflib | def _load_edflib(filename):
"""load a multi-channel Timeseries from an EDF (European Data Format) file
or EDF+ file, using edflib.
Args:
filename: EDF+ file
Returns:
Timeseries
"""
import edflib
e = edflib.EdfReader(filename, annotations_mode='all')
if np.ptp(e.get_samples_per_signal()) != 0:
raise Error('channels have differing numbers of samples')
if np.ptp(e.get_signal_freqs()) != 0:
raise Error('channels have differing sample rates')
n = e.samples_in_file(0)
m = e.signals_in_file
channelnames = e.get_signal_text_labels()
dt = 1.0/e.samplefrequency(0)
# EDF files hold <=16 bits of information for each sample. Representing as
# double precision (64bit) is unnecessary use of memory. use 32 bit float:
ar = np.zeros((n, m), dtype=np.float32)
# edflib requires input buffer of float64s
buf = np.zeros((n,), dtype=np.float64)
for i in range(m):
e.read_phys_signal(i, 0, n, buf)
ar[:,i] = buf
tspan = np.arange(0, (n - 1 + 0.5) * dt, dt, dtype=np.float32)
return Timeseries(ar, tspan, labels=[None, channelnames]) | python | def _load_edflib(filename):
"""load a multi-channel Timeseries from an EDF (European Data Format) file
or EDF+ file, using edflib.
Args:
filename: EDF+ file
Returns:
Timeseries
"""
import edflib
e = edflib.EdfReader(filename, annotations_mode='all')
if np.ptp(e.get_samples_per_signal()) != 0:
raise Error('channels have differing numbers of samples')
if np.ptp(e.get_signal_freqs()) != 0:
raise Error('channels have differing sample rates')
n = e.samples_in_file(0)
m = e.signals_in_file
channelnames = e.get_signal_text_labels()
dt = 1.0/e.samplefrequency(0)
# EDF files hold <=16 bits of information for each sample. Representing as
# double precision (64bit) is unnecessary use of memory. use 32 bit float:
ar = np.zeros((n, m), dtype=np.float32)
# edflib requires input buffer of float64s
buf = np.zeros((n,), dtype=np.float64)
for i in range(m):
e.read_phys_signal(i, 0, n, buf)
ar[:,i] = buf
tspan = np.arange(0, (n - 1 + 0.5) * dt, dt, dtype=np.float32)
return Timeseries(ar, tspan, labels=[None, channelnames]) | [
"def",
"_load_edflib",
"(",
"filename",
")",
":",
"import",
"edflib",
"e",
"=",
"edflib",
".",
"EdfReader",
"(",
"filename",
",",
"annotations_mode",
"=",
"'all'",
")",
"if",
"np",
".",
"ptp",
"(",
"e",
".",
"get_samples_per_signal",
"(",
")",
")",
"!=",... | load a multi-channel Timeseries from an EDF (European Data Format) file
or EDF+ file, using edflib.
Args:
filename: EDF+ file
Returns:
Timeseries | [
"load",
"a",
"multi",
"-",
"channel",
"Timeseries",
"from",
"an",
"EDF",
"(",
"European",
"Data",
"Format",
")",
"file",
"or",
"EDF",
"+",
"file",
"using",
"edflib",
"."
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/readfile.py#L125-L154 | train |
mattja/nsim | nsim/readfile.py | annotations_from_file | def annotations_from_file(filename):
"""Get a list of event annotations from an EDF (European Data Format file
or EDF+ file, using edflib.
Args:
filename: EDF+ file
Returns:
list: annotation events, each in the form [start_time, duration, text]
"""
import edflib
e = edflib.EdfReader(filename, annotations_mode='all')
return e.read_annotations() | python | def annotations_from_file(filename):
"""Get a list of event annotations from an EDF (European Data Format file
or EDF+ file, using edflib.
Args:
filename: EDF+ file
Returns:
list: annotation events, each in the form [start_time, duration, text]
"""
import edflib
e = edflib.EdfReader(filename, annotations_mode='all')
return e.read_annotations() | [
"def",
"annotations_from_file",
"(",
"filename",
")",
":",
"import",
"edflib",
"e",
"=",
"edflib",
".",
"EdfReader",
"(",
"filename",
",",
"annotations_mode",
"=",
"'all'",
")",
"return",
"e",
".",
"read_annotations",
"(",
")"
] | Get a list of event annotations from an EDF (European Data Format file
or EDF+ file, using edflib.
Args:
filename: EDF+ file
Returns:
list: annotation events, each in the form [start_time, duration, text] | [
"Get",
"a",
"list",
"of",
"event",
"annotations",
"from",
"an",
"EDF",
"(",
"European",
"Data",
"Format",
"file",
"or",
"EDF",
"+",
"file",
"using",
"edflib",
"."
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/readfile.py#L157-L169 | train |
mattja/nsim | nsim/nsim.py | _ufunc_wrap | def _ufunc_wrap(out_arr, ufunc, method, i, inputs, **kwargs):
"""After using the superclass __numpy_ufunc__ to route ufunc computations
on the array data, convert any resulting ndarray, RemoteArray and DistArray
instances into Timeseries, RemoteTimeseries and DistTimeseries instances
if appropriate"""
# Assigns tspan/labels to an axis only if inputs do not disagree on them.
shape = out_arr.shape
ndim = out_arr.ndim
if ndim is 0 or shape[0] is 0:
# not a timeseries
return out_arr
candidates = [a.tspan for a in inputs if (hasattr(a, 'tspan') and
a.shape[0] == shape[0])]
# Expensive to validate all tspans are the same. check start and end t
starts = [tspan[0] for tspan in candidates]
ends = [tspan[-1] for tspan in candidates]
if len(set(starts)) != 1 or len(set(ends)) != 1:
# inputs cannot agree on tspan
return out_arr
else:
new_tspan = candidates[0]
new_labels = [None]
for i in range(1, ndim):
candidates = [a.labels[i] for a in inputs if (hasattr(a, 'labels') and
a.shape[i] == shape[i] and a.labels[i] is not None)]
if len(candidates) is 1:
new_labels.append(candidates[0])
elif (len(candidates) > 1 and all(labs[j] == candidates[0][j] for
labs in candidates[1:] for j in range(shape[i]))):
new_labels.append(candidates[0])
else:
new_labels.append(None)
if isinstance(out_arr, np.ndarray):
return Timeseries(out_arr, new_tspan, new_labels)
elif isinstance(out_arr, distob.RemoteArray):
return _rts_from_ra(out_arr, new_tspan, new_labels)
elif (isinstance(out_arr, distob.DistArray) and
all(isinstance(ra, RemoteTimeseries) for ra in out_arr._subarrays)):
return _dts_from_da(out_arr, new_tspan, new_labels)
else:
return out_arr | python | def _ufunc_wrap(out_arr, ufunc, method, i, inputs, **kwargs):
"""After using the superclass __numpy_ufunc__ to route ufunc computations
on the array data, convert any resulting ndarray, RemoteArray and DistArray
instances into Timeseries, RemoteTimeseries and DistTimeseries instances
if appropriate"""
# Assigns tspan/labels to an axis only if inputs do not disagree on them.
shape = out_arr.shape
ndim = out_arr.ndim
if ndim is 0 or shape[0] is 0:
# not a timeseries
return out_arr
candidates = [a.tspan for a in inputs if (hasattr(a, 'tspan') and
a.shape[0] == shape[0])]
# Expensive to validate all tspans are the same. check start and end t
starts = [tspan[0] for tspan in candidates]
ends = [tspan[-1] for tspan in candidates]
if len(set(starts)) != 1 or len(set(ends)) != 1:
# inputs cannot agree on tspan
return out_arr
else:
new_tspan = candidates[0]
new_labels = [None]
for i in range(1, ndim):
candidates = [a.labels[i] for a in inputs if (hasattr(a, 'labels') and
a.shape[i] == shape[i] and a.labels[i] is not None)]
if len(candidates) is 1:
new_labels.append(candidates[0])
elif (len(candidates) > 1 and all(labs[j] == candidates[0][j] for
labs in candidates[1:] for j in range(shape[i]))):
new_labels.append(candidates[0])
else:
new_labels.append(None)
if isinstance(out_arr, np.ndarray):
return Timeseries(out_arr, new_tspan, new_labels)
elif isinstance(out_arr, distob.RemoteArray):
return _rts_from_ra(out_arr, new_tspan, new_labels)
elif (isinstance(out_arr, distob.DistArray) and
all(isinstance(ra, RemoteTimeseries) for ra in out_arr._subarrays)):
return _dts_from_da(out_arr, new_tspan, new_labels)
else:
return out_arr | [
"def",
"_ufunc_wrap",
"(",
"out_arr",
",",
"ufunc",
",",
"method",
",",
"i",
",",
"inputs",
",",
"*",
"*",
"kwargs",
")",
":",
"# Assigns tspan/labels to an axis only if inputs do not disagree on them.",
"shape",
"=",
"out_arr",
".",
"shape",
"ndim",
"=",
"out_arr... | After using the superclass __numpy_ufunc__ to route ufunc computations
on the array data, convert any resulting ndarray, RemoteArray and DistArray
instances into Timeseries, RemoteTimeseries and DistTimeseries instances
if appropriate | [
"After",
"using",
"the",
"superclass",
"__numpy_ufunc__",
"to",
"route",
"ufunc",
"computations",
"on",
"the",
"array",
"data",
"convert",
"any",
"resulting",
"ndarray",
"RemoteArray",
"and",
"DistArray",
"instances",
"into",
"Timeseries",
"RemoteTimeseries",
"and",
... | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/nsim.py#L660-L700 | train |
mattja/nsim | nsim/nsim.py | _rts_from_ra | def _rts_from_ra(ra, tspan, labels, block=True):
"""construct a RemoteTimeseries from a RemoteArray"""
def _convert(a, tspan, labels):
from nsim import Timeseries
return Timeseries(a, tspan, labels)
return distob.call(
_convert, ra, tspan, labels, prefer_local=False, block=block) | python | def _rts_from_ra(ra, tspan, labels, block=True):
"""construct a RemoteTimeseries from a RemoteArray"""
def _convert(a, tspan, labels):
from nsim import Timeseries
return Timeseries(a, tspan, labels)
return distob.call(
_convert, ra, tspan, labels, prefer_local=False, block=block) | [
"def",
"_rts_from_ra",
"(",
"ra",
",",
"tspan",
",",
"labels",
",",
"block",
"=",
"True",
")",
":",
"def",
"_convert",
"(",
"a",
",",
"tspan",
",",
"labels",
")",
":",
"from",
"nsim",
"import",
"Timeseries",
"return",
"Timeseries",
"(",
"a",
",",
"ts... | construct a RemoteTimeseries from a RemoteArray | [
"construct",
"a",
"RemoteTimeseries",
"from",
"a",
"RemoteArray"
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/nsim.py#L703-L709 | train |
mattja/nsim | nsim/nsim.py | _dts_from_da | def _dts_from_da(da, tspan, labels):
"""construct a DistTimeseries from a DistArray"""
sublabels = labels[:]
new_subarrays = []
for i, ra in enumerate(da._subarrays):
if isinstance(ra, RemoteTimeseries):
new_subarrays.append(ra)
else:
if labels[da._distaxis]:
sublabels[da._distaxis] = labels[da._distaxis][
da._si[i]:da._si[i+1]]
new_subarrays.append(_rts_from_ra(ra, tspan, sublabels, False))
new_subarrays = [distob.convert_result(ar) for ar in new_subarrays]
da._subarrays = new_subarrays
da.__class__ = DistTimeseries
da.tspan = tspan
da.labels = labels
da.t = _Timeslice(da)
return da | python | def _dts_from_da(da, tspan, labels):
"""construct a DistTimeseries from a DistArray"""
sublabels = labels[:]
new_subarrays = []
for i, ra in enumerate(da._subarrays):
if isinstance(ra, RemoteTimeseries):
new_subarrays.append(ra)
else:
if labels[da._distaxis]:
sublabels[da._distaxis] = labels[da._distaxis][
da._si[i]:da._si[i+1]]
new_subarrays.append(_rts_from_ra(ra, tspan, sublabels, False))
new_subarrays = [distob.convert_result(ar) for ar in new_subarrays]
da._subarrays = new_subarrays
da.__class__ = DistTimeseries
da.tspan = tspan
da.labels = labels
da.t = _Timeslice(da)
return da | [
"def",
"_dts_from_da",
"(",
"da",
",",
"tspan",
",",
"labels",
")",
":",
"sublabels",
"=",
"labels",
"[",
":",
"]",
"new_subarrays",
"=",
"[",
"]",
"for",
"i",
",",
"ra",
"in",
"enumerate",
"(",
"da",
".",
"_subarrays",
")",
":",
"if",
"isinstance",
... | construct a DistTimeseries from a DistArray | [
"construct",
"a",
"DistTimeseries",
"from",
"a",
"DistArray"
] | ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0 | https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/nsim.py#L712-L730 | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.